Index: Makefile.vex.am =================================================================== --- Makefile.vex.am (revision 15046) +++ Makefile.vex.am (working copy) @@ -55,9 +55,10 @@ priv/host_arm_defs.h \ priv/host_arm64_defs.h \ priv/host_s390_defs.h \ + priv/host_mips_defs.h \ priv/s390_disasm.h \ priv/s390_defs.h \ - priv/host_mips_defs.h + priv/vexbackend_info.h BUILT_SOURCES = pub/libvex_guest_offsets.h CLEANFILES = pub/libvex_guest_offsets.h @@ -151,7 +152,8 @@ priv/host_s390_isel.c \ priv/s390_disasm.c \ priv/host_mips_defs.c \ - priv/host_mips_isel.c + priv/host_mips_isel.c \ + priv/vexbackend_info.c LIBVEX_CFLAGS = \ -Wbad-function-cast \ Index: coregrind/m_machine.c =================================================================== --- coregrind/m_machine.c (revision 15046) +++ coregrind/m_machine.c (working copy) @@ -719,6 +719,9 @@ HChar vstr[13]; vstr[0] = 0; + vai.LibVEX_Chain = &chainXDirect_X86; + vai.LibVEX_UnChain = &unchainXDirect_X86; + vai.LibVEX_PatchProfInc = &patchProfInc_X86; if (!VG_(has_cpuid)()) /* we can't do cpuid at all. Give up. */ return False; @@ -818,6 +821,10 @@ HChar vstr[13]; vstr[0] = 0; + vai.LibVEX_Chain = chainXDirect_AMD64; + vai.LibVEX_UnChain = &unchainXDirect_AMD64; + vai.LibVEX_PatchProfInc = &patchProfInc_AMD64; + if (!VG_(has_cpuid)()) /* we can't do cpuid at all. Give up. */ return False; @@ -934,6 +941,10 @@ volatile Bool have_isa_2_07; Int r; + vai.LibVEX_Chain = chainXDirect_PPC32; + vai.LibVEX_UnChain = &unchainXDirect_PPC32; + vai.LibVEX_PatchProfInc = &patchProfInc_PPC32; + /* This is a kludge. Really we ought to back-convert saved_act into a toK_t using VG_(convert_sigaction_fromK_to_toK), but since that's a no-op on all ppc32 platforms so far supported, @@ -1089,6 +1100,10 @@ volatile Bool have_isa_2_07; Int r; + vai.LibVEX_Chain = chainXDirect_PPC64; + vai.LibVEX_UnChain = &unchainXDirect_PPC64; + vai.LibVEX_PatchProfInc = &patchProfInc_PPC64; + /* This is a kludge. 
Really we ought to back-convert saved_act into a toK_t using VG_(convert_sigaction_fromK_to_toK), but since that's a no-op on all ppc64 platforms so far supported, @@ -1240,6 +1255,10 @@ identification yet. Keeping fingers crossed. */ model = VG_(get_machine_model)(); + vai.LibVEX_Chain = chainXDirect_S390; + vai.LibVEX_UnChain = &unchainXDirect_S390; + vai.LibVEX_PatchProfInc = &patchProfInc_S390; + /* Unblock SIGILL and stash away the old action for that signal */ VG_(sigemptyset)(&tmp_set); VG_(sigaddset)(&tmp_set, VKI_SIGILL); @@ -1372,6 +1391,10 @@ volatile Int archlevel; Int r; + vai.LibVEX_Chain = chainXDirect_ARM; + vai.LibVEX_UnChain = &unchainXDirect_ARM; + vai.LibVEX_PatchProfInc = &patchProfInc_ARM; + /* This is a kludge. Really we ought to back-convert saved_act into a toK_t using VG_(convert_sigaction_fromK_to_toK), but since that's a no-op on all ppc64 platforms so far supported, @@ -1478,6 +1501,10 @@ va = VexArchARM64; vai.endness = VexEndnessLE; + vai.LibVEX_Chain = chainXDirect_ARM64; + vai.LibVEX_UnChain = &unchainXDirect_ARM64; + vai.LibVEX_PatchProfInc = &patchProfInc_ARM64; + /* So far there are no variants. */ vai.hwcaps = 0; @@ -1508,6 +1535,10 @@ if (model == -1) return False; + vai.LibVEX_Chain = chainXDirect_MIPS32; + vai.LibVEX_UnChain = &unchainXDirect_MIPS32; + vai.LibVEX_PatchProfInc = &patchProfInc_MIPS32; + vai.hwcaps = model; # if defined(VKI_LITTLE_ENDIAN) @@ -1600,6 +1631,10 @@ if (model == -1) return False; + vai.LibVEX_Chain = chainXDirect_MIPS64; + vai.LibVEX_UnChain = &unchainXDirect_MIPS64; + vai.LibVEX_PatchProfInc = &patchProfInc_MIPS64; + vai.hwcaps = model; # if defined(VKI_LITTLE_ENDIAN) Index: coregrind/m_transtab.c =================================================================== --- coregrind/m_transtab.c (revision 15046) +++ coregrind/m_transtab.c (working copy) @@ -817,8 +817,8 @@ /* Get VEX to do the patching itself. We have to hand it off since it is host-dependent. 
*/ VexInvalRange vir - = LibVEX_Chain( - arch_host, endness_host, + = archinfo_host.LibVEX_Chain( + endness_host, from__patch_addr, VG_(fnptr_to_fnentry)( to_fastEP ? &VG_(disp_cp_chain_me_to_fastEP) @@ -862,7 +862,7 @@ addresses of the destination block (that is, the block that owns this InEdge). */ __attribute__((noinline)) -static void unchain_one ( VexArch arch_host, VexEndness endness_host, +static void unchain_one ( VexArchInfo *archinfo_host, VexEndness endness_host, InEdge* ie, void* to_fastEPaddr, void* to_slowEPaddr ) { @@ -887,8 +887,9 @@ // place_to_jump_to_EXPECTED really is the current dst, and // asserts if it isn't. VexInvalRange vir - = LibVEX_UnChain( arch_host, endness_host, place_to_patch, - place_to_jump_to_EXPECTED, disp_cp_chain_me ); + = archinfo_host->LibVEX_UnChain( + endness_host, place_to_patch, + place_to_jump_to_EXPECTED, disp_cp_chain_me ); VG_(invalidate_icache)( (void*)vir.start, vir.len ); } @@ -898,6 +899,7 @@ any chained jumps to this block. */ static void unchain_in_preparation_for_deletion ( VexArch arch_host, + VexArchInfo *archinfo_host, VexEndness endness_host, SECno here_sNo, TTEno here_tteNo ) { @@ -918,7 +920,7 @@ // Undo the chaining. UChar* here_slow_EP = (UChar*)here_tte->tcptr; UChar* here_fast_EP = here_slow_EP + evCheckSzB; - unchain_one(arch_host, endness_host, ie, here_fast_EP, here_slow_EP); + unchain_one(archinfo_host, endness_host, ie, here_fast_EP, here_slow_EP); // Find the corresponding entry in the "from" node's out_edges, // and remove it. 
TTEntry* from_tte = index_tte(ie->from_sNo, ie->from_tteNo); @@ -1510,7 +1512,7 @@ sec->tt[ei].entry, sec->tt[ei].vge ); } - unchain_in_preparation_for_deletion(arch_host, + unchain_in_preparation_for_deletion(arch_host, &archinfo_host, endness_host, sno, ei); } else { vg_assert(sec->tt[ei].n_tte2ec == 0); @@ -1704,9 +1706,10 @@ VG_(machine_get_VexArchInfo)( &arch_host, &archinfo_host ); VexEndness endness_host = archinfo_host.endness; VexInvalRange vir - = LibVEX_PatchProfInc( arch_host, endness_host, - dstP + offs_profInc, - &sectors[y].tt[tteix].usage.prof.count ); + = archinfo_host.LibVEX_PatchProfInc( + endness_host, + dstP + offs_profInc, + &sectors[y].tt[tteix].usage.prof.count ); VG_(invalidate_icache)( (void*)vir.start, vir.len ); } @@ -1854,7 +1857,8 @@ /* Delete a tt entry, and update all the eclass data accordingly. */ static void delete_tte ( /*MOD*/Sector* sec, SECno secNo, TTEno tteno, - VexArch arch_host, VexEndness endness_host ) + VexArch arch_host, VexArchInfo *archinfo_host, + VexEndness endness_host ) { Int i, ec_num, ec_idx; TTEntry* tte; @@ -1868,7 +1872,8 @@ vg_assert(tte->n_tte2ec >= 1 && tte->n_tte2ec <= 3); /* Unchain .. */ - unchain_in_preparation_for_deletion(arch_host, endness_host, secNo, tteno); + unchain_in_preparation_for_deletion(arch_host, archinfo_host, + endness_host, secNo, tteno); /* Deal with the ec-to-tte links first. 
*/ for (i = 0; i < tte->n_tte2ec; i++) { @@ -1925,6 +1930,7 @@ Addr guest_start, ULong range, Int ec, VexArch arch_host, + VexArchInfo *archinfo_host, VexEndness endness_host ) { Int i; @@ -1949,7 +1955,7 @@ if (overlaps( guest_start, range, &tte->vge )) { anyDeld = True; - delete_tte( sec, secNo, tteno, arch_host, endness_host ); + delete_tte( sec, secNo, tteno, arch_host, archinfo_host, endness_host ); } } @@ -1965,6 +1971,7 @@ Bool delete_translations_in_sector ( /*MOD*/Sector* sec, SECno secNo, Addr guest_start, ULong range, VexArch arch_host, + VexArchInfo *archinfo_host, VexEndness endness_host ) { TTEno i; @@ -1974,7 +1981,7 @@ if (sec->tt[i].status == InUse && overlaps( guest_start, range, &sec->tt[i].vge )) { anyDeld = True; - delete_tte( sec, secNo, i, arch_host, endness_host ); + delete_tte( sec, secNo, i, arch_host, archinfo_host, endness_host ); } } @@ -2051,11 +2058,11 @@ continue; anyDeleted |= delete_translations_in_sector_eclass( sec, sno, guest_start, range, ec, - arch_host, endness_host + arch_host, &archinfo_host, endness_host ); anyDeleted |= delete_translations_in_sector_eclass( sec, sno, guest_start, range, ECLASS_MISC, - arch_host, endness_host + arch_host, &archinfo_host, endness_host ); } @@ -2072,7 +2079,7 @@ continue; anyDeleted |= delete_translations_in_sector( sec, sno, guest_start, range, - arch_host, endness_host + arch_host, &archinfo_host, endness_host ); } Index: coregrind/link_tool_exe_linux.in =================================================================== --- coregrind/link_tool_exe_linux.in (revision 15046) +++ coregrind/link_tool_exe_linux.in (working copy) @@ -85,6 +85,7 @@ } else { $extra_args = "-static -Wl,@FLAG_T_TEXT@=$ala"; } +# add -Wl,-M in extra_args to see which symbol drags a .o. 
my $cmd = join(" ", @ARGV, $extra_args); Index: VEX/priv/host_arm_defs.c =================================================================== --- VEX/priv/host_arm_defs.c (revision 3108) +++ VEX/priv/host_arm_defs.c (working copy) @@ -4768,10 +4768,7 @@ /* NB: what goes on here has to be very closely coordinated with the emitInstr case for XDirect, above. */ -VexInvalRange unchainXDirect_ARM ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me ) +DECL_LibVEX_UnChainFn(ARM) { vassert(endness_host == VexEndnessLE); @@ -4832,9 +4829,7 @@ /* Patch the counter address into a profile inc point, as previously created by the ARMin_ProfInc case for emit_ARMInstr. */ -VexInvalRange patchProfInc_ARM ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter ) +DECL_LibVEX_PatchProfIncFn(ARM) { vassert(endness_host == VexEndnessLE); vassert(sizeof(ULong*) == 4); Index: VEX/priv/host_arm_defs.h =================================================================== --- VEX/priv/host_arm_defs.h (revision 3108) +++ VEX/priv/host_arm_defs.h (working copy) @@ -1058,23 +1058,6 @@ host_EvC_COUNTER. */ extern Int evCheckSzB_ARM (void); -/* Perform a chaining and unchaining of an XDirect jump. */ -extern VexInvalRange chainXDirect_ARM ( VexEndness endness_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to ); - -extern VexInvalRange unchainXDirect_ARM ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me ); - -/* Patch the counter location into an existing ProfInc point. 
*/ -extern VexInvalRange patchProfInc_ARM ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter ); - - #endif /* ndef __VEX_HOST_ARM_DEFS_H */ /*---------------------------------------------------------------*/ Index: VEX/priv/host_s390_defs.c =================================================================== --- VEX/priv/host_s390_defs.c (revision 3108) +++ VEX/priv/host_s390_defs.c (working copy) @@ -9979,30 +9979,24 @@ /* Patch the counter address into CODE_TO_PATCH as previously generated by s390_insn_profinc_emit. */ -VexInvalRange -patchProfInc_S390(VexEndness endness_host, - void *code_to_patch, const ULong *location_of_counter) +DECL_LibVEX_PatchProfIncFn(S390) { vassert(sizeof(ULong *) == 8); - s390_tchain_verify_load64(code_to_patch, S390_REGNO_TCHAIN_SCRATCH, 0); + s390_tchain_verify_load64(place_to_patch, S390_REGNO_TCHAIN_SCRATCH, 0); - UChar *p = s390_tchain_patch_load64(code_to_patch, + UChar *p = s390_tchain_patch_load64(place_to_patch, (Addr)location_of_counter); - UInt len = p - (UChar *)code_to_patch; - VexInvalRange vir = { (HWord)code_to_patch, len }; + UInt len = p - (UChar *)place_to_patch; + VexInvalRange vir = { (HWord)place_to_patch, len }; return vir; } /* NB: what goes on here has to be very closely coordinated with the s390_insn_xdirect_emit code above. */ -VexInvalRange -chainXDirect_S390(VexEndness endness_host, - void *place_to_chain, - const void *disp_cp_chain_me_EXPECTED, - const void *place_to_jump_to) +DECL_LibVEX_ChainFn(S390) { vassert(endness_host == VexEndnessBE); @@ -10087,11 +10081,7 @@ /* NB: what goes on here has to be very closely coordinated with the s390_insn_xdirect_emit code above. 
*/ -VexInvalRange -unchainXDirect_S390(VexEndness endness_host, - void *place_to_unchain, - const void *place_to_jump_to_EXPECTED, - const void *disp_cp_chain_me) +DECL_LibVEX_UnChainFn(S390) { vassert(endness_host == VexEndnessBE); Index: VEX/priv/host_s390_defs.h =================================================================== --- VEX/priv/host_s390_defs.h (revision 3108) +++ VEX/priv/host_s390_defs.h (working copy) @@ -749,22 +749,6 @@ /* Return the number of bytes of code needed for an event check */ Int evCheckSzB_S390(void); -/* Perform a chaining and unchaining of an XDirect jump. */ -VexInvalRange chainXDirect_S390(VexEndness endness_host, - void *place_to_chain, - const void *disp_cp_chain_me_EXPECTED, - const void *place_to_jump_to); - -VexInvalRange unchainXDirect_S390(VexEndness endness_host, - void *place_to_unchain, - const void *place_to_jump_to_EXPECTED, - const void *disp_cp_chain_me); - -/* Patch the counter location into an existing ProfInc point. */ -VexInvalRange patchProfInc_S390(VexEndness endness_host, - void *code_to_patch, - const ULong *location_of_counter); - /* KLUDGE: See detailled comment in host_s390_defs.c. */ extern UInt s390_host_hwcaps; Index: VEX/priv/host_mips_defs.c =================================================================== --- VEX/priv/host_mips_defs.c (revision 3108) +++ VEX/priv/host_mips_defs.c (working copy) @@ -3901,11 +3901,11 @@ /* NB: what goes on here has to be very closely coordinated with the emitInstr case for XDirect, above. 
*/ -VexInvalRange chainXDirect_MIPS ( VexEndness endness_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to, - Bool mode64 ) +static VexInvalRange chainXDirect_MIPS ( VexEndness endness_host, + void* place_to_chain, + const void* disp_cp_chain_me_EXPECTED, + const void* place_to_jump_to, + Bool mode64 ) { vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE); /* What we're expecting to see is: @@ -3947,13 +3947,38 @@ return vir; } +VexInvalRange chainXDirect_MIPS32 ( VexEndness endness_host, + void* place_to_chain, + const void* disp_cp_chain_me_EXPECTED, + const void* place_to_jump_to ) +{ + return chainXDirect_MIPS (endness_host, + place_to_chain, + disp_cp_chain_me_EXPECTED, + place_to_jump_to, + False // mode64 + ); +} +VexInvalRange chainXDirect_MIPS64 ( VexEndness endness_host, + void* place_to_chain, + const void* disp_cp_chain_me_EXPECTED, + const void* place_to_jump_to) +{ + return chainXDirect_MIPS (endness_host, + place_to_chain, + disp_cp_chain_me_EXPECTED, + place_to_jump_to, + True // mode64 + ); +} + /* NB: what goes on here has to be very closely coordinated with the emitInstr case for XDirect, above. 
*/ -VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me, - Bool mode64 ) +static VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host, + void* place_to_unchain, + const void* place_to_jump_to_EXPECTED, + const void* disp_cp_chain_me, + Bool mode64 ) { vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE); /* What we're expecting to see is: @@ -3992,13 +4017,32 @@ VexInvalRange vir = {(HWord)place_to_unchain, len}; return vir; } +DECL_LibVEX_UnChainFn(MIPS32) +{ + return unchainXDirect_MIPS(endness_host, + place_to_unchain, + place_to_jump_to_EXPECTED, + disp_cp_chain_me, + False /* mode64 */ + ); +} +DECL_LibVEX_UnChainFn(MIPS64) +{ + return unchainXDirect_MIPS(endness_host, + place_to_unchain, + place_to_jump_to_EXPECTED, + disp_cp_chain_me, + True /* mode64 */ + ); +} + /* Patch the counter address into a profile inc point, as previously created by the Min_ProfInc case for emit_MIPSInstr. 
*/ -VexInvalRange patchProfInc_MIPS ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter, - Bool mode64 ) +static VexInvalRange patchProfInc_MIPS ( VexEndness endness_host, + void* place_to_patch, + const ULong* location_of_counter, + Bool mode64 ) { vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE); if (mode64) { @@ -4032,6 +4076,22 @@ VexInvalRange vir = {(HWord)p, 8}; return vir; } +DECL_LibVEX_PatchProfIncFn(MIPS32) +{ + return patchProfInc_MIPS (endness_host, + place_to_patch, + location_of_counter, + False /* mode64 */ + ); +} +DECL_LibVEX_PatchProfIncFn(MIPS64) +{ + return patchProfInc_MIPS (endness_host, + place_to_patch, + location_of_counter, + True /* mode64 */ + ); +} /*---------------------------------------------------------------*/ Index: VEX/priv/host_mips_defs.h =================================================================== --- VEX/priv/host_mips_defs.h (revision 3108) +++ VEX/priv/host_mips_defs.h (working copy) @@ -717,25 +717,6 @@ worst case we will merely assert at startup. */ extern Int evCheckSzB_MIPS (void); -/* Perform a chaining and unchaining of an XDirect jump. */ -extern VexInvalRange chainXDirect_MIPS ( VexEndness endness_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to, - Bool mode64 ); - -extern VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me, - Bool mode64 ); - -/* Patch the counter location into an existing ProfInc point. 
*/ -extern VexInvalRange patchProfInc_MIPS ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter, - Bool mode64 ); - #endif /* ndef __VEX_HOST_MIPS_DEFS_H */ /*---------------------------------------------------------------*/ Index: VEX/priv/host_ppc_defs.c =================================================================== --- VEX/priv/host_ppc_defs.c (revision 3108) +++ VEX/priv/host_ppc_defs.c (working copy) @@ -5831,11 +5831,11 @@ /* NB: what goes on here has to be very closely coordinated with the emitInstr case for XDirect, above. */ -VexInvalRange chainXDirect_PPC ( VexEndness endness_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to, - Bool mode64 ) +static VexInvalRange chainXDirect_PPC ( VexEndness endness_host, + void* place_to_chain, + const void* disp_cp_chain_me_EXPECTED, + const void* place_to_jump_to, + Bool mode64 ) { if (mode64) { vassert((endness_host == VexEndnessBE) || @@ -5882,14 +5882,39 @@ return vir; } +VexInvalRange chainXDirect_PPC32 ( VexEndness endness_host, + void* place_to_chain, + const void* disp_cp_chain_me_EXPECTED, + const void* place_to_jump_to ) +{ + return chainXDirect_PPC (endness_host, + place_to_chain, + disp_cp_chain_me_EXPECTED, + place_to_jump_to, + False // mode64 + ); +} +VexInvalRange chainXDirect_PPC64 ( VexEndness endness_host, + void* place_to_chain, + const void* disp_cp_chain_me_EXPECTED, + const void* place_to_jump_to) +{ + return chainXDirect_PPC (endness_host, + place_to_chain, + disp_cp_chain_me_EXPECTED, + place_to_jump_to, + True // mode64 + ); +} + /* NB: what goes on here has to be very closely coordinated with the emitInstr case for XDirect, above. 
*/ -VexInvalRange unchainXDirect_PPC ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me, - Bool mode64 ) +static VexInvalRange unchainXDirect_PPC ( VexEndness endness_host, + void* place_to_unchain, + const void* place_to_jump_to_EXPECTED, + const void* disp_cp_chain_me, + Bool mode64 ) { if (mode64) { vassert((endness_host == VexEndnessBE) || @@ -5935,14 +5960,32 @@ VexInvalRange vir = {(HWord)place_to_unchain, len}; return vir; } +DECL_LibVEX_UnChainFn(PPC32) +{ + return unchainXDirect_PPC(endness_host, + place_to_unchain, + place_to_jump_to_EXPECTED, + disp_cp_chain_me, + False /* mode64 */ + ); +} +DECL_LibVEX_UnChainFn(PPC64) +{ + return unchainXDirect_PPC(endness_host, + place_to_unchain, + place_to_jump_to_EXPECTED, + disp_cp_chain_me, + True /* mode64 */ + ); +} /* Patch the counter address into a profile inc point, as previously created by the Pin_ProfInc case for emit_PPCInstr. */ -VexInvalRange patchProfInc_PPC ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter, - Bool mode64 ) +static VexInvalRange patchProfInc_PPC ( VexEndness endness_host, + void* place_to_patch, + const ULong* location_of_counter, + Bool mode64 ) { if (mode64) { vassert((endness_host == VexEndnessBE) || @@ -5986,6 +6029,22 @@ VexInvalRange vir = {(HWord)place_to_patch, len}; return vir; } +DECL_LibVEX_PatchProfIncFn(PPC32) +{ + return patchProfInc_PPC (endness_host, + place_to_patch, + location_of_counter, + False /* mode64 */ + ); +} +DECL_LibVEX_PatchProfIncFn(PPC64) +{ + return patchProfInc_PPC (endness_host, + place_to_patch, + location_of_counter, + True /* mode64 */ + ); +} /*---------------------------------------------------------------*/ Index: VEX/priv/host_ppc_defs.h =================================================================== --- VEX/priv/host_ppc_defs.h (revision 3108) +++ VEX/priv/host_ppc_defs.h (working copy) @@ -1142,26 +1142,6 @@ host_EvC_COUNTER. 
*/ extern Int evCheckSzB_PPC (void); -/* Perform a chaining and unchaining of an XDirect jump. */ -extern VexInvalRange chainXDirect_PPC ( VexEndness endness_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to, - Bool mode64 ); - -extern VexInvalRange unchainXDirect_PPC ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me, - Bool mode64 ); - -/* Patch the counter location into an existing ProfInc point. */ -extern VexInvalRange patchProfInc_PPC ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter, - Bool mode64 ); - - #endif /* ndef __VEX_HOST_PPC_DEFS_H */ /*---------------------------------------------------------------*/ Index: VEX/priv/host_arm64_defs.c =================================================================== --- VEX/priv/host_arm64_defs.c (revision 3108) +++ VEX/priv/host_arm64_defs.c (working copy) @@ -5298,10 +5298,7 @@ /* NB: what goes on here has to be very closely coordinated with the emitInstr case for XDirect, above. */ -VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me ) +DECL_LibVEX_UnChainFn(ARM64) { vassert(endness_host == VexEndnessLE); @@ -5341,9 +5338,7 @@ /* Patch the counter address into a profile inc point, as previously created by the ARM64in_ProfInc case for emit_ARM64Instr. */ -VexInvalRange patchProfInc_ARM64 ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter ) +DECL_LibVEX_PatchProfIncFn(ARM64) { vassert(sizeof(ULong*) == 8); vassert(endness_host == VexEndnessLE); Index: VEX/priv/host_arm64_defs.h =================================================================== --- VEX/priv/host_arm64_defs.h (revision 3108) +++ VEX/priv/host_arm64_defs.h (working copy) @@ -966,23 +966,6 @@ host_EvC_COUNTER. 
*/ extern Int evCheckSzB_ARM64 (void); -/* Perform a chaining and unchaining of an XDirect jump. */ -extern VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to ); - -extern VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me ); - -/* Patch the counter location into an existing ProfInc point. */ -extern VexInvalRange patchProfInc_ARM64 ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter ); - - #endif /* ndef __VEX_HOST_ARM64_DEFS_H */ /*---------------------------------------------------------------*/ Index: VEX/priv/host_amd64_defs.c =================================================================== --- VEX/priv/host_amd64_defs.c (revision 3108) +++ VEX/priv/host_amd64_defs.c (working copy) @@ -3830,10 +3830,7 @@ /* NB: what goes on here has to be very closely coordinated with the emitInstr case for XDirect, above. */ -VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me ) +DECL_LibVEX_UnChainFn(AMD64) { vassert(endness_host == VexEndnessLE); @@ -3897,9 +3894,7 @@ /* Patch the counter address into a profile inc point, as previously created by the Ain_ProfInc case for emit_AMD64Instr. */ -VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter ) +DECL_LibVEX_PatchProfIncFn(AMD64) { vassert(endness_host == VexEndnessLE); vassert(sizeof(ULong*) == 8); Index: VEX/priv/host_amd64_defs.h =================================================================== --- VEX/priv/host_amd64_defs.h (revision 3108) +++ VEX/priv/host_amd64_defs.h (working copy) @@ -807,23 +807,6 @@ worst case we will merely assert at startup. 
*/ extern Int evCheckSzB_AMD64 (void); -/* Perform a chaining and unchaining of an XDirect jump. */ -extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to ); - -extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me ); - -/* Patch the counter location into an existing ProfInc point. */ -extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter ); - - #endif /* ndef __VEX_HOST_AMD64_DEFS_H */ /*---------------------------------------------------------------*/ Index: VEX/priv/main_main.c =================================================================== --- VEX/priv/main_main.c (revision 3108) +++ VEX/priv/main_main.c (working copy) @@ -70,6 +70,7 @@ #include "host_generic_simd128.h" +#include "vexbackend_info.h" /* This file contains the top level interface to the library. */ @@ -215,32 +216,10 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta ) { - /* This the bundle of functions we need to do the back-end stuff - (insn selection, reg-alloc, assembly) whilst being insulated - from the target instruction set. 
*/ - Bool (*isMove) ( const HInstr*, HReg*, HReg* ); - void (*getRegUsage) ( HRegUsage*, const HInstr*, Bool ); - void (*mapRegs) ( HRegRemap*, HInstr*, Bool ); - void (*genSpill) ( HInstr**, HInstr**, HReg, Int, Bool ); - void (*genReload) ( HInstr**, HInstr**, HReg, Int, Bool ); - HInstr* (*directReload) ( HInstr*, HReg, Short ); - void (*ppInstr) ( const HInstr*, Bool ); - void (*ppReg) ( HReg ); - HInstrArray* (*iselSB) ( const IRSB*, VexArch, const VexArchInfo*, - const VexAbiInfo*, Int, Int, Bool, Bool, - Addr ); - Int (*emit) ( /*MB_MOD*/Bool*, - UChar*, Int, const HInstr*, Bool, VexEndness, - const void*, const void*, const void*, - const void* ); - IRExpr* (*specHelper) ( const HChar*, IRExpr**, IRStmt**, Int ); - Bool (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates ); + VexBackendInfo vbi; const RRegUniverse* rRegUniv = NULL; - DisOneInstrFn disInstrFn; - - VexGuestLayout* guest_layout; IRSB* irsb; HInstrArray* vcode; HInstrArray* rcode; @@ -253,20 +232,8 @@ Bool mode64, chainingAllowed; Addr max_ga; - guest_layout = NULL; - isMove = NULL; - getRegUsage = NULL; - mapRegs = NULL; - genSpill = NULL; - genReload = NULL; - directReload = NULL; - ppInstr = NULL; - ppReg = NULL; - iselSB = NULL; - emit = NULL; - specHelper = NULL; - preciseMemExnsFn = NULL; - disInstrFn = NULL; + LibVEX_default_backend_info (&vbi); + guest_word_type = Ity_INVALID; host_word_type = Ity_INVALID; offB_CMSTART = 0; @@ -299,25 +266,26 @@ /* First off, check that the guest and host insn sets are supported. 
*/ - switch (vta->arch_host) { +//TBD switch (vta->arch_host) { - case VexArchX86: +//TBD case VexArchX86: mode64 = False; - rRegUniv = getRRegUniverse_X86(); - isMove = (__typeof__(isMove)) isMove_X86Instr; - getRegUsage = (__typeof__(getRegUsage)) getRegUsage_X86Instr; - mapRegs = (__typeof__(mapRegs)) mapRegs_X86Instr; - genSpill = (__typeof__(genSpill)) genSpill_X86; - genReload = (__typeof__(genReload)) genReload_X86; - directReload = (__typeof__(directReload)) directReload_X86; - ppInstr = (__typeof__(ppInstr)) ppX86Instr; - ppReg = (__typeof__(ppReg)) ppHRegX86; - iselSB = iselSB_X86; - emit = (__typeof__(emit)) emit_X86Instr; + vbi.getRRegUniverse = (__typeof__(vbi.getRRegUniverse)) getRRegUniverse_X86; + vbi.isMove = (__typeof__(vbi.isMove)) isMove_X86Instr; + vbi.getRegUsage = (__typeof__(vbi.getRegUsage)) getRegUsage_X86Instr; + vbi.mapRegs = (__typeof__(vbi.mapRegs)) mapRegs_X86Instr; + vbi.genSpill = (__typeof__(vbi.genSpill)) genSpill_X86; + vbi.genReload = (__typeof__(vbi.genReload)) genReload_X86; + vbi.directReload = (__typeof__(vbi.directReload)) directReload_X86; + vbi.ppInstr = (__typeof__(vbi.ppInstr)) ppX86Instr; + vbi.ppReg = (__typeof__(vbi.ppReg)) ppHRegX86; + vbi.iselSB = iselSB_X86; + vbi.emit = (__typeof__(vbi.emit)) emit_X86Instr; host_word_type = Ity_I32; vassert(vta->archinfo_host.endness == VexEndnessLE); - break; +//TBD break; +#if 0 case VexArchAMD64: mode64 = True; rRegUniv = getRRegUniverse_AMD64(); @@ -454,21 +422,24 @@ default: vpanic("LibVEX_Translate: unsupported host insn set"); - } +#endif +//TBD } + rRegUniv = vbi.getRRegUniverse(); + // Are the host's hardware capabilities feasible. The function will // not return if hwcaps are infeasible in some sense. 
check_hwcaps(vta->arch_host, vta->archinfo_host.hwcaps); - switch (vta->arch_guest) { +//TBD switch (vta->arch_guest) { - case VexArchX86: - preciseMemExnsFn = guest_x86_state_requires_precise_mem_exns; - disInstrFn = disInstr_X86; - specHelper = guest_x86_spechelper; +// case VexArchX86: + vbi.preciseMemExnsFn = guest_x86_state_requires_precise_mem_exns; + vbi.disInstrFn = disInstr_X86; + vbi.specHelper = guest_x86_spechelper; guest_sizeB = sizeof(VexGuestX86State); guest_word_type = Ity_I32; - guest_layout = &x86guest_layout; + vbi.guest_layout = &x86guest_layout; offB_CMSTART = offsetof(VexGuestX86State,guest_CMSTART); offB_CMLEN = offsetof(VexGuestX86State,guest_CMLEN); offB_GUEST_IP = offsetof(VexGuestX86State,guest_EIP); @@ -480,8 +451,9 @@ vassert(sizeof( ((VexGuestX86State*)0)->guest_CMSTART) == 4); vassert(sizeof( ((VexGuestX86State*)0)->guest_CMLEN ) == 4); vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4); - break; +//TBD break; +#if 0 case VexArchAMD64: preciseMemExnsFn = guest_amd64_state_requires_precise_mem_exns; disInstrFn = disInstr_AMD64; @@ -648,7 +620,8 @@ default: vpanic("LibVEX_Translate: unsupported guest insn set"); - } +#endif +//TBD } // Are the guest's hardware capabilities feasible. The function will // not return if hwcaps are infeasible in some sense. @@ -688,7 +661,7 @@ &res.n_guest_instrs, &pxControl, vta->callback_opaque, - disInstrFn, + vbi.disInstrFn, vta->guest_bytes, vta->guest_bytes_addr, vta->chase_into_ok, @@ -751,7 +724,7 @@ vexAllocSanityCheck(); /* Clean it up, hopefully a lot. */ - irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn, pxControl, + irsb = do_iropt_BB ( irsb, vbi.specHelper, vbi.preciseMemExnsFn, pxControl, vta->guest_bytes_addr, vta->arch_guest ); sanityCheckIRSB( irsb, "after initial iropt", @@ -770,7 +743,7 @@ /* Get the thing instrumented. 
*/ if (vta->instrument1) irsb = vta->instrument1(vta->callback_opaque, - irsb, guest_layout, + irsb, vbi.guest_layout, vta->guest_extents, &vta->archinfo_host, guest_word_type, host_word_type); @@ -778,7 +751,7 @@ if (vta->instrument2) irsb = vta->instrument2(vta->callback_opaque, - irsb, guest_layout, + irsb, vbi.guest_layout, vta->guest_extents, &vta->archinfo_host, guest_word_type, host_word_type); @@ -816,7 +789,7 @@ /* Turn it into virtual-registerised code. Build trees -- this also throws away any dead bindings. */ - max_ga = ado_treebuild_BB( irsb, preciseMemExnsFn, pxControl ); + max_ga = ado_treebuild_BB( irsb, vbi.preciseMemExnsFn, pxControl ); if (vta->finaltidy) { irsb = vta->finaltidy(irsb); @@ -849,14 +822,14 @@ irsb->offsIP properly. */ vassert(irsb->offsIP >= 16); - vcode = iselSB ( irsb, vta->arch_host, - &vta->archinfo_host, - &vta->abiinfo_both, - offB_HOST_EvC_COUNTER, - offB_HOST_EvC_FAILADDR, - chainingAllowed, - vta->addProfInc, - max_ga ); + vcode = vbi.iselSB ( irsb, vta->arch_host, + &vta->archinfo_host, + &vta->abiinfo_both, + offB_HOST_EvC_COUNTER, + offB_HOST_EvC_FAILADDR, + chainingAllowed, + vta->addProfInc, + max_ga ); vexAllocSanityCheck(); @@ -866,7 +839,7 @@ if (vex_traceflags & VEX_TRACE_VCODE) { for (i = 0; i < vcode->arr_used; i++) { vex_printf("%3d ", i); - ppInstr(vcode->arr[i], mode64); + vbi.ppInstr(vcode->arr[i], mode64); vex_printf("\n"); } vex_printf("\n"); @@ -874,10 +847,10 @@ /* Register allocate. 
*/ rcode = doRegisterAllocation ( vcode, rRegUniv, - isMove, getRegUsage, mapRegs, - genSpill, genReload, directReload, + vbi.isMove, vbi.getRegUsage, vbi.mapRegs, + vbi.genSpill, vbi.genReload, vbi.directReload, guest_sizeB, - ppInstr, ppReg, mode64 ); + vbi.ppInstr, vbi.ppReg, mode64 ); vexAllocSanityCheck(); @@ -887,7 +860,7 @@ "------------------------\n\n"); for (i = 0; i < rcode->arr_used; i++) { vex_printf("%3d ", i); - ppInstr(rcode->arr[i], mode64); + vbi.ppInstr(rcode->arr[i], mode64); vex_printf("\n"); } vex_printf("\n"); @@ -912,16 +885,16 @@ HInstr* hi = rcode->arr[i]; Bool hi_isProfInc = False; if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) { - ppInstr(hi, mode64); + vbi.ppInstr(hi, mode64); vex_printf("\n"); } - j = emit( &hi_isProfInc, - insn_bytes, sizeof insn_bytes, hi, - mode64, vta->archinfo_host.endness, - vta->disp_cp_chain_me_to_slowEP, - vta->disp_cp_chain_me_to_fastEP, - vta->disp_cp_xindir, - vta->disp_cp_xassisted ); + j = vbi.emit( &hi_isProfInc, + insn_bytes, sizeof insn_bytes, hi, + mode64, vta->archinfo_host.endness, + vta->disp_cp_chain_me_to_slowEP, + vta->disp_cp_chain_me_to_fastEP, + vta->disp_cp_xindir, + vta->disp_cp_xassisted ); if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) { for (k = 0; k < j; k++) vex_printf("%02x ", (UInt)insn_bytes[k]); @@ -968,189 +941,28 @@ } -/* --------- Chain/Unchain XDirects. 
--------- */ +static Int evCheckSzB[10] = + { + 1234567890, // VexArch_INVALID + 8, // VexArchX86 + 8, // VexArchAMD64 + 24, // VexArchARM + 24, // VexArchARM64 + 28, // VexArchPPC32 + 28, // VexArchPPC64 + 1234567891, // VexArchS390X // TBD this is dynamic s390_host_has_gie + 28, // VexArchMIPS32 + 28 // VexArchMIPS64 + }; -VexInvalRange LibVEX_Chain ( VexArch arch_host, - VexEndness endness_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to ) -{ - switch (arch_host) { - case VexArchX86: - return chainXDirect_X86(endness_host, - place_to_chain, - disp_cp_chain_me_EXPECTED, - place_to_jump_to); - case VexArchAMD64: - return chainXDirect_AMD64(endness_host, - place_to_chain, - disp_cp_chain_me_EXPECTED, - place_to_jump_to); - case VexArchARM: - return chainXDirect_ARM(endness_host, - place_to_chain, - disp_cp_chain_me_EXPECTED, - place_to_jump_to); - case VexArchARM64: - return chainXDirect_ARM64(endness_host, - place_to_chain, - disp_cp_chain_me_EXPECTED, - place_to_jump_to); - case VexArchS390X: - return chainXDirect_S390(endness_host, - place_to_chain, - disp_cp_chain_me_EXPECTED, - place_to_jump_to); - case VexArchPPC32: - return chainXDirect_PPC(endness_host, - place_to_chain, - disp_cp_chain_me_EXPECTED, - place_to_jump_to, False/*!mode64*/); - case VexArchPPC64: - return chainXDirect_PPC(endness_host, - place_to_chain, - disp_cp_chain_me_EXPECTED, - place_to_jump_to, True/*mode64*/); - case VexArchMIPS32: - return chainXDirect_MIPS(endness_host, - place_to_chain, - disp_cp_chain_me_EXPECTED, - place_to_jump_to, False/*!mode64*/); - case VexArchMIPS64: - return chainXDirect_MIPS(endness_host, - place_to_chain, - disp_cp_chain_me_EXPECTED, - place_to_jump_to, True/*!mode64*/); - default: - vassert(0); - } -} - -VexInvalRange LibVEX_UnChain ( VexArch arch_host, - VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me ) -{ - switch (arch_host) { 
- case VexArchX86: - return unchainXDirect_X86(endness_host, - place_to_unchain, - place_to_jump_to_EXPECTED, - disp_cp_chain_me); - case VexArchAMD64: - return unchainXDirect_AMD64(endness_host, - place_to_unchain, - place_to_jump_to_EXPECTED, - disp_cp_chain_me); - case VexArchARM: - return unchainXDirect_ARM(endness_host, - place_to_unchain, - place_to_jump_to_EXPECTED, - disp_cp_chain_me); - case VexArchARM64: - return unchainXDirect_ARM64(endness_host, - place_to_unchain, - place_to_jump_to_EXPECTED, - disp_cp_chain_me); - case VexArchS390X: - return unchainXDirect_S390(endness_host, - place_to_unchain, - place_to_jump_to_EXPECTED, - disp_cp_chain_me); - case VexArchPPC32: - return unchainXDirect_PPC(endness_host, - place_to_unchain, - place_to_jump_to_EXPECTED, - disp_cp_chain_me, False/*!mode64*/); - case VexArchPPC64: - return unchainXDirect_PPC(endness_host, - place_to_unchain, - place_to_jump_to_EXPECTED, - disp_cp_chain_me, True/*mode64*/); - case VexArchMIPS32: - return unchainXDirect_MIPS(endness_host, - place_to_unchain, - place_to_jump_to_EXPECTED, - disp_cp_chain_me, False/*!mode64*/); - case VexArchMIPS64: - return unchainXDirect_MIPS(endness_host, - place_to_unchain, - place_to_jump_to_EXPECTED, - disp_cp_chain_me, True/*!mode64*/); - default: - vassert(0); - } -} - Int LibVEX_evCheckSzB ( VexArch arch_host ) { - static Int cached = 0; /* DO NOT MAKE NON-STATIC */ - if (UNLIKELY(cached == 0)) { - switch (arch_host) { - case VexArchX86: - cached = evCheckSzB_X86(); break; - case VexArchAMD64: - cached = evCheckSzB_AMD64(); break; - case VexArchARM: - cached = evCheckSzB_ARM(); break; - case VexArchARM64: - cached = evCheckSzB_ARM64(); break; - case VexArchS390X: - cached = evCheckSzB_S390(); break; - case VexArchPPC32: - case VexArchPPC64: - cached = evCheckSzB_PPC(); break; - case VexArchMIPS32: - case VexArchMIPS64: - cached = evCheckSzB_MIPS(); break; - default: - vassert(0); - } - } - return cached; + /* The below array must give the values in 
the same order + as the VexArch enum. The value below will be cross-checked + by the various emit_Instr in host__defs.c */ + return evCheckSzB[arch_host - VexArch_INVALID]; } -VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host, - VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter ) -{ - switch (arch_host) { - case VexArchX86: - return patchProfInc_X86(endness_host, place_to_patch, - location_of_counter); - case VexArchAMD64: - return patchProfInc_AMD64(endness_host, place_to_patch, - location_of_counter); - case VexArchARM: - return patchProfInc_ARM(endness_host, place_to_patch, - location_of_counter); - case VexArchARM64: - return patchProfInc_ARM64(endness_host, place_to_patch, - location_of_counter); - case VexArchS390X: - return patchProfInc_S390(endness_host, place_to_patch, - location_of_counter); - case VexArchPPC32: - return patchProfInc_PPC(endness_host, place_to_patch, - location_of_counter, False/*!mode64*/); - case VexArchPPC64: - return patchProfInc_PPC(endness_host, place_to_patch, - location_of_counter, True/*mode64*/); - case VexArchMIPS32: - return patchProfInc_MIPS(endness_host, place_to_patch, - location_of_counter, False/*!mode64*/); - case VexArchMIPS64: - return patchProfInc_MIPS(endness_host, place_to_patch, - location_of_counter, True/*!mode64*/); - default: - vassert(0); - } -} - - /* --------- Emulation warnings. --------- */ const HChar* LibVEX_EmNote_string ( VexEmNote ew ) @@ -1253,6 +1065,7 @@ vex_bzero(vai, sizeof(*vai)); vai->hwcaps = 0; vai->endness = VexEndness_INVALID; + vai->LibVEX_Chain = NULL; vai->ppc_icache_line_szB = 0; vai->ppc_dcbz_szB = 0; vai->ppc_dcbzl_szB = 0; @@ -1560,8 +1373,8 @@ /* This function will not return iff the hwcaps don't pass the test. */ static void check_hwcaps ( VexArch arch, UInt hwcaps ) { - switch (arch) { - case VexArchX86: { +//TBD switch (arch) { +//TBD case VexArchX86: { if (hwcaps == 0) return; // baseline /* Monotonic: SSE3 > SSE2 > SSE1 > MMXEXT > baseline. 
*/ @@ -1580,8 +1393,8 @@ } } invalid_hwcaps(arch, hwcaps, "Cannot handle capabilities\n"); - } - +//TBD } +#if 0 case VexArchAMD64: { /* SSE3 and CX16 are orthogonal and > baseline, although we really don't expect to come across anything which can do SSE3 but can't @@ -1733,7 +1546,8 @@ default: vpanic("unknown architecture"); - } +#endif +//TBD } } Index: VEX/priv/host_x86_defs.c =================================================================== --- VEX/priv/host_x86_defs.c (revision 3108) +++ VEX/priv/host_x86_defs.c (working copy) @@ -3344,10 +3344,7 @@ /* NB: what goes on here has to be very closely coordinated with the emitInstr case for XDirect, above. */ -VexInvalRange chainXDirect_X86 ( VexEndness endness_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to ) +DECL_LibVEX_ChainFn(X86) { vassert(endness_host == VexEndnessLE); @@ -3392,10 +3389,7 @@ /* NB: what goes on here has to be very closely coordinated with the emitInstr case for XDirect, above. */ -VexInvalRange unchainXDirect_X86 ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me ) +DECL_LibVEX_UnChainFn(X86) { vassert(endness_host == VexEndnessLE); @@ -3438,9 +3432,7 @@ /* Patch the counter address into a profile inc point, as previously created by the Xin_ProfInc case for emit_X86Instr. 
*/ -VexInvalRange patchProfInc_X86 ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter ) +DECL_LibVEX_PatchProfIncFn(X86) { vassert(endness_host == VexEndnessLE); vassert(sizeof(ULong*) == 4); Index: VEX/priv/vexbackend_info.c =================================================================== --- VEX/priv/vexbackend_info.c (revision 0) +++ VEX/priv/vexbackend_info.c (revision 0) @@ -0,0 +1,24 @@ +#include "main_util.h" +#include "host_generic_regs.h" +#include "guest_generic_bb_to_IR.h" +#include "libvex.h" +#include "vexbackend_info.h" + +void LibVEX_default_backend_info(/*OUT*/VexBackendInfo* vbi ) +{ + vbi->isMove = NULL; + vbi->getRegUsage = NULL; + vbi->mapRegs = NULL; + vbi->genSpill = NULL; + vbi->genReload = NULL; + vbi->directReload = NULL; + vbi->ppInstr = NULL; + vbi->ppReg = NULL; + vbi->iselSB = NULL; + vbi->emit = NULL; + vbi->specHelper = NULL; + vbi->preciseMemExnsFn = NULL; + vbi->disInstrFn = NULL; + vbi->guest_layout = NULL; +} + Index: VEX/priv/host_x86_defs.h =================================================================== --- VEX/priv/host_x86_defs.h (revision 3108) +++ VEX/priv/host_x86_defs.h (working copy) @@ -752,23 +752,6 @@ worst case we will merely assert at startup. */ extern Int evCheckSzB_X86 (void); -/* Perform a chaining and unchaining of an XDirect jump. */ -extern VexInvalRange chainXDirect_X86 ( VexEndness endness_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to ); - -extern VexInvalRange unchainXDirect_X86 ( VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me ); - -/* Patch the counter location into an existing ProfInc point. 
*/ -extern VexInvalRange patchProfInc_X86 ( VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter ); - - #endif /* ndef __VEX_HOST_X86_DEFS_H */ /*---------------------------------------------------------------*/ Index: VEX/priv/vexbackend_info.h =================================================================== --- VEX/priv/vexbackend_info.h (revision 0) +++ VEX/priv/vexbackend_info.h (revision 0) @@ -0,0 +1,32 @@ + +typedef struct { + /* This the bundle of functions we need to do the back-end stuff + (insn selection, reg-alloc, assembly) whilst being insulated + from the target instruction set. */ + const RRegUniverse* (*getRRegUniverse) ( void ); + Bool (*isMove) ( const HInstr*, HReg*, HReg* ); + void (*getRegUsage) ( HRegUsage*, const HInstr*, Bool ); + void (*mapRegs) ( HRegRemap*, HInstr*, Bool ); + void (*genSpill) ( HInstr**, HInstr**, HReg, Int, Bool ); + void (*genReload) ( HInstr**, HInstr**, HReg, Int, Bool ); + HInstr* (*directReload) ( HInstr*, HReg, Short ); + void (*ppInstr) ( const HInstr*, Bool ); + void (*ppReg) ( HReg ); + HInstrArray* (*iselSB) ( const IRSB*, VexArch, const VexArchInfo*, + const VexAbiInfo*, Int, Int, Bool, Bool, + Addr ); + Int (*emit) ( /*MB_MOD*/Bool*, + UChar*, Int, const HInstr*, Bool, VexEndness, + const void*, const void*, const void*, + const void* ); + IRExpr* (*specHelper) ( const HChar*, IRExpr**, IRStmt**, Int ); + Bool (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates ); + + DisOneInstrFn disInstrFn; + + VexGuestLayout* guest_layout; + +} VexBackendInfo; + + +void LibVEX_default_backend_info(/*OUT*/VexBackendInfo* vbi ); Index: VEX/pub/libvex.h =================================================================== --- VEX/pub/libvex.h (revision 3108) +++ VEX/pub/libvex.h (working copy) @@ -273,7 +273,115 @@ Bool icaches_maintain_coherence; } VexCacheInfo; +/*-------------------------------------------------------*/ +/*--- Patch existing translations ---*/ 
+/*-------------------------------------------------------*/ +/* A host address range that was modified by functions LibVEX_ChainFn/LibVEX_UnChainFn + below. Callers must request I-cache syncing after the call as appropriate. */ +typedef + struct { + HWord start; + HWord len; /* always > 0 */ + } + VexInvalRange; + +/* Chain an XDirect jump located at place_to_chain so it jumps to + place_to_jump_to. It is expected (and checked) that this site + currently contains a call to the dispatcher specified by + disp_cp_chain_me_EXPECTED. */ +typedef VexInvalRange (*LibVEX_ChainFn) ( + VexEndness endness_host, + void* place_to_chain, + const void* disp_cp_chain_me_EXPECTED, + const void* place_to_jump_to ); + +/* Declare an arch specific named version of a LibVEX_ChainFn */ +#define DECL_LibVEX_ChainFn(arch) \ + VexInvalRange chainXDirect_##arch \ + ( VexEndness endness_host, \ + void* place_to_chain, \ + const void* disp_cp_chain_me_EXPECTED, \ + const void* place_to_jump_to ) + +/* Undo an XDirect jump located at place_to_unchain, so it is + converted back into a call to disp_cp_chain_me. It is expected + (and checked) that this site currently contains a jump directly to + the address specified by place_to_jump_to_EXPECTED. */ +typedef VexInvalRange (*LibVEX_UnChainFn) ( + VexEndness endness_host, + void* place_to_unchain, + const void* place_to_jump_to_EXPECTED, + const void* disp_cp_chain_me ); +/* Declare an arch specific named version of a LibVEX_UnChainFn */ +#define DECL_LibVEX_UnChainFn(arch) \ + VexInvalRange unchainXDirect_##arch \ + ( VexEndness endness_host, \ + void* place_to_unchain, \ + const void* place_to_jump_to_EXPECTED, \ + const void* disp_cp_chain_me ) + + +/* Returns a constant -- the size of the event check that is put at + the start of every translation. This makes it possible to + calculate the fast entry point address if the slow entry point + address is known (the usual case), or vice versa. 
 */ +extern +Int LibVEX_evCheckSzB ( VexArch arch_host ); + + +/* Patch the counter location into an existing ProfInc point. The + specified point is checked to make sure it is plausible. */ +typedef VexInvalRange (*LibVEX_PatchProfIncFn) ( + VexEndness endness_host, + void* place_to_patch, + const ULong* location_of_counter ); + +/* Declare an arch specific named version of a LibVEX_PatchProfIncFn */ +#define DECL_LibVEX_PatchProfIncFn(arch) \ + VexInvalRange patchProfInc_##arch \ + ( VexEndness endness_host, \ + void* place_to_patch, \ + const ULong* location_of_counter ) + + +DECL_LibVEX_ChainFn(X86); +DECL_LibVEX_UnChainFn(X86); +DECL_LibVEX_PatchProfIncFn(X86); + +DECL_LibVEX_ChainFn(AMD64); +DECL_LibVEX_UnChainFn(AMD64); +DECL_LibVEX_PatchProfIncFn(AMD64); + +DECL_LibVEX_ChainFn(PPC32); +DECL_LibVEX_UnChainFn(PPC32); +DECL_LibVEX_PatchProfIncFn(PPC32); + +DECL_LibVEX_ChainFn(PPC64); +DECL_LibVEX_UnChainFn(PPC64); +DECL_LibVEX_PatchProfIncFn(PPC64); + +DECL_LibVEX_ChainFn(S390); +DECL_LibVEX_UnChainFn(S390); +DECL_LibVEX_PatchProfIncFn(S390); + +DECL_LibVEX_ChainFn(ARM); +DECL_LibVEX_UnChainFn(ARM); +DECL_LibVEX_PatchProfIncFn(ARM); + +DECL_LibVEX_ChainFn(ARM64); +DECL_LibVEX_UnChainFn(ARM64); +DECL_LibVEX_PatchProfIncFn(ARM64); + +DECL_LibVEX_ChainFn(MIPS32); +DECL_LibVEX_UnChainFn(MIPS32); +DECL_LibVEX_PatchProfIncFn(MIPS32); + +DECL_LibVEX_ChainFn(MIPS64); +DECL_LibVEX_UnChainFn(MIPS64); +DECL_LibVEX_PatchProfIncFn(MIPS64); + + /* This struct is a bit of a hack, but is needed to carry misc important bits of info about an arch. Fields which are meaningless or ignored for the platform in question should be set to zero. @@ -282,10 +390,15 @@ typedef struct { - /* The following three fields are mandatory. */ + /* The following six fields are mandatory. 
*/ UInt hwcaps; VexEndness endness; VexCacheInfo hwcache_info; + LibVEX_ChainFn LibVEX_Chain; + LibVEX_UnChainFn LibVEX_UnChain; + LibVEX_PatchProfIncFn LibVEX_PatchProfInc; + + /* PPC32/PPC64 only: size of instruction cache line */ Int ppc_icache_line_szB; /* PPC32/PPC64 only: sizes zeroed by the dcbz/dcbzl instructions @@ -761,58 +874,6 @@ /*-------------------------------------------------------*/ -/*--- Patch existing translations ---*/ -/*-------------------------------------------------------*/ - -/* A host address range that was modified by the functions below. - Callers must request I-cache syncing after the call as appropriate. */ -typedef - struct { - HWord start; - HWord len; /* always > 0 */ - } - VexInvalRange; - -/* Chain an XDirect jump located at place_to_chain so it jumps to - place_to_jump_to. It is expected (and checked) that this site - currently contains a call to the dispatcher specified by - disp_cp_chain_me_EXPECTED. */ -extern -VexInvalRange LibVEX_Chain ( VexArch arch_host, - VexEndness endhess_host, - void* place_to_chain, - const void* disp_cp_chain_me_EXPECTED, - const void* place_to_jump_to ); - -/* Undo an XDirect jump located at place_to_unchain, so it is - converted back into a call to disp_cp_chain_me. It is expected - (and checked) that this site currently contains a jump directly to - the address specified by place_to_jump_to_EXPECTED. */ -extern -VexInvalRange LibVEX_UnChain ( VexArch arch_host, - VexEndness endness_host, - void* place_to_unchain, - const void* place_to_jump_to_EXPECTED, - const void* disp_cp_chain_me ); - -/* Returns a constant -- the size of the event check that is put at - the start of every translation. This makes it possible to - calculate the fast entry point address if the slow entry point - address is known (the usual case), or vice versa. */ -extern -Int LibVEX_evCheckSzB ( VexArch arch_host ); - - -/* Patch the counter location into an existing ProfInc point. 
The - specified point is checked to make sure it is plausible. */ -extern -VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host, - VexEndness endness_host, - void* place_to_patch, - const ULong* location_of_counter ); - - -/*-------------------------------------------------------*/ /*--- Show accumulated statistics ---*/ /*-------------------------------------------------------*/