|
From: <sv...@va...> - 2014-07-24 12:42:16
|
Author: sewardj
Date: Thu Jul 24 12:42:03 2014
New Revision: 2910
Log:
Improve infrastructure for dealing with endianness in VEX. This patch
removes all decisions about endianness from VEX. Instead, it requires
that the LibVEX_* calls pass in information about the guest or host
endianness (depending on context) and in turn it passes that info
through to all the places that need it:
* the front ends (xx_toIR.c)
* the back ends (xx_isel.c)
* the patcher functions (Chain, UnChain, PatchProfInc)
Mostly it is boring and ugly plumbing. As far as types go, there is a
new type "VexEndness" that carries the endianness. This also makes it
possible to stop using Bools to indicate endianness. VexArchInfo has
a new field of type VexEndness. Apart from that, no other changes in
types.
Followups: MIPS front and back ends have not yet been fixed up to use
the passed-in endianness information. Currently they assume that the
endianness of both host and guest is the same as the endianness of the
target for which VEX is being compiled.
Modified:
trunk/priv/guest_amd64_defs.h
trunk/priv/guest_amd64_toIR.c
trunk/priv/guest_arm64_defs.h
trunk/priv/guest_arm64_toIR.c
trunk/priv/guest_arm_defs.h
trunk/priv/guest_arm_toIR.c
trunk/priv/guest_generic_bb_to_IR.c
trunk/priv/guest_generic_bb_to_IR.h
trunk/priv/guest_mips_defs.h
trunk/priv/guest_mips_toIR.c
trunk/priv/guest_ppc_defs.h
trunk/priv/guest_ppc_toIR.c
trunk/priv/guest_s390_defs.h
trunk/priv/guest_s390_toIR.c
trunk/priv/guest_x86_defs.h
trunk/priv/guest_x86_toIR.c
trunk/priv/host_amd64_defs.c
trunk/priv/host_amd64_defs.h
trunk/priv/host_amd64_isel.c
trunk/priv/host_arm64_defs.c
trunk/priv/host_arm64_defs.h
trunk/priv/host_arm64_isel.c
trunk/priv/host_arm_defs.c
trunk/priv/host_arm_defs.h
trunk/priv/host_arm_isel.c
trunk/priv/host_mips_defs.c
trunk/priv/host_mips_defs.h
trunk/priv/host_mips_isel.c
trunk/priv/host_ppc_defs.c
trunk/priv/host_ppc_defs.h
trunk/priv/host_ppc_isel.c
trunk/priv/host_s390_defs.c
trunk/priv/host_s390_defs.h
trunk/priv/host_s390_isel.c
trunk/priv/host_x86_defs.c
trunk/priv/host_x86_defs.h
trunk/priv/host_x86_isel.c
trunk/priv/main_main.c
trunk/pub/libvex.h
Modified: trunk/priv/guest_amd64_defs.h
==============================================================================
--- trunk/priv/guest_amd64_defs.h (original)
+++ trunk/priv/guest_amd64_defs.h Thu Jul 24 12:42:03 2014
@@ -60,7 +60,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
Modified: trunk/priv/guest_amd64_toIR.c
==============================================================================
--- trunk/priv/guest_amd64_toIR.c (original)
+++ trunk/priv/guest_amd64_toIR.c Thu Jul 24 12:42:03 2014
@@ -185,7 +185,7 @@
that we don't have to pass them around endlessly. */
/* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
/* Pointer to the guest code area (points to start of BB, not to the
insn being processed). */
@@ -975,7 +975,7 @@
static IRExpr* getIRegCL ( void )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
return IRExpr_Get( OFFB_RCX, Ity_I8 );
}
@@ -984,7 +984,7 @@
static void putIRegAH ( IRExpr* e )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
stmt( IRStmt_Put( OFFB_RAX+1, e ) );
}
@@ -1006,7 +1006,7 @@
static IRExpr* getIRegRAX ( Int sz )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
switch (sz) {
case 1: return IRExpr_Get( OFFB_RAX, Ity_I8 );
case 2: return IRExpr_Get( OFFB_RAX, Ity_I16 );
@@ -1019,7 +1019,7 @@
static void putIRegRAX ( Int sz, IRExpr* e )
{
IRType ty = typeOfIRExpr(irsb->tyenv, e);
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
switch (sz) {
case 8: vassert(ty == Ity_I64);
stmt( IRStmt_Put( OFFB_RAX, e ));
@@ -1054,7 +1054,7 @@
static IRExpr* getIRegRDX ( Int sz )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
switch (sz) {
case 1: return IRExpr_Get( OFFB_RDX, Ity_I8 );
case 2: return IRExpr_Get( OFFB_RDX, Ity_I16 );
@@ -1066,7 +1066,7 @@
static void putIRegRDX ( Int sz, IRExpr* e )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(typeOfIRExpr(irsb->tyenv, e) == szToITy(sz));
switch (sz) {
case 8: stmt( IRStmt_Put( OFFB_RDX, e ));
@@ -1108,7 +1108,7 @@
static IRExpr* getIReg32 ( UInt regno )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
return unop(Iop_64to32,
IRExpr_Get( integerGuestReg64Offset(regno),
Ity_I64 ));
@@ -1132,7 +1132,7 @@
static IRExpr* getIReg16 ( UInt regno )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
return IRExpr_Get( integerGuestReg64Offset(regno),
Ity_I16 );
}
@@ -1253,7 +1253,7 @@
static UInt offsetIRegG ( Int sz, Prefix pfx, UChar mod_reg_rm )
{
UInt reg;
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(IS_VALID_PFX(pfx));
vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
reg = gregOfRexRM( pfx, mod_reg_rm );
@@ -1332,7 +1332,7 @@
static UInt offsetIRegE ( Int sz, Prefix pfx, UChar mod_reg_rm )
{
UInt reg;
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(IS_VALID_PFX(pfx));
vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
reg = eregOfRexRM( pfx, mod_reg_rm );
@@ -1401,7 +1401,7 @@
static Int xmmGuestRegOffset ( UInt xmmreg )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
return ymmGuestRegOffset( xmmreg );
}
@@ -1411,7 +1411,7 @@
static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 8);
return xmmGuestRegOffset( xmmreg ) + 2 * laneno;
}
@@ -1419,7 +1419,7 @@
static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 4);
return xmmGuestRegOffset( xmmreg ) + 4 * laneno;
}
@@ -1427,7 +1427,7 @@
static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 2);
return xmmGuestRegOffset( xmmreg ) + 8 * laneno;
}
@@ -1435,7 +1435,7 @@
static Int ymmGuestRegLane128offset ( UInt ymmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 2);
return ymmGuestRegOffset( ymmreg ) + 16 * laneno;
}
@@ -1443,7 +1443,7 @@
static Int ymmGuestRegLane64offset ( UInt ymmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 4);
return ymmGuestRegOffset( ymmreg ) + 8 * laneno;
}
@@ -1451,7 +1451,7 @@
static Int ymmGuestRegLane32offset ( UInt ymmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 8);
return ymmGuestRegOffset( ymmreg ) + 4 * laneno;
}
@@ -31745,7 +31745,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
Int i, x1, x2;
@@ -31756,7 +31756,7 @@
vassert(guest_arch == VexArchAMD64);
guest_code = guest_code_IN;
irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
+ host_endness = host_endness_IN;
guest_RIP_curr_instr = guest_IP;
guest_RIP_bbstart = guest_IP - delta;
Modified: trunk/priv/guest_arm64_defs.h
==============================================================================
--- trunk/priv/guest_arm64_defs.h (original)
+++ trunk/priv/guest_arm64_defs.h Thu Jul 24 12:42:03 2014
@@ -50,7 +50,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
Modified: trunk/priv/guest_arm64_toIR.c
==============================================================================
--- trunk/priv/guest_arm64_toIR.c (original)
+++ trunk/priv/guest_arm64_toIR.c Thu Jul 24 12:42:03 2014
@@ -119,9 +119,10 @@
not change during translation of the instruction.
*/
-/* CONST: is the host bigendian? We need to know this in order to do
- sub-register accesses to the SIMD/FP registers correctly. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness? We need to know this in
+ order to do sub-register accesses to the SIMD/FP registers
+ correctly. */
+static VexEndness host_endness;
/* CONST: The guest address for the instruction currently being
translated. */
@@ -1227,7 +1228,7 @@
has the lowest offset. */
static Int offsetQRegLane ( UInt qregNo, IRType laneTy, UInt laneNo )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
Int base = offsetQReg128(qregNo);
/* Since the host is little-endian, the least significant lane
will be at the lowest address. */
@@ -10355,7 +10356,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
DisResult dres;
@@ -10365,7 +10366,7 @@
vassert(guest_arch == VexArchARM64);
irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
+ host_endness = host_endness_IN;
guest_PC_curr_instr = (Addr64)guest_IP;
/* Sanity checks */
Modified: trunk/priv/guest_arm_defs.h
==============================================================================
--- trunk/priv/guest_arm_defs.h (original)
+++ trunk/priv/guest_arm_defs.h Thu Jul 24 12:42:03 2014
@@ -52,7 +52,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
Modified: trunk/priv/guest_arm_toIR.c
==============================================================================
--- trunk/priv/guest_arm_toIR.c (original)
+++ trunk/priv/guest_arm_toIR.c Thu Jul 24 12:42:03 2014
@@ -123,10 +123,10 @@
not change during translation of the instruction.
*/
-/* CONST: is the host bigendian? This has to do with float vs double
- register accesses on VFP, but it's complex and not properly thought
- out. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness? This has to do with float vs
+ double register accesses on VFP, but it's complex and not properly
+ thought out. */
+static VexEndness host_endness;
/* CONST: The guest address for the instruction currently being
translated. This is the real, "decoded" address (not subject
@@ -849,11 +849,11 @@
Int off;
vassert(fregNo < 32);
off = doubleGuestRegOffset(fregNo >> 1);
- if (host_is_bigendian) {
- vassert(0);
- } else {
+ if (host_endness == VexEndnessLE) {
if (fregNo & 1)
off += 4;
+ } else {
+ vassert(0);
}
return off;
}
@@ -21976,7 +21976,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
DisResult dres;
@@ -21985,9 +21985,9 @@
/* Set globals (see top of this file) */
vassert(guest_arch == VexArchARM);
- irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
- __curr_is_Thumb = isThumb;
+ irsb = irsb_IN;
+ host_endness = host_endness_IN;
+ __curr_is_Thumb = isThumb;
if (isThumb) {
guest_R15_curr_instr_notENC = (Addr32)guest_IP_ENCODED - 1;
Modified: trunk/priv/guest_generic_bb_to_IR.c
==============================================================================
--- trunk/priv/guest_generic_bb_to_IR.c (original)
+++ trunk/priv/guest_generic_bb_to_IR.c Thu Jul 24 12:42:03 2014
@@ -186,7 +186,7 @@
/*IN*/ UChar* guest_code,
/*IN*/ Addr64 guest_IP_bbstart,
/*IN*/ Bool (*chase_into_ok)(void*,Addr64),
- /*IN*/ Bool host_bigendian,
+ /*IN*/ VexEndness host_endness,
/*IN*/ Bool sigill_diag,
/*IN*/ VexArch arch_guest,
/*IN*/ VexArchInfo* archinfo_guest,
@@ -362,7 +362,7 @@
arch_guest,
archinfo_guest,
abiinfo_both,
- host_bigendian,
+ host_endness,
sigill_diag );
/* stay sane ... */
Modified: trunk/priv/guest_generic_bb_to_IR.h
==============================================================================
--- trunk/priv/guest_generic_bb_to_IR.h (original)
+++ trunk/priv/guest_generic_bb_to_IR.h Thu Jul 24 12:42:03 2014
@@ -152,8 +152,8 @@
/* ABI info for both guest and host */
/*IN*/ VexAbiInfo* abiinfo,
- /* Is the host bigendian? */
- /*IN*/ Bool host_bigendian,
+ /* The endianness of the host */
+ /*IN*/ VexEndness host_endness,
/* Should diagnostics be printed for illegal instructions? */
/*IN*/ Bool sigill_diag
@@ -176,7 +176,7 @@
/*IN*/ UChar* guest_code,
/*IN*/ Addr64 guest_IP_bbstart,
/*IN*/ Bool (*chase_into_ok)(void*,Addr64),
- /*IN*/ Bool host_bigendian,
+ /*IN*/ VexEndness host_endness,
/*IN*/ Bool sigill_diag,
/*IN*/ VexArch arch_guest,
/*IN*/ VexArchInfo* archinfo_guest,
Modified: trunk/priv/guest_mips_defs.h
==============================================================================
--- trunk/priv/guest_mips_defs.h (original)
+++ trunk/priv/guest_mips_defs.h Thu Jul 24 12:42:03 2014
@@ -51,7 +51,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
Modified: trunk/priv/guest_mips_toIR.c
==============================================================================
--- trunk/priv/guest_mips_toIR.c (original)
+++ trunk/priv/guest_mips_toIR.c Thu Jul 24 12:42:03 2014
@@ -49,10 +49,10 @@
that we don't have to pass them around endlessly. CONST means does
not change during translation of the instruction. */
-/* CONST: is the host bigendian? This has to do with float vs double
- register accesses on VFP, but it's complex and not properly thought
- out. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness? This has to do with float vs
+ double register accesses on VFP, but it's complex and not properly
+ thought out. */
+static VexEndness host_endness;
/* Pointer to the guest code area. */
static UChar *guest_code;
@@ -17202,7 +17202,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
DisResult dres;
@@ -17217,7 +17217,7 @@
guest_code = guest_code_IN;
irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
+ host_endness = host_endness_IN;
#if defined(VGP_mips32_linux)
guest_PC_curr_instr = (Addr32)guest_IP;
#elif defined(VGP_mips64_linux)
Modified: trunk/priv/guest_ppc_defs.h
==============================================================================
--- trunk/priv/guest_ppc_defs.h (original)
+++ trunk/priv/guest_ppc_defs.h Thu Jul 24 12:42:03 2014
@@ -61,7 +61,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
Modified: trunk/priv/guest_ppc_toIR.c
==============================================================================
--- trunk/priv/guest_ppc_toIR.c (original)
+++ trunk/priv/guest_ppc_toIR.c Thu Jul 24 12:42:03 2014
@@ -154,7 +154,7 @@
given insn. */
/* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
/* Pointer to the guest code area. */
static UChar* guest_code;
@@ -1039,7 +1039,7 @@
// jrs: probably not necessary; only matters if we reference sub-parts
// of the ppc registers, but that isn't the case
// later: this might affect Altivec though?
- vassert(host_is_bigendian);
+ vassert(host_endness == VexEndnessBE);
switch (archreg) {
case 0: return offsetofPPCGuestState(guest_GPR0);
@@ -19941,7 +19941,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
IRType ty;
@@ -19973,7 +19973,7 @@
/* Set globals (see top of this file) */
guest_code = guest_code_IN;
irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
+ host_endness = host_endness_IN;
guest_CIA_curr_instr = mkSzAddr(ty, guest_IP);
guest_CIA_bbstart = mkSzAddr(ty, guest_IP - delta);
Modified: trunk/priv/guest_s390_defs.h
==============================================================================
--- trunk/priv/guest_s390_defs.h (original)
+++ trunk/priv/guest_s390_defs.h Thu Jul 24 12:42:03 2014
@@ -50,7 +50,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
Modified: trunk/priv/guest_s390_toIR.c
==============================================================================
--- trunk/priv/guest_s390_toIR.c (original)
+++ trunk/priv/guest_s390_toIR.c Thu Jul 24 12:42:03 2014
@@ -16565,13 +16565,13 @@
VexArch guest_arch,
VexArchInfo *archinfo,
VexAbiInfo *abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag_IN)
{
vassert(guest_arch == VexArchS390X);
/* The instruction decoder requires a big-endian machine. */
- vassert(host_bigendian == True);
+ vassert(host_endness == VexEndnessBE);
/* Set globals (see top of this file) */
guest_IA_curr_instr = guest_IP;
Modified: trunk/priv/guest_x86_defs.h
==============================================================================
--- trunk/priv/guest_x86_defs.h (original)
+++ trunk/priv/guest_x86_defs.h Thu Jul 24 12:42:03 2014
@@ -60,7 +60,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
Modified: trunk/priv/guest_x86_toIR.c
==============================================================================
--- trunk/priv/guest_x86_toIR.c (original)
+++ trunk/priv/guest_x86_toIR.c Thu Jul 24 12:42:03 2014
@@ -195,7 +195,7 @@
given insn. */
/* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
/* Pointer to the guest code area (points to start of BB, not to the
insn being processed). */
@@ -452,7 +452,7 @@
vassert(archreg < 8);
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
if (sz == 4 || sz == 2 || (sz == 1 && archreg < 4)) {
switch (archreg) {
@@ -515,7 +515,7 @@
static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 8);
return xmmGuestRegOffset( xmmreg ) + 2 * laneno;
}
@@ -523,7 +523,7 @@
static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 4);
return xmmGuestRegOffset( xmmreg ) + 4 * laneno;
}
@@ -531,7 +531,7 @@
static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 2);
return xmmGuestRegOffset( xmmreg ) + 8 * laneno;
}
@@ -15421,7 +15421,7 @@
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
Int i, x1, x2;
@@ -15432,7 +15432,7 @@
vassert(guest_arch == VexArchX86);
guest_code = guest_code_IN;
irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
+ host_endness = host_endness_IN;
guest_EIP_curr_instr = (Addr32)guest_IP;
guest_EIP_bbstart = (Addr32)toUInt(guest_IP - delta);
Modified: trunk/priv/host_amd64_defs.c
==============================================================================
--- trunk/priv/host_amd64_defs.c (original)
+++ trunk/priv/host_amd64_defs.c Thu Jul 24 12:42:03 2014
@@ -2265,7 +2265,7 @@
Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, AMD64Instr* i,
- Bool mode64,
+ Bool mode64, VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -3499,7 +3499,7 @@
p = doAMode_M(p, fake(4), i->Ain.EvCheck.amFailAddr);
vassert(p - p0 == 8); /* also ensures that 0x03 offset above is ok */
/* And crosscheck .. */
- vassert(evCheckSzB_AMD64() == 8);
+ vassert(evCheckSzB_AMD64(endness_host) == 8);
goto done;
}
@@ -3542,7 +3542,7 @@
/* How big is an event check? See case for Ain_EvCheck in
emit_AMD64Instr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_AMD64 ( void )
+Int evCheckSzB_AMD64 ( VexEndness endness_host )
{
return 8;
}
@@ -3550,10 +3550,13 @@
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
+VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
movabsq $disp_cp_chain_me_EXPECTED, %r11
call *%r11
@@ -3636,10 +3639,13 @@
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is either:
(general case)
movabsq $place_to_jump_to_EXPECTED, %r11
@@ -3700,9 +3706,11 @@
/* Patch the counter address into a profile inc point, as previously
created by the Ain_ProfInc case for emit_AMD64Instr. */
-VexInvalRange patchProfInc_AMD64 ( void* place_to_patch,
+VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter )
{
+ vassert(endness_host == VexEndnessLE);
vassert(sizeof(ULong*) == 8);
UChar* p = (UChar*)place_to_patch;
vassert(p[0] == 0x49);
Modified: trunk/priv/host_amd64_defs.h
==============================================================================
--- trunk/priv/host_amd64_defs.h (original)
+++ trunk/priv/host_amd64_defs.h Thu Jul 24 12:42:03 2014
@@ -754,8 +754,10 @@
extern void mapRegs_AMD64Instr ( HRegRemap*, AMD64Instr*, Bool );
extern Bool isMove_AMD64Instr ( AMD64Instr*, HReg*, HReg* );
extern Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
- UChar* buf, Int nbuf, AMD64Instr* i,
+ UChar* buf, Int nbuf,
+ AMD64Instr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -782,19 +784,22 @@
and so assumes that they are both <= 128, and so can use the short
offset encoding. This is all checked with assertions, so in the
worst case we will merely assert at startup. */
-extern Int evCheckSzB_AMD64 ( void );
+extern Int evCheckSzB_AMD64 ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to );
-extern VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me );
/* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_AMD64 ( void* place_to_patch,
+extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter );
Modified: trunk/priv/host_amd64_isel.c
==============================================================================
--- trunk/priv/host_amd64_isel.c (original)
+++ trunk/priv/host_amd64_isel.c Thu Jul 24 12:42:03 2014
@@ -4877,6 +4877,9 @@
| VEX_HWCAPS_AMD64_BMI
| VEX_HWCAPS_AMD64_AVX2)));
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessLE);
+
/* Make up an initial environment to use. */
env = LibVEX_Alloc(sizeof(ISelEnv));
env->vreg_ctr = 0;
Modified: trunk/priv/host_arm64_defs.c
==============================================================================
--- trunk/priv/host_arm64_defs.c (original)
+++ trunk/priv/host_arm64_defs.c Thu Jul 24 12:42:03 2014
@@ -4104,7 +4104,7 @@
Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, ARM64Instr* i,
- Bool mode64,
+ Bool mode64, VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -6866,7 +6866,7 @@
/* nofail: */
/* Crosscheck */
- vassert(evCheckSzB_ARM64() == (UChar*)p - (UChar*)p0);
+ vassert(evCheckSzB_ARM64(endness_host) == (UChar*)p - (UChar*)p0);
goto done;
}
@@ -6917,7 +6917,7 @@
/* How big is an event check? See case for ARM64in_EvCheck in
emit_ARM64Instr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_ARM64 ( void )
+Int evCheckSzB_ARM64 ( VexEndness endness_host )
{
return 24;
}
@@ -6925,10 +6925,13 @@
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_ARM64 ( void* place_to_chain,
+VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
movw x9, disp_cp_chain_me_to_EXPECTED[15:0]
movk x9, disp_cp_chain_me_to_EXPECTED[31:16], lsl 16
@@ -6968,10 +6971,13 @@
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_ARM64 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
movw x9, place_to_jump_to_EXPECTED[15:0]
movk x9, place_to_jump_to_EXPECTED[31:16], lsl 16
@@ -7009,7 +7015,8 @@
//ZZ /* Patch the counter address into a profile inc point, as previously
//ZZ created by the ARMin_ProfInc case for emit_ARMInstr. */
-//ZZ VexInvalRange patchProfInc_ARM ( void* place_to_patch,
+//ZZ VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+//ZZ void* place_to_patch,
//ZZ ULong* location_of_counter )
//ZZ {
//ZZ vassert(sizeof(ULong*) == 4);
Modified: trunk/priv/host_arm64_defs.h
==============================================================================
--- trunk/priv/host_arm64_defs.h (original)
+++ trunk/priv/host_arm64_defs.h Thu Jul 24 12:42:03 2014
@@ -843,6 +843,7 @@
extern Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, ARM64Instr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -867,19 +868,22 @@
/* How big is an event check? This is kind of a kludge because it
depends on the offsets of host_EvC_FAILADDR and
host_EvC_COUNTER. */
-extern Int evCheckSzB_ARM64 ( void );
+extern Int evCheckSzB_ARM64 ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_ARM64 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to );
-extern VexInvalRange unchainXDirect_ARM64 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me );
//ZZ /* Patch the counter location into an existing ProfInc point. */
-//ZZ extern VexInvalRange patchProfInc_ARM ( void* place_to_patch,
+//ZZ extern VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+//ZZ void* place_to_patch,
//ZZ ULong* location_of_counter );
Modified: trunk/priv/host_arm64_isel.c
==============================================================================
--- trunk/priv/host_arm64_isel.c (original)
+++ trunk/priv/host_arm64_isel.c Thu Jul 24 12:42:03 2014
@@ -7106,6 +7106,9 @@
/* sanity ... */
vassert(arch_host == VexArchARM64);
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessLE);
+
/* guard against unexpected space regressions */
vassert(sizeof(ARM64Instr) <= 32);
Modified: trunk/priv/host_arm_defs.c
==============================================================================
--- trunk/priv/host_arm_defs.c (original)
+++ trunk/priv/host_arm_defs.c Thu Jul 24 12:42:03 2014
@@ -2971,7 +2971,7 @@
Int emit_ARMInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, ARMInstr* i,
- Bool mode64,
+ Bool mode64, VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -4644,7 +4644,7 @@
/* nofail: */
/* Crosscheck */
- vassert(evCheckSzB_ARM() == (UChar*)p - (UChar*)p0);
+ vassert(evCheckSzB_ARM(endness_host) == (UChar*)p - (UChar*)p0);
goto done;
}
@@ -4695,7 +4695,7 @@
/* How big is an event check? See case for ARMin_EvCheck in
emit_ARMInstr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_ARM ( void )
+Int evCheckSzB_ARM ( VexEndness endness_host )
{
return 24;
}
@@ -4703,10 +4703,13 @@
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_ARM ( void* place_to_chain,
+VexInvalRange chainXDirect_ARM ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
movw r12, lo16(disp_cp_chain_me_to_EXPECTED)
movt r12, hi16(disp_cp_chain_me_to_EXPECTED)
@@ -4783,10 +4786,13 @@
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_ARM ( void* place_to_unchain,
+VexInvalRange unchainXDirect_ARM ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
(general case)
movw r12, lo16(place_to_jump_to_EXPECTED)
@@ -4844,9 +4850,11 @@
/* Patch the counter address into a profile inc point, as previously
created by the ARMin_ProfInc case for emit_ARMInstr. */
-VexInvalRange patchProfInc_ARM ( void* place_to_patch,
+VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter )
{
+ vassert(endness_host == VexEndnessLE);
vassert(sizeof(ULong*) == 4);
UInt* p = (UInt*)place_to_patch;
vassert(0 == (3 & (HWord)p));
Modified: trunk/priv/host_arm_defs.h
==============================================================================
--- trunk/priv/host_arm_defs.h (original)
+++ trunk/priv/host_arm_defs.h Thu Jul 24 12:42:03 2014
@@ -1027,6 +1027,7 @@
extern Int emit_ARMInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, ARMInstr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -1051,19 +1052,22 @@
/* How big is an event check? This is kind of a kludge because it
depends on the offsets of host_EvC_FAILADDR and
host_EvC_COUNTER. */
-extern Int evCheckSzB_ARM ( void );
+extern Int evCheckSzB_ARM ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_ARM ( void* place_to_chain,
+extern VexInvalRange chainXDirect_ARM ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to );
-extern VexInvalRange unchainXDirect_ARM ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_ARM ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me );
/* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_ARM ( void* place_to_patch,
+extern VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter );
Modified: trunk/priv/host_arm_isel.c
==============================================================================
--- trunk/priv/host_arm_isel.c (original)
+++ trunk/priv/host_arm_isel.c Thu Jul 24 12:42:03 2014
@@ -6331,6 +6331,9 @@
/* sanity ... */
vassert(arch_host == VexArchARM);
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessLE);
+
/* guard against unexpected space regressions */
vassert(sizeof(ARMInstr) <= 28);
Modified: trunk/priv/host_mips_defs.c
==============================================================================
--- trunk/priv/host_mips_defs.c (original)
+++ trunk/priv/host_mips_defs.c Thu Jul 24 12:42:03 2014
@@ -2920,6 +2920,7 @@
Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, MIPSInstr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -4229,7 +4230,7 @@
/* nofail: */
/* Crosscheck */
- vassert(evCheckSzB_MIPS() == (UChar*)p - (UChar*)p0);
+ vassert(evCheckSzB_MIPS(endness_host) == (UChar*)p - (UChar*)p0);
goto done;
}
@@ -4315,7 +4316,7 @@
/* How big is an event check? See case for Min_EvCheck in
emit_MIPSInstr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_MIPS ( void )
+Int evCheckSzB_MIPS ( VexEndness endness_host )
{
UInt kInstrSize = 4;
return 7*kInstrSize;
@@ -4323,11 +4324,13 @@
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
+VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to,
Bool mode64 )
{
+ vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
/* What we're expecting to see is:
move r9, disp_cp_chain_me_to_EXPECTED
jalr r9
@@ -4369,11 +4372,13 @@
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
+VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me,
Bool mode64 )
{
+ vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
/* What we're expecting to see is:
move r9, place_to_jump_to_EXPECTED
jalr r9
@@ -4413,13 +4418,16 @@
/* Patch the counter address into a profile inc point, as previously
created by the Min_ProfInc case for emit_MIPSInstr. */
-VexInvalRange patchProfInc_MIPS ( void* place_to_patch,
+VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter, Bool mode64 )
{
- if (mode64)
+ vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
+ if (mode64) {
vassert(sizeof(ULong*) == 8);
- else
+ } else {
vassert(sizeof(ULong*) == 4);
+ }
UChar* p = (UChar*)place_to_patch;
vassert(0 == (3 & (HWord)p));
vassert(isLoadImm_EXACTLY2or6((UChar *)p, /*r*/9,
Modified: trunk/priv/host_mips_defs.h
==============================================================================
--- trunk/priv/host_mips_defs.h (original)
+++ trunk/priv/host_mips_defs.h Thu Jul 24 12:42:03 2014
@@ -715,6 +715,7 @@
extern Int emit_MIPSInstr (/*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, MIPSInstr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -741,25 +742,28 @@
and so assumes that they are both <= 128, and so can use the short
offset encoding. This is all checked with assertions, so in the
worst case we will merely assert at startup. */
-extern Int evCheckSzB_MIPS ( void );
+extern Int evCheckSzB_MIPS ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
+extern VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to,
Bool mode64 );
-extern VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me,
Bool mode64 );
/* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_MIPS ( void* place_to_patch,
+extern VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter,
Bool mode64 );
-#endif /* ndef __LIBVEX_HOST_MIPS_HDEFS_H */
+#endif /* ndef __VEX_HOST_MIPS_DEFS_H */
/*---------------------------------------------------------------*/
/*--- end host-mips_defs.h ---*/
Modified: trunk/priv/host_mips_isel.c
==============================================================================
--- trunk/priv/host_mips_isel.c (original)
+++ trunk/priv/host_mips_isel.c Thu Jul 24 12:42:03 2014
@@ -4173,6 +4173,10 @@
|| VEX_PRID_COMP_BROADCOM == hwcaps_host
|| VEX_PRID_COMP_NETLOGIC);
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessLE
+ || archinfo_host->endness == VexEndnessBE);
+
mode64 = arch_host != VexArchMIPS32;
#if (__mips_fpr==64)
fp_mode64 = ((VEX_MIPS_REV(hwcaps_host) == VEX_PRID_CPU_32FPR)
Modified: trunk/priv/host_ppc_defs.c
==============================================================================
--- trunk/priv/host_ppc_defs.c (original)
+++ trunk/priv/host_ppc_defs.c Thu Jul 24 12:42:03 2014
@@ -3720,7 +3720,7 @@
*/
Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, PPCInstr* i,
- Bool mode64,
+ Bool mode64, VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -5707,7 +5707,7 @@
/* nofail: */
/* Crosscheck */
- vassert(evCheckSzB_PPC() == (UChar*)p - (UChar*)p0);
+ vassert(evCheckSzB_PPC(endness_host) == (UChar*)p - (UChar*)p0);
goto done;
}
@@ -5772,7 +5772,7 @@
/* How big is an event check? See case for Pin_EvCheck in
emit_PPCInstr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_PPC ( void )
+Int evCheckSzB_PPC ( VexEndness endness_host )
{
return 28;
}
@@ -5780,11 +5780,18 @@
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_PPC ( void* place_to_chain,
+VexInvalRange chainXDirect_PPC ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to,
Bool mode64 )
{
+ if (mode64) {
+ vassert(endness_host == VexEndnessBE); /* later: or LE */
+ } else {
+ vassert(endness_host == VexEndnessBE);
+ }
+
/* What we're expecting to see is:
imm32/64-fixed r30, disp_cp_chain_me_to_EXPECTED
mtctr r30
@@ -5825,11 +5832,18 @@
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_PPC ( void* place_to_unchain,
+VexInvalRange unchainXDirect_PPC ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me,
Bool mode64 )
{
+ if (mode64) {
+ vassert(endness_host == VexEndnessBE); /* later: or LE */
+ } else {
+ vassert(endness_host == VexEndnessBE);
+ }
+
/* What we're expecting to see is:
imm32/64-fixed r30, place_to_jump_to_EXPECTED
mtctr r30
@@ -5870,10 +5884,17 @@
/* Patch the counter address into a profile inc point, as previously
created by the Pin_ProfInc case for emit_PPCInstr. */
-VexInvalRange patchProfInc_PPC ( void* place_to_patch,
+VexInvalRange patchProfInc_PPC ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter,
Bool mode64 )
{
+ if (mode64) {
+ vassert(endness_host == VexEndnessBE); /* later: or LE */
+ } else {
+ vassert(endness_host == VexEndnessBE);
+ }
+
UChar* p = (UChar*)place_to_patch;
vassert(0 == (3 & (HWord)p));
Modified: trunk/priv/host_ppc_defs.h
==============================================================================
--- trunk/priv/host_ppc_defs.h (original)
+++ trunk/priv/host_ppc_defs.h Thu Jul 24 12:42:03 2014
@@ -1138,6 +1138,7 @@
extern Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, PPCInstr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -1162,21 +1163,24 @@
/* How big is an event check? This is kind of a kludge because it
depends on the offsets of host_EvC_FAILADDR and
host_EvC_COUNTER. */
-extern Int evCheckSzB_PPC ( void );
+extern Int evCheckSzB_PPC ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_PPC ( void* place_to_chain,
+extern VexInvalRange chainXDirect_PPC ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to,
Bool mode64 );
-extern VexInvalRange unchainXDirect_PPC ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_PPC ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me,
Bool mode64 );
/* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_PPC ( void* place_to_patch,
+extern VexInvalRange patchProfInc_PPC ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter,
Bool mode64 );
Modified: trunk/priv/host_ppc_isel.c
==============================================================================
--- trunk/priv/host_ppc_isel.c (original)
+++ trunk/priv/host_ppc_isel.c Thu Jul 24 12:42:03 2014
@@ -5920,6 +5920,9 @@
vassert((hwcaps_host & mask64) == 0);
}
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessBE);
+
/* Make up an initial environment to use. */
env = LibVEX_Alloc(sizeof(ISelEnv));
env->vreg_ctr = 0;
Modified: trunk/priv/host_s390_defs.c
==============================================================================
--- trunk/priv/host_s390_defs.c (original)
+++ trunk/priv/host_s390_defs.c Thu Jul 24 12:42:03 2014
@@ -9831,7 +9831,8 @@
The dispatch counter is a 32-bit value. */
static UChar *
-s390_insn_evcheck_emit(UChar *buf, const s390_insn *insn)
+s390_insn_evcheck_emit(UChar *buf, const s390_insn *insn,
+ VexEndness endness_host)
{
s390_amode *amode;
UInt b, d;
@@ -9867,7 +9868,7 @@
/* Make sure the size of the generated code is identical to the size
returned by evCheckSzB_S390 */
- vassert(evCheckSzB_S390() == code_end - code_begin);
+ vassert(evCheckSzB_S390(endness_host) == code_end - code_begin);
return buf;
}
@@ -9896,7 +9897,8 @@
Int
emit_S390Instr(Bool *is_profinc, UChar *buf, Int nbuf, s390_insn *insn,
- Bool mode64, void *disp_cp_chain_me_to_slowEP,
+ Bool mode64, VexEndness endness_host,
+ void *disp_cp_chain_me_to_slowEP,
void *disp_cp_chain_me_to_fastEP, void *disp_cp_xindir,
void *disp_cp_xassisted)
{
@@ -10057,7 +10059,7 @@
break;
case S390_INSN_EVCHECK:
- end = s390_insn_evcheck_emit(buf, insn);
+ end = s390_insn_evcheck_emit(buf, insn, endness_host);
break;
case S390_INSN_XDIRECT:
@@ -10087,7 +10089,7 @@
/* Return the number of bytes emitted for an S390_INSN_EVCHECK.
See s390_insn_evcheck_emit */
Int
-evCheckSzB_S390(void)
+evCheckSzB_S390(VexEndness endness_host)
{
return s390_host_has_gie ? 18 : 24;
}
@@ -10096,7 +10098,8 @@
/* Patch the counter address into CODE_TO_PATCH as previously
generated by s390_insn_profinc_emit. */
VexInvalRange
-patchProfInc_S390(void *code_to_patch, ULong *location_of_counter)
+patchProfInc_S390(VexEndness endness_host,
+ void *code_to_patch, ULong *location_of_counter)
{
vassert(sizeof(ULong *) == 8);
@@ -10114,10 +10117,13 @@
/* NB: what goes on here has to be very closely coordinated with the
s390_insn_xdirect_emit code above. */
VexInvalRange
-chainXDirect_S390(void *place_to_chain,
+chainXDirect_S390(VexEndness endness_host,
+ void *place_to_chain,
void *disp_cp_chain_me_EXPECTED,
void *place_to_jump_to)
{
+ vassert(endness_host == VexEndnessBE);
+
/* What we're expecting to see @ PLACE_TO_CHAIN is:
load tchain_scratch, #disp_cp_chain_me_EXPECTED
@@ -10199,10 +10205,13 @@
/* NB: what goes on here has to be very closely coordinated with the
s390_insn_xdirect_emit code above. */
VexInvalRange
-unchainXDirect_S390(void *place_to_unchain,
+unchainXDirect_S390(VexEndness endness_host,
+ void *place_to_unchain,
void *place_to_jump_to_EXPECTED,
void *disp_cp_chain_me)
{
+ vassert(endness_host == VexEndnessBE);
+
/* What we're expecting to see @ PLACE_TO_UNCHAIN:
load tchain_scratch, #place_to_jump_to_EXPECTED
Modified: trunk/priv/host_s390_defs.h
==============================================================================
--- trunk/priv/host_s390_defs.h (original)
+++ trunk/priv/host_s390_defs.h Thu Jul 24 12:42:03 2014
@@ -736,7 +736,7 @@
void mapRegs_S390Instr ( HRegRemap *, s390_insn *, Bool );
Bool isMove_S390Instr ( s390_insn *, HReg *, HReg * );
Int emit_S390Instr ( Bool *, UChar *, Int, s390_insn *, Bool,
- void *, void *, void *, void *);
+ VexEndness, void *, void *, void *, void *);
void getAllocableRegs_S390( Int *, HReg **, Bool );
void genSpill_S390 ( HInstr **, HInstr **, HReg , Int , Bool );
void genReload_S390 ( HInstr **, HInstr **, HReg , Int , Bool );
@@ -745,19 +745,22 @@
Int, Int, Bool, Bool, Addr64);
/* Return the number of bytes of code needed for an event check */
-Int evCheckSzB_S390(void);
+Int evCheckSzB_S390(VexEndness endness_host);
/* Perform a chaining and unchaining of an XDirect jump. */
-VexInvalRange chainXDirect_S390(void *place_to_chain,
+VexInvalRange chainXDirect_S390(VexEndness endness_host,
+ void *place_to_chain,
void *disp_cp_chain_me_EXPECTED,
void *place_to_jump_to);
-VexInvalRange unchainXDirect_S390(void *place_to_unchain,
+VexInvalRange unchainXDirect_S390(VexEndness endness_host,
+ void *place_to_unchain,
void *place_to_jump_to_EXPECTED,
void *disp_cp_chain_me);
/* Patch the counter location into an existing ProfInc point. */
-VexInvalRange patchProfInc_S390(void *code_to_patch,
+VexInvalRange patchProfInc_S390(VexEndness endness_host,
+ void *code_to_patch,
ULong *location_of_counter);
/* KLUDGE: See detailled comment in host_s390_defs.c. */
Modified: trunk/priv/host_s390_isel.c
==============================================================================
--- trunk/priv/host_s390_isel.c (original)
+++ trunk/priv/host_s390_isel.c Thu Jul 24 12:42:03 2014
@@ -4094,6 +4094,9 @@
/* Do some sanity checks */
vassert((VEX_HWCAPS_S390X(hwcaps_host) & ~(VEX_HWCAPS_S390X_ALL)) == 0);
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessBE);
+
/* Make up an initial environment to use. */
env = LibVEX_Alloc(sizeof(ISelEnv));
env->vreg_ctr = 0;
Modified: trunk/priv/host_x86_defs.c
==============================================================================
--- trunk/priv/host_x86_defs.c (original)
+++ trunk/priv/host_x86_defs.c Thu Jul 24 12:42:03 2014
@@ -2102,7 +2102,7 @@
Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, X86Instr* i,
- Bool mode64,
+ Bool mode64, VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
@@ -3291,7 +3291,7 @@
p = doAMode_M(p, fake(4), i->Xin.EvCheck.amFailAddr);
vassert(p - p0 == 8); /* also ensures that 0x03 offset above is ok */
/* And crosscheck .. */
- vassert(evCheckSzB_X86() == 8);
+ vassert(evCheckSzB_X86(endness_host) == 8);
goto done;
}
@@ -3336,7 +3336,7 @@
/* How big is an event check? See case for Xin_EvCheck in
emit_X86Instr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckS...
[truncated message content] |