|
From: <sv...@va...> - 2005-06-30 23:33:40
|
Author: sewardj
Date: 2005-07-01 00:33:37 +0100 (Fri, 01 Jul 2005)
New Revision: 4072
Log:
Track Vex API change (r1239, introduction of endianness-indications in
IR loads and stores.)
Modified:
trunk/cachegrind/cg_main.c
trunk/memcheck/mc_include.h
trunk/memcheck/mc_main.c
trunk/memcheck/mc_translate.c
Modified: trunk/cachegrind/cg_main.c
===================================================================
--- trunk/cachegrind/cg_main.c 2005-06-30 23:32:30 UTC (rev 4071)
+++ trunk/cachegrind/cg_main.c 2005-06-30 23:33:37 UTC (rev 4072)
@@ -433,22 +433,23 @@
=20
case Ist_Tmp: {
IRExpr* data =3D st->Ist.Tmp.data;
- if (data->tag =3D=3D Iex_LDle) {
- IRExpr* aexpr =3D data->Iex.LDle.addr;
+ if (data->tag =3D=3D Iex_Load) {
+ IRExpr* aexpr =3D data->Iex.Load.addr;
tl_assert( isIRAtom(aexpr) );
-
+ // Note also, endianness info is ignored. I guess that's not
+ // interesting.
// XXX: repe cmpsb does two loads... the first one is ignored here!
//tl_assert( NULL =3D=3D *loadAddrExpr ); // XXX: ???
*loadAddrExpr =3D aexpr;
- *dataSize =3D sizeofIRType(data->Iex.LDle.ty);
+ *dataSize =3D sizeofIRType(data->Iex.Load.ty);
}
addStmtToIRBB( bbOut, st );
break;
}
=20
- case Ist_STle: {
- IRExpr* data =3D st->Ist.STle.data;
- IRExpr* aexpr =3D st->Ist.STle.addr;
+ case Ist_Store: {
+ IRExpr* data =3D st->Ist.Store.data;
+ IRExpr* aexpr =3D st->Ist.Store.addr;
tl_assert( isIRAtom(aexpr) );
tl_assert( NULL =3D=3D *storeAddrExpr ); // XXX: ???
*storeAddrExpr =3D aexpr;
Modified: trunk/memcheck/mc_include.h
===================================================================
--- trunk/memcheck/mc_include.h 2005-06-30 23:32:30 UTC (rev 4071)
+++ trunk/memcheck/mc_include.h 2005-06-30 23:33:37 UTC (rev 4072)
@@ -60,15 +60,15 @@
extern void MC_(helperc_value_check1_fail) ( void );
extern void MC_(helperc_value_check0_fail) ( void );
=20
-extern VG_REGPARM(1) void MC_(helperc_STOREV8) ( Addr, ULong );
-extern VG_REGPARM(2) void MC_(helperc_STOREV4) ( Addr, UWord );
-extern VG_REGPARM(2) void MC_(helperc_STOREV2) ( Addr, UWord );
-extern VG_REGPARM(2) void MC_(helperc_STOREV1) ( Addr, UWord );
+extern VG_REGPARM(1) void MC_(helperc_STOREV8le) ( Addr, ULong );
+extern VG_REGPARM(2) void MC_(helperc_STOREV4le) ( Addr, UWord );
+extern VG_REGPARM(2) void MC_(helperc_STOREV2le) ( Addr, UWord );
+extern VG_REGPARM(2) void MC_(helperc_STOREV1le) ( Addr, UWord );
=20
-extern VG_REGPARM(1) UWord MC_(helperc_LOADV1) ( Addr );
-extern VG_REGPARM(1) UWord MC_(helperc_LOADV2) ( Addr );
-extern VG_REGPARM(1) UWord MC_(helperc_LOADV4) ( Addr );
-extern VG_REGPARM(1) ULong MC_(helperc_LOADV8) ( Addr );
+extern VG_REGPARM(1) UWord MC_(helperc_LOADV1le) ( Addr );
+extern VG_REGPARM(1) UWord MC_(helperc_LOADV2le) ( Addr );
+extern VG_REGPARM(1) UWord MC_(helperc_LOADV4le) ( Addr );
+extern VG_REGPARM(1) ULong MC_(helperc_LOADV8le) ( Addr );
=20
extern void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len );
=20
Modified: trunk/memcheck/mc_main.c
===================================================================
--- trunk/memcheck/mc_main.c 2005-06-30 23:32:30 UTC (rev 4071)
+++ trunk/memcheck/mc_main.c 2005-06-30 23:33:37 UTC (rev 4072)
@@ -1473,9 +1473,9 @@
/* ------------------------ Size =3D 8 ------------------------ */
=20
VG_REGPARM(1)
-ULong MC_(helperc_LOADV8) ( Addr aA )
+ULong MC_(helperc_LOADV8le) ( Addr aA )
{
- PROF_EVENT(200, "helperc_LOADV8");
+ PROF_EVENT(200, "helperc_LOADV8le");
=20
# if VG_DEBUG_MEMORY >=3D 2
return mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
@@ -1488,7 +1488,7 @@
naturally aligned, or 'a' exceeds the range covered by the
primary map. Either way we defer to the slow-path case. */
if (EXPECTED_NOT_TAKEN(a & mask)) {
- PROF_EVENT(201, "helperc_LOADV8-slow1");
+ PROF_EVENT(201, "helperc_LOADV8le-slow1");
return (UWord)mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
}
=20
@@ -1509,7 +1509,7 @@
return ((ULong*)(sm->vbyte))[ v_off >> 3 ];
} else {
/* Slow but general case. */
- PROF_EVENT(202, "helperc_LOADV8-slow2");
+ PROF_EVENT(202, "helperc_LOADV8le-slow2");
return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
}
=20
@@ -1517,9 +1517,9 @@
}
=20
VG_REGPARM(1)
-void MC_(helperc_STOREV8) ( Addr aA, ULong vbytes )
+void MC_(helperc_STOREV8le) ( Addr aA, ULong vbytes )
{
- PROF_EVENT(210, "helperc_STOREV8");
+ PROF_EVENT(210, "helperc_STOREV8le");
=20
# if VG_DEBUG_MEMORY >=3D 2
mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
@@ -1532,7 +1532,7 @@
naturally aligned, or 'a' exceeds the range covered by the
primary map. Either way we defer to the slow-path case. */
if (EXPECTED_NOT_TAKEN(a & mask)) {
- PROF_EVENT(211, "helperc_STOREV8-slow1");
+ PROF_EVENT(211, "helperc_STOREV8le-slow1");
mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
return;
}
@@ -1555,7 +1555,7 @@
((ULong*)(sm->vbyte))[ v_off >> 3 ] =3D vbytes;
} else {
/* Slow but general case. */
- PROF_EVENT(212, "helperc_STOREV8-slow2");
+ PROF_EVENT(212, "helperc_STOREV8le-slow2");
mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
}
# endif
@@ -1564,9 +1564,9 @@
/* ------------------------ Size =3D 4 ------------------------ */
=20
VG_REGPARM(1)
-UWord MC_(helperc_LOADV4) ( Addr aA )
+UWord MC_(helperc_LOADV4le) ( Addr aA )
{
- PROF_EVENT(220, "helperc_LOADV4");
+ PROF_EVENT(220, "helperc_LOADV4le");
=20
# if VG_DEBUG_MEMORY >=3D 2
return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
@@ -1579,7 +1579,7 @@
naturally aligned, or 'a' exceeds the range covered by the
primary map. Either way we defer to the slow-path case. */
if (EXPECTED_NOT_TAKEN(a & mask)) {
- PROF_EVENT(221, "helperc_LOADV4-slow1");
+ PROF_EVENT(221, "helperc_LOADV4le-slow1");
return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
}
=20
@@ -1607,7 +1607,7 @@
return ret;
} else {
/* Slow but general case. */
- PROF_EVENT(222, "helperc_LOADV4-slow2");
+ PROF_EVENT(222, "helperc_LOADV4le-slow2");
return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
}
=20
@@ -1615,9 +1615,9 @@
}
=20
VG_REGPARM(2)
-void MC_(helperc_STOREV4) ( Addr aA, UWord vbytes )
+void MC_(helperc_STOREV4le) ( Addr aA, UWord vbytes )
{
- PROF_EVENT(230, "helperc_STOREV4");
+ PROF_EVENT(230, "helperc_STOREV4le");
=20
# if VG_DEBUG_MEMORY >=3D 2
mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
@@ -1630,7 +1630,7 @@
naturally aligned, or 'a' exceeds the range covered by the
primary map. Either way we defer to the slow-path case. */
if (EXPECTED_NOT_TAKEN(a & mask)) {
- PROF_EVENT(231, "helperc_STOREV4-slow1");
+ PROF_EVENT(231, "helperc_STOREV4le-slow1");
mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
return;
}
@@ -1654,7 +1654,7 @@
((UInt*)(sm->vbyte))[ v_off >> 2 ] =3D (UInt)vbytes;
} else {
/* Slow but general case. */
- PROF_EVENT(232, "helperc_STOREV4-slow2");
+ PROF_EVENT(232, "helperc_STOREV4le-slow2");
mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
}
# endif
@@ -1663,9 +1663,9 @@
/* ------------------------ Size =3D 2 ------------------------ */
=20
VG_REGPARM(1)
-UWord MC_(helperc_LOADV2) ( Addr aA )
+UWord MC_(helperc_LOADV2le) ( Addr aA )
{
- PROF_EVENT(240, "helperc_LOADV2");
+ PROF_EVENT(240, "helperc_LOADV2le");
=20
# if VG_DEBUG_MEMORY >=3D 2
return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
@@ -1678,7 +1678,7 @@
naturally aligned, or 'a' exceeds the range covered by the
primary map. Either way we defer to the slow-path case. */
if (EXPECTED_NOT_TAKEN(a & mask)) {
- PROF_EVENT(241, "helperc_LOADV2-slow1");
+ PROF_EVENT(241, "helperc_LOADV2le-slow1");
return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
}
=20
@@ -1703,7 +1703,7 @@
(UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
} else {
/* Slow but general case. */
- PROF_EVENT(242, "helperc_LOADV2-slow2");
+ PROF_EVENT(242, "helperc_LOADV2le-slow2");
return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
}
=20
@@ -1711,9 +1711,9 @@
}
=20
VG_REGPARM(2)
-void MC_(helperc_STOREV2) ( Addr aA, UWord vbytes )
+void MC_(helperc_STOREV2le) ( Addr aA, UWord vbytes )
{
- PROF_EVENT(250, "helperc_STOREV2");
+ PROF_EVENT(250, "helperc_STOREV2le");
=20
# if VG_DEBUG_MEMORY >=3D 2
mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
@@ -1726,7 +1726,7 @@
naturally aligned, or 'a' exceeds the range covered by the
primary map. Either way we defer to the slow-path case. */
if (EXPECTED_NOT_TAKEN(a & mask)) {
- PROF_EVENT(251, "helperc_STOREV2-slow1");
+ PROF_EVENT(251, "helperc_STOREV2le-slow1");
mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
return;
}
@@ -1747,7 +1747,7 @@
((UShort*)(sm->vbyte))[ v_off >> 1 ] =3D (UShort)vbytes;
} else {
/* Slow but general case. */
- PROF_EVENT(252, "helperc_STOREV2-slow2");
+ PROF_EVENT(252, "helperc_STOREV2le-slow2");
mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
}
# endif
@@ -1756,9 +1756,9 @@
/* ------------------------ Size =3D 1 ------------------------ */
=20
VG_REGPARM(1)
-UWord MC_(helperc_LOADV1) ( Addr aA )
+UWord MC_(helperc_LOADV1le) ( Addr aA )
{
- PROF_EVENT(260, "helperc_LOADV1");
+ PROF_EVENT(260, "helperc_LOADV1le");
=20
# if VG_DEBUG_MEMORY >=3D 2
return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
@@ -1771,7 +1771,7 @@
exceeds the range covered by the primary map. In which case we
defer to the slow-path case. */
if (EXPECTED_NOT_TAKEN(a & mask)) {
- PROF_EVENT(261, "helperc_LOADV1-slow1");
+ PROF_EVENT(261, "helperc_LOADV1le-slow1");
return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
}
=20
@@ -1796,7 +1796,7 @@
(UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
} else {
/* Slow but general case. */
- PROF_EVENT(262, "helperc_LOADV1-slow2");
+ PROF_EVENT(262, "helperc_LOADV1le-slow2");
return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
}
# endif
@@ -1804,9 +1804,9 @@
=20
=20
VG_REGPARM(2)
-void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
+void MC_(helperc_STOREV1le) ( Addr aA, UWord vbyte )
{
- PROF_EVENT(270, "helperc_STOREV1");
+ PROF_EVENT(270, "helperc_STOREV1le");
=20
# if VG_DEBUG_MEMORY >=3D 2
mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
@@ -1818,7 +1818,7 @@
exceeds the range covered by the primary map. In which case we
defer to the slow-path case. */
if (EXPECTED_NOT_TAKEN(a & mask)) {
- PROF_EVENT(271, "helperc_STOREV1-slow1");
+ PROF_EVENT(271, "helperc_STOREV1le-slow1");
mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
return;
}
@@ -1839,7 +1839,7 @@
lives in is addressible. */
((UChar*)(sm->vbyte))[ v_off ] =3D (UChar)vbyte;
} else {
- PROF_EVENT(272, "helperc_STOREV1-slow2");
+ PROF_EVENT(272, "helperc_STOREV1le-slow2");
mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
}
=20
Modified: trunk/memcheck/mc_translate.c
===================================================================
--- trunk/memcheck/mc_translate.c 2005-06-30 23:32:30 UTC (rev 4071)
+++ trunk/memcheck/mc_translate.c 2005-06-30 23:33:37 UTC (rev 4072)
@@ -1936,7 +1936,9 @@
=20
/* Worker function; do not call directly. */
static
-IRAtom* expr2vbits_LDle_WRK ( MCEnv* mce, IRType ty, IRAtom* addr, UInt =
bias )
+IRAtom* expr2vbits_Load_WRK ( MCEnv* mce,=20
+ IREndness end, IRType ty,=20
+ IRAtom* addr, UInt bias )
{
void* helper;
Char* hname;
@@ -1945,6 +1947,7 @@
IRAtom* addrAct;
=20
tl_assert(isOriginalAtom(mce,addr));
+ tl_assert(end =3D=3D Iend_LE || end =3D=3D Iend_BE);
=20
/* First, emit a definedness test for the address. This also sets
the address (shadow) to 'defined' following the test. */
@@ -1953,21 +1956,26 @@
/* Now cook up a call to the relevant helper function, to read the
data V bits from shadow memory. */
ty =3D shadowType(ty);
- switch (ty) {
- case Ity_I64: helper =3D &MC_(helperc_LOADV8);
- hname =3D "MC_(helperc_LOADV8)";
- break;
- case Ity_I32: helper =3D &MC_(helperc_LOADV4);
- hname =3D "MC_(helperc_LOADV4)";
- break;
- case Ity_I16: helper =3D &MC_(helperc_LOADV2);
- hname =3D "MC_(helperc_LOADV2)";
- break;
- case Ity_I8: helper =3D &MC_(helperc_LOADV1);
- hname =3D "MC_(helperc_LOADV1)";
- break;
- default: ppIRType(ty);
- VG_(tool_panic)("memcheck:do_shadow_LDle");
+
+ if (end =3D=3D Iend_LE) { =20
+ switch (ty) {
+ case Ity_I64: helper =3D &MC_(helperc_LOADV8le);
+ hname =3D "MC_(helperc_LOADV8le)";
+ break;
+ case Ity_I32: helper =3D &MC_(helperc_LOADV4le);
+ hname =3D "MC_(helperc_LOADV4le)";
+ break;
+ case Ity_I16: helper =3D &MC_(helperc_LOADV2le);
+ hname =3D "MC_(helperc_LOADV2le)";
+ break;
+ case Ity_I8: helper =3D &MC_(helperc_LOADV1le);
+ hname =3D "MC_(helperc_LOADV1le)";
+ break;
+ default: ppIRType(ty);
+ VG_(tool_panic)("memcheck:do_shadow_Load(LE)");
+ }
+ } else {
+      VG_(tool_panic)("memcheck:do_shadow_Load(BE):bigendian not implemented");
}
=20
/* Generate the actual address into addrAct. */
@@ -1997,23 +2005,32 @@
=20
=20
static
-IRAtom* expr2vbits_LDle ( MCEnv* mce, IRType ty, IRAtom* addr, UInt bias=
)
+IRAtom* expr2vbits_Load ( MCEnv* mce,=20
+ IREndness end, IRType ty,=20
+ IRAtom* addr, UInt bias )
{
IRAtom *v64hi, *v64lo;
+ tl_assert(end =3D=3D Iend_LE || end =3D=3D Iend_BE);
switch (shadowType(ty)) {
case Ity_I8:=20
case Ity_I16:=20
case Ity_I32:=20
case Ity_I64:
- return expr2vbits_LDle_WRK(mce, ty, addr, bias);
+ return expr2vbits_Load_WRK(mce, end, ty, addr, bias);
case Ity_V128:
- v64lo =3D expr2vbits_LDle_WRK(mce, Ity_I64, addr, bias);
- v64hi =3D expr2vbits_LDle_WRK(mce, Ity_I64, addr, bias+8);
+ if (end =3D=3D Iend_LE) {
+ v64lo =3D expr2vbits_Load_WRK(mce, end, Ity_I64, addr, bias)=
;
+ v64hi =3D expr2vbits_Load_WRK(mce, end, Ity_I64, addr, bias+=
8);
+ } else {
+ tl_assert(0 /* awaiting test case */);
+ v64hi =3D expr2vbits_Load_WRK(mce, end, Ity_I64, addr, bias)=
;
+ v64lo =3D expr2vbits_Load_WRK(mce, end, Ity_I64, addr, bias+=
8);
+ }
return assignNew( mce,=20
Ity_V128,=20
binop(Iop_64HLtoV128, v64hi, v64lo));
default:
- VG_(tool_panic)("expr2vbits_LDle");
+ VG_(tool_panic)("expr2vbits_Load");
}
}
=20
@@ -2073,9 +2090,10 @@
case Iex_Unop:
return expr2vbits_Unop( mce, e->Iex.Unop.op, e->Iex.Unop.arg );
=20
- case Iex_LDle:
- return expr2vbits_LDle( mce, e->Iex.LDle.ty,=20
- e->Iex.LDle.addr, 0/*addr bias*/ )=
;
+ case Iex_Load:
+ return expr2vbits_Load( mce, e->Iex.Load.end,
+ e->Iex.Load.ty,=20
+ e->Iex.Load.addr, 0/*addr bias*/ )=
;
=20
case Iex_CCall:
return mkLazyN( mce, e->Iex.CCall.args,=20
@@ -2142,25 +2160,27 @@
obviously not both. */
=20
static=20
-void do_shadow_STle ( MCEnv* mce,=20
- IRAtom* addr, UInt bias,
- IRAtom* data, IRAtom* vdata )
+void do_shadow_Store ( MCEnv* mce,=20
+ IREndness end,
+ IRAtom* addr, UInt bias,
+ IRAtom* data, IRAtom* vdata )
{
IROp mkAdd;
IRType ty, tyAddr;
IRDirty *di, *diLo64, *diHi64;
IRAtom *addrAct, *addrLo64, *addrHi64;
IRAtom *vdataLo64, *vdataHi64;
- IRAtom *eBias, *eBias0, *eBias8;
+ IRAtom *eBias, *eBiasLo64, *eBiasHi64;
void* helper =3D NULL;
Char* hname =3D NULL;
=20
tyAddr =3D mce->hWordTy;
mkAdd =3D tyAddr=3D=3DIty_I32 ? Iop_Add32 : Iop_Add64;
tl_assert( tyAddr =3D=3D Ity_I32 || tyAddr =3D=3D Ity_I64 );
+ tl_assert( end =3D=3D Iend_LE || end =3D=3D Iend_BE );
=20
di =3D diLo64 =3D diHi64 =3D NULL;
- eBias =3D eBias0 =3D eBias8 =3D NULL;
+ eBias =3D eBiasLo64 =3D eBiasHi64 =3D NULL;
addrAct =3D addrLo64 =3D addrHi64 =3D NULL;
vdataLo64 =3D vdataHi64 =3D NULL;
=20
@@ -2184,36 +2204,52 @@
=20
/* Now decide which helper function to call to write the data V
bits into shadow memory. */
- switch (ty) {
- case Ity_V128: /* we'll use the helper twice */
- case Ity_I64: helper =3D &MC_(helperc_STOREV8);
- hname =3D "MC_(helperc_STOREV8)";
- break;
- case Ity_I32: helper =3D &MC_(helperc_STOREV4);
- hname =3D "MC_(helperc_STOREV4)";
- break;
- case Ity_I16: helper =3D &MC_(helperc_STOREV2);
- hname =3D "MC_(helperc_STOREV2)";
- break;
- case Ity_I8: helper =3D &MC_(helperc_STOREV1);
- hname =3D "MC_(helperc_STOREV1)";
- break;
- default: VG_(tool_panic)("memcheck:do_shadow_STle");
+ if (end =3D=3D Iend_LE) {
+ switch (ty) {
+ case Ity_V128: /* we'll use the helper twice */
+ case Ity_I64: helper =3D &MC_(helperc_STOREV8le);
+ hname =3D "MC_(helperc_STOREV8le)";
+ break;
+ case Ity_I32: helper =3D &MC_(helperc_STOREV4le);
+ hname =3D "MC_(helperc_STOREV4le)";
+ break;
+ case Ity_I16: helper =3D &MC_(helperc_STOREV2le);
+ hname =3D "MC_(helperc_STOREV2le)";
+ break;
+ case Ity_I8: helper =3D &MC_(helperc_STOREV1le);
+ hname =3D "MC_(helperc_STOREV1le)";
+ break;
+ default: VG_(tool_panic)("memcheck:do_shadow_Store(LE)");
+ }
+ } else {
+      VG_(tool_panic)("memcheck:do_shadow_Store(BE):bigendian not implemented");
}
=20
if (ty =3D=3D Ity_V128) {
=20
/* V128-bit case */
/* See comment in next clause re 64-bit regparms */
- eBias0 =3D tyAddr=3D=3DIty_I32 ? mkU32(bias) : mkU64(bias);
- addrLo64 =3D assignNew(mce, tyAddr, binop(mkAdd, addr, eBias0) );
+ /* also, need to be careful about endianness */
+
+ Int offLo64, offHi64;
+ if (end =3D=3D Iend_LE) {
+ offLo64 =3D 0;
+ offHi64 =3D 8;
+ } else {
+ tl_assert(0 /* awaiting test case */);
+ offLo64 =3D 8;
+ offHi64 =3D 0;
+ }
+
+ eBiasLo64 =3D tyAddr=3D=3DIty_I32 ? mkU32(bias+offLo64) : mkU64(bi=
as+offLo64);
+ addrLo64 =3D assignNew(mce, tyAddr, binop(mkAdd, addr, eBiasLo64)=
);
vdataLo64 =3D assignNew(mce, Ity_I64, unop(Iop_V128to64, vdata));
diLo64 =3D unsafeIRDirty_0_N(=20
1/*regparms*/, hname, helper,=20
mkIRExprVec_2( addrLo64, vdataLo64 ));
=20
- eBias8 =3D tyAddr=3D=3DIty_I32 ? mkU32(bias+8) : mkU64(bias+8);
- addrHi64 =3D assignNew(mce, tyAddr, binop(mkAdd, addr, eBias8) );
+ eBiasHi64 =3D tyAddr=3D=3DIty_I32 ? mkU32(bias+offHi64) : mkU64(bi=
as+offHi64);
+ addrHi64 =3D assignNew(mce, tyAddr, binop(mkAdd, addr, eBiasHi64)=
);
vdataHi64 =3D assignNew(mce, Ity_I64, unop(Iop_V128HIto64, vdata))=
;
diHi64 =3D unsafeIRDirty_0_N(=20
1/*regparms*/, hname, helper,=20
@@ -2273,11 +2309,21 @@
static
void do_shadow_Dirty ( MCEnv* mce, IRDirty* d )
{
- Int i, n, offset, toDo, gSz, gOff;
- IRAtom *src, *here, *curr;
- IRType tyAddr, tySrc, tyDst;
- IRTemp dst;
+ Int i, n, offset, toDo, gSz, gOff;
+ IRAtom *src, *here, *curr;
+ IRType tyAddr, tySrc, tyDst;
+ IRTemp dst;
+ IREndness end;
=20
+ /* What's the native endianness? We need to know this. */
+# if defined(VKI_BIG_ENDIAN)
+ end =3D Iend_BE;
+# elif defined(VKI_LITTLE_ENDIAN)
+ end =3D Iend_LE;
+# else
+# error "Unknown endianness"
+# endif
+
/* First check the guard. */
complainIfUndefined(mce, d->guard);
=20
@@ -2351,11 +2397,14 @@
if (d->mFx =3D=3D Ifx_Read || d->mFx =3D=3D Ifx_Modify) {
offset =3D 0;
toDo =3D d->mSize;
- /* chew off 32-bit chunks */
+ /* chew off 32-bit chunks. We don't care about the endianness
+ since it's all going to be condensed down to a single bit,
+ but nevertheless choose an endianness which is hopefully
+ native to the platform. */
while (toDo >=3D 4) {
here =3D mkPCastTo(=20
mce, Ity_I32,
- expr2vbits_LDle ( mce, Ity_I32,=20
+ expr2vbits_Load ( mce, end, Ity_I32,=20
d->mAddr, d->mSize - toDo )
);
curr =3D mkUifU32(mce, here, curr);
@@ -2365,7 +2414,7 @@
while (toDo >=3D 2) {
here =3D mkPCastTo(=20
mce, Ity_I32,
- expr2vbits_LDle ( mce, Ity_I16,=20
+ expr2vbits_Load ( mce, end, Ity_I16,=20
d->mAddr, d->mSize - toDo )
);
curr =3D mkUifU32(mce, here, curr);
@@ -2413,22 +2462,23 @@
}
}
=20
- /* Outputs: memory that we write or modify. */
+ /* Outputs: memory that we write or modify. Same comments about
+ endianness as above apply. */
if (d->mFx =3D=3D Ifx_Write || d->mFx =3D=3D Ifx_Modify) {
offset =3D 0;
toDo =3D d->mSize;
/* chew off 32-bit chunks */
while (toDo >=3D 4) {
- do_shadow_STle( mce, d->mAddr, d->mSize - toDo,
- NULL, /* original data */
- mkPCastTo( mce, Ity_I32, curr ) );
+ do_shadow_Store( mce, end, d->mAddr, d->mSize - toDo,
+ NULL, /* original data */
+ mkPCastTo( mce, Ity_I32, curr ) );
toDo -=3D 4;
}
/* chew off 16-bit chunks */
while (toDo >=3D 2) {
- do_shadow_STle( mce, d->mAddr, d->mSize - toDo,
- NULL, /* original data */
- mkPCastTo( mce, Ity_I16, curr ) );
+ do_shadow_Store( mce, end, d->mAddr, d->mSize - toDo,
+ NULL, /* original data */
+ mkPCastTo( mce, Ity_I16, curr ) );
toDo -=3D 2;
}
tl_assert(toDo =3D=3D 0); /* also need to handle 1-byte excess */
@@ -2517,8 +2567,8 @@
return isBogusAtom(e->Iex.Mux0X.cond)
|| isBogusAtom(e->Iex.Mux0X.expr0)
|| isBogusAtom(e->Iex.Mux0X.exprX);
- case Iex_LDle:=20
- return isBogusAtom(e->Iex.LDle.addr);
+ case Iex_Load:=20
+ return isBogusAtom(e->Iex.Load.addr);
case Iex_CCall:
for (i =3D 0; e->Iex.CCall.args[i]; i++)
if (isBogusAtom(e->Iex.CCall.args[i]))
@@ -2542,9 +2592,9 @@
case Ist_PutI:
return isBogusAtom(st->Ist.PutI.ix)=20
|| isBogusAtom(st->Ist.PutI.data);
- case Ist_STle:
- return isBogusAtom(st->Ist.STle.addr)=20
- || isBogusAtom(st->Ist.STle.data);
+ case Ist_Store:
+ return isBogusAtom(st->Ist.Store.addr)=20
+ || isBogusAtom(st->Ist.Store.data);
case Ist_Exit:
return isBogusAtom(st->Ist.Exit.guard);
case Ist_AbiHint:
@@ -2649,10 +2699,11 @@
st->Ist.PutI.data );
break;
=20
- case Ist_STle:
- do_shadow_STle( &mce, st->Ist.STle.addr, 0/* addr bias */,
- st->Ist.STle.data,
- NULL /* shadow data */ );
+ case Ist_Store:
+ do_shadow_Store( &mce, st->Ist.Store.end,
+ st->Ist.Store.addr, 0/* addr bias */,
+ st->Ist.Store.data,
+ NULL /* shadow data */ );
break;
=20
case Ist_Exit:
|