|
From: <sv...@va...> - 2005-05-03 12:20:19
|
Author: sewardj
Date: 2005-05-03 13:20:15 +0100 (Tue, 03 May 2005)
New Revision: 1159
Modified:
trunk/priv/guest-x86/toIR.c
trunk/pub/libvex_ir.h
Log:
x86 guest: generate Iop_Neg* in the x86->IR phase. Intent is to
ensure that the non-shadow (real) computation done by the program will
fail if Iop_Neg* is incorrectly handled somehow. Until this point,
Iop_Neg* is only generated by memcheck and so it will not be obvious
if it is mishandled. IOW, this commit enhances verifiability of the
x86-IR-x86 pipeline.
Modified: trunk/priv/guest-x86/toIR.c
===================================================================
--- trunk/priv/guest-x86/toIR.c 2005-05-03 09:09:27 UTC (rev 1158)
+++ trunk/priv/guest-x86/toIR.c 2005-05-03 12:20:15 UTC (rev 1159)
@@ -848,7 +848,7 @@
|| op8 == Iop_Or8 || op8 == Iop_And8 || op8 == Iop_Xor8
|| op8 == Iop_Shl8 || op8 == Iop_Shr8 || op8 == Iop_Sar8
|| op8 == Iop_CmpEQ8 || op8 == Iop_CmpNE8
- || op8 == Iop_Not8 );
+ || op8 == Iop_Not8 || op8 == Iop_Neg8);
adj = ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : 2);
return adj + op8;
}
@@ -2761,7 +2761,7 @@
dst1 = newTemp(ty);
assign(dst0, mkU(ty,0));
assign(src, getIReg(sz,eregOfRM(modrm)));
- assign(dst1, binop(mkSizedOp(ty,Iop_Sub8), mkexpr(dst0), mkexpr(src)));
+ assign(dst1, unop(mkSizedOp(ty,Iop_Neg8), mkexpr(src)));
setFlags_DEP1_DEP2(Iop_Sub8, dst0, src, ty);
putIReg(sz, eregOfRM(modrm), mkexpr(dst1));
DIP("neg%c %s\n", nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
@@ -2810,7 +2810,6 @@
DIP("test%c $0x%x, %s\n", nameISize(sz), d32, dis_buf);
break;
}
- /* probably OK, but awaiting test case */
case 2: /* NOT */
storeLE( mkexpr(addr), unop(mkSizedOp(ty,Iop_Not8), mkexpr(t1)));
DIP("not%c %s\n", nameISize(sz), dis_buf);
@@ -2821,7 +2820,7 @@
dst1 = newTemp(ty);
assign(dst0, mkU(ty,0));
assign(src, mkexpr(t1));
- assign(dst1, binop(mkSizedOp(ty,Iop_Sub8), mkexpr(dst0), mkexpr(src)));
+ assign(dst1, unop(mkSizedOp(ty,Iop_Neg8), mkexpr(src)));
setFlags_DEP1_DEP2(Iop_Sub8, dst0, src, ty);
storeLE( mkexpr(addr), mkexpr(dst1) );
DIP("neg%c %s\n", nameISize(sz), dis_buf);
Modified: trunk/pub/libvex_ir.h
===================================================================
--- trunk/pub/libvex_ir.h 2005-05-03 09:09:27 UTC (rev 1158)
+++ trunk/pub/libvex_ir.h 2005-05-03 12:20:15 UTC (rev 1159)
@@ -200,8 +200,9 @@
 
typedef
enum { 
- /* Do not change this ordering. The IR generators
- rely on (eg) Iop_Add64 == IopAdd8 + 3. */
+ /* -- Do not change this ordering. The IR generators rely on
+ (eg) Iop_Add64 == IopAdd8 + 3. -- */
+
Iop_INVALID=0x13000,
Iop_Sub8, Iop_Sub16, Iop_Sub32, Iop_Sub64,
@@ -218,6 +219,10 @@
Iop_CmpNE8, Iop_CmpNE16, Iop_CmpNE32, Iop_CmpNE64,
/* Tags for unary ops */
Iop_Not8, Iop_Not16, Iop_Not32, Iop_Not64,
+ Iop_Neg8, Iop_Neg16, Iop_Neg32, Iop_Neg64,
+
+ /* -- Ordering not important after here. -- */
+
/* Widening multiplies */
Iop_MullS8, Iop_MullS16, Iop_MullS32, Iop_MullS64,
Iop_MullU8, Iop_MullU16, Iop_MullU32, Iop_MullU64,
@@ -229,7 +234,6 @@
zero. You must ensure they are never given a zero argument.
*/
 
- /* Ordering not important after here. */
Iop_CmpLT32S, Iop_CmpLT64S,
Iop_CmpLE32S, Iop_CmpLE64S,
Iop_CmpLT32U, Iop_CmpLT64U,
@@ -237,7 +241,6 @@
 
/* As a sop to Valgrind-Memcheck, the following are useful. */
Iop_CmpNEZ8, Iop_CmpNEZ16, Iop_CmpNEZ32, Iop_CmpNEZ64,
- Iop_Neg8, Iop_Neg16, Iop_Neg32, Iop_Neg64,
 
/* Division */
/* TODO: clarify semantics wrt rounding, negative values, whatever */
|