|
From: <sv...@va...> - 2005-04-26 23:34:42
|
Author: sewardj
Date: 2005-04-27 00:34:34 +0100 (Wed, 27 Apr 2005)
New Revision: 1144
Modified:
trunk/priv/ir/irdefs.c
trunk/priv/ir/iropt.c
trunk/pub/libvex_ir.h
Log:
Add a few new primops which allow for more concise expression of
the instrumentation Memcheck generates:
* CmpNEZ{8,16,32,64}, which are equivalent to CmpNE<sz> with one
argument zero
* Neg{8,16,32,64}, which is equivalent to Sub<sz> with the first
argument zero
For 64-bit platforms, add these primops. This gives a complete set of
primops for conversions between the integral types (I8, I16, I32,
I64), so that a widening/narrowing from any type to any other type can
be achieved in a single primop:
* Iop_8Uto64, Iop_8Sto64, Iop_16Uto64, Iop_16Sto64
* Iop_64to8, Iop_64to16
Modified: trunk/priv/ir/irdefs.c
===================================================================
--- trunk/priv/ir/irdefs.c 2005-04-26 01:53:48 UTC (rev 1143)
+++ trunk/priv/ir/irdefs.c 2005-04-26 23:34:34 UTC (rev 1144)
@@ -176,6 +176,16 @@
case Iop_CmpLT64U: vex_printf("CmpLT64U"); return;
case Iop_CmpLE64U: vex_printf("CmpLE64U"); return;
 
+ case Iop_CmpNEZ8: vex_printf("CmpNEZ8"); return;
+ case Iop_CmpNEZ16: vex_printf("CmpNEZ16"); return;
+ case Iop_CmpNEZ32: vex_printf("CmpNEZ32"); return;
+ case Iop_CmpNEZ64: vex_printf("CmpNEZ64"); return;
+
+ case Iop_Neg8: vex_printf("Neg8"); return;
+ case Iop_Neg16: vex_printf("Neg16"); return;
+ case Iop_Neg32: vex_printf("Neg32"); return;
+ case Iop_Neg64: vex_printf("Neg64"); return;
+
case Iop_DivU32: vex_printf("DivU32"); return;
case Iop_DivS32: vex_printf("DivS32"); return;
 
@@ -1206,6 +1216,8 @@
*t_dst = (_td); *t_arg1 = (_ta1); *t_arg2 = (_ta2); break
# define COMPARISON(_ta) \
*t_dst = Ity_I1; *t_arg1 = *t_arg2 = (_ta); break;
+# define UNARY_COMPARISON(_ta) \
+ *t_dst = Ity_I1; *t_arg1 = (_ta); break;
 
*t_dst = Ity_INVALID;
*t_arg1 = Ity_INVALID;
@@ -1259,11 +1271,15 @@
case Iop_Shl64: case Iop_Shr64: case Iop_Sar64:
BINARY(Ity_I64, Ity_I64,Ity_I8);
 
- case Iop_Not8: UNARY(Ity_I8,Ity_I8);
- case Iop_Not16: UNARY(Ity_I16,Ity_I16);
- case Iop_Not32: UNARY(Ity_I32,Ity_I32);
+ case Iop_Not8: case Iop_Neg8:
+ UNARY(Ity_I8,Ity_I8);
+ case Iop_Not16: case Iop_Neg16:
+ UNARY(Ity_I16,Ity_I16);
+ case Iop_Not32: case Iop_Neg32:
+ UNARY(Ity_I32,Ity_I32);
 
- case Iop_Not64:
+ case Iop_Neg64:
+ case Iop_Not64:
case Iop_CmpNEZ32x2: case Iop_CmpNEZ16x4: case Iop_CmpNEZ8x8:
UNARY(Ity_I64,Ity_I64);
 
@@ -1280,6 +1296,11 @@
case Iop_CmpLT64U: case Iop_CmpLE64U:
COMPARISON(Ity_I64);
 
+ case Iop_CmpNEZ8: UNARY_COMPARISON(Ity_I8);
+ case Iop_CmpNEZ16: UNARY_COMPARISON(Ity_I16);
+ case Iop_CmpNEZ32: UNARY_COMPARISON(Ity_I32);
+ case Iop_CmpNEZ64: UNARY_COMPARISON(Ity_I64);
+
case Iop_MullU8: case Iop_MullS8:
BINARY(Ity_I16, Ity_I8,Ity_I8);
case Iop_MullU16: case Iop_MullS16:
@@ -1456,6 +1477,7 @@
# undef UNARY
# undef BINARY
# undef COMPARISON
+# undef UNARY_COMPARISON
}
 
 
Modified: trunk/priv/ir/iropt.c
===================================================================
--- trunk/priv/ir/iropt.c 2005-04-26 01:53:48 UTC (rev 1143)
+++ trunk/priv/ir/iropt.c 2005-04-26 23:34:34 UTC (rev 1144)
@@ -846,6 +846,20 @@
vpanic("notBool");
}
 
+/* Make a zero which has the same type as the result of the given
+ primop. */
+static IRExpr* mkZeroForXor ( IROp op )
+{
+ switch (op) {
+ case Iop_Xor8: return IRExpr_Const(IRConst_U8(0));
+ case Iop_Xor16: return IRExpr_Const(IRConst_U16(0));
+ case Iop_Xor32: return IRExpr_Const(IRConst_U32(0));
+ case Iop_Xor64: return IRExpr_Const(IRConst_U64(0));
+ default: vpanic("mkZeroForXor: bad primop");
+ }
+}
+
+
static IRExpr* fold_Expr ( IRExpr* e )
{
Int shift;
@@ -928,6 +942,15 @@
notBool(e->Iex.Unop.arg->Iex.Const.con->Ico.U1)));
break;
 
+ case Iop_Neg32:
+ e2 = IRExpr_Const(IRConst_U32(
+ - (e->Iex.Unop.arg->Iex.Const.con->Ico.U32)));
+ break;
+ case Iop_Neg8:
+ e2 = IRExpr_Const(IRConst_U8(
+ - (e->Iex.Unop.arg->Iex.Const.con->Ico.U8)));
+ break;
+
case Iop_64to32: {
ULong w64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U64;
w64 &= 0x00000000FFFFFFFFULL;
@@ -946,6 +969,24 @@
& e->Iex.Unop.arg->Iex.Const.con->Ico.U32));
break;
 
+ case Iop_CmpNEZ8:
+ e2 = IRExpr_Const(IRConst_U1(toBool(
+ 0 !=
+ (0xFF & e->Iex.Unop.arg->Iex.Const.con->Ico.U8)
+ )));
+ break;
+ case Iop_CmpNEZ32:
+ e2 = IRExpr_Const(IRConst_U1(toBool(
+ 0 !=
+ (0xFFFFFFFF & e->Iex.Unop.arg->Iex.Const.con->Ico.U32)
+ )));
+ break;
+ case Iop_CmpNEZ64:
+ e2 = IRExpr_Const(IRConst_U1(toBool(
+ 0ULL != e->Iex.Unop.arg->Iex.Const.con->Ico.U64
+ )));
+ break;
+
default:
goto unhandled;
}
@@ -1235,6 +1276,13 @@
e2 = e->Iex.Binop.arg1;
} else
 
+ /* Or16(x,0) ==> x */
+ if ((e->Iex.Binop.op == Iop_Or16)
+ && e->Iex.Binop.arg2->tag == Iex_Const
+ && e->Iex.Binop.arg2->Iex.Const.con->Ico.U16 == 0) {
+ e2 = e->Iex.Binop.arg1;
+ } else
+
/* Or32/Add32(x,0) ==> x */
if ((e->Iex.Binop.op == Iop_Add32 || e->Iex.Binop.op == Iop_Or32)
&& e->Iex.Binop.arg2->tag == Iex_Const
@@ -1277,6 +1325,16 @@
e2 = e->Iex.Binop.arg1;
}
 
+ /* Xor8/16/32/64(t,t) ==> 0, for some IRTemp t */
+ if ( (e->Iex.Binop.op == Iop_Xor64
+ || e->Iex.Binop.op == Iop_Xor32
+ || e->Iex.Binop.op == Iop_Xor16
+ || e->Iex.Binop.op == Iop_Xor8)
+ && sameIRTemps(e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+ vassert(0); /* awaiting test case */
+ e2 = mkZeroForXor(e->Iex.Binop.op);
+ }
+
}
}
 
Modified: trunk/pub/libvex_ir.h
===================================================================
--- trunk/pub/libvex_ir.h 2005-04-26 01:53:48 UTC (rev 1143)
+++ trunk/pub/libvex_ir.h 2005-04-26 23:34:34 UTC (rev 1144)
@@ -235,6 +235,10 @@
Iop_CmpLT32U, Iop_CmpLT64U,
Iop_CmpLE32U, Iop_CmpLE64U,
 
+ /* As a sop to Valgrind-Memcheck, the following are useful. */
+ Iop_CmpNEZ8, Iop_CmpNEZ16, Iop_CmpNEZ32, Iop_CmpNEZ64,
+ Iop_Neg8, Iop_Neg16, Iop_Neg32, Iop_Neg64,
+
/* Division */
/* TODO: clarify semantics wrt rounding, negative values, whatever */
Iop_DivU32, // :: I32,I32 -> I32 (simple div, no mod)
@@ -248,11 +252,21 @@
// of which lo half is div and hi half is mod
Iop_DivModS128to64, // ditto, signed
 
+ /* Integer conversions. Some of these are redundant (eg
+ Iop_64to8 is the same as Iop_64to32 and then Iop_32to8), but
+ having a complete set reduces the typical dynamic size of IR
+ and makes the instruction selectors easier to write. */
+
/* Widening conversions */
- Iop_8Uto16, Iop_8Uto32, Iop_16Uto32, Iop_32Uto64,
- Iop_8Sto16, Iop_8Sto32, Iop_16Sto32, Iop_32Sto64,
+ Iop_8Uto16, Iop_8Uto32, Iop_8Uto64,
+ Iop_16Uto32, Iop_16Uto64,
+ Iop_32Uto64,
+ Iop_8Sto16, Iop_8Sto32, Iop_8Sto64,
+ Iop_16Sto32, Iop_16Sto64,
+ Iop_32Sto64,
+
/* Narrowing conversions */
- Iop_32to8,
+ Iop_64to8, Iop_32to8, Iop_64to16,
/* 8 <-> 16 bit conversions */
Iop_16to8, // :: I16 -> I8, low half
Iop_16HIto8, // :: I16 -> I8, high half
|