|
From: <sv...@va...> - 2006-12-29 01:54:39
|
Author: sewardj
Date: 2006-12-29 01:54:36 +0000 (Fri, 29 Dec 2006)
New Revision: 1714
Log:
Tidy up flags spec fn, and add a rule for INCW-CondZ.
Modified:
trunk/priv/guest-amd64/ghelpers.c
Modified: trunk/priv/guest-amd64/ghelpers.c
===================================================================
--- trunk/priv/guest-amd64/ghelpers.c 2006-12-28 04:40:12 UTC (rev 1713)
+++ trunk/priv/guest-amd64/ghelpers.c 2006-12-29 01:54:36 UTC (rev 1714)
@@ -1044,25 +1044,6 @@
mkU64(1));
}
=20
-// if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondNZ)) {
-// /* byte sub/cmp, then NZ --> test dst!=src */
-// return unop(Iop_32Uto64,
-// unop(Iop_1Uto32,
-// binop(Iop_CmpNE8,
-// unop(Iop_32to8,unop(Iop_64to32,cc_dep1)),
-// unop(Iop_32to8,unop(Iop_64to32,cc_dep2)))));
-// }
-
-//.. if (isU32(cc_op, AMD64G_CC_OP_SUBB) && isU32(cond, X86CondNBE=
)) {
-//.. /* long sub/cmp, then NBE (unsigned greater than)
-//.. --> test src <u dst */
-//.. /* Note, args are opposite way round from the usual */
-//.. return unop(Iop_1Uto32,
-//.. binop(Iop_CmpLT32U,=20
-//.. binop(Iop_And32,cc_dep2,mkU32(0xFF)),
-//.. binop(Iop_And32,cc_dep1,mkU32(0xFF))));
-//.. }
-
/*---------------- LOGICQ ----------------*/
=20
if (isU64(cc_op, AMD64G_CC_OP_LOGICQ) && isU64(cond, AMD64CondZ)) =
{
@@ -1102,11 +1083,6 @@
mkU64(0)));
}
=20
-//.. if (isU32(cc_op, AMD64G_CC_OP_LOGICL) && isU32(cond, X86CondS=
)) {
-//.. /* long and/or/xor, then S --> test dst <s 0 */
-//.. return unop(Iop_1Uto32,binop(Iop_CmpLT32S, cc_dep1, mkU32(=
0)));
-//.. }
-
if (isU64(cc_op, AMD64G_CC_OP_LOGICL) && isU64(cond, AMD64CondLE))=
{
/* long and/or/xor, then LE
This is pretty subtle. LOGIC sets SF and ZF according to th=
e
@@ -1120,24 +1096,6 @@
mkU64(0)));
}
=20
-//.. if (isU32(cc_op, AMD64G_CC_OP_LOGICL) && isU32(cond, X86CondB=
E)) {
-//.. /* long and/or/xor, then BE
-//.. LOGIC sets ZF according to the result and makes CF be z=
ero.
-//.. BE computes (CF | ZF), but CF is zero, so this reduces =
ZF=20
-//.. -- which will be 1 iff the result is zero. Hence ...
-//.. */
-//.. return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0=
)));
-//.. }
-//..=20
-//.. /*---------------- LOGICW ----------------*/
-//..=20
-//.. if (isU32(cc_op, AMD64G_CC_OP_LOGICW) && isU32(cond, X86CondZ=
)) {
-//.. /* byte and/or/xor, then Z --> test dst=3D=3D0 */
-//.. return unop(Iop_1Uto32,
-//.. binop(Iop_CmpEQ32, binop(Iop_And32,cc_dep1,mkU=
32(0xFFFF)),=20
-//.. mkU32(0)));
-//.. }
-
/*---------------- LOGICB ----------------*/
=20
if (isU64(cc_op, AMD64G_CC_OP_LOGICB) && isU64(cond, AMD64CondZ)) =
{
@@ -1170,6 +1128,16 @@
mkU64(0)));
}
=20
+ /*---------------- INCW ----------------*/
+
+ if (isU64(cc_op, AMD64G_CC_OP_INCW) && isU64(cond, AMD64CondZ)) {
+ /* 16-bit inc, then Z --> test dst == 0 */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpEQ64,
+ binop(Iop_Shl64,cc_dep1,mkU8(48)),
+ mkU64(0)));
+ }
+
/*---------------- DECL ----------------*/
=20
if (isU64(cc_op, AMD64G_CC_OP_DECL) && isU64(cond, AMD64CondZ)) {
@@ -1190,25 +1158,6 @@
mkU64(0)));
}
=20
-//.. /*---------------- DECL ----------------*/
-//..=20
-//.. if (isU32(cc_op, AMD64G_CC_OP_DECL) && isU32(cond, X86CondZ))=
{
-//.. /* dec L, then Z --> test dst =3D=3D 0 */
-//.. return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0=
)));
-//.. }
-//..=20
-//.. if (isU32(cc_op, AMD64G_CC_OP_DECL) && isU32(cond, X86CondS))=
{
-//.. /* dec L, then S --> compare DST <s 0 */
-//.. return unop(Iop_1Uto32,binop(Iop_CmpLT32S, cc_dep1, mkU32(=
0)));
-//.. }
-//..=20
-//.. /*---------------- SHRL ----------------*/
-//..=20
-//.. if (isU32(cc_op, AMD64G_CC_OP_SHRL) && isU32(cond, X86CondZ))=
{
-//.. /* SHRL, then Z --> test dep1 =3D=3D 0 */
-//.. return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0=
)));
-//.. }
-
/*---------------- COPY ----------------*/
/* This can happen, as a result of amd64 FP compares: "comisd ... =
;
jbe" for example. */
@@ -1340,48 +1289,16 @@
/* If the thunk is dec or inc, the cflag is supplied as CC_NDEP=
. */
return cc_ndep;
}
-//.. if (isU64(cc_op, AMD64G_CC_OP_COPY)) {
-//.. /* cflag after COPY is stored in DEP1. */
-//.. return
-//.. binop(
-//.. Iop_And64,
-//.. binop(Iop_Shr64, cc_dep1, mkU8(AMD64G_CC_SHIFT_C)),
-//.. mkU64(1)
-//.. );
-//.. }
-//.. # if 0
-//.. if (cc_op->tag =3D=3D Iex_Const) {
-//.. vex_printf("CFLAG "); ppIRExpr(cc_op); vex_printf("\n");
-//.. }
-//.. # endif
=20
+# if 0
+ if (cc_op->tag == Iex_Const) {
+ vex_printf("CFLAG "); ppIRExpr(cc_op); vex_printf("\n");
+ }
+# endif
+
return NULL;
}
=20
-//.. /* --------- specialising "x86g_calculate_rflags_all" --------- =
*/
-//..=20
-//.. if (vex_streq(function_name, "x86g_calculate_rflags_all")) {
-//.. /* specialise calls to above "calculate_rflags_all" function =
*/
-//.. IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
-//.. vassert(arity =3D=3D 4);
-//.. cc_op =3D args[0];
-//.. cc_dep1 =3D args[1];
-//.. cc_dep2 =3D args[2];
-//.. cc_ndep =3D args[3];
-//..=20
-//.. if (isU32(cc_op, AMD64G_CC_OP_COPY)) {
-//.. /* eflags after COPY are stored in DEP1. */
-//.. return
-//.. binop(
-//.. Iop_And32,
-//.. cc_dep1,
-//.. mkU32(AMD64G_CC_MASK_O | AMD64G_CC_MASK_S | AMD64G_C=
C_MASK_Z=20
-//.. | AMD64G_CC_MASK_A | AMD64G_CC_MASK_C | AMD64G=
_CC_MASK_P)
-//.. );
-//.. }
-//.. return NULL;
-//.. }
-
# undef unop
# undef binop
# undef mkU64
|