From: <sv...@va...> - 2005-05-11 15:37:58
Author: sewardj
Date: 2005-05-11 16:37:50 +0100 (Wed, 11 May 2005)
New Revision: 1185
Modified:
trunk/priv/host-amd64/hdefs.c
trunk/priv/host-amd64/hdefs.h
trunk/priv/host-amd64/isel.c
Log:
AMD64 backend cleanup: get rid of instruction variants which the insn
selector doesn't generate.
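
In call-site terms, the cleanup means the instruction selector now
passes a plain HReg where it previously had to wrap a register
destination in a reg-or-mem operand whose memory form was never used.
For example, from the isel.c hunks below:

   /* before: only the register form of the RM wrapper was generated */
   addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(dst)));

   /* after: the destination is a plain HReg */
   addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, dst));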
Modified: trunk/priv/host-amd64/hdefs.c
===================================================================
--- trunk/priv/host-amd64/hdefs.c 2005-05-11 10:05:04 UTC (rev 1184)
+++ trunk/priv/host-amd64/hdefs.c 2005-05-11 15:37:50 UTC (rev 1185)
@@ -681,7 +681,7 @@
vassert(op != Aalu_MUL);
return i;
}
-AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, AMD64RM* dst ) {
+AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, HReg dst ) {
AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
i->tag = Ain_Sh64;
i->Ain.Sh64.op = op;
@@ -689,27 +689,25 @@
i->Ain.Sh64.dst = dst;
return i;
}
-AMD64Instr* AMD64Instr_Test64 ( AMD64RI* src, AMD64RM* dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
- i->tag = Ain_Test64;
- i->Ain.Test64.src = src;
- i->Ain.Test64.dst = dst;
+AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst ) {
+ AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ i->tag = Ain_Test64;
+ i->Ain.Test64.imm32 = imm32;
+ i->Ain.Test64.dst = dst;
return i;
}
-AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, AMD64RM* dst ) {
+AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst ) {
AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
i->tag = Ain_Unary64;
i->Ain.Unary64.op = op;
i->Ain.Unary64.dst = dst;
return i;
}
-AMD64Instr* AMD64Instr_MulL ( Bool syned, Int sz, AMD64RM* src ) {
+AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* src ) {
AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
i->tag = Ain_MulL;
i->Ain.MulL.syned = syned;
- i->Ain.MulL.sz = sz;
i->Ain.MulL.src = src;
- vassert(sz == 2 || sz == 4 || sz == 8);
return i;
}
AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* src ) {
@@ -1066,22 +1064,18 @@
vex_printf("%%cl,");=20
else=20
vex_printf("$%d,", (Int)i->Ain.Sh64.src);
- ppAMD64RM(i->Ain.Sh64.dst);
+ ppHRegAMD64(i->Ain.Sh64.dst);
return;
case Ain_Test64:
- vex_printf("testq ");
- ppAMD64RI(i->Ain.Test64.src);
- vex_printf(",");
- ppAMD64RM(i->Ain.Test64.dst);
+ vex_printf("testq $%d,", (Int)i->Ain.Test64.imm32);
+ ppHRegAMD64(i->Ain.Test64.dst);
return;
case Ain_Unary64:
vex_printf("%sq ", showAMD64UnaryOp(i->Ain.Unary64.op));
- ppAMD64RM(i->Ain.Unary64.dst);
+ ppHRegAMD64(i->Ain.Unary64.dst);
return;
case Ain_MulL:
- vex_printf("%cmul%s ",
- i->Ain.MulL.syned ? 's' : 'u',
- showAMD64ScalarSz(i->Ain.MulL.sz));
+ vex_printf("%cmulq ", i->Ain.MulL.syned ? 's' : 'u');
ppAMD64RM(i->Ain.MulL.src);
return;
case Ain_Div:
@@ -1386,16 +1380,15 @@
addRegUsage_AMD64AMode(u, i->Ain.Alu64M.dst);
return;
case Ain_Sh64:
- addRegUsage_AMD64RM(u, i->Ain.Sh64.dst, HRmModify);
+ addHRegUse(u, HRmModify, i->Ain.Sh64.dst);
if (i->Ain.Sh64.src == 0)
addHRegUse(u, HRmRead, hregAMD64_RCX());
return;
case Ain_Test64:
- addRegUsage_AMD64RI(u, i->Ain.Test64.src);
- addRegUsage_AMD64RM(u, i->Ain.Test64.dst, HRmRead);
+ addHRegUse(u, HRmRead, i->Ain.Test64.dst);
return;
case Ain_Unary64:
- addRegUsage_AMD64RM(u, i->Ain.Unary64.dst, HRmModify);
+ addHRegUse(u, HRmModify, i->Ain.Unary64.dst);
return;
case Ain_MulL:
addRegUsage_AMD64RM(u, i->Ain.MulL.src, HRmRead);
@@ -1657,14 +1650,13 @@
mapRegs_AMD64AMode(m, i->Ain.Alu64M.dst);
return;
case Ain_Sh64:
- mapRegs_AMD64RM(m, i->Ain.Sh64.dst);
+ mapReg(m, &i->Ain.Sh64.dst);
return;
case Ain_Test64:
- mapRegs_AMD64RI(m, i->Ain.Test64.src);
- mapRegs_AMD64RM(m, i->Ain.Test64.dst);
+ mapReg(m, &i->Ain.Test64.dst);
return;
case Ain_Unary64:
- mapRegs_AMD64RM(m, i->Ain.Unary64.dst);
+ mapReg(m, &i->Ain.Unary64.dst);
return;
case Ain_MulL:
mapRegs_AMD64RM(m, i->Ain.MulL.src);
@@ -1827,14 +1819,7 @@
*dst = i->Ain.Alu64R.dst;
return True;
}
-//.. /* Moves between FP regs */
-//.. if (i->tag == Xin_FpUnary) {
-//.. if (i->Xin.FpUnary.op != Xfp_MOV)
-//.. return False;
-//.. *src = i->Xin.FpUnary.src;
-//.. *dst = i->Xin.FpUnary.dst;
-//.. return True;
-//.. }
+ /* Moves between vector regs */
if (i->tag == Ain_SseReRg) {
if (i->Ain.SseReRg.op != Asse_MOV)
return False;
@@ -1860,8 +1845,6 @@
switch (hregClass(rreg)) {
case HRcInt64:
return AMD64Instr_Alu64M ( Aalu_MOV, AMD64RI_Reg(rreg), am );
- //case HRcFlt64:
- // return AMD64Instr_FpLdSt ( False/*store*/, 8, rreg, am );
case HRcVec128:
return AMD64Instr_SseLdSt ( False/*store*/, 16, rreg, am );
default:
@@ -1879,8 +1862,6 @@
switch (hregClass(rreg)) {
case HRcInt64:
return AMD64Instr_Alu64R ( Aalu_MOV, AMD64RMI_Mem(am), rreg );
- //case HRcFlt64:
- // return AMD64Instr_FpLdSt ( True/*load*/, 8, rreg, am );
case HRcVec128:
return AMD64Instr_SseLdSt ( True/*load*/, 16, rreg, am );
default:
@@ -1925,19 +1906,6 @@
return toUChar(n);
}

-
-
-//.. static UInt fregNo ( HReg r )
-//.. {
-//.. UInt n;
-//.. vassert(hregClass(r) == HRcFlt64);
-//.. vassert(!hregIsVirtual(r));
-//.. n = hregNumber(r);
-//.. vassert(n <= 5);
-//.. return n;
-//.. }
-
-
/* Given an xmm (128bit V-class) register number, produce the
equivalent numbered register in 64-bit I-class. This is a bit of
fakery which facilitates using functions that work on integer
@@ -2455,91 +2423,64 @@
default: goto bad;
}
if (i->Ain.Sh64.src == 0) {
- *p++ = rexAMode_R(fake(0),
- i->Ain.Sh64.dst->Arm.Reg.reg);
+ *p++ = rexAMode_R(fake(0), i->Ain.Sh64.dst);
*p++ = toUChar(opc_cl);
- switch (i->Ain.Sh64.dst->tag) {
- case Arm_Reg:
- p = doAMode_R(p, fake(subopc),
- i->Ain.Sh64.dst->Arm.Reg.reg);
- goto done;
- default:
- goto bad;
- }
+ p = doAMode_R(p, fake(subopc), i->Ain.Sh64.dst);
+ goto done;
} else {
- *p++ = rexAMode_R(fake(0), i->Ain.Sh64.dst->Arm.Reg.reg);
+ *p++ = rexAMode_R(fake(0), i->Ain.Sh64.dst);
*p++ = toUChar(opc_imm);
- switch (i->Ain.Sh64.dst->tag) {
- case Arm_Reg:
- p = doAMode_R(p, fake(subopc),
- i->Ain.Sh64.dst->Arm.Reg.reg);
- *p++ = (UChar)(i->Ain.Sh64.src);
- goto done;
- default:
- goto bad;
- }
+ p = doAMode_R(p, fake(subopc), i->Ain.Sh64.dst);
+ *p++ = (UChar)(i->Ain.Sh64.src);
+ goto done;
}
break;

case Ain_Test64:
- if (i->Ain.Test64.src->tag == Ari_Imm
- && i->Ain.Test64.dst->tag == Arm_Reg) {
- /* testq sign-extend($imm32), %reg */
- *p++ = rexAMode_R(fake(0), i->Ain.Test64.dst->Arm.Reg.reg);
+ /* testq sign-extend($imm32), %reg */
+ *p++ = rexAMode_R(fake(0), i->Ain.Test64.dst);
+ *p++ = 0xF7;
+ p = doAMode_R(p, fake(0), i->Ain.Test64.dst);
+ p = emit32(p, i->Ain.Test64.imm32);
+ goto done;
+
+ case Ain_Unary64:
+ if (i->Ain.Unary64.op == Aun_NOT) {
+ *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst);
*p++ = 0xF7;
- p = doAMode_R(p, fake(0), i->Ain.Test64.dst->Arm.Reg.reg);
- p = emit32(p, i->Ain.Test64.src->Ari.Imm.imm32);
+ p = doAMode_R(p, fake(2), i->Ain.Unary64.dst);
goto done;
}
+ if (i->Ain.Unary64.op == Aun_NEG) {
+ *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst);
+ *p++ = 0xF7;
+ p = doAMode_R(p, fake(3), i->Ain.Unary64.dst);
+ goto done;
+ }
break;

- case Ain_Unary64:
- if (i->Ain.Unary64.op == Aun_NOT) {
- if (i->Ain.Unary64.dst->tag == Arm_Reg) {
- *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst->Arm.Reg.reg);
+ case Ain_MulL:
+ subopc = i->Ain.MulL.syned ? 5 : 4;
+ switch (i->Ain.MulL.src->tag) {
+ case Arm_Mem:
+ *p++ = rexAMode_M( fake(0),
+ i->Ain.MulL.src->Arm.Mem.am);
*p++ = 0xF7;
- p = doAMode_R(p, fake(2), i->Ain.Unary64.dst->Arm.Reg.reg);
+ p = doAMode_M(p, fake(subopc),
+ i->Ain.MulL.src->Arm.Mem.am);
goto done;
- } else {
- goto bad;
- }
- }
- if (i->Ain.Unary64.op == Aun_NEG) {
- if (i->Ain.Unary64.dst->tag == Arm_Reg) {
- *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst->Arm.Reg.reg);
+ case Arm_Reg:
+ *p++ = rexAMode_R(fake(0),
+ i->Ain.MulL.src->Arm.Reg.reg);
*p++ = 0xF7;
- p = doAMode_R(p, fake(3), i->Ain.Unary64.dst->Arm.Reg.reg);
+ p = doAMode_R(p, fake(subopc),
+ i->Ain.MulL.src->Arm.Reg.reg);
goto done;
- } else {
+ default:
goto bad;
- }
}
break;

- case Ain_MulL:
- subopc = i->Ain.MulL.syned ? 5 : 4;
- if (i->Ain.MulL.sz == 8) {
- switch (i->Ain.MulL.src->tag) {
- case Arm_Mem:
- *p++ = rexAMode_M( fake(0),
- i->Ain.MulL.src->Arm.Mem.am);
- *p++ = 0xF7;
- p = doAMode_M(p, fake(subopc),
- i->Ain.MulL.src->Arm.Mem.am);
- goto done;
- case Arm_Reg:
- *p++ = rexAMode_R(fake(0),
- i->Ain.MulL.src->Arm.Reg.reg);
- *p++ = 0xF7;
- p = doAMode_R(p, fake(subopc),
- i->Ain.MulL.src->Arm.Reg.reg);
- goto done;
- default:
- goto bad;
- }
- }
- break;
-
case Ain_Div:
subopc = i->Ain.Div.syned ? 7 : 6;
if (i->Ain.Div.sz == 4) {
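
With the reg-or-mem generality gone, the Ain_Test64 case above emits a
single fixed instruction shape. As an illustrative worked example (not
part of the patch), the bytes for %rax as the destination would be:

   /* testq $imm32, %rax  =>  48 F7 C0 <imm32>
      i.e. REX.W prefix, opcode 0xF7, ModRM with subopcode /0 and
      rm = %rax, then the 32-bit immediate, which the CPU
      sign-extends to 64 bits.  %r8-%r15 additionally set REX.B. */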
Modified: trunk/priv/host-amd64/hdefs.h
===================================================================
--- trunk/priv/host-amd64/hdefs.h 2005-05-11 10:05:04 UTC (rev 1184)
+++ trunk/priv/host-amd64/hdefs.h 2005-05-11 15:37:50 UTC (rev 1185)
@@ -429,22 +429,21 @@
struct {
AMD64ShiftOp op;
UInt src; /* shift amount, or 0 means %cl */
- AMD64RM* dst;
+ HReg dst;
} Sh64;
struct {
- AMD64RI* src;
- AMD64RM* dst;
+ UInt imm32;
+ HReg dst;
} Test64;
/* Not and Neg */
struct {
AMD64UnaryOp op;
- AMD64RM* dst;
+ HReg dst;
} Unary64;
- /* DX:AX = AX *s/u r/m16, or EDX:EAX = EAX *s/u r/m32,
- or RDX:RAX = RAX *s/u r/m64 */
+ /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
+ r/m64 */
struct {
Bool syned;
- Int sz; /* 2, 4 or 8 only */
AMD64RM* src;
} MulL;
/* amd64 div/idiv instruction. Modifies RDX and RAX and
@@ -651,10 +650,10 @@
extern AMD64Instr* AMD64Instr_Imm64 ( ULong imm64, HReg dst );
extern AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp, AMD64RI*, AMD64AMode* );
-extern AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, AMD64RM* dst );
-extern AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp, UInt, AMD64RM* );
-extern AMD64Instr* AMD64Instr_Test64 ( AMD64RI* src, AMD64RM* dst );
-extern AMD64Instr* AMD64Instr_MulL ( Bool syned, Int sz, AMD64RM* );
+extern AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst );
+extern AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp, UInt, HReg );
+extern AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst );
+extern AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* );
extern AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* );
//.. extern AMD64Instr* AMD64Instr_Sh3232 ( AMD64ShiftOp, UInt amt, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_Push ( AMD64RMI* );
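
With the sz field gone, MulL always denotes the full 64 x 64 -> 128 bit
widening multiply, and callers collect the result from RDX:RAX, as in
this sequence from the isel.c hunk below:

   addInstr(env, mk_iMOVsd_RR(rRight, hregAMD64_RAX()));
   addInstr(env, AMD64Instr_MulL(syned, rmLeft));
   /* Result is now in RDX:RAX. */
   addInstr(env, mk_iMOVsd_RR(hregAMD64_RDX(), tHi));
   addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), tLo));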
Modified: trunk/priv/host-amd64/isel.c
===================================================================
--- trunk/priv/host-amd64/isel.c 2005-05-11 10:05:04 UTC (rev 1184)
+++ trunk/priv/host-amd64/isel.c 2005-05-11 15:37:50 UTC (rev 1185)
@@ -659,7 +659,7 @@
addInstr(env, AMD64Instr_Alu64R(Aalu_MOV, AMD64RMI_Imm(3), reg));
addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
iselIntExpr_RMI(env, mode), reg));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 13, AMD64RM_Reg(reg)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 13, reg));
addInstr(env, AMD64Instr_Alu64R(
Aalu_OR, AMD64RMI_Imm(DEFAULT_MXCSR), reg));
addInstr(env, AMD64Instr_Push(AMD64RMI_Reg(reg)));
@@ -689,7 +689,7 @@
*/
addInstr(env, mk_iMOVsd_RR(rrm, rrm2));
addInstr(env, AMD64Instr_Alu64R(Aalu_AND, AMD64RMI_Imm(3), rrm2));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 10, AMD64RM_Reg(rrm2)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 10, rrm2));
addInstr(env, AMD64Instr_Alu64R(Aalu_OR,
AMD64RMI_Imm(DEFAULT_FPUCW), rrm2));
addInstr(env, AMD64Instr_Alu64M(Aalu_MOV,
@@ -893,8 +893,8 @@
//.. addInstr(env, X86Instr_Sh32(Xsh_SAR, 16, X86RM_Reg(dst)));
//.. break;
case Iop_Sar32:
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(dst)));
- addInstr(env, AMD64Instr_Sh64(Ash_SAR, 32, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SAR, 32, dst));
break;
default:
ppIROp(e->Iex.Binop.op);
@@ -911,15 +911,12 @@
vassert(nshift >= 0);
if (nshift > 0)
/* Can't allow nshift==0 since that means %cl */
- addInstr(env, AMD64Instr_Sh64(
- shOp,
- nshift,
- AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(shOp, nshift, dst));
} else {
/* General case; we have to force the amount into %cl. */
HReg regR = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(regR,hregAMD64_RCX()));
- addInstr(env, AMD64Instr_Sh64(shOp, 0/* %cl */, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(shOp, 0/* %cl */, dst));
}
return dst;
}
@@ -1076,11 +1073,11 @@
HReg left64 = iselIntExpr_R(env, e->Iex.Binop.arg1);
addInstr(env, mk_iMOVsd_RR(left64, rdx));
addInstr(env, mk_iMOVsd_RR(left64, rax));
- addInstr(env, AMD64Instr_Sh64(Ash_SHR, 32, AMD64RM_Reg(rdx)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHR, 32, rdx));
addInstr(env, AMD64Instr_Div(syned, 4, rmRight));
addInstr(env, AMD64Instr_MovZLQ(rdx,rdx));
addInstr(env, AMD64Instr_MovZLQ(rax,rax));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(rdx)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, rdx));
addInstr(env, mk_iMOVsd_RR(rax, dst));
addInstr(env, AMD64Instr_Alu64R(Aalu_OR, AMD64RMI_Reg(rdx), dst));
return dst;
@@ -1093,7 +1090,7 @@
HReg lo32s = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(hi32s, hi32));
addInstr(env, mk_iMOVsd_RR(lo32s, lo32));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(hi32)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, hi32));
addInstr(env, AMD64Instr_MovZLQ(lo32,lo32));
addInstr(env, AMD64Instr_Alu64R(
Aalu_OR, AMD64RMI_Reg(lo32), hi32));
@@ -1107,7 +1104,7 @@
HReg lo16s = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(hi16s, hi16));
addInstr(env, mk_iMOVsd_RR(lo16s, lo16));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 16, AMD64RM_Reg(hi16)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 16, hi16));
addInstr(env, AMD64Instr_Alu64R(
Aalu_AND, AMD64RMI_Imm(0xFFFF), lo16));
addInstr(env, AMD64Instr_Alu64R(
@@ -1122,7 +1119,7 @@
HReg lo8s = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(hi8s, hi8));
addInstr(env, mk_iMOVsd_RR(lo8s, lo8));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 8, AMD64RM_Reg(hi8)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 8, hi8));
addInstr(env, AMD64Instr_Alu64R(
Aalu_AND, AMD64RMI_Imm(0xFF), lo8));
addInstr(env, AMD64Instr_Alu64R(
@@ -1154,10 +1151,10 @@

addInstr(env, mk_iMOVsd_RR(a32s, a32));
addInstr(env, mk_iMOVsd_RR(b32s, b32));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, AMD64RM_Reg(a32)));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, AMD64RM_Reg(b32)));
- addInstr(env, AMD64Instr_Sh64(shr_op, shift, AMD64RM_Reg(a32)));
- addInstr(env, AMD64Instr_Sh64(shr_op, shift, AMD64RM_Reg(b32)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, a32));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, b32));
+ addInstr(env, AMD64Instr_Sh64(shr_op, shift, a32));
+ addInstr(env, AMD64Instr_Sh64(shr_op, shift, b32));
addInstr(env, AMD64Instr_Alu64R(Aalu_MUL, AMD64RMI_Reg(a32), b32));
return b32;
}
@@ -1259,8 +1256,8 @@
HReg dst = newVRegI(env);
HReg src = iselIntExpr_R(env, expr8);
addInstr(env, mk_iMOVsd_RR(src,dst) );
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 56, AMD64RM_Reg(dst)));
- addInstr(env, AMD64Instr_Sh64(Ash_SHR, 56, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 56, dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHR, 56, dst));
return dst;
}

@@ -1302,8 +1299,8 @@
HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
UInt amt = 32;
addInstr(env, mk_iMOVsd_RR(src,dst) );
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, AMD64RM_Reg(dst)));
- addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, dst));
return dst;
}
case Iop_128HIto64: {
@@ -1342,8 +1339,8 @@
|| e->Iex.Unop.op==Iop_16Sto64 );
UInt amt = srcIs16 ? 48 : 56;
addInstr(env, mk_iMOVsd_RR(src,dst) );
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, AMD64RM_Reg(dst)));
- addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, dst));
return dst;
}
case Iop_Not8:
@@ -1353,7 +1350,7 @@
HReg dst = newVRegI(env);
HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
addInstr(env, mk_iMOVsd_RR(src,dst) );
- addInstr(env, AMD64Instr_Unary64(Aun_NOT,AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Unary64(Aun_NOT,dst));
return dst;
}
//.. case Iop_64HIto32: {
@@ -1378,8 +1375,7 @@
default: vassert(0);
}
addInstr(env, mk_iMOVsd_RR(src,dst) );
- addInstr(env, AMD64Instr_Sh64(
- Ash_SHR, shift, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHR, shift, dst));
return dst;
}
case Iop_1Uto64:
@@ -1398,8 +1394,8 @@
HReg dst = newVRegI(env);
AMD64CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
addInstr(env, AMD64Instr_Set64(cond,dst));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 63, AMD64RM_Reg(dst)));
- addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 63, dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, dst));
return dst;
}
case Iop_Ctz64: {
@@ -1430,7 +1426,7 @@
HReg dst = newVRegI(env);
HReg reg = iselIntExpr_R(env, e->Iex.Unop.arg);
addInstr(env, mk_iMOVsd_RR(reg,dst));
- addInstr(env, AMD64Instr_Unary64(Aun_NEG,AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Unary64(Aun_NEG,dst));
return dst;
}

@@ -1597,7 +1593,7 @@
HReg dst = newVRegI(env);
addInstr(env, mk_iMOVsd_RR(rX,dst));
r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF), AMD64RM_Reg(r8)));
+ addInstr(env, AMD64Instr_Test64(0xFF, r8));
addInstr(env, AMD64Instr_CMov64(Acc_Z,r0,dst));
return dst;
}
@@ -1958,8 +1954,8 @@

/* 64to1 */
if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_64to1) {
- AMD64RM* rm = iselIntExpr_RM(env, e->Iex.Unop.arg);
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(1),rm));
+ HReg reg = iselIntExpr_R(env, e->Iex.Unop.arg);
+ addInstr(env, AMD64Instr_Test64(1,reg));
return Acc_NZ;
}

@@ -1969,7 +1965,7 @@
if (e->tag == Iex_Unop
&& e->Iex.Unop.op == Iop_CmpNEZ8) {
HReg r = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF),AMD64RM_Reg(r)));
+ addInstr(env, AMD64Instr_Test64(0xFF,r));
return Acc_NZ;
}

@@ -1979,7 +1975,7 @@
if (e->tag == Iex_Unop
&& e->Iex.Unop.op == Iop_CmpNEZ16) {
HReg r = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFFFF),AMD64RM_Reg(r)));
+ addInstr(env, AMD64Instr_Test64(0xFFFF,r));
return Acc_NZ;
}

@@ -2052,7 +2048,7 @@
HReg r = newVRegI(env);
addInstr(env, mk_iMOVsd_RR(r1,r));
addInstr(env, AMD64Instr_Alu64R(Aalu_XOR,rmi2,r));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(r)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, r));
switch (e->Iex.Binop.op) {
case Iop_CmpEQ32: return Acc_Z;
case Iop_CmpNE32: return Acc_NZ;
@@ -2265,7 +2261,7 @@
AMD64RM* rmLeft = iselIntExpr_RM(env, e->Iex.Binop.arg1);
HReg rRight = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(rRight, hregAMD64_RAX()));
- addInstr(env, AMD64Instr_MulL(syned, 8, rmLeft));
+ addInstr(env, AMD64Instr_MulL(syned, rmLeft));
/* Result is now in RDX:RAX. Tell the caller. */
addInstr(env, mk_iMOVsd_RR(hregAMD64_RDX(), tHi));
addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), tLo));
@@ -2995,7 +2991,7 @@
r0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
dst = newVRegV(env);
addInstr(env, mk_vMOVsd_RR(rX,dst));
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF), AMD64RM_Reg(r8)));
+ addInstr(env, AMD64Instr_Test64(0xFF, r8));
addInstr(env, AMD64Instr_SseCMov(Acc_Z,r0,dst));
return dst;
}
@@ -3475,7 +3471,7 @@
HReg r0 = iselVecExpr(env, e->Iex.Mux0X.expr0);
HReg dst = newVRegV(env);
addInstr(env, mk_vMOVsd_RR(rX,dst));
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF), AMD64RM_Reg(r8)));
+ addInstr(env, AMD64Instr_Test64(0xFF, r8));
addInstr(env, AMD64Instr_SseCMov(Acc_Z,r0,dst));
return dst;
}
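
All surviving Test64 call sites follow the same pattern: test a small
immediate against a register, then act on the Z flag, as in the Mux0X
case above:

   addInstr(env, AMD64Instr_Test64(0xFF, r8));
   addInstr(env, AMD64Instr_SseCMov(Acc_Z,r0,dst));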