|
From: <sv...@va...> - 2005-04-23 01:15:51
|
Author: sewardj
Date: 2005-04-23 02:15:47 +0100 (Sat, 23 Apr 2005)
New Revision: 1137
Modified:
trunk/priv/host-amd64/isel.c
Log:
More code-generation cases.
Modified: trunk/priv/host-amd64/isel.c
===================================================================
--- trunk/priv/host-amd64/isel.c 2005-04-22 21:21:09 UTC (rev 1136)
+++ trunk/priv/host-amd64/isel.c 2005-04-23 01:15:47 UTC (rev 1137)
@@ -1283,8 +1283,8 @@
return dst;
}
//.. case Iop_1Sto8:
-//.. case Iop_1Sto16:
-//.. case Iop_1Sto32:
+ case Iop_1Sto16:
+ case Iop_1Sto32:
case Iop_1Sto64: {
/* could do better than this, but for now ... */
HReg dst = newVRegI(env);
@@ -1316,18 +1316,15 @@
return dst;
}
 
-//.. case Iop_128to32: {
-//.. HReg dst = newVRegI(env);
-//.. HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
-//.. X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
-//.. sub_from_esp(env, 16);
-//.. addInstr(env, X86Instr_SseLdSt(False/*store*/, vec, esp0));
-//.. addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(esp0), dst ));
-//.. add_to_esp(env, 16);
-//.. return dst;
-//.. }
+ case Iop_V128to32: {
+ HReg dst = newVRegI(env);
+ HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
+ AMD64AMode* rsp_m16 = AMD64AMode_IR(-16, hregAMD64_RSP());
+ addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, vec, rsp_m16));
+ addInstr(env, AMD64Instr_LoadEX(4, False/*z-widen*/, rsp_m16, dst));
+ return dst;
+ }
 
-
 /* V128{HI}to64 */
 case Iop_V128HIto64:
 case Iop_V128to64: {
@@ -1816,6 +1813,24 @@
}
}
 
+ /* CmpEQ32 / CmpNE32 */
+ if (e->tag == Iex_Binop 
+ && (e->Iex.Binop.op == Iop_CmpEQ32
+ || e->Iex.Binop.op == Iop_CmpNE32)) {
+ HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ AMD64RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+ HReg r = newVRegI(env);
+ addInstr(env, mk_iMOVsd_RR(r1,r));
+ addInstr(env, AMD64Instr_Alu64R(Aalu_XOR,rmi2,r));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(r)));
+ switch (e->Iex.Binop.op) {
+ case Iop_CmpEQ32: return Acc_Z;
+ case Iop_CmpNE32: return Acc_NZ;
+ default: vpanic("iselCondCode(amd64): CmpXX8");
+ }
+ }
+
+
//.. /* CmpEQ16 / CmpNE16 */
//.. if (e->tag == Iex_Binop 
//.. && (e->Iex.Binop.op == Iop_CmpEQ16
@@ -3172,32 +3187,28 @@
if (e->tag =3D=3D Iex_Binop) {
switch (e->Iex.Binop.op) {
 
-//.. case Iop_Set128lo32: {
-//.. HReg dst = newVRegV(env);
-//.. HReg srcV = iselVecExpr(env, e->Iex.Binop.arg1);
-//.. HReg srcI = iselIntExpr_R(env, e->Iex.Binop.arg2);
-//.. X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
-//.. sub_from_esp(env, 16);
-//.. addInstr(env, X86Instr_SseLdSt(False/*store*/, srcV, esp0));
-//.. addInstr(env, X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(srcI), esp0));
-//.. addInstr(env, X86Instr_SseLdSt(True/*load*/, dst, esp0));
-//.. add_to_esp(env, 16);
-//.. return dst;
-//.. }
-
case Iop_SetV128lo64: {
HReg dst = newVRegV(env);
HReg srcV = iselVecExpr(env, e->Iex.Binop.arg1);
HReg srcI = iselIntExpr_R(env, e->Iex.Binop.arg2);
- AMD64AMode* rsp0 = AMD64AMode_IR(0, hregAMD64_RSP());
- sub_from_rsp(env, 16);
- addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, srcV, rsp0));
- addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, AMD64RI_Reg(srcI), rsp0));
- addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, dst, rsp0));
- add_to_rsp(env, 16);
+ AMD64AMode* rsp_m16 = AMD64AMode_IR(-16, hregAMD64_RSP());
+ addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, srcV, rsp_m16));
+ addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, AMD64RI_Reg(srcI), rsp_m16));
+ addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, dst, rsp_m16));
return dst;
}
 
+ case Iop_SetV128lo32: {
+ HReg dst = newVRegV(env);
+ HReg srcV = iselVecExpr(env, e->Iex.Binop.arg1);
+ HReg srcI = iselIntExpr_R(env, e->Iex.Binop.arg2);
+ AMD64AMode* rsp_m16 = AMD64AMode_IR(-16, hregAMD64_RSP());
+ addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, srcV, rsp_m16));
+ addInstr(env, AMD64Instr_Store(4, srcI, rsp_m16));
+ addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, dst, rsp_m16));
+ return dst;
+ }
+
case Iop_64HLtoV128: {
AMD64AMode* rsp = AMD64AMode_IR(0, hregAMD64_RSP());
HReg dst = newVRegV(env);
@@ -3613,7 +3624,8 @@
return;
 
retty = typeOfIRTemp(env->type_env, d->tmp);
- if (retty == Ity_I64 || retty == Ity_I32) {
+ if (retty == Ity_I64 || retty == Ity_I32 
+ || retty == Ity_I16 || retty == Ity_I8) {
/* The returned value is in %rax. Park it in the register
associated with tmp. */
HReg dst =3D lookupIRTemp(env, d->tmp);
|