|
From: <sv...@va...> - 2005-12-16 13:40:23
|
Author: cerion
Date: 2005-12-16 13:40:18 +0000 (Fri, 16 Dec 2005)
New Revision: 1498
Log:
Fixed up front and backend for 32bit mul,div,cmp,shift in mode64
Backend:
- separated shifts from other alu ops
- gave {shift, mul, div, cmp} ops a bool to indicate 32|64bit insn
- fixed and implemented more mode64 cases
Also improved some IR by moving imm's to right arg of binop - backend assumes this.
All integer ppc32 insns now pass switchback tests in 64bit mode.
(ppc64-only insns not yet fully tested)
Modified:
trunk/priv/guest-ppc32/toIR.c
trunk/priv/host-ppc32/hdefs.c
trunk/priv/host-ppc32/hdefs.h
trunk/priv/host-ppc32/isel.c
Modified: trunk/priv/guest-ppc32/toIR.c
===================================================================
--- trunk/priv/guest-ppc32/toIR.c 2005-12-16 01:06:42 UTC (rev 1497)
+++ trunk/priv/guest-ppc32/toIR.c 2005-12-16 13:40:18 UTC (rev 1498)
@@ -749,13 +749,18 @@
binop(Iop_ShrV128, expr_vA, mkU8(16)), \
binop(Iop_ShrV128, expr_vB, mkU8(16)))
=20
-/* */
-static IRExpr* /* :: Ity_I64 */ mkExtendLoS32 ( IRExpr* src )
+static IRExpr* /* :: Ity_I32/64 */ mk64lo32Sto64 ( IRExpr* src )
{
vassert(typeOfIRExpr(irbb->tyenv, src) =3D=3D Ity_I64);
return unop(Iop_32Sto64, unop(Iop_64to32, src));
}
=20
+static IRExpr* /* :: Ity_I32/64 */ mk64lo32Uto64 ( IRExpr* src )
+{
+ vassert(typeOfIRExpr(irbb->tyenv, src) =3D=3D Ity_I64);
+ return unop(Iop_32Uto64, unop(Iop_64to32, src));
+}
+
static IROp mkSzOp ( IRType ty, IROp op8 )
{
Int adj;
@@ -1551,28 +1556,28 @@
static void putXER_SO ( IRExpr* e )
{
vassert(typeOfIRExpr(irbb->tyenv, e) =3D=3D Ity_I8);
- IRExpr* so =3D binop(Iop_And8, mkU8(1), e);
+ IRExpr* so =3D binop(Iop_And8, e, mkU8(1));
stmt( IRStmt_Put( (mode64 ? OFFB64_XER_SO : OFFB32_XER_SO), so) );
}
=20
static void putXER_OV ( IRExpr* e )
{
vassert(typeOfIRExpr(irbb->tyenv, e) =3D=3D Ity_I8);
- IRExpr* ov =3D binop(Iop_And8, mkU8(1), e);
+ IRExpr* ov =3D binop(Iop_And8, e, mkU8(1));
stmt( IRStmt_Put( (mode64 ? OFFB64_XER_OV : OFFB32_XER_OV), ov) );
}
=20
static void putXER_CA ( IRExpr* e )
{
vassert(typeOfIRExpr(irbb->tyenv, e) =3D=3D Ity_I8);
- IRExpr* ca =3D binop(Iop_And8, mkU8(1), e);
+ IRExpr* ca =3D binop(Iop_And8, e, mkU8(1));
stmt( IRStmt_Put( (mode64 ? OFFB64_XER_CA : OFFB32_XER_CA), ca) );
}
=20
static void putXER_BC ( IRExpr* e )
{
vassert(typeOfIRExpr(irbb->tyenv, e) =3D=3D Ity_I8);
- IRExpr* bc =3D binop(Iop_And8, mkU8(0x7F), e);
+ IRExpr* bc =3D binop(Iop_And8, e, mkU8(0x7F));
stmt( IRStmt_Put( (mode64 ? OFFB64_XER_BC : OFFB32_XER_BC), bc) );
}
=20
@@ -1792,14 +1797,13 @@
case /* 4 */ PPC32G_FLAG_OP_MULLW: {
/* OV true if result can't be represented in 64 bits
i.e sHi !=3D sign extension of sLo */
- IRTemp t128 =3D newTemp(Ity_I128);
- assign( t128, binop(Iop_MullS64, argL, argR) );
xer_ov=20
- =3D binop( Iop_CmpNE64,
- unop(Iop_128HIto64, mkexpr(t128)),
- binop( Iop_Sar64,=20
- unop(Iop_128to64, mkexpr(t128)),=20
- mkU8(63)) );
+ =3D binop( Iop_CmpNE32,
+ unop(Iop_64HIto32, res),
+ binop( Iop_Sar32,=20
+ unop(Iop_64to32, res),=20
+ mkU8(31))
+ );
break;
}
=20
@@ -2647,13 +2651,11 @@
overflow of the low-order 32bit result
CR0[LT|GT|EQ] are undefined if flag_rC && mode64
*/
- IRExpr* dividend =3D unop(Iop_32Sto64,
- unop(Iop_64to32, mkexpr(rA)));
- IRExpr* divisor =3D unop(Iop_32Sto64,
- unop(Iop_64to32, mkexpr(rB)));
- assign( rD, unop(Iop_32Uto64,
- unop(Iop_64to32,
- binop(Iop_DivS64, dividend, divisor)))=
);
+ /* rD[hi32] are undefined: setting them to sign of lo32
+ - makes set_CR0 happy */
+ IRExpr* dividend =3D mk64lo32Sto64( mkexpr(rA) );
+ IRExpr* divisor =3D mk64lo32Sto64( mkexpr(rB) );
+ assign( rD, mk64lo32Uto64( binop(Iop_DivS64, dividend, divis=
or) ) );
if (flag_OE) {
set_XER_OV( ty, PPC32G_FLAG_OP_DIVW,=20
mkexpr(rD), dividend, divisor );
@@ -2681,13 +2683,9 @@
overflow of the low-order 32bit result
CR0[LT|GT|EQ] are undefined if flag_rC && mode64
*/
- IRExpr* dividend =3D unop(Iop_32Uto64,
- unop(Iop_64to32, mkexpr(rA)));
- IRExpr* divisor =3D unop(Iop_32Uto64,
- unop(Iop_64to32, mkexpr(rB)));
- assign( rD, unop(Iop_32Uto64,
- unop(Iop_64to32,
- binop(Iop_DivU64, dividend, divisor)))=
);
+ IRExpr* dividend =3D mk64lo32Uto64( mkexpr(rA) );
+ IRExpr* divisor =3D mk64lo32Uto64( mkexpr(rB) );
+ assign( rD, mk64lo32Uto64( binop(Iop_DivU64, dividend, divis=
or) ) );
if (flag_OE) {
set_XER_OV( ty, PPC32G_FLAG_OP_DIVWU,=20
mkexpr(rD), dividend, divisor );
@@ -2710,9 +2708,13 @@
DIP("mulhw%s r%u,r%u,r%u\n", flag_rC ? "." : "",
rD_addr, rA_addr, rB_addr);
if (mode64) {
- assign( rD, unop(Iop_128HIto64,
- binop(Iop_MullS64,
- mkexpr(rA), mkexpr(rB))) );
+ /* rD[hi32] are undefined: setting them to sign of lo32
+ - makes set_CR0 happy */
+ assign( rD, binop(Iop_Sar64,
+ binop(Iop_Mul64,
+ mk64lo32Sto64( mkexpr(rA) ),
+ mk64lo32Sto64( mkexpr(rB) )),
+ mkU8(32)) );
} else {
assign( rD, unop(Iop_64HIto32,
binop(Iop_MullS32,
@@ -2728,9 +2730,13 @@
DIP("mulhwu%s r%u,r%u,r%u\n", flag_rC ? "." : "",
rD_addr, rA_addr, rB_addr);
if (mode64) {
- assign( rD, unop(Iop_128HIto64,
- binop(Iop_MullU64,
- mkexpr(rA), mkexpr(rB))) );
+ /* rD[hi32] are undefined: setting them to sign of lo32
+ - makes set_CR0 happy */
+ assign( rD, binop(Iop_Sar64,
+ binop(Iop_Mul64,
+ mk64lo32Uto64( mkexpr(rA) ),
+ mk64lo32Uto64( mkexpr(rB) ) ),
+ mkU8(32)) );
} else {
assign( rD, unop(Iop_64HIto32,=20
binop(Iop_MullU32,
@@ -2743,18 +2749,25 @@
flag_OE ? "o" : "", flag_rC ? "." : "",
rD_addr, rA_addr, rB_addr);
if (mode64) {
- assign( rD, unop(Iop_128to64,
- binop(Iop_MullU64,
- mkexpr(rA), mkexpr(rB))) );
+ /* rD[hi32] are undefined: setting them to sign of lo32
+ - set_XER_OV() and set_CR0() depend on this */
+ IRExpr *a =3D unop(Iop_64to32, mkexpr(rA) );
+ IRExpr *b =3D unop(Iop_64to32, mkexpr(rB) );
+ assign( rD, binop(Iop_MullS32, a, b) );
+ if (flag_OE) {
+ set_XER_OV( ty, PPC32G_FLAG_OP_MULLW,=20
+ mkexpr(rD),
+ unop(Iop_32Uto64, a), unop(Iop_32Uto64, b) );
+ }
} else {
assign( rD, unop(Iop_64to32,
binop(Iop_MullU32,
mkexpr(rA), mkexpr(rB))) );
+ if (flag_OE) {
+ set_XER_OV( ty, PPC32G_FLAG_OP_MULLW,=20
+ mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+ }
}
- if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_MULLW,=20
- mkexpr(rD), mkexpr(rA), mkexpr(rB) );
- }
break;
=20
case 0x068: // neg (Negate, PPC32 p493)
@@ -2999,14 +3012,10 @@
UInt opc2 =3D ifieldOPClo10(theInstr);
UChar b0 =3D ifieldBIT0(theInstr);
=20
- IRType ty =3D mode64 ? Ity_I64 : Ity_I32;
- IRTemp rA =3D newTemp(ty);
- IRTemp rB =3D newTemp(ty);
- IRExpr *a, *b;
+ IRType ty =3D mode64 ? Ity_I64 : Ity_I32;
+ IRExpr *a =3D getIReg(rA_addr);
+ IRExpr *b;
=20
- assign(rA, getIReg(rA_addr));
- a =3D mkexpr(rA);
- =20
if (!mode64 && flag_L=3D=3D1) { // L=3D=3D1 invalid for 32 bit.
vex_printf("dis_int_cmp(PPC32)(flag_L)\n");
return False;
@@ -3022,10 +3031,11 @@
DIP("cmpi cr%u,%u,r%u,%d\n", crfD, flag_L, rA_addr,
(Int)extend_s_16to32(uimm16));
b =3D mkSzExtendS16( ty, uimm16 );
- if (mode64) {
- if (flag_L =3D=3D 0) a =3D mkExtendLoS32( mkexpr(rA) );
+ if (flag_L =3D=3D 1) {
putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64S, a, b)));
} else {
+ a =3D mkSzNarrow32( ty, a );
+ b =3D mkSzNarrow32( ty, b );
putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32S, a, b)));
}
putCR0( crfD, getXER_SO() );
@@ -3034,10 +3044,11 @@
case 0x0A: // cmpli (Compare Logical Immediate, PPC32 p370)
DIP("cmpli cr%u,%u,r%u,0x%x\n", crfD, flag_L, rA_addr, uimm16);
b =3D mkSzImm( ty, uimm16 );
- if (mode64) {
- if (flag_L =3D=3D 0) a =3D mkExtendLoS32( mkexpr(rA) );
+ if (flag_L =3D=3D 1) {
putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64U, a, b)));
} else {
+ a =3D mkSzNarrow32( ty, a );
+ b =3D mkSzNarrow32( ty, b );
putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32U, a, b)));
}
putCR0( crfD, getXER_SO() );
@@ -3049,34 +3060,29 @@
vex_printf("dis_int_cmp(PPC32)(0x1F,b0)\n");
return False;
}
- assign(rB, getIReg(rB_addr));
- b =3D mkexpr(rB);
- if (mode64 && flag_L =3D=3D 0) {
- a =3D mkExtendLoS32( mkexpr(rA) );
- b =3D mkExtendLoS32( mkexpr(rB) );
- }
+ b =3D getIReg(rB_addr);
=20
switch (opc2) {
case 0x000: // cmp (Compare, PPC32 p367)
DIP("cmp cr%u,%u,r%u,r%u\n", crfD, flag_L, rA_addr, rB_addr);
- if (mode64) {
- putCR321( crfD, unop(Iop_64to8,
- binop(Iop_CmpORD64S, a, b)) );
+ if (flag_L =3D=3D 1) {
+ putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64S, a, b)));
} else {
- putCR321( crfD, unop(Iop_32to8,
- binop(Iop_CmpORD32S, a, b)) );
+ a =3D mkSzNarrow32( ty, a );
+ b =3D mkSzNarrow32( ty, b );
+ putCR321(crfD, unop(Iop_32to8,binop(Iop_CmpORD32S, a, b)));
}
putCR0( crfD, getXER_SO() );
break;
=20
case 0x020: // cmpl (Compare Logical, PPC32 p369)
DIP("cmpl cr%u,%u,r%u,r%u\n", crfD, flag_L, rA_addr, rB_addr);
- if (mode64) {
- putCR321( crfD, unop(Iop_64to8,
- binop(Iop_CmpORD64U, a, b)) );
+ if (flag_L =3D=3D 1) {
+ putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64U, a, b)));
} else {
- putCR321( crfD, unop(Iop_32to8,
- binop(Iop_CmpORD32U, a, b)) );
+ a =3D mkSzNarrow32( ty, a );
+ b =3D mkSzNarrow32( ty, b );
+ putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32U, a, b)));
}
putCR0( crfD, getXER_SO() );
break;
@@ -3196,8 +3202,8 @@
// Iop_Clz32 undefined for arg=3D=3D0, so deal with that case:
irx =3D binop(Iop_CmpNE32, lo32, mkU32(0));
assign(rA, IRExpr_Mux0X( unop(Iop_1Uto8, irx),
- mkU32(32),
- unop(Iop_Clz32, lo32) ));
+ mkSzImm(ty, 32),
+ mkSzWiden32(ty, unop(Iop_Clz32, lo32),=
False) ));
// TODO: alternatively: assign(rA, verbose_Clz32(rS));
break;
}
@@ -3348,6 +3354,7 @@
IRTemp rS =3D newTemp(ty);
IRTemp rA =3D newTemp(ty);
IRTemp rB =3D newTemp(ty);
+ IRTemp rot =3D newTemp(ty);
IRExpr *r;
UInt mask32;
ULong mask64;
@@ -3366,10 +3373,10 @@
mask64 =3D MASK64(31-MaskEnd, 31-MaskBeg);
r =3D ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) );
r =3D unop(Iop_32Uto64, r);
- r =3D binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32)));
+ assign( rot, binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32))) =
);
assign( rA,
binop(Iop_Or64,
- binop(Iop_And64, r, mkU64(mask64)),
+ binop(Iop_And64, mkexpr(rot), mkU64(mask64)),
binop(Iop_And64, getIReg(rA_addr), mkU64(~mask64))) );
}
else {
@@ -3398,8 +3405,8 @@
// rA =3D ((tmp32 || tmp32) & mask64)
r =3D ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) );
r =3D unop(Iop_32Uto64, r);
- r =3D binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32)));
- assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+ assign( rot, binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32))) =
);
+ assign( rA, binop(Iop_And64, mkexpr(rot), mkU64(mask64)) );
}
else {
if (MaskBeg =3D=3D 0 && sh_imm+MaskEnd =3D=3D 31) {
@@ -3435,14 +3442,16 @@
rA_addr, rS_addr, rB_addr, MaskBeg, MaskEnd);
if (mode64) {
mask64 =3D MASK64(31-MaskEnd, 31-MaskBeg);
- // tmp32 =3D (ROTL(rS_Lo32, rB[0-4])
- // rA =3D ((tmp32 || tmp32) & mask64)
+ /* weird insn alert!
+ tmp32 =3D (ROTL(rS_Lo32, rB[0-4])
+ rA =3D ((tmp32 || tmp32) & mask64)
+ */
// note, ROTL does the masking, so we don't do it here
r =3D ROTL( unop(Iop_64to32, mkexpr(rS)),
- unop(Iop_32to8, mkexpr(rB)) );
+ unop(Iop_64to8, mkexpr(rB)) );
r =3D unop(Iop_32Uto64, r);
- r =3D binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32)));
- assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+ assign(rot, binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32))));
+ assign( rA, binop(Iop_And64, mkexpr(rot), mkU64(mask64)) );
} else {
mask32 =3D MASK32(31-MaskEnd, 31-MaskBeg);
// rA =3D ROTL(rS, rB[0-4]) & mask
Modified: trunk/priv/host-ppc32/hdefs.c
===================================================================
--- trunk/priv/host-ppc32/hdefs.c 2005-12-16 01:06:42 UTC (rev 1497)
+++ trunk/priv/host-ppc32/hdefs.c 2005-12-16 13:40:18 UTC (rev 1498)
@@ -573,23 +573,29 @@
}
}
=20
-HChar* showPPC32AluOp ( PPC32AluOp op, Bool immR, Bool is32Bit ) {
+HChar* showPPC32AluOp ( PPC32AluOp op, Bool immR ) {
switch (op) {
case Palu_ADD: return immR ? "addi" : "add";
case Palu_SUB: return immR ? "subi" : "sub";
case Palu_AND: return immR ? "andi." : "and";
case Palu_OR: return immR ? "ori" : "or";
case Palu_XOR: return immR ? "xori" : "xor";
- case Palu_SHL: return is32Bit ? (immR ? "slwi" : "slw") :=20
- (immR ? "sldi" : "sld");
- case Palu_SHR: return is32Bit ? (immR ? "srwi" : "srw") :
- (immR ? "srdi" : "srd");
- case Palu_SAR: return is32Bit ? (immR ? "srawi" : "sraw") :
- (immR ? "sradi" : "srad");
default: vpanic("showPPC32AluOp");
}
}
=20
+HChar* showPPC32ShftOp ( PPC32ShftOp op, Bool immR, Bool sz32 ) {
+ switch (op) {
+ case Pshft_SHL: return sz32 ? (immR ? "slwi" : "slw") :=20
+ (immR ? "sldi" : "sld");
+ case Pshft_SHR: return sz32 ? (immR ? "srwi" : "srw") :
+ (immR ? "srdi" : "srd");
+ case Pshft_SAR: return sz32 ? (immR ? "srawi" : "sraw") :
+ (immR ? "sradi" : "srad");
+ default: vpanic("showPPC32ShftOp");
+ }
+}
+
HChar* showPPC32FpOp ( PPC32FpOp op ) {
switch (op) {
case Pfp_ADD: return "fadd";
@@ -719,6 +725,17 @@
i->Pin.Alu.srcR =3D srcR;
return i;
}
+PPC32Instr* PPC32Instr_Shft ( PPC32ShftOp op, Bool sz32,=20
+ HReg dst, HReg srcL, PPC32RH* srcR ) {
+ PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
+ i->tag =3D Pin_Shft;
+ i->Pin.Shft.op =3D op;
+ i->Pin.Shft.sz32 =3D sz32;
+ i->Pin.Shft.dst =3D dst;
+ i->Pin.Shft.srcL =3D srcL;
+ i->Pin.Shft.srcR =3D srcR;
+ return i;
+}
PPC32Instr* PPC32Instr_AddSubC32 ( Bool isAdd, Bool setC,
HReg dst, HReg srcL, HReg srcR ) {
PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
@@ -730,11 +747,12 @@
i->Pin.AddSubC32.srcR =3D srcR;
return i;
}
-PPC32Instr* PPC32Instr_Cmp ( Bool syned, UInt crfD,=20
- HReg srcL, PPC32RH* srcR ) {
+PPC32Instr* PPC32Instr_Cmp ( Bool syned, Bool sz32,=20
+ UInt crfD, HReg srcL, PPC32RH* srcR ) {
PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
i->tag =3D Pin_Cmp;
i->Pin.Cmp.syned =3D syned;
+ i->Pin.Cmp.sz32 =3D sz32;
i->Pin.Cmp.crfD =3D crfD;
i->Pin.Cmp.srcL =3D srcL;
i->Pin.Cmp.srcR =3D srcR;
@@ -748,12 +766,13 @@
i->Pin.Unary32.src =3D src;
return i;
}
-PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi,=20
+PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi, Bool sz32,=20
HReg dst, HReg srcL, HReg srcR ) {
PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
i->tag =3D Pin_MulL;
i->Pin.MulL.syned =3D syned;
i->Pin.MulL.hi =3D hi;
+ i->Pin.MulL.sz32 =3D sz32;
i->Pin.MulL.dst =3D dst;
i->Pin.MulL.srcL =3D srcL;
i->Pin.MulL.srcR =3D srcR;
@@ -762,10 +781,12 @@
if (!hi) vassert(!syned);
return i;
}
-PPC32Instr* PPC32Instr_Div ( Bool syned, HReg dst, HReg srcL, HReg srcR =
) {
+PPC32Instr* PPC32Instr_Div ( Bool syned, Bool sz32,
+ HReg dst, HReg srcL, HReg srcR ) {
PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
i->tag =3D Pin_Div;
i->Pin.Div.syned =3D syned;
+ i->Pin.Div.sz32 =3D sz32;
i->Pin.Div.dst =3D dst;
i->Pin.Div.srcL =3D srcL;
i->Pin.Div.srcR =3D srcR;
@@ -1137,19 +1158,41 @@
ppHRegPPC32(i->Pin.Alu.dst);
vex_printf(",");
ppHRegPPC32(r_srcL);
- } else {
- /* generic */
- vex_printf("%s ", showPPC32AluOp(i->Pin.Alu.op,
- toBool(rh_srcR->tag =3D=3D Prh_Imm),
- toBool(hregClass(r_srcL) =3D=3D HRcInt32=
)));
+ return;
+ }
+ /* special-case "li" */
+ if (i->Pin.Alu.op =3D=3D Palu_ADD && // addi Rd,0,imm =3D=3D li =
Rd,imm
+ rh_srcR->tag =3D=3D Prh_Imm &&
+ hregNumber(r_srcL) =3D=3D 0) {
+ vex_printf("li ");
ppHRegPPC32(i->Pin.Alu.dst);
vex_printf(",");
- ppHRegPPC32(r_srcL);
- vex_printf(",");
ppPPC32RH(rh_srcR);
+ return;
}
+ /* generic */
+ vex_printf("%s ", showPPC32AluOp(i->Pin.Alu.op,
+ toBool(rh_srcR->tag =3D=3D Prh_Im=
m)));
+ ppHRegPPC32(i->Pin.Alu.dst);
+ vex_printf(",");
+ ppHRegPPC32(r_srcL);
+ vex_printf(",");
+ ppPPC32RH(rh_srcR);
return;
}
+ case Pin_Shft: {
+ HReg r_srcL =3D i->Pin.Shft.srcL;
+ PPC32RH* rh_srcR =3D i->Pin.Shft.srcR;
+ vex_printf("%s ", showPPC32ShftOp(i->Pin.Shft.op,
+ toBool(rh_srcR->tag =3D=3D Prh_I=
mm),
+ i->Pin.Shft.sz32));
+ ppHRegPPC32(i->Pin.Shft.dst);
+ vex_printf(",");
+ ppHRegPPC32(r_srcL);
+ vex_printf(",");
+ ppPPC32RH(rh_srcR);
+ return;
+ }
case Pin_AddSubC32:
vex_printf("%s%s ",
i->Pin.AddSubC32.isAdd ? "add" : "sub",
@@ -1161,8 +1204,9 @@
ppHRegPPC32(i->Pin.AddSubC32.srcR);
return;
case Pin_Cmp:
- vex_printf("%s%s %%cr%u,",
+ vex_printf("%s%c%s %%cr%u,",
i->Pin.Cmp.syned ? "cmp" : "cmpl",
+ i->Pin.Cmp.sz32 ? 'w' : 'd',
i->Pin.Cmp.srcR->tag =3D=3D Prh_Imm ? "i" : "",
i->Pin.Cmp.crfD);
ppHRegPPC32(i->Pin.Cmp.srcL);
@@ -1176,8 +1220,9 @@
ppHRegPPC32(i->Pin.Unary32.src);
return;
case Pin_MulL:
- vex_printf("mul%s%s ",
- i->Pin.MulL.hi ? "hw" : "lw",
+ vex_printf("mul%c%c%s ",
+ i->Pin.MulL.hi ? 'h' : 'l',
+ i->Pin.MulL.sz32 ? 'w' : 'd',
i->Pin.MulL.hi ? (i->Pin.MulL.syned ? "s" : "u") : "");
ppHRegPPC32(i->Pin.MulL.dst);
vex_printf(",");
@@ -1186,7 +1231,8 @@
ppHRegPPC32(i->Pin.MulL.srcR);
return;
case Pin_Div:
- vex_printf("divw%s ",
+ vex_printf("div%c%s ",
+ i->Pin.Div.sz32 ? 'w' : 'd',
i->Pin.Div.syned ? "" : "u");
ppHRegPPC32(i->Pin.Div.dst);
vex_printf(",");
@@ -1555,6 +1601,11 @@
addRegUsage_PPC32RH(u, i->Pin.Alu.srcR);
addHRegUse(u, HRmWrite, i->Pin.Alu.dst);
return;
+ case Pin_Shft:
+ addHRegUse(u, HRmRead, i->Pin.Shft.srcL);
+ addRegUsage_PPC32RH(u, i->Pin.Shft.srcR);
+ addHRegUse(u, HRmWrite, i->Pin.Shft.dst);
+ return;
case Pin_AddSubC32:
addHRegUse(u, HRmWrite, i->Pin.AddSubC32.dst);
addHRegUse(u, HRmRead, i->Pin.AddSubC32.srcL);
@@ -1800,6 +1851,11 @@
mapReg(m, &i->Pin.Alu.srcL);
mapRegs_PPC32RH(m, i->Pin.Alu.srcR);
return;
+ case Pin_Shft:
+ mapReg(m, &i->Pin.Shft.dst);
+ mapReg(m, &i->Pin.Shft.srcL);
+ mapRegs_PPC32RH(m, i->Pin.Shft.srcR);
+ return;
case Pin_AddSubC32:
mapReg(m, &i->Pin.AddSubC32.dst);
mapReg(m, &i->Pin.AddSubC32.srcL);
@@ -2429,10 +2485,8 @@
UInt r_srcL =3D iregNo(i->Pin.Alu.srcL, mode64);
UInt r_srcR =3D immR ? (-1)/*bogus*/ :
iregNo(srcR->Prh.Reg.reg, mode64);
- Bool is32BitOp =3D toBool(hregClass(i->Pin.Alu.srcL) =3D=3D HRcIn=
t32);
=20
switch (i->Pin.Alu.op) {
-
case Palu_ADD:
if (immR) {
/* addi (PPC32 p350) */
@@ -2490,9 +2544,26 @@
}
break;
=20
- case Palu_SHL:
- if (is32BitOp) {
- vassert(!mode64);
+ default:
+ goto bad;
+ }
+ goto done;
+ }
+
+ case Pin_Shft: {
+ PPC32RH* srcR =3D i->Pin.Shft.srcR;
+ Bool sz32 =3D i->Pin.Shft.sz32;
+ Bool immR =3D toBool(srcR->tag =3D=3D Prh_Imm);
+ UInt r_dst =3D iregNo(i->Pin.Shft.dst, mode64);
+ UInt r_srcL =3D iregNo(i->Pin.Shft.srcL, mode64);
+ UInt r_srcR =3D immR ? (-1)/*bogus*/ :
+ iregNo(srcR->Prh.Reg.reg, mode64);
+ if (!mode64)
+ vassert(sz32);
+
+ switch (i->Pin.Shft.op) {
+ case Pshft_SHL:
+ if (sz32) {
if (immR) {
/* rd =3D rs << n, 1 <=3D n <=3D 31
is
@@ -2507,7 +2578,6 @@
p =3D mkFormX(p, 31, r_srcL, r_dst, r_srcR, 24, 0);
}
} else {
- vassert(mode64);
if (immR) {
/* rd =3D rs << n, 1 <=3D n <=3D 63
is
@@ -2524,10 +2594,9 @@
}
break;
=20
- case Palu_SHR:
- if (is32BitOp) {
- vassert(!mode64);
- if (immR) {
+ case Pshft_SHR:
+ if (sz32) {
+ if (immR) {
/* rd =3D rs >>u n, 1 <=3D n <=3D 31
is
rlwinm rd,rs,32-n,n,31 (PPC32 p501)
@@ -2541,7 +2610,6 @@
p =3D mkFormX(p, 31, r_srcL, r_dst, r_srcR, 536, 0);
}
} else {
- vassert(mode64);
if (immR) {
/* rd =3D rs >>u n, 1 <=3D n <=3D 63
is
@@ -2558,9 +2626,8 @@
}
break;
=20
- case Palu_SAR:
- if (is32BitOp) {
- vassert(!mode64);
+ case Pshft_SAR:
+ if (sz32) {
if (immR) {
/* srawi (PPC32 p507) */
UInt n =3D srcR->Prh.Imm.imm16;
@@ -2572,7 +2639,6 @@
p =3D mkFormX(p, 31, r_srcL, r_dst, r_srcR, 792, 0);
}
} else {
- vassert(mode64);
if (immR) {
/* sradi (PPC64 p571) */
UInt n =3D srcR->Prh.Imm.imm16;
@@ -2616,29 +2682,34 @@
=20
case Pin_Cmp: {
Bool syned =3D i->Pin.Cmp.syned;
+ Bool sz32 =3D i->Pin.Cmp.sz32;
UInt fld1 =3D i->Pin.Cmp.crfD << 2;
UInt r_srcL =3D iregNo(i->Pin.Cmp.srcL, mode64);
UInt r_srcR, imm_srcR;
PPC32RH* srcR =3D i->Pin.Cmp.srcR;
=20
+ if (!mode64) // cmp double word invalid for mode32
+ vassert(sz32); =20
+ else if (!sz32) // mode64 && cmp64: set L=3D1
+ fld1 |=3D 1;
+=20
switch (srcR->tag) {
case Prh_Imm:
- /* cmpi (signed) (PPC32 p368) or=20
- cmpli (unsigned) (PPC32 p370) */
+ vassert(syned =3D=3D srcR->Prh.Imm.syned);
imm_srcR =3D srcR->Prh.Imm.imm16;
- if (syned) {
- vassert(srcR->Prh.Imm.syned);
+ if (syned) { // cmpw/di (signed) (PPC32 p368)
vassert(imm_srcR !=3D 0x8000);
- } else {
- vassert(!srcR->Prh.Imm.syned);
+ p =3D mkFormD(p, 11, fld1, r_srcL, imm_srcR);
+ } else { // cmplw/di (unsigned) (PPC32 p370)
+ p =3D mkFormD(p, 10, fld1, r_srcL, imm_srcR);
}
- p =3D mkFormD(p, syned ? 11 : 10, fld1, r_srcL, imm_srcR);
break;
case Prh_Reg:
- /* cmpi (signed) (PPC32 p367) or=20
- cmpli (unsigned) (PPC32 p379) */
r_srcR =3D iregNo(srcR->Prh.Reg.reg, mode64);
- p =3D mkFormX(p, 31, fld1, r_srcL, r_srcR, syned ? 0 : 32, 0);
+ if (syned) // cmpwi (signed) (PPC32 p367)
+ p =3D mkFormX(p, 31, fld1, r_srcL, r_srcR, 0, 0);
+ else // cmplwi (unsigned) (PPC32 p379)
+ p =3D mkFormX(p, 31, fld1, r_srcL, r_srcR, 32, 0);
break;
default:=20
goto bad;
@@ -2667,30 +2738,33 @@
=20
case Pin_MulL: {
Bool syned =3D i->Pin.MulL.syned;
+ Bool sz32 =3D i->Pin.MulL.sz32;
UInt r_dst =3D iregNo(i->Pin.MulL.dst, mode64);
UInt r_srcL =3D iregNo(i->Pin.MulL.srcL, mode64);
UInt r_srcR =3D iregNo(i->Pin.MulL.srcR, mode64);
- Bool is32BitOp =3D toBool(hregClass(i->Pin.MulL.dst) =3D=3D HRcInt=
32);
=20
+ if (!mode64)
+ vassert(sz32);
+
if (i->Pin.MulL.hi) {
// mul hi words, must consider sign
- if (syned) {
- if (is32BitOp) // mulhw r_dst,r_srcL,r_srcR
+ if (sz32) {
+ if (syned) // mulhw r_dst,r_srcL,r_srcR
p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 75, 0);
- else // mulhd r_dst,r_srcL,r_srcR
+ else // mulhwu r_dst,r_srcL,r_srcR
+ p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 11, 0);
+ } else {
+ if (syned) // mulhd r_dst,r_srcL,r_srcR
p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 73, 0);
- } else {
- if (is32BitOp) // mulhwu r_dst,r_srcL,r_srcR
- p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 11, 0);
- else // mulhdu r_dst,r_srcL,r_srcR
+ else // mulhdu r_dst,r_srcL,r_srcR
p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 9, 0);
}
} else {
// mul low word, sign is irrelevant
vassert(!i->Pin.MulL.syned);
- if (is32BitOp) // mullw r_dst,r_srcL,r_srcR
+ if (sz32) // mullw r_dst,r_srcL,r_srcR
p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 235, 0);
- else // mulld r_dst,r_srcL,r_srcR
+ else // mulld r_dst,r_srcL,r_srcR
p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 233, 0);
}
goto done;
@@ -2698,20 +2772,23 @@
=20
case Pin_Div: {
Bool syned =3D i->Pin.Div.syned;
+ Bool sz32 =3D i->Pin.Div.sz32;
UInt r_dst =3D iregNo(i->Pin.Div.dst, mode64);
UInt r_srcL =3D iregNo(i->Pin.Div.srcL, mode64);
UInt r_srcR =3D iregNo(i->Pin.Div.srcR, mode64);
- Bool is32BitOp =3D toBool(hregClass(i->Pin.Div.dst) =3D=3D HRcInt3=
2);
=20
- if (syned =3D=3D True) {
- if (is32BitOp) // divw r_dst,r_srcL,r_srcR
+ if (!mode64)
+ vassert(sz32);
+
+ if (sz32) {
+ if (syned) // divw r_dst,r_srcL,r_srcR
p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 491, 0);
- else
+ else // divwu r_dst,r_srcL,r_srcR
+ p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 459, 0);
+ } else {
+ if (syned) // divd r_dst,r_srcL,r_srcR
p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 489, 0);
- } else {
- if (is32BitOp) // divwu r_dst,r_srcL,r_srcR
- p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 459, 0);
- else
+ else // divdu r_dst,r_srcL,r_srcR
p =3D mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 457, 0);
}
goto done;
Modified: trunk/priv/host-ppc32/hdefs.h
===================================================================
--- trunk/priv/host-ppc32/hdefs.h 2005-12-16 01:06:42 UTC (rev 1497)
+++ trunk/priv/host-ppc32/hdefs.h 2005-12-16 13:40:18 UTC (rev 1498)
@@ -339,17 +339,29 @@
Palu_INVALID,
Palu_ADD, Palu_SUB,
Palu_AND, Palu_OR, Palu_XOR,
- Palu_SHL, Palu_SHR, Palu_SAR,=20
}
PPC32AluOp;
=20
extern=20
HChar* showPPC32AluOp ( PPC32AluOp,=20
- Bool /* is the 2nd operand an immediate? */,
- Bool /* is this a 32bit or 64bit op? */ );
+ Bool /* is the 2nd operand an immediate? */);
=20
=20
/* --------- */
+typedef=20
+ enum {
+ Pshft_INVALID,
+ Pshft_SHL, Pshft_SHR, Pshft_SAR,=20
+ }
+ PPC32ShftOp;
+
+extern=20
+HChar* showPPC32ShftOp ( PPC32ShftOp,=20
+ Bool /* is the 2nd operand an immediate? */,
+ Bool /* is this a 32bit or 64bit op? */ );
+
+
+/* --------- */
typedef
enum {
Pfp_INVALID,
@@ -427,7 +439,8 @@
typedef
enum {
Pin_LI, /* load word (32/64-bit) immediate (fake insn) */
- Pin_Alu, /* word add/sub/and/or/xor/shl/shr/sar */
+ Pin_Alu, /* word add/sub/and/or/xor */
+ Pin_Shft, /* word shl/shr/sar */
Pin_AddSubC32, /* 32-bit add/sub with read/write carry */
Pin_Cmp, /* word compare */
Pin_Unary, /* not, neg, clz */
@@ -485,7 +498,7 @@
HReg dst;
ULong imm64;
} LI;
- /* Integer add/sub/and/or/xor/shl/shr/sar. Limitations:
+ /* Integer add/sub/and/or/xor. Limitations:
- For add, the immediate, if it exists, is a signed 16.
- For sub, the immediate, if it exists, is a signed 16
which may not be -32768, since no such instruction=20
@@ -493,8 +506,6 @@
that is not possible.
- For and/or/xor, the immediate, if it exists,=20
is an unsigned 16.
- - For shr/shr/sar, the immediate, if it exists,
- is a signed 5-bit value between 1 and 31 inclusive.
*/
struct {
PPC32AluOp op;
@@ -502,6 +513,17 @@
HReg srcL;
PPC32RH* srcR;
} Alu;
+ /* Integer shl/shr/sar.
+ Limitations: the immediate, if it exists,
+ is a signed 5-bit value between 1 and 31 inclusive.
+ */
+ struct {
+ PPC32ShftOp op;
+ Bool sz32; /* mode64 has both 32 and 64bit shft */
+ HReg dst;
+ HReg srcL;
+ PPC32RH* srcR;
+ } Shft;
/* */
struct {
Bool isAdd; /* else sub */
@@ -514,6 +536,7 @@
else it is an unsigned 16. */
struct {
Bool syned;
+ Bool sz32; /* mode64 has both 32 and 64bit cmp */
UInt crfD;
HReg srcL;
PPC32RH* srcR;
@@ -527,6 +550,7 @@
struct {
Bool syned; /* meaningless if hi32=3D=3DFalse */
Bool hi; /* False=3D>low, True=3D>high */
+ Bool sz32; /* mode64 has both 32 & 64bit mull */
HReg dst;
HReg srcL;
HReg srcR;
@@ -534,6 +558,7 @@
/* ppc32 div/divu instruction. */
struct {
Bool syned;
+ Bool sz32; /* mode64 has both 32 & 64bit div */
HReg dst;
HReg srcL;
HReg srcR;
@@ -564,14 +589,14 @@
} CMov;
/* Sign/Zero extending loads. Dst size is always 32 bits. */
struct {
- UChar sz; /* 1|2|4 */
+ UChar sz; /* 1|2|4|8 */
Bool syned;
HReg dst;
PPC32AMode* src;
} Load;
/* 32/16/8 bit stores */
struct {
- UChar sz; /* 1|2|4 */
+ UChar sz; /* 1|2|4|8 */
PPC32AMode* dst;
HReg src;
} Store;
@@ -734,11 +759,12 @@
=20
extern PPC32Instr* PPC32Instr_LI ( HReg, ULong, Bool );
extern PPC32Instr* PPC32Instr_Alu ( PPC32AluOp, HReg, HReg, PPC32=
RH* );
+extern PPC32Instr* PPC32Instr_Shft ( PPC32AluOp, Bool sz32, HReg, =
HReg, PPC32RH* );
extern PPC32Instr* PPC32Instr_AddSubC32 ( Bool, Bool, HReg, HReg, HReg =
);
-extern PPC32Instr* PPC32Instr_Cmp ( Bool, UInt, HReg, PPC32=
RH* );
+extern PPC32Instr* PPC32Instr_Cmp ( Bool, Bool, UInt, HReg, PPC32=
RH* );
extern PPC32Instr* PPC32Instr_Unary ( PPC32UnaryOp op, HReg dst, HR=
eg src );
-extern PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi32, HReg, =
HReg, HReg );
-extern PPC32Instr* PPC32Instr_Div ( Bool syned, HReg dst, HReg sr=
cL, HReg srcR );
+extern PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi32, Bool s=
z32, HReg, HReg, HReg );
+extern PPC32Instr* PPC32Instr_Div ( Bool syned, Bool sz32, HReg d=
st, HReg srcL, HReg srcR );
extern PPC32Instr* PPC32Instr_Call ( PPC32CondCode, Addr64, UInt )=
;
extern PPC32Instr* PPC32Instr_Goto ( IRJumpKind, PPC32CondCode con=
d, PPC32RI* dst );
extern PPC32Instr* PPC32Instr_CMov ( PPC32CondCode, HReg dst, PPC3=
2RI* src );
Modified: trunk/priv/host-ppc32/isel.c
===================================================================
--- trunk/priv/host-ppc32/isel.c 2005-12-16 01:06:42 UTC (rev 1497)
+++ trunk/priv/host-ppc32/isel.c 2005-12-16 13:40:18 UTC (rev 1498)
@@ -767,9 +767,9 @@
PPC32Instr_Alu(Palu_AND, r_rmIR, r_rmIR, PPC32RH_Imm(False,3)));
=20
// r_rmPPC32 =3D XOR( r_rmIR, (r_rmIR << 1) & 2)
+ addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
+ r_tmp, r_rmIR, PPC32RH_Imm(False,1)));
addInstr(env,=20
- PPC32Instr_Alu(Palu_SHL, r_tmp, r_rmIR, PPC32RH_Imm(False,1)));
- addInstr(env,=20
PPC32Instr_Alu(Palu_AND, r_tmp, r_tmp, PPC32RH_Imm(False,2)));
addInstr(env,=20
PPC32Instr_Alu(Palu_XOR, r_rmPPC32, r_rmIR, PPC32RH_Reg(r_tmp)));
@@ -1021,6 +1021,7 @@
/* --------- BINARY OP --------- */
case Iex_Binop: {
PPC32AluOp aluOp;
+ PPC32ShftOp shftOp;
=20
//.. /* Pattern: Sub32(0,x) */
//.. if (e->Iex.Binop.op =3D=3D Iop_Sub32 && isZero32(e->Iex.Binop=
.arg1)) {
@@ -1043,12 +1044,6 @@
aluOp =3D Palu_OR; break;
case Iop_Xor8: case Iop_Xor16: case Iop_Xor32: case Iop_Xor64:
aluOp =3D Palu_XOR; break;
- case Iop_Shl8: case Iop_Shl16: case Iop_Shl32: case Iop_Shl64:
- aluOp =3D Palu_SHL; break;
- case Iop_Shr8: case Iop_Shr16: case Iop_Shr32: case Iop_Shr64:
- aluOp =3D Palu_SHR; break;
- case Iop_Sar8: case Iop_Sar16: case Iop_Sar32: case Iop_Sar64:
- aluOp =3D Palu_SAR; break;
default:
aluOp =3D Palu_INVALID; break;
}
@@ -1068,49 +1063,86 @@
ri_srcR =3D iselIntExpr_RH(env, False/*signed*/,
e->Iex.Binop.arg2);
break;
- case Palu_SHL: case Palu_SHR: case Palu_SAR:
+ default:
+ vpanic("iselIntExpr_R_wrk-aluOp-arg2");
+ }
+ addInstr(env, PPC32Instr_Alu(aluOp, r_dst, r_srcL, ri_srcR));
+ return r_dst;
+ }
+
+ /* a shift? */
+ switch (e->Iex.Binop.op) {
+ case Iop_Shl8: case Iop_Shl16: case Iop_Shl32: case Iop_Shl64:
+ shftOp =3D Pshft_SHL; break;
+ case Iop_Shr8: case Iop_Shr16: case Iop_Shr32: case Iop_Shr64:
+ shftOp =3D Pshft_SHR; break;
+ case Iop_Sar8: case Iop_Sar16: case Iop_Sar32: case Iop_Sar64:
+ shftOp =3D Pshft_SAR; break;
+ default:
+ shftOp =3D Pshft_INVALID; break;
+ }
+ /* we assume any literal values are on the second operand. */
+ if (shftOp !=3D Pshft_INVALID) {
+ HReg r_dst =3D newVRegI(env);
+ HReg r_srcL =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
+ PPC32RH* ri_srcR =3D NULL;
+ /* get right arg into an RH, in the appropriate way */
+ switch (shftOp) {
+ case Pshft_SHL: case Pshft_SHR: case Pshft_SAR:
if (!mode64)
ri_srcR =3D iselIntExpr_RH5u(env, e->Iex.Binop.arg2);
else
ri_srcR =3D iselIntExpr_RH6u(env, e->Iex.Binop.arg2);
break;
default:
- vpanic("iselIntExpr_R_wrk-aluOp-arg2");
+ vpanic("iselIntExpr_R_wrk-shftOp-arg2");
}
/* widen the left arg if needed */
- if ((aluOp =3D=3D Palu_SHR || aluOp =3D=3D Palu_SAR)) {
- if (!mode64 && (ty =3D=3D Ity_I8 || ty =3D=3D Ity_I16)) {
+ if (shftOp =3D=3D Pshft_SHR || shftOp =3D=3D Pshft_SAR) {
+ if (ty =3D=3D Ity_I8 || ty =3D=3D Ity_I16) {
PPC32RH* amt =3D PPC32RH_Imm(False, toUShort(ty =3D=3D It=
y_I8 ? 24 : 16));
HReg tmp =3D newVRegI(env);
- addInstr(env, PPC32Instr_Alu(Palu_SHL, tmp, r_srcL, amt))=
;
- addInstr(env, PPC32Instr_Alu(aluOp, tmp, tmp, amt));
+ addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shif=
t*/,
+ tmp, r_srcL, amt));
+ addInstr(env, PPC32Instr_Shft(shftOp, True/*32bit shif=
t*/,
+ tmp, tmp, amt));
r_srcL =3D tmp;
vassert(0); /* AWAITING TEST CASE */
}
- if (mode64 && (ty =3D=3D Ity_I8 || ty =3D=3D Ity_I16 || ty =3D=
=3D Ity_I32)) {
- PPC32RH* amt =3D PPC32RH_Imm(False, toUShort(ty =3D=3D It=
y_I8 ? 56 :
- ty =3D=3D Ity_=
I16 ? 48 : 32));
- HReg tmp =3D newVRegI(env);
- addInstr(env, PPC32Instr_Alu(Palu_SHL, tmp, r_srcL, amt))=
;
- addInstr(env, PPC32Instr_Alu(aluOp, tmp, tmp, amt));
- r_srcL =3D tmp;
- }
}
- addInstr(env, PPC32Instr_Alu(aluOp, r_dst, r_srcL, ri_srcR));
+ /* Only 64 expressions need 64bit shifts,
+ 32bit shifts are fine for all others */
+ if (ty =3D=3D Ity_I64) {
+ vassert(mode64);
+ addInstr(env, PPC32Instr_Shft(shftOp, False/*64bit shift*/,
+ r_dst, r_srcL, ri_srcR));
+ } else {
+ addInstr(env, PPC32Instr_Shft(shftOp, True/*32bit shift*/,
+ r_dst, r_srcL, ri_srcR));
+ }
return r_dst;
}
=20
/* How about a div? */
if (e->Iex.Binop.op =3D=3D Iop_DivS32 ||=20
- e->Iex.Binop.op =3D=3D Iop_DivU32 ||
- e->Iex.Binop.op =3D=3D Iop_DivS64 ||=20
+ e->Iex.Binop.op =3D=3D Iop_DivU32) {
+ Bool syned =3D toBool(e->Iex.Binop.op =3D=3D Iop_DivS32);
+ HReg r_dst =3D newVRegI(env);
+ HReg r_srcL =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
+ HReg r_srcR =3D iselIntExpr_R(env, e->Iex.Binop.arg2);
+ addInstr(env, PPC32Instr_Div(syned, True/*32bit div*/,
+ r_dst, r_srcL, r_srcR));
+ return r_dst;
+ }
+ if (e->Iex.Binop.op =3D=3D Iop_DivS64 ||=20
e->Iex.Binop.op =3D=3D Iop_DivU64) {
+ Bool syned =3D toBool(e->Iex.Binop.op =3D=3D Iop_DivS64);
HReg r_dst =3D newVRegI(env);
HReg r_srcL =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR =3D iselIntExpr_R(env, e->Iex.Binop.arg2);
- Bool syned =3D toBool(e->Iex.Binop.op =3D=3D Iop_DivS32 ||
- e->Iex.Binop.op =3D=3D Iop_DivS64);
- addInstr(env, PPC32Instr_Div(syned, r_dst, r_srcL, r_srcR));
+ vassert(mode64);
+ addInstr(env, PPC32Instr_Div(syned, False/*64bit div*/,
+ r_dst, r_srcL, r_srcR));
return r_dst;
}
=20
@@ -1119,25 +1151,61 @@
e->Iex.Binop.op =3D=3D Iop_Mul32 ||
e->Iex.Binop.op =3D=3D Iop_Mul64) {
Bool syned =3D False;
+ Bool sz32 =3D (e->Iex.Binop.op !=3D Iop_Mul64);
HReg r_dst =3D newVRegI(env);
HReg r_srcL =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR =3D iselIntExpr_R(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_MulL(syned, False/*lo32*/,=20
+ addInstr(env, PPC32Instr_MulL(syned, False/*lo32*/, sz32,
r_dst, r_srcL, r_srcR));
return r_dst;
} =20
=20
+ /* 32 x 32 -> 64 multiply */
+ if (e->Iex.Binop.op =3D=3D Iop_MullU32 ||
+ e->Iex.Binop.op =3D=3D Iop_MullS32) {
+ HReg tLo =3D newVRegI(env);
+ HReg tHi =3D newVRegI(env);
+ HReg r_dst =3D newVRegI(env);
+ Bool syned =3D toBool(e->Iex.Binop.op =3D=3D Iop_MullS32);
+ HReg r_srcL =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
+ HReg r_srcR =3D iselIntExpr_R(env, e->Iex.Binop.arg2);
+ vassert(mode64);
+ addInstr(env, PPC32Instr_MulL(False/*signedness irrelevant*/,=20
+ False/*lo32*/, True/*32bit mul*/,
+ tLo, r_srcL, r_srcR));
+ addInstr(env, PPC32Instr_MulL(syned,
+ True/*hi32*/, True/*32bit mul*/,
+ tHi, r_srcL, r_srcR));
+ addInstr(env, PPC32Instr_Shft(Pshft_SHL, False/*64bit shift*/,
+ r_dst, tHi, PPC32RH_Imm(False,32)=
));
+ addInstr(env, PPC32Instr_Alu(Palu_OR, r_dst, r_dst, PPC32RH_Reg=
(tLo)));
+ return r_dst;
+ }
+
/* El-mutanto 3-way compare? */
if (e->Iex.Binop.op =3D=3D Iop_CmpORD32S ||
- e->Iex.Binop.op =3D=3D Iop_CmpORD32U ||
- e->Iex.Binop.op =3D=3D Iop_CmpORD64S ||
+ e->Iex.Binop.op =3D=3D Iop_CmpORD32U) {
+ Bool syned =3D toBool(e->Iex.Binop.op =3D=3D Iop_CmpORD32S)=
;
+ HReg dst =3D newVRegI(env);
+ HReg srcL =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
+ PPC32RH* srcR =3D iselIntExpr_RH(env, syned, e->Iex.Binop.arg2=
);
+ addInstr(env, PPC32Instr_Cmp(syned, True/*32bit cmp*/,
+ 7/*cr*/, srcL, srcR));
+ addInstr(env, PPC32Instr_MfCR(dst));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, dst, dst,
+ PPC32RH_Imm(False,7<<1)));
+ return dst;
+ }
+
+ if (e->Iex.Binop.op =3D=3D Iop_CmpORD64S ||
e->Iex.Binop.op =3D=3D Iop_CmpORD64U) {
- Bool syned =3D toBool(e->Iex.Binop.op =3D=3D Iop_CmpORD32S =
||
- e->Iex.Binop.op =3D=3D Iop_CmpORD64S);
+ Bool syned =3D toBool(e->Iex.Binop.op =3D=3D Iop_CmpORD64S)=
;
HReg dst =3D newVRegI(env);
HReg srcL =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
PPC32RH* srcR =3D iselIntExpr_RH(env, syned, e->Iex.Binop.arg2=
);
- addInstr(env, PPC32Instr_Cmp(syned, /*cr*/7, srcL, srcR));
+ vassert(mode64);
+ addInstr(env, PPC32Instr_Cmp(syned, False/*64bit cmp*/,
+ 7/*cr*/, srcL, srcR));
addInstr(env, PPC32Instr_MfCR(dst));
addInstr(env, PPC32Instr_Alu(Palu_AND, dst, dst,
PPC32RH_Imm(False,7<<1)));
@@ -1217,18 +1285,22 @@
*/
=20
// r_ccIR_b0 =3D r_ccPPC32[0] | r_ccPPC32[3]
- addInstr(env, PPC32Instr_Alu(Palu_SHR, r_ccIR_b0, r_ccPPC32, PP=
C32RH_Imm(False,0x3)));
+ addInstr(env, PPC32Instr_Shft(Pshft_SHR, True/*32bit shift*/,
+ r_ccIR_b0, r_ccPPC32, PPC32RH_Imm=
(False,0x3)));
addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR_b0, r_ccPPC32, PP=
C32RH_Reg(r_ccIR_b0)));
addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b0, r_ccIR_b0, PP=
C32RH_Imm(False,0x1)));
=20
// r_ccIR_b2 =3D r_ccPPC32[0]
- addInstr(env, PPC32Instr_Alu(Palu_SHL, r_ccIR_b2, r_ccPPC32, PP=
C32RH_Imm(False,0x2)));
+ addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
+ r_ccIR_b2, r_ccPPC32, PPC32RH_Imm=
(False,0x2)));
addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b2, r_ccIR_b2, PP=
C32RH_Imm(False,0x4)));
=20
// r_ccIR_b6 =3D r_ccPPC32[0] | r_ccPPC32[1]
- addInstr(env, PPC32Instr_Alu(Palu_SHR, r_ccIR_b6, r_ccPPC32, PP=
C32RH_Imm(False,0x1)));
+ addInstr(env, PPC32Instr_Shft(Pshft_SHR, True/*32bit shift*/,
+ r_ccIR_b6, r_ccPPC32, PPC32RH_Imm=
(False,0x1)));
addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR_b6, r_ccPPC32, PP=
C32RH_Reg(r_ccIR_b6)));
- addInstr(env, PPC32Instr_Alu(Palu_SHL, r_ccIR_b6, r_ccIR_b6, PP=
C32RH_Imm(False,0x6)));
+ addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
+ r_ccIR_b6, r_ccIR_b6, PPC32RH_Imm=
(False,0x6)));
addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b6, r_ccIR_b6, PP=
C32RH_Imm(False,0x40)));
=20
// r_ccIR =3D r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6
@@ -1322,27 +1394,36 @@
HReg r_dst =3D newVRegI(env);
HReg r_src =3D iselIntExpr_R(env, e->Iex.Unop.arg);
vassert(mode64);
- addInstr(env, PPC32Instr_Alu(Palu_SHL, r_dst, r_src,=20
- PPC32RH_Imm(False,32))=
);
- addInstr(env, PPC32Instr_Alu(Palu_SHR, r_dst, r_dst,=20
- PPC32RH_Imm(False,32))=
);
+ addInstr(env, PPC32Instr_Shft(Pshft_SHL, False/*64bit shift*/,
+ r_dst, r_src, PPC32RH_Imm(False,3=
2)));
+ addInstr(env, PPC32Instr_Shft(Pshft_SHR, False/*64bit shift*/,
+ r_dst, r_dst, PPC32RH_Imm(False,3=
2)));
return r_dst;
}
case Iop_8Sto16:
case Iop_8Sto32:
- case Iop_16Sto32:
+ case Iop_16Sto32: {
+ HReg r_dst =3D newVRegI(env);
+ HReg r_src =3D iselIntExpr_R(env, e->Iex.Unop.arg);
+ UShort amt =3D toUShort(e->Iex.Unop.op=3D=3DIop_16Sto32 ? 16 =
: 24);
+ addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
+ r_dst, r_src, PPC32RH_Imm(False,a=
mt)));
+ addInstr(env, PPC32Instr_Shft(Pshft_SAR, True/*32bit shift*/,
+ r_dst, r_dst, PPC32RH_Imm(False,a=
mt)));
+ return r_dst;
+ }
+ case Iop_8Sto64:
case Iop_16Sto64:
case Iop_32Sto64: {
HReg r_dst =3D newVRegI(env);
HReg r_src =3D iselIntExpr_R(env, e->Iex.Unop.arg);
- UShort amt =3D toUShort(e->Iex.Unop.op=3D=3DIop_16Sto64 ? 48 =
:
- e->Iex.Unop.op=3D=3DIop_32Sto64 ? 32 :
- e->Iex.Unop.op=3D=3DIop_16Sto32 ? 16 : =
24);
- vassert(amt<32 || mode64);
- addInstr(env, PPC32Instr_Alu(Palu_SHL, r_dst, r_src,=20
- PPC32RH_Imm(False,amt)=
));
- addInstr(env, PPC32Instr_Alu(Palu_SAR, r_dst, r_dst,=20
- PPC32RH_Imm(False,amt)=
));
+ UShort amt =3D toUShort(e->Iex.Unop.op=3D=3DIop_8Sto64 ? 56 =
:
+ e->Iex.Unop.op=3D=3DIop_16Sto64 ? 48 : =
32);
+ vassert(mode64);
+ addInstr(env, PPC32Instr_Shft(Pshft_SHL, False/*64bit shift*/,
+ r_dst, r_src, PPC32RH_Imm(False,a=
mt)));
+ addInstr(env, PPC32Instr_Shft(Pshft_SAR, False/*64bit shift*/,
+ r_dst, r_dst, PPC32RH_Imm(False,a=
mt)));
return r_dst;
}
case Iop_Not8:
@@ -1362,8 +1443,8 @@
} else {
HReg r_dst =3D newVRegI(env);
HReg r_src =3D iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Alu(Palu_SHR, r_dst, r_src,=20
- PPC32RH_Imm(False,32)));
+ addInstr(env, PPC32Instr_Shft(Pshft_SHR, False/*64bit shift*=
/,
+ r_dst, r_src, PPC32RH_Imm(False=
,32)));
return r_dst;
}
}
@@ -1421,10 +1502,16 @@
HReg r_dst =3D newVRegI(env);
HReg r_src =3D iselIntExpr_R(env, e->Iex.Unop.arg);
UShort shift =3D toUShort(e->Iex.Unop.op =3D=3D Iop_16HIto8 ? 8=
: 16);
- addInstr(env, PPC32Instr_Alu(Palu_SHR, r_dst, r_src,=20
- PPC32RH_Imm(False,shift)));
+ addInstr(env, PPC32Instr_Shft(Pshft_SHR, True/*32bit shift*/,
+ r_dst, r_src, PPC32RH_Imm(False,s=
hift)));
return r_dst;
}
+ case Iop_128HIto64: {
+ HReg rHi, rLo;
+ vassert(mode64);
+ iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+ return rHi; /* and abandon rLo .. poor wee thing :-) */
+ }
case Iop_128to64: {
vassert(mode64);
HReg rHi, rLo;
@@ -1445,10 +1532,10 @@
HReg r_dst =3D newVRegI(env);
PPC32CondCode cond =3D iselCondCode(env, e->Iex.Unop.arg);
addInstr(env, PPC32Instr_Set32(cond,r_dst));
- addInstr(env, PPC32Instr_Alu(Palu_SHL, r_dst, r_dst,=20
- PPC32RH_Imm(False,31))=
);
- addInstr(env, PPC32Instr_Alu(Palu_SAR, r_dst, r_dst,=20
- PPC32RH_Imm(False,31))=
);
+ addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
+ r_dst, r_dst, PPC32RH_Imm(False,3=
1)));
+ addInstr(env, PPC32Instr_Shft(Pshft_SAR, True/*32bit shift*/,
+ r_dst, r_dst, PPC32RH_Imm(False,3=
1)));
return r_dst;
}
=20
@@ -1584,7 +1671,8 @@
HReg r_tmp =3D newVRegI(env);
addInstr(env, mk_iMOVds_RR(r_dst,rX));
addInstr(env, PPC32Instr_Alu(Palu_AND, r_tmp, r_cond, PPC32RH_I=
mm(False,0xFF)));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, 7/*cr*/, r_tmp,=
PPC32RH_Imm(False,0)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp=
*/,
+ 7/*cr*/, r_tmp, PPC32RH_Imm(False,=
0)));
addInstr(env, PPC32Instr_CMov(cc,r_dst,r0));
return r_dst;
}
@@ -1917,8 +2005,8 @@
// Make a compare that will always be true:
HReg r_zero =3D newVRegI(env);
addInstr(env, PPC32Instr_LI(r_zero, 0, mode64));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, /*cr*/7,=20
- r_zero, PPC32RH_Reg(r_zero)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, r_zero, PPC32RH_Reg(r_zero))=
);
return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
}
=20
@@ -1949,15 +2037,14 @@
//.. }
=20
/* 32to1 */
- if (e->tag =3D=3D Iex_Unop && e->Iex.Unop.op =3D=3D Iop_32to1) {
+ if (e->tag =3D=3D Iex_Unop &&
+ (e->Iex.Unop.op =3D=3D Iop_32to1 || e->Iex.Unop.op =3D=3D Iop_64t=
o1)) {
HReg src =3D iselIntExpr_R(env, e->Iex.Unop.arg);
HReg tmp =3D newVRegI(env);
/* could do better, probably -- andi. */
- addInstr(env, PPC32Instr_Alu(
- Palu_AND, tmp, src, PPC32RH_Imm(False,1)));
- addInstr(env, PPC32Instr_Cmp(
- False/*unsigned*/, 7/*cr*/,=20
- tmp, PPC32RH_Imm(False,1)));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, tmp, src, PPC32RH_Imm(False=
,1)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, tmp, PPC32RH_Imm(False,1)));
return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
}
=20
@@ -1969,9 +2056,10 @@
&& e->Iex.Unop.op =3D=3D Iop_CmpNEZ8) {
HReg r_32 =3D iselIntExpr_R(env, e->Iex.Unop.arg);
HReg r_l =3D newVRegI(env);
- addInstr(env, PPC32Instr_Alu(Palu_AND, r_l, r_32, PPC32RH_Imm(Fals=
e,0xFF)));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, 7/*cr*/,=20
- r_l, PPC32RH_Imm(False,0)));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, r_l, r_32,
+ PPC32RH_Imm(False,0xFF)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, r_l, PPC32RH_Imm(False,0)));
return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
}
=20
@@ -1981,7 +2069,8 @@
if (e->tag =3D=3D Iex_Unop
&& e->Iex.Unop.op =3D=3D Iop_CmpNEZ32) {
HReg r1 =3D iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, 7, r1, PPC32RH_Imm=
(False,0)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, r1, PPC32RH_Imm(False,0)));
return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
}
=20
@@ -2040,15 +2129,12 @@
|| e->Iex.Binop.op =3D=3D Iop_CmpLT32U
|| e->Iex.Binop.op =3D=3D Iop_CmpLE32S
|| e->Iex.Binop.op =3D=3D Iop_CmpLE32U)) {
- PPC32RH* ri2;
- HReg r1 =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
- Bool syned =3D False;
- if (e->Iex.Binop.op =3D=3D Iop_CmpLT32S ||
- e->Iex.Binop.op =3D=3D Iop_CmpLE32S) {
- syned =3D True;
- }
- ri2 =3D iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Cmp(syned,7,r1,ri2));
+ Bool syned =3D (e->Iex.Binop.op =3D=3D Iop_CmpLT32S ||
+ e->Iex.Binop.op =3D=3D Iop_CmpLE32S);
+ HReg r1 =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
+ PPC32RH* ri2 =3D iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
+ addInstr(env, PPC32Instr_Cmp(syned, True/*32bit cmp*/,
+ 7/*cr*/, r1, ri2));
=20
switch (e->Iex.Binop.op) {
case Iop_CmpEQ32: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
@@ -2069,15 +2155,13 @@
|| e->Iex.Binop.op =3D=3D Iop_CmpLT64U
|| e->Iex.Binop.op =3D=3D Iop_CmpLE64S
|| e->Iex.Binop.op =3D=3D Iop_CmpLE64U)) {
- PPC32RH* ri2;
- HReg r1 =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
- Bool syned =3D False;
- if (e->Iex.Binop.op =3D=3D Iop_CmpLT64S ||
- e->Iex.Binop.op =3D=3D Iop_CmpLE64S) {
- syned =3D True;
- }
- ri2 =3D iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Cmp(syned,7,r1,ri2));
+ Bool syned =3D (e->Iex.Binop.op =3D=3D Iop_CmpLT64S ||
+ e->Iex.Binop.op =3D=3D Iop_CmpLE64S);
+ HReg r1 =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
+ PPC32RH* ri2 =3D iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
+ vassert(mode64);
+ addInstr(env, PPC32Instr_Cmp(syned, False/*64bit cmp*/,
+ 7/*cr*/, r1, ri2));
=20
switch (e->Iex.Binop.op) {
case Iop_CmpEQ64: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
@@ -2147,11 +2231,13 @@
iselInt64Expr( &hi, &lo, env, e->Iex.Unop.arg );
addInstr(env, mk_iMOVds_RR(tmp, lo));
addInstr(env, PPC32Instr_Alu(Palu_OR, tmp, tmp, PPC32RH_Reg(hi)=
));
- addInstr(env, PPC32Instr_Cmp(False/*sign*/,7/*cr*/,tmp,PPC32RH_=
Imm(False,0)));
+ addInstr(env, PPC32Instr_Cmp(False/*sign*/, True/*32bit cmp*/,
+ 7/*cr*/, tmp,PPC32RH_Imm(False,0))=
);
return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
} else { // mode64
HReg r_src =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
- addInstr(env, PPC32Instr_Cmp(False/*sign*/,7/*cr*/,r_src,PPC32R=
H_Imm(False,0)));
+ addInstr(env, PPC32Instr_Cmp(False/*sign*/, False/*64bit cmp*/,
+ 7/*cr*/, r_src,PPC32RH_Imm(False,0=
)));
return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
}
}
@@ -2161,7 +2247,8 @@
HReg r_src =3D lookupIRTemp(env, e->Iex.Tmp.tmp);
HReg src_masked =3D newVRegI(env);
addInstr(env, PPC32Instr_Alu(Palu_AND, src_masked, r_src, PPC32RH_=
Imm(False,1)));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, 7/*cr*/, src_maske=
d, PPC32RH_Imm(False,1)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, src_masked, PPC32RH_Imm(Fals=
e,1)));
return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
}
=20
@@ -2217,9 +2304,11 @@
HReg r_srcL =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR =3D iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, PPC32Instr_MulL(False/*signedness irrelevant*/,=20
- False/*lo64*/, tLo, r_srcL, r_src=
R));
+ False/*lo64*/, False/*64bit mul*/=
,
+ tLo, r_srcL, r_srcR));
addInstr(env, PPC32Instr_MulL(syned,
- True/*hi64*/, tHi, r_srcL, r_srcR=
));
+ True/*hi64*/, False/*64bit mul*/,
+ tHi, r_srcL, r_srcR));
*rHi =3D tHi;
*rLo =3D tLo;
return;
@@ -2357,7 +2446,7 @@
=20
addInstr(env, PPC32Instr_Alu(Palu_AND,=20
r_tmp, r_cond, PPC32RH_Imm(False,0xFF=
)));
- addInstr(env, PPC32Instr...
[truncated message content] |