Author: sewardj
Date: 2009-06-02 09:18:56 +0100 (Tue, 02 Jun 2009)
New Revision: 1898
Log:
As part of changes to support atomic instructions directly in
Valgrind, add support at the IR level for linked loads and store
conditionals, a la the lwarx/stwcx etc insns in ppc, but abstractified
suitably.
IRExpr_Load gets a new Bool field, indicating whether it's a normal
load or a load-linked (reservation-setting) one.
IRStmt_Store gets a new IRTemp field. If this is IRTemp_INVALID, this
is a normal store. If it's not IRTemp_INVALID, this is a
store-conditional, and the success/failure bit resulting from the
store is written to the new IRTemp.
There are restrictions on the alignment of addresses used in LL and SC
IR operations. See libvex_ir.h for details.
Other small IR defn changes:
* IRMBusEvent loses Imbe_BusLock, Imbe_BusUnlock,
Imbe_SnoopedStoreBegin, Imbe_SnoopedStoreEnd. These were all
semantic kludges and can now be removed.
* ppc32 and ppc64 guest state loses the guest_RESVN field. This was
part of a kludge to fake up the behaviour of st{w,d}cx. enough to
make threaded code work. It is no longer necessary.
The rest of these changes just pushes these through the compilation
pipeline in the normal way. One minor notable point is that iropt
considers a linked load as inhibiting tree-building, so as to
guarantee that it will not reorder linked loads w.r.t. any other
loads.
Modified:
branches/DCAS/priv/guest-amd64/toIR.c
branches/DCAS/priv/guest-arm/toIR.c
branches/DCAS/priv/guest-ppc/ghelpers.c
branches/DCAS/priv/guest-ppc/toIR.c
branches/DCAS/priv/guest-x86/toIR.c
branches/DCAS/priv/host-amd64/isel.c
branches/DCAS/priv/host-arm/isel.c
branches/DCAS/priv/host-ppc/hdefs.c
branches/DCAS/priv/host-ppc/hdefs.h
branches/DCAS/priv/host-ppc/isel.c
branches/DCAS/priv/host-x86/isel.c
branches/DCAS/priv/ir/irdefs.c
branches/DCAS/priv/ir/irmatch.c
branches/DCAS/priv/ir/iropt.c
branches/DCAS/pub/libvex_guest_ppc32.h
branches/DCAS/pub/libvex_guest_ppc64.h
branches/DCAS/pub/libvex_ir.h
branches/DCAS/pub/libvex_trc_values.h
Modified: branches/DCAS/priv/guest-amd64/toIR.c
===================================================================
--- branches/DCAS/priv/guest-amd64/toIR.c 2009-05-21 21:55:50 UTC (rev 1897)
+++ branches/DCAS/priv/guest-amd64/toIR.c 2009-06-02 08:18:56 UTC (rev 1898)
@@ -305,12 +305,12 @@
static void storeLE ( IRExpr* addr, IRExpr* data )
{
- stmt( IRStmt_Store(Iend_LE,addr,data) );
+ stmt( IRStmt_Store(Iend_LE, IRTemp_INVALID, addr, data) );
}
static IRExpr* loadLE ( IRType ty, IRExpr* data )
{
- return IRExpr_Load(Iend_LE,ty,data);
+ return IRExpr_Load(False, Iend_LE, ty, data);
}
static IROp mkSizedOp ( IRType ty, IROp op8 )
@@ -8825,9 +8825,6 @@
/* pfx holds the summary of prefixes. */
Prefix pfx = PFX_EMPTY;
- /* do we need follow the insn with MBusEvent(BusUnlock) ? */
- Bool unlock_bus_after_insn = False;
-
/* Set result defaults. */
dres.whatNext = Dis_Continue;
dres.len = 0;
@@ -8975,8 +8972,6 @@
if (pfx & PFX_LOCK) {
if (can_be_used_with_LOCK_prefix( (UChar*)&guest_code[delta] )) {
- stmt( IRStmt_MBE(Imbe_BusLock) );
- unlock_bus_after_insn = True;
DIP("lock ");
} else {
*expect_CAS = False;
@@ -15012,18 +15007,6 @@
nameIRegE(sz, pfx, modrm));
} else {
*expect_CAS = True;
- /* Need to add IRStmt_MBE(Imbe_BusLock). */
- if (pfx & PFX_LOCK) {
- /* check it's already been taken care of */
- vassert(unlock_bus_after_insn);
- } else {
- vassert(!unlock_bus_after_insn);
- stmt( IRStmt_MBE(Imbe_BusLock) );
- unlock_bus_after_insn = True;
- }
- /* Because unlock_bus_after_insn is now True, generic logic
- at the bottom of disInstr will add the
- IRStmt_MBE(Imbe_BusUnlock). */
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
assign( t1, loadLE(ty, mkexpr(addr)) );
assign( t2, getIRegG(sz, pfx, modrm) );
@@ -16061,8 +16044,6 @@
insn, but nevertheless be paranoid and update it again right
now. */
stmt( IRStmt_Put( OFFB_RIP, mkU64(guest_RIP_curr_instr) ) );
- if (unlock_bus_after_insn)
- stmt( IRStmt_MBE(Imbe_BusUnlock) );
jmp_lit(Ijk_NoDecode, guest_RIP_curr_instr);
dres.whatNext = Dis_StopHere;
dres.len = 0;
@@ -16079,8 +16060,6 @@
decode_success:
/* All decode successes end up here. */
DIP("\n");
- if (unlock_bus_after_insn)
- stmt( IRStmt_MBE(Imbe_BusUnlock) );
dres.len = (Int)toUInt(delta - delta_start);
return dres;
}
Modified: branches/DCAS/priv/guest-arm/toIR.c
===================================================================
--- branches/DCAS/priv/guest-arm/toIR.c 2009-05-21 21:55:50 UTC (rev 1897)
+++ branches/DCAS/priv/guest-arm/toIR.c 2009-06-02 08:18:56 UTC (rev 1898)
@@ -495,7 +495,7 @@
static void storeLE ( IRExpr* addr, IRExpr* data )
{
- stmt( IRStmt_Store(Iend_LE,addr,data) );
+ stmt( IRStmt_Store(Iend_LE, IRTemp_INVALID, addr, data) );
}
static IRExpr* unop ( IROp op, IRExpr* a )
@@ -545,7 +545,7 @@
static IRExpr* loadLE ( IRType ty, IRExpr* data )
{
- return IRExpr_Load(Iend_LE,ty,data);
+ return IRExpr_Load(False, Iend_LE, ty, data);
}
#if 0
Modified: branches/DCAS/priv/guest-ppc/ghelpers.c
===================================================================
--- branches/DCAS/priv/guest-ppc/ghelpers.c 2009-05-21 21:55:50 UTC (rev 1897)
+++ branches/DCAS/priv/guest-ppc/ghelpers.c 2009-06-02 08:18:56 UTC (rev 1898)
@@ -477,8 +477,6 @@
vex_state->guest_EMWARN = EmWarn_NONE;
- vex_state->guest_RESVN = 0;
-
vex_state->guest_TISTART = 0;
vex_state->guest_TILEN = 0;
@@ -636,7 +634,7 @@
vex_state->guest_EMWARN = EmWarn_NONE;
- vex_state->guest_RESVN = 0;
+ vex_state->padding = 0;
vex_state->guest_TISTART = 0;
vex_state->guest_TILEN = 0;
@@ -650,6 +648,8 @@
vex_state->guest_IP_AT_SYSCALL = 0;
vex_state->guest_SPRG3_RO = 0;
+
+ vex_state->padding2 = 0;
}
@@ -767,7 +767,7 @@
/* Describe any sections to be regarded by Memcheck as
'always-defined'. */
- .n_alwaysDefd = 12,
+ .n_alwaysDefd = 11,
.alwaysDefd
= { /* 0 */ ALWAYSDEFD32(guest_CIA),
@@ -776,12 +776,11 @@
/* 3 */ ALWAYSDEFD32(guest_TILEN),
/* 4 */ ALWAYSDEFD32(guest_VSCR),
/* 5 */ ALWAYSDEFD32(guest_FPROUND),
- /* 6 */ ALWAYSDEFD32(guest_RESVN),
- /* 7 */ ALWAYSDEFD32(guest_NRADDR),
- /* 8 */ ALWAYSDEFD32(guest_NRADDR_GPR2),
- /* 9 */ ALWAYSDEFD32(guest_REDIR_SP),
- /* 10 */ ALWAYSDEFD32(guest_REDIR_STACK),
- /* 11 */ ALWAYSDEFD32(guest_IP_AT_SYSCALL)
+ /* 6 */ ALWAYSDEFD32(guest_NRADDR),
+ /* 7 */ ALWAYSDEFD32(guest_NRADDR_GPR2),
+ /* 8 */ ALWAYSDEFD32(guest_REDIR_SP),
+ /* 9 */ ALWAYSDEFD32(guest_REDIR_STACK),
+ /* 10 */ ALWAYSDEFD32(guest_IP_AT_SYSCALL)
}
};
@@ -818,12 +817,11 @@
/* 3 */ ALWAYSDEFD64(guest_TILEN),
/* 4 */ ALWAYSDEFD64(guest_VSCR),
/* 5 */ ALWAYSDEFD64(guest_FPROUND),
- /* 6 */ ALWAYSDEFD64(guest_RESVN),
- /* 7 */ ALWAYSDEFD64(guest_NRADDR),
- /* 8 */ ALWAYSDEFD64(guest_NRADDR_GPR2),
- /* 9 */ ALWAYSDEFD64(guest_REDIR_SP),
- /* 10 */ ALWAYSDEFD64(guest_REDIR_STACK),
- /* 11 */ ALWAYSDEFD64(guest_IP_AT_SYSCALL)
+ /* 6 */ ALWAYSDEFD64(guest_NRADDR),
+ /* 7 */ ALWAYSDEFD64(guest_NRADDR_GPR2),
+ /* 8 */ ALWAYSDEFD64(guest_REDIR_SP),
+ /* 9 */ ALWAYSDEFD64(guest_REDIR_STACK),
+ /* 10 */ ALWAYSDEFD64(guest_IP_AT_SYSCALL)
}
};
Modified: branches/DCAS/priv/guest-ppc/toIR.c
===================================================================
--- branches/DCAS/priv/guest-ppc/toIR.c 2009-05-21 21:55:50 UTC (rev 1897)
+++ branches/DCAS/priv/guest-ppc/toIR.c 2009-06-02 08:18:56 UTC (rev 1898)
@@ -232,7 +232,6 @@
#define OFFB_EMWARN offsetofPPCGuestState(guest_EMWARN)
#define OFFB_TISTART offsetofPPCGuestState(guest_TISTART)
#define OFFB_TILEN offsetofPPCGuestState(guest_TILEN)
-#define OFFB_RESVN offsetofPPCGuestState(guest_RESVN)
#define OFFB_NRADDR offsetofPPCGuestState(guest_NRADDR)
#define OFFB_NRADDR_GPR2 offsetofPPCGuestState(guest_NRADDR_GPR2)
@@ -326,7 +325,6 @@
PPC_GST_EMWARN, // Emulation warnings
PPC_GST_TISTART,// For icbi: start of area to invalidate
PPC_GST_TILEN, // For icbi: length of area to invalidate
- PPC_GST_RESVN, // For lwarx/stwcx.
PPC_GST_IP_AT_SYSCALL, // the CIA of the most recently executed SC insn
PPC_GST_SPRG3_RO, // SPRG3
PPC_GST_MAX
@@ -464,11 +462,12 @@
stmt( IRStmt_WrTmp(dst, e) );
}
+/* This generates a normal (non store-conditional) store. */
static void storeBE ( IRExpr* addr, IRExpr* data )
{
- vassert(typeOfIRExpr(irsb->tyenv, addr) == Ity_I32 ||
- typeOfIRExpr(irsb->tyenv, addr) == Ity_I64);
- stmt( IRStmt_Store(Iend_BE,addr,data) );
+ IRType tyA = typeOfIRExpr(irsb->tyenv, addr);
+ vassert(tyA == Ity_I32 || tyA == Ity_I64);
+ stmt( IRStmt_Store(Iend_BE, IRTemp_INVALID, addr, data) );
}
static IRExpr* unop ( IROp op, IRExpr* a )
@@ -517,11 +516,23 @@
return IRExpr_Const(IRConst_U64(i));
}
+/* This generates a normal (non load-linked) load. */
static IRExpr* loadBE ( IRType ty, IRExpr* data )
{
- return IRExpr_Load(Iend_BE,ty,data);
+ return IRExpr_Load(False, Iend_BE, ty, data);
}
+/* And this, a linked load. */
+static IRExpr* loadlinkedBE ( IRType ty, IRExpr* data )
+{
+ if (mode64) {
+ vassert(ty == Ity_I32 || ty == Ity_I64);
+ } else {
+ vassert(ty == Ity_I32);
+ }
+ return IRExpr_Load(True, Iend_BE, ty, data);
+}
+
static IRExpr* mkOR1 ( IRExpr* arg1, IRExpr* arg2 )
{
vassert(typeOfIRExpr(irsb->tyenv, arg1) == Ity_I1);
@@ -832,26 +843,26 @@
}
/* IR narrows I32/I64 -> I8/I16/I32 */
-static IRExpr* mkSzNarrow8 ( IRType ty, IRExpr* src )
+static IRExpr* mkNarrowTo8 ( IRType ty, IRExpr* src )
{
vassert(ty == Ity_I32 || ty == Ity_I64);
return ty == Ity_I64 ? unop(Iop_64to8, src) : unop(Iop_32to8, src);
}
-static IRExpr* mkSzNarrow16 ( IRType ty, IRExpr* src )
+static IRExpr* mkNarrowTo16 ( IRType ty, IRExpr* src )
{
vassert(ty == Ity_I32 || ty == Ity_I64);
return ty == Ity_I64 ? unop(Iop_64to16, src) : unop(Iop_32to16, src);
}
-static IRExpr* mkSzNarrow32 ( IRType ty, IRExpr* src )
+static IRExpr* mkNarrowTo32 ( IRType ty, IRExpr* src )
{
vassert(ty == Ity_I32 || ty == Ity_I64);
return ty == Ity_I64 ? unop(Iop_64to32, src) : src;
}
/* Signed/Unsigned IR widens I8/I16/I32 -> I32/I64 */
-static IRExpr* mkSzWiden8 ( IRType ty, IRExpr* src, Bool sined )
+static IRExpr* mkWidenFrom8 ( IRType ty, IRExpr* src, Bool sined )
{
IROp op;
vassert(ty == Ity_I32 || ty == Ity_I64);
@@ -860,7 +871,7 @@
return unop(op, src);
}
-static IRExpr* mkSzWiden16 ( IRType ty, IRExpr* src, Bool sined )
+static IRExpr* mkWidenFrom16 ( IRType ty, IRExpr* src, Bool sined )
{
IROp op;
vassert(ty == Ity_I32 || ty == Ity_I64);
@@ -869,7 +880,7 @@
return unop(op, src);
}
-static IRExpr* mkSzWiden32 ( IRType ty, IRExpr* src, Bool sined )
+static IRExpr* mkWidenFrom32 ( IRType ty, IRExpr* src, Bool sined )
{
vassert(ty == Ity_I32 || ty == Ity_I64);
if (ty == Ity_I32)
@@ -1113,30 +1124,6 @@
/* non-zero rotate */ rot );
}
-#if 0
-/* ROTL32_64(src64, rot_amt5)
- Weirdo 32bit rotl on ppc64:
- rot32 = ROTL(src_lo32,y);
- return (rot32|rot32);
-*/
-static IRExpr* /* :: Ity_I64 */ ROTL32_64 ( IRExpr* src64,
- IRExpr* rot_amt )
-{
- IRExpr *mask, *rot32;
- vassert(mode64); // used only in 64bit mode
- vassert(typeOfIRExpr(irsb->tyenv,src64) == Ity_I64);
- vassert(typeOfIRExpr(irsb->tyenv,rot_amt) == Ity_I8);
-
- mask = binop(Iop_And8, rot_amt, mkU8(31));
- rot32 = ROTL( unop(Iop_64to32, src64), rot_amt );
-
- return binop(Iop_Or64,
- binop(Iop_Shl64, unop(Iop_32Uto64, rot32), mkU8(32)),
- unop(Iop_32Uto64, rot32));
-}
-#endif
-
-
/* Standard effective address calc: (rA + rB) */
static IRExpr* ea_rA_idxd ( UInt rA, UInt rB )
{
@@ -1208,6 +1195,38 @@
}
+/* Exit the trace if ADDR (intended to be a guest memory address) is
+ not ALIGN-aligned, generating a request for a SIGBUS followed by a
+ restart of the current insn. */
+static void gen_SIGBUS_if_misaligned ( IRTemp addr, UChar align )
+{
+ vassert(align == 4 || align == 8);
+ if (mode64) {
+ vassert(typeOfIRTemp(irsb->tyenv, addr) == Ity_I64);
+ stmt(
+ IRStmt_Exit(
+ binop(Iop_CmpNE64,
+ binop(Iop_And64, mkexpr(addr), mkU64(align-1)),
+ mkU64(0)),
+ Ijk_SigBUS,
+ IRConst_U64( guest_CIA_curr_instr )
+ )
+ );
+ } else {
+ vassert(typeOfIRTemp(irsb->tyenv, addr) == Ity_I32);
+ stmt(
+ IRStmt_Exit(
+ binop(Iop_CmpNE32,
+ binop(Iop_And32, mkexpr(addr), mkU32(align-1)),
+ mkU32(0)),
+ Ijk_SigBUS,
+ IRConst_U32( guest_CIA_curr_instr )
+ )
+ );
+ }
+}
+
+
/* Generate AbiHints which mark points at which the ELF or PowerOpen
ABIs say that the stack red zone (viz, -N(r1) .. -1(r1), for some
N) becomes undefined. That is at function calls and returns. ELF
@@ -2125,9 +2144,6 @@
binop( Iop_Shl32, getXER_CA32(), mkU8(29)),
getXER_BC32()));
- case PPC_GST_RESVN:
- return IRExpr_Get( OFFB_RESVN, ty);
-
default:
vex_printf("getGST(ppc): reg = %u", reg);
vpanic("getGST(ppc)");
@@ -2257,11 +2273,6 @@
stmt( IRStmt_Put( OFFB_TILEN, src) );
break;
- case PPC_GST_RESVN:
- vassert( ty_src == ty );
- stmt( IRStmt_Put( OFFB_RESVN, src) );
- break;
-
default:
vex_printf("putGST(ppc): reg = %u", reg);
vpanic("putGST(ppc)");
@@ -2495,7 +2506,7 @@
flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
// rD = rA + rB + XER[CA]
- assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) );
+ assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
binop( mkSzOp(ty, Iop_Add8),
mkexpr(rB), mkexpr(old_xer_ca))) );
@@ -2521,7 +2532,7 @@
rD_addr, rA_addr, rB_addr);
// rD = rA + (-1) + XER[CA]
// => Just another form of adde
- assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) );
+ assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
min_one = mkSzImm(ty, (Long)-1);
assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
binop( mkSzOp(ty, Iop_Add8),
@@ -2547,7 +2558,7 @@
rD_addr, rA_addr, rB_addr);
// rD = rA + (0) + XER[CA]
// => Just another form of adde
- assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) );
+ assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
assign( rD, binop( mkSzOp(ty, Iop_Add8),
mkexpr(rA), mkexpr(old_xer_ca)) );
set_XER_CA( ty, PPCG_FLAG_OP_ADDE,
@@ -2744,7 +2755,7 @@
flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
// rD = (log not)rA + rB + XER[CA]
- assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) );
+ assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
assign( rD, binop( mkSzOp(ty, Iop_Add8),
unop( mkSzOp(ty, Iop_Not8), mkexpr(rA)),
binop( mkSzOp(ty, Iop_Add8),
@@ -2771,7 +2782,7 @@
rD_addr, rA_addr);
// rD = (log not)rA + (-1) + XER[CA]
// => Just another form of subfe
- assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) );
+ assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
min_one = mkSzImm(ty, (Long)-1);
assign( rD, binop( mkSzOp(ty, Iop_Add8),
unop( mkSzOp(ty, Iop_Not8), mkexpr(rA)),
@@ -2798,7 +2809,7 @@
rD_addr, rA_addr);
// rD = (log not)rA + (0) + XER[CA]
// => Just another form of subfe
- assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) );
+ assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
assign( rD, binop( mkSzOp(ty, Iop_Add8),
unop( mkSzOp(ty, Iop_Not8),
mkexpr(rA)), mkexpr(old_xer_ca)) );
@@ -2936,8 +2947,8 @@
if (flag_L == 1) {
putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64S, a, b)));
} else {
- a = mkSzNarrow32( ty, a );
- b = mkSzNarrow32( ty, b );
+ a = mkNarrowTo32( ty, a );
+ b = mkNarrowTo32( ty, b );
putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32S, a, b)));
}
putCR0( crfD, getXER_SO() );
@@ -2949,8 +2960,8 @@
if (flag_L == 1) {
putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64U, a, b)));
} else {
- a = mkSzNarrow32( ty, a );
- b = mkSzNarrow32( ty, b );
+ a = mkNarrowTo32( ty, a );
+ b = mkNarrowTo32( ty, b );
putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32U, a, b)));
}
putCR0( crfD, getXER_SO() );
@@ -2977,8 +2988,8 @@
if (flag_L == 1) {
putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64S, a, b)));
} else {
- a = mkSzNarrow32( ty, a );
- b = mkSzNarrow32( ty, b );
+ a = mkNarrowTo32( ty, a );
+ b = mkNarrowTo32( ty, b );
putCR321(crfD, unop(Iop_32to8,binop(Iop_CmpORD32S, a, b)));
}
putCR0( crfD, getXER_SO() );
@@ -2996,8 +3007,8 @@
if (flag_L == 1) {
putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64U, a, b)));
} else {
- a = mkSzNarrow32( ty, a );
- b = mkSzNarrow32( ty, b );
+ a = mkNarrowTo32( ty, a );
+ b = mkNarrowTo32( ty, b );
putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32U, a, b)));
}
putCR0( crfD, getXER_SO() );
@@ -3117,7 +3128,7 @@
// Iop_Clz32 undefined for arg==0, so deal with that case:
irx = binop(Iop_CmpNE32, lo32, mkU32(0));
- assign(rA, mkSzWiden32(ty,
+ assign(rA, mkWidenFrom32(ty,
IRExpr_Mux0X( unop(Iop_1Uto8, irx),
mkU32(32),
unop(Iop_Clz32, lo32)),
@@ -3538,7 +3549,7 @@
case 0x22: // lbz (Load B & Zero, PPC32 p433)
DIP("lbz r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
val = loadBE(Ity_I8, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden8(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
break;
case 0x23: // lbzu (Load B & Zero, Update, PPC32 p434)
@@ -3548,14 +3559,14 @@
}
DIP("lbzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
val = loadBE(Ity_I8, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden8(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
putIReg( rA_addr, mkexpr(EA) );
break;
case 0x2A: // lha (Load HW Alg, PPC32 p445)
DIP("lha r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
val = loadBE(Ity_I16, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden16(ty, val, True) );
+ putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
break;
case 0x2B: // lhau (Load HW Alg, Update, PPC32 p446)
@@ -3565,14 +3576,14 @@
}
DIP("lhau r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
val = loadBE(Ity_I16, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden16(ty, val, True) );
+ putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
putIReg( rA_addr, mkexpr(EA) );
break;
case 0x28: // lhz (Load HW & Zero, PPC32 p450)
DIP("lhz r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
val = loadBE(Ity_I16, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden16(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
break;
case 0x29: // lhzu (Load HW & and Zero, Update, PPC32 p451)
@@ -3582,14 +3593,14 @@
}
DIP("lhzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
val = loadBE(Ity_I16, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden16(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
putIReg( rA_addr, mkexpr(EA) );
break;
case 0x20: // lwz (Load W & Zero, PPC32 p460)
DIP("lwz r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
val = loadBE(Ity_I32, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden32(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
break;
case 0x21: // lwzu (Load W & Zero, Update, PPC32 p461))
@@ -3599,7 +3610,7 @@
}
DIP("lwzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
val = loadBE(Ity_I32, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden32(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
putIReg( rA_addr, mkexpr(EA) );
break;
@@ -3618,14 +3629,14 @@
return False;
}
val = loadBE(Ity_I8, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden8(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
putIReg( rA_addr, mkexpr(EA) );
break;
case 0x057: // lbzx (Load B & Zero, Indexed, PPC32 p436)
DIP("lbzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I8, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden8(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
break;
case 0x177: // lhaux (Load HW Alg, Update Indexed, PPC32 p447)
@@ -3635,14 +3646,14 @@
}
DIP("lhaux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I16, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden16(ty, val, True) );
+ putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
putIReg( rA_addr, mkexpr(EA) );
break;
case 0x157: // lhax (Load HW Alg, Indexed, PPC32 p448)
DIP("lhax r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I16, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden16(ty, val, True) );
+ putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
break;
case 0x137: // lhzux (Load HW & Zero, Update Indexed, PPC32 p452)
@@ -3652,14 +3663,14 @@
}
DIP("lhzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I16, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden16(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
putIReg( rA_addr, mkexpr(EA) );
break;
case 0x117: // lhzx (Load HW & Zero, Indexed, PPC32 p453)
DIP("lhzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I16, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden16(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
break;
case 0x037: // lwzux (Load W & Zero, Update Indexed, PPC32 p462)
@@ -3669,14 +3680,14 @@
}
DIP("lwzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I32, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden32(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
putIReg( rA_addr, mkexpr(EA) );
break;
case 0x017: // lwzx (Load W & Zero, Indexed, PPC32 p463)
DIP("lwzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I32, mkexpr(EA));
- putIReg( rD_addr, mkSzWiden32(ty, val, False) );
+ putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
break;
@@ -3798,7 +3809,7 @@
switch (opc1) {
case 0x26: // stb (Store B, PPC32 p509)
DIP("stb r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
- storeBE( mkexpr(EA), mkSzNarrow8(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
break;
case 0x27: // stbu (Store B, Update, PPC32 p510)
@@ -3808,12 +3819,12 @@
}
DIP("stbu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
putIReg( rA_addr, mkexpr(EA) );
- storeBE( mkexpr(EA), mkSzNarrow8(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
break;
case 0x2C: // sth (Store HW, PPC32 p522)
DIP("sth r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
- storeBE( mkexpr(EA), mkSzNarrow16(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
break;
case 0x2D: // sthu (Store HW, Update, PPC32 p524)
@@ -3823,12 +3834,12 @@
}
DIP("sthu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
putIReg( rA_addr, mkexpr(EA) );
- storeBE( mkexpr(EA), mkSzNarrow16(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
break;
case 0x24: // stw (Store W, PPC32 p530)
DIP("stw r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
- storeBE( mkexpr(EA), mkSzNarrow32(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
break;
case 0x25: // stwu (Store W, Update, PPC32 p534)
@@ -3838,7 +3849,7 @@
}
DIP("stwu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
putIReg( rA_addr, mkexpr(EA) );
- storeBE( mkexpr(EA), mkSzNarrow32(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
break;
/* X Form : all these use EA_indexed */
@@ -3856,12 +3867,12 @@
}
DIP("stbux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
putIReg( rA_addr, mkexpr(EA) );
- storeBE( mkexpr(EA), mkSzNarrow8(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
break;
case 0x0D7: // stbx (Store B Indexed, PPC32 p512)
DIP("stbx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
- storeBE( mkexpr(EA), mkSzNarrow8(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
break;
case 0x1B7: // sthux (Store HW, Update Indexed, PPC32 p525)
@@ -3871,12 +3882,12 @@
}
DIP("sthux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
putIReg( rA_addr, mkexpr(EA) );
- storeBE( mkexpr(EA), mkSzNarrow16(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
break;
case 0x197: // sthx (Store HW Indexed, PPC32 p526)
DIP("sthx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
- storeBE( mkexpr(EA), mkSzNarrow16(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
break;
case 0x0B7: // stwux (Store W, Update Indexed, PPC32 p535)
@@ -3886,12 +3897,12 @@
}
DIP("stwux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
putIReg( rA_addr, mkexpr(EA) );
- storeBE( mkexpr(EA), mkSzNarrow32(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
break;
case 0x097: // stwx (Store W Indexed, PPC32 p536)
DIP("stwx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
- storeBE( mkexpr(EA), mkSzNarrow32(ty, mkexpr(rS)) );
+ storeBE( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
break;
@@ -3977,8 +3988,8 @@
DIP("lmw r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
for (r = rD_addr; r <= 31; r++) {
irx_addr = binop(Iop_Add32, mkexpr(EA), mkU32(ea_off));
- putIReg( r, mkSzWiden32(ty, loadBE(Ity_I32, irx_addr ),
- False) );
+ putIReg( r, mkWidenFrom32(ty, loadBE(Ity_I32, irx_addr ),
+ False) );
ea_off += 4;
}
break;
@@ -3987,7 +3998,7 @@
DIP("stmw r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
for (r = rS_addr; r <= 31; r++) {
irx_addr = binop(Iop_Add32, mkexpr(EA), mkU32(ea_off));
- storeBE( irx_addr, mkSzNarrow32(ty, getIReg(r)) );
+ storeBE( irx_addr, mkNarrowTo32(ty, getIReg(r)) );
ea_off += 4;
}
break;
@@ -4033,11 +4044,11 @@
vassert(shift == 0 || shift == 8 || shift == 16 || shift == 24);
putIReg(
rD,
- mkSzWiden32(
+ mkWidenFrom32(
ty,
binop(
Iop_Or32,
- mkSzNarrow32(ty, getIReg(rD)),
+ mkNarrowTo32(ty, getIReg(rD)),
binop(
Iop_Shl32,
unop(
@@ -4085,7 +4096,7 @@
binop(mkSzOp(ty,Iop_Add8), e_EA, mkSzImm(ty,i)),
unop(Iop_32to8,
binop(Iop_Shr32,
- mkSzNarrow32(ty, getIReg(rS)),
+ mkNarrowTo32(ty, getIReg(rS)),
mkU8(toUChar(shift))))
);
shift -= 8;
@@ -4819,7 +4830,6 @@
IRType ty = mode64 ? Ity_I64 : Ity_I32;
IRTemp EA = newTemp(ty);
- IRTemp rS = newTemp(ty);
assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
@@ -4857,53 +4867,46 @@
hardware, I think as to whether or not contention is
likely. So we can just ignore it. */
DIP("lwarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
- putIReg( rD_addr, mkSzWiden32(ty, loadBE(Ity_I32, mkexpr(EA)),
- False) );
- /* Take a reservation */
- putGST( PPC_GST_RESVN, mkexpr(EA) );
+
+ // trap if misaligned
+ gen_SIGBUS_if_misaligned( EA, 4 );
+
+ // and actually do the load
+ putIReg( rD_addr, mkWidenFrom32(ty, loadlinkedBE(Ity_I32, mkexpr(EA)),
+ False) );
break;
case 0x096: {
// stwcx. (Store Word Conditional Indexed, PPC32 p532)
- IRTemp resaddr = newTemp(ty);
+ // Note this has to handle stwcx. in both 32- and 64-bit modes,
+ // so isn't quite as straightforward as it might otherwise be.
+ IRTemp rS = newTemp(Ity_I32);
+ IRTemp resSC;
if (b0 != 1) {
vex_printf("dis_memsync(ppc)(stwcx.,b0)\n");
return False;
}
DIP("stwcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
- assign( rS, getIReg(rS_addr) );
- /* First set up as if the reservation failed */
- // Set CR0[LT GT EQ S0] = 0b000 || XER[SO]
- putCR321(0, mkU8(0<<1));
- putCR0(0, getXER_SO());
+ // trap if misaligned
+ gen_SIGBUS_if_misaligned( EA, 4 );
- /* Get the reservation address into a temporary, then
- clear it. */
- assign( resaddr, getGST(PPC_GST_RESVN) );
- putGST( PPC_GST_RESVN, mkSzImm(ty, 0) );
+ // Get the data to be stored, and narrow to 32 bits if necessary
+ assign( rS, mkNarrowTo32(ty, getIReg(rS_addr)) );
- /* Skip the rest if the reservation really did fail. */
- stmt( IRStmt_Exit(
- ( mode64 ?
- binop(Iop_CmpNE64, mkexpr(resaddr), mkexpr(EA)) :
- binop(Iop_CmpNE32, mkexpr(resaddr), mkexpr(EA)) ),
- Ijk_Boring,
- mkSzConst( ty, nextInsnAddr()) ));
+ // Do the store, and get success/failure bit into resSC
+ resSC = newTemp(Ity_I1);
+ stmt( IRStmt_Store(Iend_BE, resSC, mkexpr(EA), mkexpr(rS)) );
- /* Note for mode64:
+ // Set CR0[LT GT EQ S0] = 0b000 || XER[SO] on failure
+ // Set CR0[LT GT EQ S0] = 0b001 || XER[SO] on success
+ putCR321(0, binop(Iop_Shl8, unop(Iop_1Uto8, mkexpr(resSC)), mkU8(1)));
+ putCR0(0, getXER_SO());
+
+ /* Note:
If resaddr != lwarx_resaddr, CR0[EQ] is undefined, and
whether rS is stored is dependent on that value. */
-
- /* Success? Do the (32bit) store. Mark the store as
- snooped, so that threading tools can handle it differently
- if necessary. */
- stmt( IRStmt_MBE(Imbe_SnoopedStoreBegin) );
- storeBE( mkexpr(EA), mkSzNarrow32(ty, mkexpr(rS)) );
- stmt( IRStmt_MBE(Imbe_SnoopedStoreEnd) );
-
- // Set CR0[LT GT EQ S0] = 0b001 || XER[SO]
- putCR321(0, mkU8(1<<1));
+ /* So I guess we can just ignore this case? */
break;
}
@@ -4950,41 +4953,48 @@
in the documentation) is merely a hint bit to the
hardware, I think as to whether or not contention is
likely. So we can just ignore it. */
+ if (!mode64)
+ return False;
DIP("ldarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
- putIReg( rD_addr, loadBE(Ity_I64, mkexpr(EA)) );
- // Take a reservation
- putGST( PPC_GST_RESVN, mkexpr(EA) );
+
+ // trap if misaligned
+ gen_SIGBUS_if_misaligned( EA, 8 );
+
+ // and actually do the load
+ putIReg( rD_addr, loadlinkedBE(Ity_I64, mkexpr(EA)) );
break;
case 0x0D6: { // stdcx. (Store DWord Condition Indexd, PPC64 p581)
- IRTemp resaddr = newTemp(ty);
+ // A marginally simplified version of the stwcx. case
+ IRTemp rS = newTemp(Ity_I64);
+ IRTemp resSC;
if (b0 != 1) {
vex_printf("dis_memsync(ppc)(stdcx.,b0)\n");
return False;
}
+ if (!mode64)
+ return False;
DIP("stdcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+
+ // trap if misaligned
+ gen_SIGBUS_if_misaligned( EA, 8 );
+
+ // Get the data to be stored
assign( rS, getIReg(rS_addr) );
- // First set up as if the reservation failed
- // Set CR0[LT GT EQ S0] = 0b000 || XER[SO]
- putCR321(0, mkU8(0<<1));
+ // Do the store, and get success/failure bit into resSC
+ resSC = newTemp(Ity_I1);
+ stmt( IRStmt_Store(Iend_BE, resSC, mkexpr(EA), mkexpr(rS)) );
+
+ // Set CR0[LT GT EQ S0] = 0b000 || XER[SO] on failure
+ // Set CR0[LT GT EQ S0] = 0b001 || XER[SO] on success
+ putCR321(0, binop(Iop_Shl8, unop(Iop_1Uto8, mkexpr(resSC)), mkU8(1)));
putCR0(0, getXER_SO());
-
- // Get the reservation address into a temporary, then clear it.
- assign( resaddr, getGST(PPC_GST_RESVN) );
- putGST( PPC_GST_RESVN, mkSzImm(ty, 0) );
- // Skip the rest if the reservation really did fail.
- stmt( IRStmt_Exit( binop(Iop_CmpNE64, mkexpr(resaddr),
- mkexpr(EA)),
- Ijk_Boring,
- IRConst_U64(nextInsnAddr())) );
-
- // Success? Do the store
- storeBE( mkexpr(EA), mkexpr(rS) );
-
- // Set CR0[LT GT EQ S0] = 0b001 || XER[SO]
- putCR321(0, mkU8(1<<1));
+ /* Note:
+ If resaddr != lwarx_resaddr, CR0[EQ] is undefined, and
+ whether rS is stored is dependent on that value. */
+ /* So I guess we can just ignore this case? */
break;
}
@@ -5029,8 +5039,8 @@
assign( rS, getIReg(rS_addr) );
assign( rB, getIReg(rB_addr) );
- assign( rS_lo32, mkSzNarrow32(ty, mkexpr(rS)) );
- assign( rB_lo32, mkSzNarrow32(ty, mkexpr(rB)) );
+ assign( rS_lo32, mkNarrowTo32(ty, mkexpr(rS)) );
+ assign( rB_lo32, mkNarrowTo32(ty, mkexpr(rB)) );
if (opc1 == 0x1F) {
switch (opc2) {
@@ -5054,7 +5064,7 @@
binop( Iop_Sar32,
binop(Iop_Shl32, mkexpr(rB_lo32), mkU8(26)),
mkU8(31))) );
- assign( rA, mkSzWiden32(ty, e_tmp, /* Signed */False) );
+ assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */False) );
break;
}
@@ -5079,13 +5089,13 @@
IRExpr_Mux0X( mkexpr(outofrange),
mkexpr(sh_amt),
mkU32(31)) ) );
- assign( rA, mkSzWiden32(ty, e_tmp, /* Signed */True) );
+ assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */True) );
set_XER_CA( ty, PPCG_FLAG_OP_SRAW,
mkexpr(rA),
- mkSzWiden32(ty, mkexpr(rS_lo32), True),
- mkSzWiden32(ty, mkexpr(sh_amt), True ),
- mkSzWiden32(ty, getXER_CA32(), True) );
+ mkWidenFrom32(ty, mkexpr(rS_lo32), True),
+ mkWidenFrom32(ty, mkexpr(sh_amt), True ),
+ mkWidenFrom32(ty, getXER_CA32(), True) );
break;
}
@@ -5105,9 +5115,9 @@
set_XER_CA( ty, PPCG_FLAG_OP_SRAWI,
mkexpr(rA),
- mkSzWiden32(ty, mkexpr(rS_lo32), /* Syned */True),
+ mkWidenFrom32(ty, mkexpr(rS_lo32), /* Syned */True),
mkSzImm(ty, sh_imm),
- mkSzWiden32(ty, getXER_CA32(), /* Syned */False) );
+ mkWidenFrom32(ty, getXER_CA32(), /* Syned */False) );
break;
case 0x218: // srw (Shift Right Word, PPC32 p508)
@@ -5132,7 +5142,7 @@
binop(Iop_Shl32, mkexpr(rB_lo32),
mkU8(26)),
mkU8(31))));
- assign( rA, mkSzWiden32(ty, e_tmp, /* Signed */False) );
+ assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */False) );
break;
@@ -5182,7 +5192,7 @@
);
set_XER_CA( ty, PPCG_FLAG_OP_SRAD,
mkexpr(rA), mkexpr(rS), mkexpr(sh_amt),
- mkSzWiden32(ty, getXER_CA32(), /* Syned */False) );
+ mkWidenFrom32(ty, getXER_CA32(), /* Syned */False) );
break;
}
@@ -5197,7 +5207,7 @@
mkexpr(rA),
getIReg(rS_addr),
mkU64(sh_imm),
- mkSzWiden32(ty, getXER_CA32(), /* Syned */False) );
+ mkWidenFrom32(ty, getXER_CA32(), /* Syned */False) );
break;
case 0x21B: // srd (Shift Right DWord, PPC64 p574)
@@ -5305,27 +5315,27 @@
DIP("lhbrx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
assign( w1, unop(Iop_16Uto32, loadBE(Ity_I16, mkexpr(EA))) );
assign( w2, gen_byterev16(w1) );
- putIReg( rD_addr, mkSzWiden32(ty, mkexpr(w2),
- /* Signed */False) );
+ putIReg( rD_addr, mkWidenFrom32(ty, mkexpr(w2),
+ /* Signed */False) );
break;
case 0x216: // lwbrx (Load Word Byte-Reverse Indexed, PPC32 p459)
DIP("lwbrx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
assign( w1, loadBE(Ity_I32, mkexpr(EA)) );
assign( w2, gen_byterev32(w1) );
- putIReg( rD_addr, mkSzWiden32(ty, mkexpr(w2),
- /* Signed */False) );
+ putIReg( rD_addr, mkWidenFrom32(ty, mkexpr(w2),
+ /* Signed */False) );
break;
case 0x396: // sthbrx (Store Half Word Byte-Reverse Indexed, PPC32 p523)
DIP("sthbrx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
- assign( w1, mkSzNarrow32(ty, getIReg(rS_addr)) );
+ assign( w1, mkNarrowTo32(ty, getIReg(rS_addr)) );
storeBE( mkexpr(EA), unop(Iop_32to16, gen_byterev16(w1)) );
break;
case 0x296: // stwbrx (Store Word Byte-Reverse Indxd, PPC32 p531)
DIP("stwbrx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
- assign( w1, mkSzNarrow32(ty, getIReg(rS_addr)) );
+ assign( w1, mkNarrowTo32(ty, getIReg(rS_addr)) );
storeBE( mkexpr(EA), gen_byterev32(w1) );
break;
@@ -5403,14 +5413,14 @@
// implementation of mfocr (from the 2.02 arch spec)
if (b11to20 == 0) {
DIP("mfcr r%u\n", rD_addr);
- putIReg( rD_addr, mkSzWiden32(ty, getGST( PPC_GST_CR ),
- /* Signed */False) );
+ putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_CR ),
+ /* Signed */False) );
break;
}
if (b20 == 1 && b11 == 0) {
DIP("mfocrf r%u,%u\n", rD_addr, CRM);
- putIReg( rD_addr, mkSzWiden32(ty, getGST( PPC_GST_CR ),
- /* Signed */False) );
+ putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_CR ),
+ /* Signed */False) );
break;
}
/* not decodable */
@@ -5422,8 +5432,8 @@
switch (SPR) { // Choose a register...
case 0x1:
DIP("mfxer r%u\n", rD_addr);
- putIReg( rD_addr, mkSzWiden32(ty, getGST( PPC_GST_XER ),
- /* Signed */False) );
+ putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_XER ),
+ /* Signed */False) );
break;
case 0x8:
DIP("mflr r%u\n", rD_addr);
@@ -5435,8 +5445,8 @@
break;
case 0x100:
DIP("mfvrsave r%u\n", rD_addr);
- putIReg( rD_addr, mkSzWiden32(ty, getGST( PPC_GST_VRSAVE ),
- /* Signed */False) );
+ putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_VRSAVE ),
+ /* Signed */False) );
break;
case 0x103:
@@ -5488,8 +5498,8 @@
case 269:
DIP("mftbu r%u", rD_addr);
putIReg( rD_addr,
- mkSzWiden32(ty, unop(Iop_64HIto32, mkexpr(val)),
- /* Signed */False) );
+ mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(val)),
+ /* Signed */False) );
break;
case 268:
DIP("mftb r%u", rD_addr);
@@ -5530,7 +5540,7 @@
shft = 4*(7-cr);
putGST_field( PPC_GST_CR,
binop(Iop_Shr32,
- mkSzNarrow32(ty, mkexpr(rS)),
+ mkNarrowTo32(ty, mkexpr(rS)),
mkU8(shft)), cr );
}
break;
@@ -5541,7 +5551,7 @@
switch (SPR) { // Choose a register...
case 0x1:
DIP("mtxer r%u\n", rS_addr);
- putGST( PPC_GST_XER, mkSzNarrow32(ty, mkexpr(rS)) );
+ putGST( PPC_GST_XER, mkNarrowTo32(ty, mkexpr(rS)) );
break;
case 0x8:
DIP("mtlr r%u\n", rS_addr);
@@ -5553,7 +5563,7 @@
break;
case 0x100:
DIP("mtvrsave r%u\n", rS_addr);
- putGST( PPC_GST_VRSAVE, mkSzNarrow32(ty, mkexpr(rS)) );
+ putGST( PPC_GST_VRSAVE, mkNarrowTo32(ty, mkexpr(rS)) );
break;
default:
@@ -6908,7 +6918,7 @@
UInt vD_off = vectorGuestRegOffset(vD_addr);
IRExpr** args = mkIRExprVec_3(
mkU32(vD_off),
- binop(Iop_And32, mkSzNarrow32(ty, mkexpr(EA)),
+ binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
mkU32(0xF)),
mkU32(0)/*left*/ );
if (!mode64) {
@@ -6941,7 +6951,7 @@
UInt vD_off = vectorGuestRegOffset(vD_addr);
IRExpr** args = mkIRExprVec_3(
mkU32(vD_off),
- binop(Iop_And32, mkSzNarrow32(ty, mkexpr(EA)),
+ binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
mkU32(0xF)),
mkU32(1)/*right*/ );
if (!mode64) {
@@ -7040,7 +7050,7 @@
DIP("stvebx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
assign( eb, binop(Iop_And8, mkU8(0xF),
unop(Iop_32to8,
- mkSzNarrow32(ty, mkexpr(EA)) )) );
+ mkNarrowTo32(ty, mkexpr(EA)) )) );
assign( idx, binop(Iop_Shl8,
binop(Iop_Sub8, mkU8(15), mkexpr(eb)),
mkU8(3)) );
@@ -7053,7 +7063,7 @@
DIP("stvehx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
assign( addr_aligned, addr_align(mkexpr(EA), 2) );
assign( eb, binop(Iop_And8, mkU8(0xF),
- mkSzNarrow8(ty, mkexpr(addr_aligned) )) );
+ mkNarrowTo8(ty, mkexpr(addr_aligned) )) );
assign( idx, binop(Iop_Shl8,
binop(Iop_Sub8, mkU8(14), mkexpr(eb)),
mkU8(3)) );
@@ -7066,7 +7076,7 @@
DIP("stvewx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
assign( addr_aligned, addr_align(mkexpr(EA), 4) );
assign( eb, binop(Iop_And8, mkU8(0xF),
- mkSzNarrow8(ty, mkexpr(addr_aligned) )) );
+ mkNarrowTo8(ty, mkexpr(addr_aligned) )) );
assign( idx, binop(Iop_Shl8,
binop(Iop_Sub8, mkU8(12), mkexpr(eb)),
mkU8(3)) );
Modified: branches/DCAS/priv/guest-x86/toIR.c
===================================================================
--- branches/DCAS/priv/guest-x86/toIR.c 2009-05-21 21:55:50 UTC (rev 1897)
+++ branches/DCAS/priv/guest-x86/toIR.c 2009-06-02 08:18:56 UTC (rev 1898)
@@ -641,7 +641,7 @@
static void storeLE ( IRExpr* addr, IRExpr* data )
{
- stmt( IRStmt_Store(Iend_LE,addr,data) );
+ stmt( IRStmt_Store(Iend_LE, IRTemp_INVALID, addr, data) );
}
static IRExpr* unop ( IROp op, IRExpr* a )
@@ -703,7 +703,7 @@
static IRExpr* loadLE ( IRType ty, IRExpr* data )
{
- return IRExpr_Load(Iend_LE,ty,data);
+ return IRExpr_Load(False, Iend_LE, ty, data);
}
static IROp mkSizedOp ( IRType ty, IROp op8 )
@@ -7827,9 +7827,6 @@
/* Gets set to True if a LOCK prefix is seen. */
Bool pfx_lock = False;
- /* do we need follow the insn with MBusEvent(BusUnlock) ? */
- Bool unlock_bus_after_insn = False;
-
/* Set result defaults. */
dres.whatNext = Dis_Continue;
dres.len = 0;
@@ -7983,8 +7980,6 @@
if (pfx_lock) {
if (can_be_used_with_LOCK_prefix( (UChar*)&guest_code[delta] )) {
- stmt( IRStmt_MBE(Imbe_BusLock) );
- unlock_bus_after_insn = True;
DIP("lock ");
} else {
*expect_CAS = False;
@@ -13791,18 +13786,6 @@
nameIReg(sz,eregOfRM(modrm)));
} else {
*expect_CAS = True;
- /* Need to add IRStmt_MBE(Imbe_BusLock). */
- if (pfx_lock) {
- /* check it's already been taken care of */
- vassert(unlock_bus_after_insn);
- } else {
- vassert(!unlock_bus_after_insn);
- stmt( IRStmt_MBE(Imbe_BusLock) );
- unlock_bus_after_insn = True;
- }
- /* Because unlock_bus_after_insn is now True, generic logic
- at the bottom of disInstr will add the
- IRStmt_MBE(Imbe_BusUnlock). */
addr = disAMode ( &alen, sorb, delta, dis_buf );
assign( t1, loadLE(ty,mkexpr(addr)) );
assign( t2, getIReg(sz,gregOfRM(modrm)) );
@@ -14726,8 +14709,6 @@
insn, but nevertheless be paranoid and update it again right
now. */
stmt( IRStmt_Put( OFFB_EIP, mkU32(guest_EIP_curr_instr) ) );
- if (unlock_bus_after_insn)
- stmt( IRStmt_MBE(Imbe_BusUnlock) );
jmp_lit(Ijk_NoDecode, guest_EIP_curr_instr);
dres.whatNext = Dis_StopHere;
dres.len = 0;
@@ -14744,8 +14725,6 @@
decode_success:
/* All decode successes end up here. */
DIP("\n");
- if (unlock_bus_after_insn)
- stmt( IRStmt_MBE(Imbe_BusUnlock) );
dres.len = delta - delta_start;
return dres;
}
Modified: branches/DCAS/priv/host-amd64/isel.c
===================================================================
--- branches/DCAS/priv/host-amd64/isel.c 2009-05-21 21:55:50 UTC (rev 1897)
+++ branches/DCAS/priv/host-amd64/isel.c 2009-06-02 08:18:56 UTC (rev 1898)
@@ -857,8 +857,11 @@
HReg dst = newVRegI(env);
AMD64AMode* amode = iselIntExpr_AMode ( env, e->Iex.Load.addr );
+ /* We can't handle big-endian loads, nor load-linked. */
if (e->Iex.Load.end != Iend_LE)
goto irreducible;
+ if (e->Iex.Load.isLL)
+ goto irreducible;
if (ty == Ity_I64) {
addInstr(env, AMD64Instr_Alu64R(Aalu_MOV,
@@ -1959,7 +1962,8 @@
}
/* special case: 64-bit load from memory */
- if (e->tag == Iex_Load && ty == Ity_I64 && e->Iex.Load.end == Iend_LE) {
+ if (e->tag == Iex_Load && ty == Ity_I64
+ && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
AMD64AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
return AMD64RMI_Mem(am);
}
@@ -2738,7 +2742,7 @@
return lookupIRTemp(env, e->Iex.RdTmp.tmp);
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
AMD64AMode* am;
HReg res = newVRegV(env);
vassert(e->Iex.Load.ty == Ity_F32);
@@ -2862,7 +2866,7 @@
return res;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
AMD64AMode* am;
HReg res = newVRegV(env);
vassert(e->Iex.Load.ty == Ity_F64);
@@ -3167,7 +3171,7 @@
return dst;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
HReg dst = newVRegV(env);
AMD64AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 16, dst, am ));
@@ -3589,11 +3593,12 @@
/* --------- STORE --------- */
case Ist_Store: {
- IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
- IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
- IREndness end = stmt->Ist.Store.end;
+ IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
+ IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
+ IREndness end = stmt->Ist.Store.end;
+ IRTemp resSC = stmt->Ist.Store.resSC;
- if (tya != Ity_I64 || end != Iend_LE)
+ if (tya != Ity_I64 || end != Iend_LE || resSC != IRTemp_INVALID)
goto stmt_fail;
if (tyd == Ity_I64) {
@@ -3813,9 +3818,6 @@
case Imbe_Fence:
addInstr(env, AMD64Instr_MFence());
return;
- case Imbe_BusLock:
- case Imbe_BusUnlock:
- return;
default:
break;
}
Modified: branches/DCAS/priv/host-arm/isel.c
===================================================================
--- branches/DCAS/priv/host-arm/isel.c 2009-05-21 21:55:50 UTC (rev 1897)
+++ branches/DCAS/priv/host-arm/isel.c 2009-06-02 08:18:56 UTC (rev 1898)
@@ -757,8 +757,9 @@
IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
+ IRTemp resSC = stmt->Ist.Store.resSC;
- if (tya != Ity_I32 || end != Iend_LE)
+ if (tya != Ity_I32 || end != Iend_LE || resSC != IRTemp_INVALID)
goto stmt_fail;
reg = iselIntExpr_R(env, stmt->Ist.Store.data);
Modified: branches/DCAS/priv/host-ppc/hdefs.c
===================================================================
--- branches/DCAS/priv/host-ppc/hdefs.c 2009-05-21 21:55:50 UTC (rev 1897)
+++ branches/DCAS/priv/host-ppc/hdefs.c 2009-06-02 08:18:56 UTC (rev 1898)
@@ -844,7 +844,7 @@
}
PPCInstr* PPCInstr_CMov ( PPCCondCode cond,
HReg dst, PPCRI* src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_CMov;
i->Pin.CMov.cond = cond;
i->Pin.CMov.src = src;
@@ -863,6 +863,18 @@
if (sz == 8) vassert(mode64);
return i;
}
+PPCInstr* PPCInstr_LoadL ( UChar sz,
+ HReg dst, HReg src, Bool mode64 )
+{
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_LoadL;
+ i->Pin.LoadL.sz = sz;
+ i->Pin.LoadL.src = src;
+ i->Pin.LoadL.dst = dst;
+ vassert(sz == 4 || sz == 8);
+ if (sz == 8) vassert(mode64);
+ return i;
+}
PPCInstr* PPCInstr_Store ( UChar sz, PPCAMode* dst, HReg src,
Bool mode64 ) {
PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
@@ -874,6 +886,16 @@
if (sz == 8) vassert(mode64);
return i;
}
+PPCInstr* PPCInstr_StoreC ( UChar sz, HReg dst, HReg src, Bool mode64 ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_StoreC;
+ i->Pin.StoreC.sz = sz;
+ i->Pin.StoreC.src = src;
+ i->Pin.StoreC.dst = dst;
+ vassert(sz == 4 || sz == 8);
+ if (sz == 8) vassert(mode64);
+ return i;
+}
PPCInstr* PPCInstr_Set ( PPCCondCode cond, HReg dst ) {
PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_Set;
@@ -1311,6 +1333,12 @@
ppPPCAMode(i->Pin.Load.src);
return;
}
+ case Pin_LoadL:
+ vex_printf("l%carx ", i->Pin.LoadL.sz==4 ? 'w' : 'd');
+ ppHRegPPC(i->Pin.LoadL.dst);
+ vex_printf(",%%r0,");
+ ppHRegPPC(i->Pin.LoadL.src);
+ return;
case Pin_Store: {
UChar sz = i->Pin.Store.sz;
Bool idxd = toBool(i->Pin.Store.dst->tag == Pam_RR);
@@ -1321,6 +1349,12 @@
ppPPCAMode(i->Pin.Store.dst);
return;
}
+ case Pin_StoreC:
+ vex_printf("st%ccx. ", i->Pin.StoreC.sz==4 ? 'w' : 'd');
+ ppHRegPPC(i->Pin.StoreC.src);
+ vex_printf(",%%r0,");
+ ppHRegPPC(i->Pin.StoreC.dst);
+ return;
case Pin_Set: {
PPCCondCode cc = i->Pin.Set.cond;
vex_printf("set (%s),", showPPCCondCode(cc));
@@ -1702,7 +1736,7 @@
/* Finally, there is the issue that the insn trashes a
register because the literal target address has to be
loaded into a register. %r10 seems a suitable victim.
- (Can't use %r0, as use ops that interpret it as value zero). */
+ (Can't use %r0, as some insns interpret it as value zero). */
addHRegUse(u, HRmWrite, hregPPC_GPR10(mode64));
/* Upshot of this is that the assembler really must use %r10,
and no other, as a destination temporary. */
@@ -1728,10 +1762,18 @@
addRegUsage_PPCAMode(u, i->Pin.Load.src);
addHRegUse(u, HRmWrite, i->Pin.Load.dst);
return;
+ case Pin_LoadL:
+ addHRegUse(u, HRmRead, i->Pin.LoadL.src);
+ addHRegUse(u, HRmWrite, i->Pin.LoadL.dst);
+ return;
case Pin_Store:
addHRegUse(u, HRmRead, i->Pin.Store.src);
addRegUsage_PPCAMode(u, i->Pin.Store.dst);
return;
+ case Pin_StoreC:
+ addHRegUse(u, HRmRead, i->Pin.StoreC.src);
+ addHRegUse(u, HRmRead, i->Pin.StoreC.dst);
+ return;
case Pin_Set:
addHRegUse(u, HRmWrite, i->Pin.Set.dst);
return;
@@ -1934,10 +1976,18 @@
mapRegs_PPCAMode(m, i->Pin.Load.src);
mapReg(m, &i->Pin.Load.dst);
return;
+ case Pin_LoadL:
+ mapReg(m, &i->Pin.LoadL.src);
+ mapReg(m, &i->Pin.LoadL.dst);
+ return;
case Pin_Store:
mapReg(m, &i->Pin.Store.src);
mapRegs_PPCAMode(m, i->Pin.Store.dst);
return;
+ case Pin_StoreC:
+ mapReg(m, &i->Pin.StoreC.src);
+ mapReg(m, &i->Pin.StoreC.dst);
+ return;
case Pin_Set:
mapReg(m, &i->Pin.Set.dst);
return;
@@ -2954,6 +3004,7 @@
case Ijk_TInval: trc = VEX_TRC_JMP_TINVAL; break;
case Ijk_NoRedir: trc = VEX_TRC_JMP_NOREDIR; break;
case Ijk_SigTRAP: trc = VEX_TRC_JMP_SIGTRAP; break;
+ case Ijk_SigBUS: trc = VEX_TRC_JMP_SIGBUS; break;
case Ijk_Ret:
case Ijk_Call:
case Ijk_Boring:
@@ -3067,6 +3118,20 @@
}
}
+ case Pin_LoadL: {
+ if (i->Pin.LoadL.sz == 4) {
+ p = mkFormX(p, 31, iregNo(i->Pin.LoadL.dst, mode64),
+ 0, iregNo(i->Pin.LoadL.src, mode64), 20, 0);
+ goto done;
+ }
+ if (i->Pin.LoadL.sz == 8 && mode64) {
+ p = mkFormX(p, 31, iregNo(i->Pin.LoadL.dst, mode64),
+ 0, iregNo(i->Pin.LoadL.src, mode64), 84, 0);
+ goto done;
+ }
+ goto bad;
+ }
+
case Pin_Set: {
/* Make the destination register be 1 or 0, depending on whether
the relevant condition holds. */
@@ -3103,8 +3168,8 @@
case Pin_MFence: {
p = mkFormX(p, 31, 0, 0, 0, 598, 0); // sync, PPC32 p616
-// CAB: Should this be isync?
-// p = mkFormXL(p, 19, 0, 0, 0, 150, 0); // isync, PPC32 p467
+ // CAB: Should this be isync?
+ // p = mkFormXL(p, 19, 0, 0, 0, 150, 0); // isync, PPC32 p467
goto done;
}
@@ -3147,6 +3212,20 @@
goto done;
}
+ case Pin_StoreC: {
+ if (i->Pin.StoreC.sz == 4) {
+ p = mkFormX(p, 31, iregNo(i->Pin.StoreC.src, mode64),
+ 0, iregNo(i->Pin.StoreC.dst, mode64), 150, 1);
+ goto done;
+ }
+ if (i->Pin.StoreC.sz == 8 && mode64) {
+ p = mkFormX(p, 31, iregNo(i->Pin.StoreC.src, mode64),
+ 0, iregNo(i->Pin.StoreC.dst, mode64), 214, 1);
+ goto done;
+ }
+ goto bad;
+ }
+
case Pin_FpUnary: {
UInt fr_dst = fregNo(i->Pin.FpUnary.dst);
UInt fr_src = fregNo(i->Pin.FpUnary.src);
Modified: branches/DCAS/priv/host-ppc/hdefs.h
===================================================================
--- branches/DCAS/priv/host-ppc/hdefs.h 2009-05-21 21:55:50 UTC (rev 1897)
+++ branches/DCAS/priv/host-ppc/hdefs.h 2009-06-02 08:18:56 UTC (rev 1898)
@@ -459,7 +459,9 @@
Pin_Goto, /* conditional/unconditional jmp to dst */
Pin_CMov, /* conditional move */
Pin_Load, /* zero-extending load a 8|16|32|64 bit value from mem */
+ Pin_LoadL, /* load-linked (lwarx/ldarx) 32|64 bit value from mem */
Pin_Store, /* store a 8|16|32|64 bit value to mem */
+ Pin_StoreC, /* store-conditional (stwcx./stdcx.) 32|64 bit val */
Pin_Set, /* convert condition code to value 0 or 1 */
Pin_MfCR, /* move from condition register to GPR */
Pin_MFence, /* mem fence */
@@ -604,12 +606,24 @@
HReg dst;
PPCAMode* src;
} Load;
+ /* Load-and-reserve (lwarx, ldarx) */
+ struct {
+ UChar sz; /* 4|8 */
+ HReg dst;
+ HReg src;
+ } LoadL;
/* 64/32/16/8 bit stores */
struct {
UChar sz; /* 1|2|4|8 */
PPCAMode* dst;
HReg src;
} Store;
+ /* Store-conditional (stwcx., stdcx.) */
+ struct {
+ UChar sz; /* 4|8 */
+ HReg dst;
+ HReg src;
+ } StoreC;
/* Convert a ppc condition code to value 0 or 1. */
struct {
PPCCondCode cond;
@@ -791,8 +805,12 @@
extern PPCInstr* PPCInstr_CMov ( PPCCondCode, HReg dst, PPCRI* src );
extern PPCInstr* PPCInstr_Load ( UChar sz,
HReg dst, PPCAMode* src, Bool mode64 );
+extern PPCInstr* PPCInstr_LoadL ( UChar sz,
+ HReg dst, HReg src, Bool mode64 );
extern PPCInstr* PPCInstr_Store ( UChar sz, PPCAMode* dst...
[truncated message content] |