|
From: <sv...@va...> - 2009-11-27 10:07:36
|
Author: sewardj
Date: 2009-11-27 10:07:21 +0000 (Fri, 27 Nov 2009)
New Revision: 1932
Log:
Merge from trunk, r1930 (the introduction of IRStmt.LLSC)
Modified:
branches/ARM/priv/guest_amd64_toIR.c
branches/ARM/priv/guest_arm_toIR.c
branches/ARM/priv/guest_ppc_toIR.c
branches/ARM/priv/guest_x86_toIR.c
branches/ARM/priv/host_amd64_isel.c
branches/ARM/priv/host_arm_isel.c
branches/ARM/priv/host_ppc_isel.c
branches/ARM/priv/host_x86_isel.c
branches/ARM/priv/ir_defs.c
branches/ARM/priv/ir_match.c
branches/ARM/priv/ir_opt.c
branches/ARM/pub/libvex_ir.h
Modified: branches/ARM/priv/guest_amd64_toIR.c
===================================================================
--- branches/ARM/priv/guest_amd64_toIR.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/guest_amd64_toIR.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -312,12 +312,12 @@
static void storeLE ( IRExpr* addr, IRExpr* data )
{
- stmt( IRStmt_Store(Iend_LE, IRTemp_INVALID, addr, data) );
+ stmt( IRStmt_Store(Iend_LE, addr, data) );
}
-static IRExpr* loadLE ( IRType ty, IRExpr* data )
+static IRExpr* loadLE ( IRType ty, IRExpr* addr )
{
- return IRExpr_Load(False, Iend_LE, ty, data);
+ return IRExpr_Load(Iend_LE, ty, addr);
}
static IROp mkSizedOp ( IRType ty, IROp op8 )
Modified: branches/ARM/priv/guest_arm_toIR.c
===================================================================
--- branches/ARM/priv/guest_arm_toIR.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/guest_arm_toIR.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -227,16 +227,11 @@
return IRExpr_Triop(op, a1, a2, a3);
}
-static IRExpr* loadLE ( IRType ty, IRExpr* data )
+static IRExpr* loadLE ( IRType ty, IRExpr* addr )
{
- return IRExpr_Load(False, Iend_LE, ty, data);
+ return IRExpr_Load(Iend_LE, ty, addr);
}
-static IRExpr* loadLinkedLE ( IRType ty, IRExpr* data )
-{
- return IRExpr_Load(True, Iend_LE, ty, data);
-}
-
/* Add a statement to the list held by "irbb". */
static void stmt ( IRStmt* st )
{
@@ -250,7 +245,7 @@
static void storeLE ( IRExpr* addr, IRExpr* data )
{
- stmt( IRStmt_Store(Iend_LE, IRTemp_INVALID, addr, data) );
+ stmt( IRStmt_Store(Iend_LE, addr, data) );
}
/* Generate a new temporary of the given type. */
@@ -2922,7 +2917,7 @@
UInt rM = insn_3_0;
IRTemp tRn = newTemp(Ity_I32);
IRTemp tNew = newTemp(Ity_I32);
- IRTemp tOld = newTemp(Ity_I32);
+ IRTemp tOld = IRTemp_INVALID;
IRTemp tSC1 = newTemp(Ity_I1);
UInt isB = (insn >> 22) & 1;
@@ -2939,19 +2934,24 @@
assign(tNew, getIReg(rM));
if (isB) {
/* swpb */
- assign(tOld, unop(Iop_8Uto32,
- loadLinkedLE(Ity_I8, mkexpr(tRn))));
- stmt( IRStmt_Store(Iend_LE, tSC1, mkexpr(tRn),
- unop(Iop_32to8, mkexpr(tNew))) );
+ tOld = newTemp(Ity_I8);
+ stmt( IRStmt_LLSC(Iend_LE, tOld, mkexpr(tRn),
+ NULL/*=>isLL*/) );
+ stmt( IRStmt_LLSC(Iend_LE, tSC1, mkexpr(tRn),
+ unop(Iop_32to8, mkexpr(tNew))) );
} else {
/* swp */
- assign(tOld, loadLinkedLE(Ity_I32, mkexpr(tRn)));
- stmt( IRStmt_Store(Iend_LE, tSC1, mkexpr(tRn), mkexpr(tNew)) );
+ tOld = newTemp(Ity_I32);
+ stmt( IRStmt_LLSC(Iend_LE, tOld, mkexpr(tRn),
+ NULL/*=>isLL*/) );
+ stmt( IRStmt_LLSC(Iend_LE, tSC1, mkexpr(tRn),
+ mkexpr(tNew)) );
}
stmt( IRStmt_Exit(unop(Iop_Not1, mkexpr(tSC1)),
- /*Ijk_NoRedir*/Ijk_Boring,
+ /*Ijk_NoRedir*/Ijk_Boring,
IRConst_U32(guest_R15_curr_instr)) );
- putIReg(rD, mkexpr(tOld), IRTemp_INVALID, Ijk_Boring);
+ putIReg(rD, isB ? unop(Iop_8Uto32, mkexpr(tOld)) : mkexpr(tOld),
+ IRTemp_INVALID, Ijk_Boring);
DIP("swp%s%s r%u, r%u, [r%u]\n",
isB ? "b" : "", nCC(insn_cond), rD, rM, rN);
goto decode_success;
@@ -4067,14 +4067,16 @@
if (rT == 15 || rN == 15 || rT == 14 /* || (rT & 1)*/) {
/* undecodable; fall through */
} else {
+ IRTemp res;
/* make unconditional */
if (condT != IRTemp_INVALID) {
mk_skip_to_next_if_cond_is_false( condT );
condT = IRTemp_INVALID;
}
/* Ok, now we're unconditional. Do the load. */
- putIReg(rT, loadLinkedLE(Ity_I32, getIReg(rN)),
- IRTemp_INVALID, Ijk_Boring);
+ res = newTemp(Ity_I32);
+ stmt( IRStmt_LLSC(Iend_LE, res, getIReg(rN), NULL/*this is a load*/) );
+ putIReg(rT, mkexpr(res), IRTemp_INVALID, Ijk_Boring);
DIP("ldrex%s r%u, [r%u]\n", nCC(insn_cond), rT, rN);
goto decode_success;
}
@@ -4101,7 +4103,7 @@
/* Ok, now we're unconditional. Do the store. */
resSC1 = newTemp(Ity_I1);
- stmt( IRStmt_Store(Iend_LE, resSC1, getIReg(rN), getIReg(rT)) );
+ stmt( IRStmt_LLSC(Iend_LE, resSC1, getIReg(rN), getIReg(rT)) );
/* Set rD to 1 on failure, 0 on success. Currently we have
resSC1 == 0 on failure, 1 on success. */
Modified: branches/ARM/priv/guest_ppc_toIR.c
===================================================================
--- branches/ARM/priv/guest_ppc_toIR.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/guest_ppc_toIR.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -467,7 +467,7 @@
{
IRType tyA = typeOfIRExpr(irsb->tyenv, addr);
vassert(tyA == Ity_I32 || tyA == Ity_I64);
- stmt( IRStmt_Store(Iend_BE, IRTemp_INVALID, addr, data) );
+ stmt( IRStmt_Store(Iend_BE, addr, data) );
}
static IRExpr* unop ( IROp op, IRExpr* a )
@@ -517,22 +517,11 @@
}
/* This generates a normal (non load-linked) load. */
-static IRExpr* loadBE ( IRType ty, IRExpr* data )
+static IRExpr* loadBE ( IRType ty, IRExpr* addr )
{
- return IRExpr_Load(False, Iend_BE, ty, data);
+ return IRExpr_Load(Iend_BE, ty, addr);
}
-/* And this, a linked load. */
-static IRExpr* loadlinkedBE ( IRType ty, IRExpr* data )
-{
- if (mode64) {
- vassert(ty == Ity_I32 || ty == Ity_I64);
- } else {
- vassert(ty == Ity_I32);
- }
- return IRExpr_Load(True, Iend_BE, ty, data);
-}
-
static IRExpr* mkOR1 ( IRExpr* arg1, IRExpr* arg2 )
{
vassert(typeOfIRExpr(irsb->tyenv, arg1) == Ity_I1);
@@ -4861,7 +4850,8 @@
stmt( IRStmt_MBE(Imbe_Fence) );
break;
- case 0x014: // lwarx (Load Word and Reserve Indexed, PPC32 p458)
+ case 0x014: { // lwarx (Load Word and Reserve Indexed, PPC32 p458)
+ IRTemp res;
/* According to the PowerPC ISA version 2.05, b0 (called EH
in the documentation) is merely a hint bit to the
hardware, I think as to whether or not contention is
@@ -4872,10 +4862,13 @@
gen_SIGBUS_if_misaligned( EA, 4 );
// and actually do the load
- putIReg( rD_addr, mkWidenFrom32(ty, loadlinkedBE(Ity_I32, mkexpr(EA)),
- False) );
+ res = newTemp(Ity_I32);
+ stmt( IRStmt_LLSC(Iend_BE, res, mkexpr(EA), NULL/*this is a load*/) );
+
+ putIReg( rD_addr, mkWidenFrom32(ty, mkexpr(res), False) );
break;
-
+ }
+
case 0x096: {
// stwcx. (Store Word Conditional Indexed, PPC32 p532)
// Note this has to handle stwcx. in both 32- and 64-bit modes,
@@ -4896,7 +4889,7 @@
// Do the store, and get success/failure bit into resSC
resSC = newTemp(Ity_I1);
- stmt( IRStmt_Store(Iend_BE, resSC, mkexpr(EA), mkexpr(rS)) );
+ stmt( IRStmt_LLSC(Iend_BE, resSC, mkexpr(EA), mkexpr(rS)) );
// Set CR0[LT GT EQ S0] = 0b000 || XER[SO] on failure
// Set CR0[LT GT EQ S0] = 0b001 || XER[SO] on success
@@ -4948,7 +4941,8 @@
break;
/* 64bit Memsync */
- case 0x054: // ldarx (Load DWord and Reserve Indexed, PPC64 p473)
+ case 0x054: { // ldarx (Load DWord and Reserve Indexed, PPC64 p473)
+ IRTemp res;
/* According to the PowerPC ISA version 2.05, b0 (called EH
in the documentation) is merely a hint bit to the
hardware, I think as to whether or not contention is
@@ -4961,9 +4955,13 @@
gen_SIGBUS_if_misaligned( EA, 8 );
// and actually do the load
- putIReg( rD_addr, loadlinkedBE(Ity_I64, mkexpr(EA)) );
+ res = newTemp(Ity_I64);
+ stmt( IRStmt_LLSC(Iend_BE, res, mkexpr(EA), NULL/*this is a load*/) );
+
+ putIReg( rD_addr, mkexpr(res) );
break;
-
+ }
+
case 0x0D6: { // stdcx. (Store DWord Condition Indexd, PPC64 p581)
// A marginally simplified version of the stwcx. case
IRTemp rS = newTemp(Ity_I64);
@@ -4984,7 +4982,7 @@
// Do the store, and get success/failure bit into resSC
resSC = newTemp(Ity_I1);
- stmt( IRStmt_Store(Iend_BE, resSC, mkexpr(EA), mkexpr(rS)) );
+ stmt( IRStmt_LLSC(Iend_BE, resSC, mkexpr(EA), mkexpr(rS)) );
// Set CR0[LT GT EQ S0] = 0b000 || XER[SO] on failure
// Set CR0[LT GT EQ S0] = 0b001 || XER[SO] on success
Modified: branches/ARM/priv/guest_x86_toIR.c
===================================================================
--- branches/ARM/priv/guest_x86_toIR.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/guest_x86_toIR.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -651,7 +651,7 @@
static void storeLE ( IRExpr* addr, IRExpr* data )
{
- stmt( IRStmt_Store(Iend_LE, IRTemp_INVALID, addr, data) );
+ stmt( IRStmt_Store(Iend_LE, addr, data) );
}
static IRExpr* unop ( IROp op, IRExpr* a )
@@ -711,9 +711,9 @@
return IRExpr_Const(IRConst_V128(mask));
}
-static IRExpr* loadLE ( IRType ty, IRExpr* data )
+static IRExpr* loadLE ( IRType ty, IRExpr* addr )
{
- return IRExpr_Load(False, Iend_LE, ty, data);
+ return IRExpr_Load(Iend_LE, ty, addr);
}
static IROp mkSizedOp ( IRType ty, IROp op8 )
Modified: branches/ARM/priv/host_amd64_isel.c
===================================================================
--- branches/ARM/priv/host_amd64_isel.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/host_amd64_isel.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -860,8 +860,6 @@
/* We can't handle big-endian loads, nor load-linked. */
if (e->Iex.Load.end != Iend_LE)
goto irreducible;
- if (e->Iex.Load.isLL)
- goto irreducible;
if (ty == Ity_I64) {
addInstr(env, AMD64Instr_Alu64R(Aalu_MOV,
@@ -1963,7 +1961,7 @@
/* special case: 64-bit load from memory */
if (e->tag == Iex_Load && ty == Ity_I64
- && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ && e->Iex.Load.end == Iend_LE) {
AMD64AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
return AMD64RMI_Mem(am);
}
@@ -2749,7 +2747,7 @@
return lookupIRTemp(env, e->Iex.RdTmp.tmp);
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
AMD64AMode* am;
HReg res = newVRegV(env);
vassert(e->Iex.Load.ty == Ity_F32);
@@ -2873,7 +2871,7 @@
return res;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
AMD64AMode* am;
HReg res = newVRegV(env);
vassert(e->Iex.Load.ty == Ity_F64);
@@ -3178,7 +3176,7 @@
return dst;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
HReg dst = newVRegV(env);
AMD64AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 16, dst, am ));
@@ -3603,9 +3601,8 @@
IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
- IRTemp resSC = stmt->Ist.Store.resSC;
- if (tya != Ity_I64 || end != Iend_LE || resSC != IRTemp_INVALID)
+ if (tya != Ity_I64 || end != Iend_LE)
goto stmt_fail;
if (tyd == Ity_I64) {
Modified: branches/ARM/priv/host_arm_isel.c
===================================================================
--- branches/ARM/priv/host_arm_isel.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/host_arm_isel.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -1029,44 +1029,27 @@
/* --------- LOAD --------- */
case Iex_Load: {
HReg dst = newVRegI(env);
- Bool isLL = e->Iex.Load.isLL;
if (e->Iex.Load.end != Iend_LE)
goto irreducible;
- /* Normal (non-Load-Linked) cases */
- if (ty == Ity_I32 && !isLL) {
+ if (ty == Ity_I32) {
ARMAMode1* amode = iselIntExpr_AMode1 ( env, e->Iex.Load.addr );
addInstr(env, ARMInstr_LdSt32(True/*isLoad*/, dst, amode));
return dst;
}
- if (ty == Ity_I16 && !isLL) {
+ if (ty == Ity_I16) {
ARMAMode2* amode = iselIntExpr_AMode2 ( env, e->Iex.Load.addr );
addInstr(env, ARMInstr_LdSt16(True/*isLoad*/, False/*!signedLoad*/,
dst, amode));
return dst;
}
- if (ty == Ity_I8 && !isLL) {
+ if (ty == Ity_I8) {
ARMAMode1* amode = iselIntExpr_AMode1 ( env, e->Iex.Load.addr );
addInstr(env, ARMInstr_LdSt8U(True/*isLoad*/, dst, amode));
return dst;
}
- /* Load-Linked cases */
- if (isLL && (ty == Ity_I32 || ty == Ity_I8)) {
- Int szB = 0;
- HReg raddr = iselIntExpr_R ( env, e->Iex.Load.addr );
- switch (ty) {
- case Ity_I8: szB = 1; break;
- case Ity_I32: szB = 4; break;
- default: vassert(0);
- }
- addInstr(env, mk_iMOVds_RR(hregARM_R1(), raddr));
- addInstr(env, ARMInstr_LdrEX(szB));
- addInstr(env, mk_iMOVds_RR(dst, hregARM_R0()));
- return dst;
- }
-
//zz if (ty == Ity_I16) {
//zz addInstr(env, X86Instr_LoadEX(2,False,amode,dst));
//zz return dst;
@@ -1641,7 +1624,7 @@
}
/* 64-bit load */
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
HReg tLo, tHi, rA;
vassert(e->Iex.Load.ty == Ity_I64);
rA = iselIntExpr_R(env, e->Iex.Load.addr);
@@ -1879,7 +1862,7 @@
}
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
ARMAModeV* am;
HReg res = newVRegD(env);
vassert(e->Iex.Load.ty == Ity_F64);
@@ -2033,7 +2016,7 @@
return lookupIRTemp(env, e->Iex.RdTmp.tmp);
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
ARMAModeV* am;
HReg res = newVRegF(env);
vassert(e->Iex.Load.ty == Ity_F32);
@@ -2164,32 +2147,30 @@
IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
- Bool isSC = stmt->Ist.Store.resSC != IRTemp_INVALID;
if (tya != Ity_I32 || end != Iend_LE)
goto stmt_fail;
- /* normal (non-Store-Conditional) cases */
- if (tyd == Ity_I32 && !isSC) {
+ if (tyd == Ity_I32) {
HReg rD = iselIntExpr_R(env, stmt->Ist.Store.data);
ARMAMode1* am = iselIntExpr_AMode1(env, stmt->Ist.Store.addr);
addInstr(env, ARMInstr_LdSt32(False/*!isLoad*/, rD, am));
return;
}
- if (tyd == Ity_I16 && !isSC) {
+ if (tyd == Ity_I16) {
HReg rD = iselIntExpr_R(env, stmt->Ist.Store.data);
ARMAMode2* am = iselIntExpr_AMode2(env, stmt->Ist.Store.addr);
addInstr(env, ARMInstr_LdSt16(False/*!isLoad*/,
False/*!isSignedLoad*/, rD, am));
return;
}
- if (tyd == Ity_I8 && !isSC) {
+ if (tyd == Ity_I8) {
HReg rD = iselIntExpr_R(env, stmt->Ist.Store.data);
ARMAMode1* am = iselIntExpr_AMode1(env, stmt->Ist.Store.addr);
addInstr(env, ARMInstr_LdSt8U(False/*!isLoad*/, rD, am));
return;
}
- if (tyd == Ity_I64 && !isSC) {
+ if (tyd == Ity_I64) {
HReg rDhi, rDlo, rA;
iselInt64Expr(&rDhi, &rDlo, env, stmt->Ist.Store.data);
rA = iselIntExpr_R(env, stmt->Ist.Store.addr);
@@ -2199,43 +2180,19 @@
ARMAMode1_RI(rA,0)));
return;
}
- if (tyd == Ity_F64 && !isSC) {
+ if (tyd == Ity_F64) {
HReg dD = iselDblExpr(env, stmt->Ist.Store.data);
ARMAModeV* am = iselIntExpr_AModeV(env, stmt->Ist.Store.addr);
addInstr(env, ARMInstr_VLdStD(False/*!isLoad*/, dD, am));
return;
}
- if (tyd == Ity_F32 && !isSC) {
+ if (tyd == Ity_F32) {
HReg fD = iselFltExpr(env, stmt->Ist.Store.data);
ARMAModeV* am = iselIntExpr_AModeV(env, stmt->Ist.Store.addr);
addInstr(env, ARMInstr_VLdStS(False/*!isLoad*/, fD, am));
return;
}
- /* Store-Conditional cases */
- if (isSC && (tyd == Ity_I32 || tyd == Ity_I8)) {
- Int szB = 0;
- HReg r_res = lookupIRTemp(env, stmt->Ist.Store.resSC);
- HReg rD = iselIntExpr_R(env, stmt->Ist.Store.data);
- HReg rA = iselIntExpr_R(env, stmt->Ist.Store.addr);
- ARMRI84* one = ARMRI84_I84(1,0);
- switch (tyd) {
- case Ity_I8: szB = 1; break;
- case Ity_I32: szB = 4; break;
- default: vassert(0);
- }
- addInstr(env, mk_iMOVds_RR(hregARM_R1(), rD));
- addInstr(env, mk_iMOVds_RR(hregARM_R2(), rA));
- addInstr(env, ARMInstr_StrEX(szB));
- /* now r0 is 1 if failed, 0 if success. Change to IR
- conventions (0 is fail, 1 is success). Also transfer
- result to r_res. */
- addInstr(env, ARMInstr_Alu(ARMalu_XOR, r_res, hregARM_R0(), one));
- /* And be conservative -- mask off all but the lowest bit */
- addInstr(env, ARMInstr_Alu(ARMalu_AND, r_res, r_res, one));
- return;
- }
-
break;
}
@@ -2384,6 +2341,60 @@
break;
}
+ /* --------- Load Linked and Store Conditional --------- */
+ case Ist_LLSC: {
+ if (stmt->Ist.LLSC.storedata == NULL) {
+ /* LL */
+ IRTemp res = stmt->Ist.LLSC.result;
+ IRType ty = typeOfIRTemp(env->type_env, res);
+ if (ty == Ity_I32 || ty == Ity_I8) {
+ Int szB = 0;
+ HReg r_dst = lookupIRTemp(env, res);
+ HReg raddr = iselIntExpr_R(env, stmt->Ist.LLSC.addr);
+ switch (ty) {
+ case Ity_I8: szB = 1; break;
+ case Ity_I32: szB = 4; break;
+ default: vassert(0);
+ }
+ addInstr(env, mk_iMOVds_RR(hregARM_R1(), raddr));
+ addInstr(env, ARMInstr_LdrEX(szB));
+ addInstr(env, mk_iMOVds_RR(r_dst, hregARM_R0()));
+ return;
+ }
+ /* else fall thru; is unhandled */
+ } else {
+ /* SC */
+ IRTemp res = stmt->Ist.LLSC.result;
+ IRType ty = typeOfIRTemp(env->type_env, res);
+ IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.storedata);
+ vassert(ty == Ity_I1);
+ if (tyd == Ity_I32 || tyd == Ity_I8) {
+ Int szB = 0;
+ HReg r_res = lookupIRTemp(env, res);
+ HReg rD = iselIntExpr_R(env, stmt->Ist.LLSC.storedata);
+ HReg rA = iselIntExpr_R(env, stmt->Ist.LLSC.addr);
+ ARMRI84* one = ARMRI84_I84(1,0);
+ switch (tyd) {
+ case Ity_I8: szB = 1; break;
+ case Ity_I32: szB = 4; break;
+ default: vassert(0);
+ }
+ addInstr(env, mk_iMOVds_RR(hregARM_R1(), rD));
+ addInstr(env, mk_iMOVds_RR(hregARM_R2(), rA));
+ addInstr(env, ARMInstr_StrEX(szB));
+ /* now r0 is 1 if failed, 0 if success. Change to IR
+ conventions (0 is fail, 1 is success). Also transfer
+ result to r_res. */
+ addInstr(env, ARMInstr_Alu(ARMalu_XOR, r_res, hregARM_R0(), one));
+ /* And be conservative -- mask off all but the lowest bit */
+ addInstr(env, ARMInstr_Alu(ARMalu_AND, r_res, r_res, one));
+ return;
+ }
+ /* else fall thru; is unhandled */
+ }
+ break;
+ }
+
/* --------- INSTR MARK --------- */
/* Doesn't generate any executable code ... */
case Ist_IMark:
Modified: branches/ARM/priv/host_ppc_isel.c
===================================================================
--- branches/ARM/priv/host_ppc_isel.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/host_ppc_isel.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -1169,32 +1169,14 @@
/* --------- LOAD --------- */
case Iex_Load: {
- HReg r_dst;
-
+ HReg r_dst;
+ PPCAMode* am_addr;
if (e->Iex.Load.end != Iend_BE)
goto irreducible;
-
- r_dst = newVRegI(env);
-
- if (e->Iex.Load.isLL) {
- /* lwarx or ldarx. Be simple; force address into a register. */
- HReg r_addr = iselWordExpr_R( env, e->Iex.Load.addr );
- if (ty == Ity_I32) {
- addInstr(env, PPCInstr_LoadL( 4, r_dst, r_addr, mode64 ));
- }
- else if (ty == Ity_I64 && mode64) {
- addInstr(env, PPCInstr_LoadL( 8, r_dst, r_addr, mode64 ));
- }
- else
- goto irreducible;
- } else {
- /* Normal load; use whatever amodes we can. */
- PPCAMode* am_addr
- = iselWordExpr_AMode( env, e->Iex.Load.addr, ty/*of xfer*/ );
- addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)),
- r_dst, am_addr, mode64 ));
- }
-
+ r_dst = newVRegI(env);
+ am_addr = iselWordExpr_AMode( env, e->Iex.Load.addr, ty/*of xfer*/ );
+ addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)),
+ r_dst, am_addr, mode64 ));
return r_dst;
/*NOTREACHED*/
}
@@ -1551,7 +1533,7 @@
DECLARE_PATTERN(p_LDbe16_then_16Uto32);
DEFINE_PATTERN(p_LDbe16_then_16Uto32,
unop(Iop_16Uto32,
- IRExpr_Load(False,Iend_BE,Ity_I16,bind(0))) );
+ IRExpr_Load(Iend_BE,Ity_I16,bind(0))) );
if (matchIRExpr(&mi,p_LDbe16_then_16Uto32,e)) {
HReg r_dst = newVRegI(env);
PPCAMode* amode
@@ -2609,7 +2591,7 @@
vassert(typeOfIRExpr(env->type_env,e) == Ity_I64);
/* 64-bit load */
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
HReg r_addr = iselWordExpr_R(env, e->Iex.Load.addr);
@@ -2967,7 +2949,7 @@
return lookupIRTemp(env, e->Iex.RdTmp.tmp);
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
PPCAMode* am_addr;
HReg r_dst = newVRegF(env);
vassert(e->Iex.Load.ty == Ity_F32);
@@ -3115,7 +3097,7 @@
}
/* --------- LOAD --------- */
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
HReg r_dst = newVRegF(env);
PPCAMode* am_addr;
vassert(e->Iex.Load.ty == Ity_F64);
@@ -3366,7 +3348,7 @@
return dst;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
PPCAMode* am_addr;
HReg v_dst = newVRegV(env);
vassert(e->Iex.Load.ty == Ity_V128);
@@ -3770,7 +3752,6 @@
IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
- IRTemp resSC = stmt->Ist.Store.resSC;
if (end != Iend_BE)
goto stmt_fail;
@@ -3779,34 +3760,6 @@
if (mode64 && (tya != Ity_I64))
goto stmt_fail;
- if (resSC != IRTemp_INVALID) {
- /* deal with store-conditional */
- HReg r_res = lookupIRTemp(env, resSC);
- HReg r_a = iselWordExpr_R(env, stmt->Ist.Store.addr);
- HReg r_src = iselWordExpr_R(env, stmt->Ist.Store.data);
- HReg r_tmp = newVRegI(env);
- if (tyd == Ity_I32 || (tyd == Ity_I64 && mode64)) {
- addInstr(env, PPCInstr_StoreC( tyd==Ity_I32 ? 4 : 8,
- r_a, r_src, mode64 ));
- addInstr(env, PPCInstr_MfCR( r_tmp ));
- addInstr(env, PPCInstr_Shft(
- Pshft_SHR,
- env->mode64 ? False : True/*F:64-bit, T:32-bit shift*/,
- r_tmp, r_tmp,
- PPCRH_Imm(False/*unsigned*/, 29)));
- /* Probably unnecessary, since the IR dest type is Ity_I1,
- and so we are entitled to leave whatever junk we like
- drifting round in the upper 31 or 63 bits of r_res.
- However, for the sake of conservativeness .. */
- addInstr(env, PPCInstr_Alu(
- Palu_AND,
- r_res, r_tmp,
- PPCRH_Imm(False/*signed*/, 1)));
- return;
- }
- goto stmt_fail;
- }
-
if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32 ||
(mode64 && (tyd == Ity_I64))) {
PPCAMode* am_addr
@@ -3979,6 +3932,67 @@
break;
}
+ /* --------- Load Linked or Store Conditional --------- */
+ case Ist_LLSC: {
+ IRTemp res = stmt->Ist.LLSC.result;
+ IRType tyRes = typeOfIRTemp(env->type_env, res);
+ IRType tyAddr = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.addr);
+
+ if (stmt->Ist.LLSC.end != Iend_BE)
+ goto stmt_fail;
+ if (!mode64 && (tyAddr != Ity_I32))
+ goto stmt_fail;
+ if (mode64 && (tyAddr != Ity_I64))
+ goto stmt_fail;
+
+ if (stmt->Ist.LLSC.storedata == NULL) {
+ /* LL */
+ HReg r_addr = iselWordExpr_R( env, stmt->Ist.LLSC.addr );
+ HReg r_dst = lookupIRTemp(env, res);
+ if (tyRes == Ity_I32) {
+ addInstr(env, PPCInstr_LoadL( 4, r_dst, r_addr, mode64 ));
+ return;
+ }
+ if (tyRes == Ity_I64 && mode64) {
+ addInstr(env, PPCInstr_LoadL( 8, r_dst, r_addr, mode64 ));
+ return;
+ }
+ /* fallthru */;
+ } else {
+ /* SC */
+ HReg r_res = lookupIRTemp(env, res); /* :: Ity_I1 */
+ HReg r_a = iselWordExpr_R(env, stmt->Ist.LLSC.addr);
+ HReg r_src = iselWordExpr_R(env, stmt->Ist.LLSC.storedata);
+ HReg r_tmp = newVRegI(env);
+ IRType tyData = typeOfIRExpr(env->type_env,
+ stmt->Ist.LLSC.storedata);
+ vassert(tyRes == Ity_I1);
+ if (tyData == Ity_I32 || (tyData == Ity_I64 && mode64)) {
+ addInstr(env, PPCInstr_StoreC( tyData==Ity_I32 ? 4 : 8,
+ r_a, r_src, mode64 ));
+ addInstr(env, PPCInstr_MfCR( r_tmp ));
+ addInstr(env, PPCInstr_Shft(
+ Pshft_SHR,
+ env->mode64 ? False : True
+ /*F:64-bit, T:32-bit shift*/,
+ r_tmp, r_tmp,
+ PPCRH_Imm(False/*unsigned*/, 29)));
+ /* Probably unnecessary, since the IR dest type is Ity_I1,
+ and so we are entitled to leave whatever junk we like
+ drifting round in the upper 31 or 63 bits of r_res.
+ However, for the sake of conservativeness .. */
+ addInstr(env, PPCInstr_Alu(
+ Palu_AND,
+ r_res, r_tmp,
+ PPCRH_Imm(False/*signed*/, 1)));
+ return;
+ }
+ /* fallthru */
+ }
+ goto stmt_fail;
+ /*NOTREACHED*/
+ }
+
/* --------- Call to DIRTY helper --------- */
case Ist_Dirty: {
IRType retty;
Modified: branches/ARM/priv/host_x86_isel.c
===================================================================
--- branches/ARM/priv/host_x86_isel.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/host_x86_isel.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -763,8 +763,6 @@
/* We can't handle big-endian loads, nor load-linked. */
if (e->Iex.Load.end != Iend_LE)
goto irreducible;
- if (e->Iex.Load.isLL)
- goto irreducible;
if (ty == Ity_I32) {
addInstr(env, X86Instr_Alu32R(Xalu_MOV,
@@ -1070,7 +1068,7 @@
DECLARE_PATTERN(p_LDle8_then_8Uto32);
DEFINE_PATTERN(p_LDle8_then_8Uto32,
unop(Iop_8Uto32,
- IRExpr_Load(False,Iend_LE,Ity_I8,bind(0))) );
+ IRExpr_Load(Iend_LE,Ity_I8,bind(0))) );
if (matchIRExpr(&mi,p_LDle8_then_8Uto32,e)) {
HReg dst = newVRegI(env);
X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
@@ -1084,7 +1082,7 @@
DECLARE_PATTERN(p_LDle8_then_8Sto32);
DEFINE_PATTERN(p_LDle8_then_8Sto32,
unop(Iop_8Sto32,
- IRExpr_Load(False,Iend_LE,Ity_I8,bind(0))) );
+ IRExpr_Load(Iend_LE,Ity_I8,bind(0))) );
if (matchIRExpr(&mi,p_LDle8_then_8Sto32,e)) {
HReg dst = newVRegI(env);
X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
@@ -1098,7 +1096,7 @@
DECLARE_PATTERN(p_LDle16_then_16Uto32);
DEFINE_PATTERN(p_LDle16_then_16Uto32,
unop(Iop_16Uto32,
- IRExpr_Load(False,Iend_LE,Ity_I16,bind(0))) );
+ IRExpr_Load(Iend_LE,Ity_I16,bind(0))) );
if (matchIRExpr(&mi,p_LDle16_then_16Uto32,e)) {
HReg dst = newVRegI(env);
X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
@@ -1537,7 +1535,7 @@
/* special case: 32-bit load from memory */
if (e->tag == Iex_Load && ty == Ity_I32
- && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ && e->Iex.Load.end == Iend_LE) {
X86AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
return X86RMI_Mem(am);
}
@@ -1956,7 +1954,7 @@
}
/* 64-bit load */
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
HReg tLo, tHi;
X86AMode *am0, *am4;
vassert(e->Iex.Load.ty == Ity_I64);
@@ -2744,7 +2742,7 @@
return lookupIRTemp(env, e->Iex.RdTmp.tmp);
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
X86AMode* am;
HReg res = newVRegF(env);
vassert(e->Iex.Load.ty == Ity_F32);
@@ -2868,7 +2866,7 @@
return freg;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
X86AMode* am;
HReg res = newVRegF(env);
vassert(e->Iex.Load.ty == Ity_F64);
@@ -3120,7 +3118,7 @@
return dst;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
HReg dst = newVRegV(env);
X86AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
addInstr(env, X86Instr_SseLdSt( True/*load*/, dst, am ));
@@ -3141,7 +3139,7 @@
DECLARE_PATTERN(p_zwiden_load64);
DEFINE_PATTERN(p_zwiden_load64,
unop(Iop_64UtoV128,
- IRExpr_Load(False,Iend_LE,Ity_I64,bind(0))));
+ IRExpr_Load(Iend_LE,Ity_I64,bind(0))));
if (matchIRExpr(&mi, p_zwiden_load64, e)) {
X86AMode* am = iselIntExpr_AMode(env, mi.bindee[0]);
HReg dst = newVRegV(env);
@@ -3609,9 +3607,8 @@
IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
- IRTemp resSC = stmt->Ist.Store.resSC;
- if (tya != Ity_I32 || end != Iend_LE || resSC != IRTemp_INVALID)
+ if (tya != Ity_I32 || end != Iend_LE)
goto stmt_fail;
if (tyd == Ity_I32) {
Modified: branches/ARM/priv/ir_defs.c
===================================================================
--- branches/ARM/priv/ir_defs.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/ir_defs.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -658,8 +658,7 @@
vex_printf( ")" );
break;
case Iex_Load:
- vex_printf( "LD%s%s:", e->Iex.Load.end==Iend_LE ? "le" : "be",
- e->Iex.Load.isLL ? "-LL" : "" );
+ vex_printf( "LD%s:", e->Iex.Load.end==Iend_LE ? "le" : "be" );
ppIRType(e->Iex.Load.ty);
vex_printf( "(" );
ppIRExpr(e->Iex.Load.addr);
@@ -839,20 +838,31 @@
ppIRExpr(s->Ist.WrTmp.data);
break;
case Ist_Store:
- if (s->Ist.Store.resSC != IRTemp_INVALID) {
- ppIRTemp(s->Ist.Store.resSC);
- vex_printf( " = SC( " );
- }
vex_printf( "ST%s(", s->Ist.Store.end==Iend_LE ? "le" : "be" );
ppIRExpr(s->Ist.Store.addr);
vex_printf( ") = ");
ppIRExpr(s->Ist.Store.data);
- if (s->Ist.Store.resSC != IRTemp_INVALID)
- vex_printf( " )" );
break;
case Ist_CAS:
ppIRCAS(s->Ist.CAS.details);
break;
+ case Ist_LLSC:
+ if (s->Ist.LLSC.storedata == NULL) {
+ ppIRTemp(s->Ist.LLSC.result);
+ vex_printf(" = LD%s-Linked(",
+ s->Ist.LLSC.end==Iend_LE ? "le" : "be");
+ ppIRExpr(s->Ist.LLSC.addr);
+ vex_printf(")");
+ } else {
+ ppIRTemp(s->Ist.LLSC.result);
+ vex_printf(" = ( ST%s-Cond(",
+ s->Ist.LLSC.end==Iend_LE ? "le" : "be");
+ ppIRExpr(s->Ist.LLSC.addr);
+ vex_printf(") = ");
+ ppIRExpr(s->Ist.LLSC.storedata);
+ vex_printf(" )");
+ }
+ break;
case Ist_Dirty:
ppIRDirty(s->Ist.Dirty.details);
break;
@@ -1071,10 +1081,9 @@
e->Iex.Unop.arg = arg;
return e;
}
-IRExpr* IRExpr_Load ( Bool isLL, IREndness end, IRType ty, IRExpr* addr ) {
+IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr ) {
IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
e->tag = Iex_Load;
- e->Iex.Load.isLL = isLL;
e->Iex.Load.end = end;
e->Iex.Load.ty = ty;
e->Iex.Load.addr = addr;
@@ -1267,14 +1276,12 @@
s->Ist.WrTmp.data = data;
return s;
}
-IRStmt* IRStmt_Store ( IREndness end,
- IRTemp resSC, IRExpr* addr, IRExpr* data ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
- s->tag = Ist_Store;
- s->Ist.Store.end = end;
- s->Ist.Store.resSC = resSC;
- s->Ist.Store.addr = addr;
- s->Ist.Store.data = data;
+IRStmt* IRStmt_Store ( IREndness end, IRExpr* addr, IRExpr* data ) {
+ IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ s->tag = Ist_Store;
+ s->Ist.Store.end = end;
+ s->Ist.Store.addr = addr;
+ s->Ist.Store.data = data;
vassert(end == Iend_LE || end == Iend_BE);
return s;
}
@@ -1284,6 +1291,16 @@
s->Ist.CAS.details = cas;
return s;
}
+IRStmt* IRStmt_LLSC ( IREndness end,
+ IRTemp result, IRExpr* addr, IRExpr* storedata ) {
+ IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ s->tag = Ist_LLSC;
+ s->Ist.LLSC.end = end;
+ s->Ist.LLSC.result = result;
+ s->Ist.LLSC.addr = addr;
+ s->Ist.LLSC.storedata = storedata;
+ return s;
+}
IRStmt* IRStmt_Dirty ( IRDirty* d )
{
IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
@@ -1428,8 +1445,7 @@
return IRExpr_Unop(e->Iex.Unop.op,
deepCopyIRExpr(e->Iex.Unop.arg));
case Iex_Load:
- return IRExpr_Load(e->Iex.Load.isLL,
- e->Iex.Load.end,
+ return IRExpr_Load(e->Iex.Load.end,
e->Iex.Load.ty,
deepCopyIRExpr(e->Iex.Load.addr));
case Iex_Const:
@@ -1500,11 +1516,17 @@
deepCopyIRExpr(s->Ist.WrTmp.data));
case Ist_Store:
return IRStmt_Store(s->Ist.Store.end,
- s->Ist.Store.resSC,
deepCopyIRExpr(s->Ist.Store.addr),
deepCopyIRExpr(s->Ist.Store.data));
case Ist_CAS:
return IRStmt_CAS(deepCopyIRCAS(s->Ist.CAS.details));
+ case Ist_LLSC:
+ return IRStmt_LLSC(s->Ist.LLSC.end,
+ s->Ist.LLSC.result,
+ deepCopyIRExpr(s->Ist.LLSC.addr),
+ s->Ist.LLSC.storedata
+ ? deepCopyIRExpr(s->Ist.LLSC.storedata)
+ : NULL);
case Ist_Dirty:
return IRStmt_Dirty(deepCopyIRDirty(s->Ist.Dirty.details));
case Ist_MBE:
@@ -2162,6 +2184,10 @@
&& isIRAtom(cas->expdLo)
&& (cas->dataHi ? isIRAtom(cas->dataHi) : True)
&& isIRAtom(cas->dataLo) );
+ case Ist_LLSC:
+ return toBool( isIRAtom(st->Ist.LLSC.addr)
+ && (st->Ist.LLSC.storedata
+ ? isIRAtom(st->Ist.LLSC.storedata) : True) );
case Ist_Dirty:
di = st->Ist.Dirty.details;
if (!isIRAtom(di->guard))
@@ -2353,6 +2379,11 @@
useBeforeDef_Expr(bb,stmt,cas->dataHi,def_counts);
useBeforeDef_Expr(bb,stmt,cas->dataLo,def_counts);
break;
+ case Ist_LLSC:
+ useBeforeDef_Expr(bb,stmt,stmt->Ist.LLSC.addr,def_counts);
+ if (stmt->Ist.LLSC.storedata != NULL)
+ useBeforeDef_Expr(bb,stmt,stmt->Ist.LLSC.storedata,def_counts);
+ break;
case Ist_Dirty:
d = stmt->Ist.Dirty.details;
for (i = 0; d->args[i] != NULL; i++)
@@ -2630,9 +2661,6 @@
sanityCheckFail(bb,stmt,"IRStmt.Store.data: cannot Store :: Ity_I1");
if (stmt->Ist.Store.end != Iend_LE && stmt->Ist.Store.end != Iend_BE)
sanityCheckFail(bb,stmt,"Ist.Store.end: bogus endianness");
- if (stmt->Ist.Store.resSC != IRTemp_INVALID
- && typeOfIRTemp(tyenv, stmt->Ist.Store.resSC) != Ity_I1)
- sanityCheckFail(bb,stmt,"Ist.Store.resSC: not :: Ity_I1");
break;
case Ist_CAS:
cas = stmt->Ist.CAS.details;
@@ -2684,6 +2712,27 @@
bad_cas:
sanityCheckFail(bb,stmt,"IRStmt.CAS: ill-formed");
break;
+ case Ist_LLSC: {
+ IRType tyRes;
+ if (typeOfIRExpr(tyenv, stmt->Ist.LLSC.addr) != gWordTy)
+ sanityCheckFail(bb,stmt,"IRStmt.LLSC.addr: not :: guest word type");
+ if (stmt->Ist.LLSC.end != Iend_LE && stmt->Ist.LLSC.end != Iend_BE)
+ sanityCheckFail(bb,stmt,"Ist.LLSC.end: bogus endianness");
+ tyRes = typeOfIRTemp(tyenv, stmt->Ist.LLSC.result);
+ if (stmt->Ist.LLSC.storedata == NULL) {
+ /* it's a LL */
+ if (tyRes != Ity_I64 && tyRes != Ity_I32 && tyRes != Ity_I8)
+ sanityCheckFail(bb,stmt,"Ist.LLSC(LL).result :: bogus");
+ } else {
+ /* it's a SC */
+ if (tyRes != Ity_I1)
+ sanityCheckFail(bb,stmt,"Ist.LLSC(SC).result: not :: Ity_I1");
+ tyData = typeOfIRExpr(tyenv, stmt->Ist.LLSC.storedata);
+ if (tyData != Ity_I64 && tyData != Ity_I32 && tyData != Ity_I8)
+ sanityCheckFail(bb,stmt,"Ist.LLSC(SC).result :: storedata bogus");
+ }
+ break;
+ }
case Ist_Dirty:
/* Mostly check for various kinds of ill-formed dirty calls. */
d = stmt->Ist.Dirty.details;
@@ -2814,17 +2863,6 @@
"IRStmt.Tmp: destination tmp is assigned more than once");
break;
case Ist_Store:
- if (stmt->Ist.Store.resSC != IRTemp_INVALID) {
- IRTemp resSC = stmt->Ist.Store.resSC;
- if (resSC < 0 || resSC >= n_temps)
- sanityCheckFail(bb, stmt,
- "IRStmt.Store.resSC: destination tmp is out of range");
- def_counts[resSC]++;
- if (def_counts[resSC] > 1)
- sanityCheckFail(bb, stmt,
- "IRStmt.Store.resSC: destination tmp "
- "is assigned more than once");
- }
break;
case Ist_Dirty:
if (stmt->Ist.Dirty.details->tmp != IRTemp_INVALID) {
@@ -2840,7 +2878,6 @@
break;
case Ist_CAS:
cas = stmt->Ist.CAS.details;
-
if (cas->oldHi != IRTemp_INVALID) {
if (cas->oldHi < 0 || cas->oldHi >= n_temps)
sanityCheckFail(bb, stmt,
@@ -2851,16 +2888,25 @@
"IRStmt.CAS: destination tmpHi is assigned more than once");
}
if (cas->oldLo < 0 || cas->oldLo >= n_temps)
- sanityCheckFail(bb, stmt,
- "IRStmt.CAS: destination tmpLo is out of range");
- def_counts[cas->oldLo]++;
- if (def_counts[cas->oldLo] > 1)
- sanityCheckFail(bb, stmt,
- "IRStmt.CAS: destination tmpLo is assigned more than once");
- break;
+ sanityCheckFail(bb, stmt,
+ "IRStmt.CAS: destination tmpLo is out of range");
+ def_counts[cas->oldLo]++;
+ if (def_counts[cas->oldLo] > 1)
+ sanityCheckFail(bb, stmt,
+ "IRStmt.CAS: destination tmpLo is assigned more than once");
+ break;
+ case Ist_LLSC:
+ if (stmt->Ist.LLSC.result < 0 || stmt->Ist.LLSC.result >= n_temps)
+ sanityCheckFail(bb, stmt,
+ "IRStmt.LLSC: destination tmp is out of range");
+ def_counts[stmt->Ist.LLSC.result]++;
+ if (def_counts[stmt->Ist.LLSC.result] > 1)
+ sanityCheckFail(bb, stmt,
+ "IRStmt.LLSC: destination tmp is assigned more than once");
+ break;
default:
- /* explicitly handle the rest, so as to keep gcc quiet */
- break;
+ /* explicitly handle the rest, so as to keep gcc quiet */
+ break;
}
}
Modified: branches/ARM/priv/ir_match.c
===================================================================
--- branches/ARM/priv/ir_match.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/ir_match.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -90,7 +90,6 @@
return True;
case Iex_Load:
if (e->tag != Iex_Load) return False;
- if (p->Iex.Load.isLL != e->Iex.Load.isLL) return False;
if (p->Iex.Load.end != e->Iex.Load.end) return False;
if (p->Iex.Load.ty != e->Iex.Load.ty) return False;
if (!matchWrk(mi, p->Iex.Load.addr, e->Iex.Load.addr))
Modified: branches/ARM/priv/ir_opt.c
===================================================================
--- branches/ARM/priv/ir_opt.c 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/priv/ir_opt.c 2009-11-27 10:07:21 UTC (rev 1932)
@@ -334,8 +334,7 @@
case Iex_Load:
t1 = newIRTemp(bb->tyenv, ty);
addStmtToIRSB(bb, IRStmt_WrTmp(t1,
- IRExpr_Load(ex->Iex.Load.isLL,
- ex->Iex.Load.end,
+ IRExpr_Load(ex->Iex.Load.end,
ex->Iex.Load.ty,
flatten_Expr(bb, ex->Iex.Load.addr))));
return IRExpr_RdTmp(t1);
@@ -426,8 +425,7 @@
case Ist_Store:
e1 = flatten_Expr(bb, st->Ist.Store.addr);
e2 = flatten_Expr(bb, st->Ist.Store.data);
- addStmtToIRSB(bb, IRStmt_Store(st->Ist.Store.end,
- st->Ist.Store.resSC, e1,e2));
+ addStmtToIRSB(bb, IRStmt_Store(st->Ist.Store.end, e1,e2));
break;
case Ist_CAS:
cas = st->Ist.CAS.details;
@@ -440,6 +438,14 @@
e1, e2, e3, e4, e5 );
addStmtToIRSB(bb, IRStmt_CAS(cas2));
break;
+ case Ist_LLSC:
+ e1 = flatten_Expr(bb, st->Ist.LLSC.addr);
+ e2 = st->Ist.LLSC.storedata
+ ? flatten_Expr(bb, st->Ist.LLSC.storedata)
+ : NULL;
+ addStmtToIRSB(bb, IRStmt_LLSC(st->Ist.LLSC.end,
+ st->Ist.LLSC.result, e1, e2));
+ break;
case Ist_Dirty:
d = st->Ist.Dirty.details;
d2 = emptyIRDirty();
@@ -724,7 +730,7 @@
enough do a lot better if needed. */
/* Probably also overly-conservative, but also dump everything
if we hit a memory bus event (fence, lock, unlock). Ditto
- AbiHints and CASs. */
+ AbiHints, CASs, LLs and SCs. */
case Ist_AbiHint:
vassert(isIRAtom(st->Ist.AbiHint.base));
vassert(isIRAtom(st->Ist.AbiHint.nia));
@@ -732,6 +738,7 @@
case Ist_MBE:
case Ist_Dirty:
case Ist_CAS:
+ case Ist_LLSC:
for (j = 0; j < env->used; j++)
env->inuse[j] = False;
break;
@@ -1706,7 +1713,6 @@
case Iex_Load:
vassert(isIRAtom(ex->Iex.Load.addr));
return IRExpr_Load(
- ex->Iex.Load.isLL,
ex->Iex.Load.end,
ex->Iex.Load.ty,
subst_Expr(env, ex->Iex.Load.addr)
@@ -1795,7 +1801,6 @@
vassert(isIRAtom(st->Ist.Store.data));
return IRStmt_Store(
st->Ist.Store.end,
- st->Ist.Store.resSC,
fold_Expr(subst_Expr(env, st->Ist.Store.addr)),
fold_Expr(subst_Expr(env, st->Ist.Store.data))
);
@@ -1819,6 +1824,19 @@
return IRStmt_CAS(cas2);
}
+ case Ist_LLSC:
+ vassert(isIRAtom(st->Ist.LLSC.addr));
+ if (st->Ist.LLSC.storedata)
+ vassert(isIRAtom(st->Ist.LLSC.storedata));
+ return IRStmt_LLSC(
+ st->Ist.LLSC.end,
+ st->Ist.LLSC.result,
+ fold_Expr(subst_Expr(env, st->Ist.LLSC.addr)),
+ st->Ist.LLSC.storedata
+ ? fold_Expr(subst_Expr(env, st->Ist.LLSC.storedata))
+ : NULL
+ );
+
case Ist_Dirty: {
Int i;
IRDirty *d, *d2;
@@ -2054,6 +2072,11 @@
addUses_Expr(set, cas->dataHi);
addUses_Expr(set, cas->dataLo);
return;
+ case Ist_LLSC:
+ addUses_Expr(set, st->Ist.LLSC.addr);
+ if (st->Ist.LLSC.storedata)
+ addUses_Expr(set, st->Ist.LLSC.storedata);
+ return;
case Ist_Dirty:
d = st->Ist.Dirty.details;
if (d->mFx != Ifx_None)
@@ -2640,7 +2663,8 @@
to do the no-overlap assessments needed for Put/PutI.
*/
switch (st->tag) {
- case Ist_Dirty: case Ist_Store: case Ist_MBE: case Ist_CAS:
+ case Ist_Dirty: case Ist_Store: case Ist_MBE:
+ case Ist_CAS: case Ist_LLSC:
paranoia = 2; break;
case Ist_Put: case Ist_PutI:
paranoia = 1; break;
@@ -3331,8 +3355,6 @@
deltaIRExpr(st->Ist.Exit.guard, delta);
break;
case Ist_Store:
- if (st->Ist.Store.resSC != IRTemp_INVALID)
- st->Ist.Store.resSC += delta;
deltaIRExpr(st->Ist.Store.addr, delta);
deltaIRExpr(st->Ist.Store.data, delta);
break;
@@ -3348,6 +3370,12 @@
deltaIRExpr(st->Ist.CAS.details->dataHi, delta);
deltaIRExpr(st->Ist.CAS.details->dataLo, delta);
break;
+ case Ist_LLSC:
+ st->Ist.LLSC.result += delta;
+ deltaIRExpr(st->Ist.LLSC.addr, delta);
+ if (st->Ist.LLSC.storedata)
+ deltaIRExpr(st->Ist.LLSC.storedata, delta);
+ break;
case Ist_Dirty:
d = st->Ist.Dirty.details;
deltaIRExpr(d->guard, delta);
@@ -3812,6 +3840,11 @@
aoccCount_Expr(uses, cas->dataHi);
aoccCount_Expr(uses, cas->dataLo);
return;
+ case Ist_LLSC:
+ aoccCount_Expr(uses, st->Ist.LLSC.addr);
+ if (st->Ist.LLSC.storedata)
+ aoccCount_Expr(uses, st->Ist.LLSC.storedata);
+ return;
case Ist_Dirty:
d = st->Ist.Dirty.details;
if (d->mFx != Ifx_None)
@@ -4012,7 +4045,6 @@
);
case Iex_Load:
return IRExpr_Load(
- e->Iex.Load.isLL,
e->Iex.Load.end,
e->Iex.Load.ty,
atbSubst_Expr(env, e->Iex.Load.addr)
@@ -4049,7 +4081,6 @@
case Ist_Store:
return IRStmt_Store(
st->Ist.Store.end,
- st->Ist.Store.resSC,
atbSubst_Expr(env, st->Ist.Store.addr),
atbSubst_Expr(env, st->Ist.Store.data)
);
@@ -4094,6 +4125,14 @@
atbSubst_Expr(env, cas->dataLo)
);
return IRStmt_CAS(cas2);
+ case Ist_LLSC:
+ return IRStmt_LLSC(
+ st->Ist.LLSC.end,
+ st->Ist.LLSC.result,
+ atbSubst_Expr(env, st->Ist.LLSC.addr),
+ st->Ist.LLSC.storedata
+ ? atbSubst_Expr(env, st->Ist.LLSC.storedata) : NULL
+ );
case Ist_Dirty:
d = st->Ist.Dirty.details;
d2 = emptyIRDirty();
@@ -4235,15 +4274,13 @@
/* be True if this stmt writes memory or might do (==> we don't
want to reorder other loads or stores relative to it). Also,
- a load-linked falls under this classification, since we
+ both LL and SC fall under this classification, since we
really ought to be conservative and not reorder any other
- memory transactions relative to it. */
+ memory transactions relative to them. */
stmtStores
= toBool( st->tag == Ist_Store
- || (st->tag == Ist_WrTmp
- && st->Ist.WrTmp.data->tag == Iex_Load
- && st->Ist.WrTmp.data->Iex.Load.isLL)
- || st->tag == Ist_Dirty );
+ || st->tag == Ist_Dirty
+ || st->tag == Ist_LLSC );
for (k = A_NENV-1; k >= 0; k--) {
if (env[k].bindee == NULL)
@@ -4434,6 +4471,11 @@
vassert(cas->dataHi == NULL || isIRAtom(cas->dataHi));
vassert(isIRAtom(cas->dataLo));
break;
+ case Ist_LLSC:
+ vassert(isIRAtom(st->Ist.LLSC.addr));
+ if (st->Ist.LLSC.storedata)
+ vassert(isIRAtom(st->Ist.LLSC.storedata));
+ break;
case Ist_Dirty:
d = st->Ist.Dirty.details;
vassert(isIRAtom(d->guard));
@@ -4452,7 +4494,7 @@
default:
bad:
ppIRStmt(st);
- vpanic("hasGetIorPutI");
+ vpanic("considerExpensives");
}
}
}
Modified: branches/ARM/pub/libvex_ir.h
===================================================================
--- branches/ARM/pub/libvex_ir.h 2009-11-26 17:43:09 UTC (rev 1931)
+++ branches/ARM/pub/libvex_ir.h 2009-11-27 10:07:21 UTC (rev 1932)
@@ -1073,20 +1073,13 @@
IRExpr* arg; /* operand */
} Unop;
- /* A load from memory. If .isLL is True then this load also
- lodges a reservation (ppc-style lwarx/ldarx operation). If
- .isLL is True, then also, the address must be naturally
- aligned - any misaligned addresses should be caught by a
- dominating IR check and side exit. This alignment
- restriction exists because on at least some LL/SC platforms
- (ppc), lwarx etc will trap w/ SIGBUS on misaligned addresses,
- and we have to actually generate lwarx on the host, and we
- don't want it trapping on the host.
-
+ /* A load from memory -- a normal load, not a load-linked.
+ Load-Linkeds (and Store-Conditionals) are instead represented
+ by IRStmt.LLSC since Load-Linkeds have side effects and so
+ are not semantically valid IRExpr's.
ppIRExpr output: LD<end>:<ty>(<addr>), eg. LDle:I32(t1)
*/
struct {
- Bool isLL; /* True iff load makes a reservation */
IREndness end; /* Endian-ness of the load */
IRType ty; /* Type of the loaded value */
IRExpr* addr; /* Address being loaded from */
@@ -1170,8 +1163,7 @@
IRExpr* arg2, IRExpr* arg3 );
extern IRExpr* IRExpr_Binop ( IROp op, IRExpr* arg1, IRExpr* arg2 );
extern IRExpr* IRExpr_Unop ( IROp op, IRExpr* arg );
-extern IRExpr* IRExpr_Load ( Bool isLL, IREndness end,
- IRType ty, IRExpr* addr );
+extern IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr );
extern IRExpr* IRExpr_Const ( IRConst* con );
extern IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args );
extern IRExpr* IRExpr_Mux0X ( IRExpr* cond, IRExpr* expr0, IRExpr* exprX );
@@ -1512,6 +1504,7 @@
Ist_WrTmp,
Ist_Store,
Ist_CAS,
+ Ist_LLSC,
Ist_Dirty,
Ist_MBE, /* META (maybe) */
Ist_Exit
@@ -1607,28 +1600,13 @@
IRExpr* data; /* Expression (RHS of assignment) */
} WrTmp;
- /* Write a value to memory. Normally scRes is
- IRTemp_INVALID, denoting a normal store. If scRes is not
- IRTemp_INVALID, then this is a store-conditional, which
- may fail or succeed depending on the outcome of a
- previously lodged reservation on this address. scRes is
- set to 1 if the store succeeds and 0 if it fails, and
- must have type Ity_I1.
-
- If scRes is not IRTemp_INVALID, then also, the address
- must be naturally aligned - any misaligned addresses
- should be caught by a dominating IR check and side exit.
- This alignment restriction exists because on at least some
- LL/SC platforms (ppc), stwcx. etc will trap w/ SIGBUS on
- misaligned addresses, and we have to actually generate
- stwcx. on the host, and we don't want it trapping on the
- host.
-
+ /* Write a value to memory. This is a normal store, not a
+ Store-Conditional. To represent a Store-Conditional,
+ instead use IRStmt.LLSC.
ppIRStmt output: ST<end>(<addr>) = <data>, eg. STle(t1) = t2
*/
struct {
IREndness end; /* Endianness of the store */
- IRTemp resSC; /* result of SC goes here (1 == success) */
IRExpr* addr; /* store address */
IRExpr* data; /* value to write */
} Store;
@@ -1651,6 +1629,57 @@
IRCAS* details;
} CAS;
+ /* Either Load-Linked or Store-Conditional, depending on
+ STOREDATA.
+
+ If STOREDATA is NULL then this is a Load-Linked, meaning
+ that data is loaded from memory as normal, but a
+ 'reservation' for the address is also lodged in the
+ hardware.
+
+ result = Load-Linked(addr, end)
+
+ The data transfer type is the type of RESULT (I32, I64,
+ etc). ppIRStmt output:
+
+ result = LD<end>-Linked(<addr>), eg. LDbe-Linked(t1)
+
+ If STOREDATA is not NULL then this is a Store-Conditional,
+ hence:
+
+ result = Store-Conditional(addr, storedata, end)
+
+ The data transfer type is the type of STOREDATA and RESULT
+ has type Ity_I1. The store may fail or succeed depending
+ on the state of a previously lodged reservation on this
+ address. RESULT is written 1 if the store succeeds and 0
+ if it fails. eg ppIRStmt output:
+
+ result = ( ST<end>-Cond(<addr>) = <storedata> )
+ eg t3 = ( STbe-Cond(t1) = t2 )
+
+ In all cases, the address must be naturally aligned for
+ the transfer type -- any misaligned addresses should be
+ caught by a dominating IR check and side exit. This
+ alignment restriction exists because on at least some
+ LL/SC platforms (ppc), stwcx. etc will trap w/ SIGBUS on
+ misaligned addresses, and we have to actually generate
+ stwcx. on the host, and we don't want it trapping on the
+ host.
+
+ Summary of rules for transfer type:
+ STOREDATA == NULL (LL):
+ transfer type = type of RESULT
+ STOREDATA != NULL (SC):
+ transfer type = type of STOREDATA, and RESULT :: Ity_I1
+ */
+ struct {
+ IREndness end;
+ IRTemp result;
+ IRExpr* addr;
+ IRExpr* storedata; /* NULL => LL, non-NULL => SC */
+ } LLSC;
+
/* Call (possibly conditionally) a C function that has side
effects (ie. is "dirty"). See the comments above the
IRDirty type declaration for more information.
@@ -1697,9 +1726,10 @@
extern IRStmt* IRStmt_PutI ( IRRegArray* descr, IRExpr* ix, Int bias,
IRExpr* data );
extern IRStmt* IRStmt_WrTmp ( IRTemp tmp, IRExpr* data );
-extern IRStmt* IRStmt_Store ( IREndness end,
- IRTemp resSC, IRExpr* addr, IRExpr* data );
+extern IRStmt* IRStmt_Store ( IREndness end, IRExpr* addr, IRExpr* data );
extern IRStmt* IRStmt_CAS ( IRCAS* details );
+extern IRStmt* IRStmt_LLSC ( IREndness end, IRTemp result,
+ IRExpr* addr, IRExpr* storedata );
extern IRStmt* IRStmt_Dirty ( IRDirty* details );
extern IRStmt* IRStmt_MBE ( IRMBusEvent event );
extern IRStmt* IRStmt_Exit ( IRExpr* guard, IRJumpKind jk, IRConst* dst );
|