You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
1
(1) |
2
(4) |
3
(3) |
4
(6) |
5
(14) |
6
(10) |
7
(4) |
|
8
(2) |
9
(4) |
10
(7) |
11
(8) |
12
(5) |
13
(11) |
14
(4) |
|
15
(4) |
16
(9) |
17
(6) |
18
|
19
|
20
|
21
|
|
22
(3) |
23
(1) |
24
(7) |
25
(12) |
26
(8) |
27
(13) |
28
(4) |
|
29
(3) |
30
(4) |
|
|
|
|
|
|
From: <sv...@va...> - 2009-11-26 17:43:26
|
Author: sewardj
Date: 2009-11-26 17:43:09 +0000 (Thu, 26 Nov 2009)
New Revision: 1931
Log:
Merge revs 1924:1929 from trunk (viz, resync to 1929).
Modified:
branches/ARM/priv/guest_amd64_helpers.c
branches/ARM/priv/host_amd64_defs.c
Modified: branches/ARM/priv/guest_amd64_helpers.c
===================================================================
--- branches/ARM/priv/guest_amd64_helpers.c 2009-11-26 17:17:37 UTC (rev 1930)
+++ branches/ARM/priv/guest_amd64_helpers.c 2009-11-26 17:43:09 UTC (rev 1931)
@@ -1014,6 +1014,16 @@
binop(Iop_Shl64,cc_dep1,mkU8(32))));
}
+ if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondS)) {
+ /* long sub/cmp, then S (negative) --> test (dst-src <s 0) */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpLT64S,
+ binop(Iop_Sub64,
+ binop(Iop_Shl64, cc_dep1, mkU8(32)),
+ binop(Iop_Shl64, cc_dep2, mkU8(32))),
+ mkU64(0)));
+ }
+
/*---------------- SUBW ----------------*/
if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondZ)) {
Modified: branches/ARM/priv/host_amd64_defs.c
===================================================================
--- branches/ARM/priv/host_amd64_defs.c 2009-11-26 17:17:37 UTC (rev 1930)
+++ branches/ARM/priv/host_amd64_defs.c 2009-11-26 17:43:09 UTC (rev 1931)
@@ -2335,10 +2335,26 @@
if (i->Ain.Alu64R.op == Aalu_MOV) {
switch (i->Ain.Alu64R.src->tag) {
case Armi_Imm:
- *p++ = toUChar(0x48 + (1 & iregBit3(i->Ain.Alu64R.dst)));
- *p++ = 0xC7;
- *p++ = toUChar(0xC0 + iregBits210(i->Ain.Alu64R.dst));
- p = emit32(p, i->Ain.Alu64R.src->Armi.Imm.imm32);
+ if (0 == (i->Ain.Alu64R.src->Armi.Imm.imm32 & ~0xFFF)) {
+ /* Actually we could use this form for constants in
+ the range 0 through 0x7FFFFFFF inclusive, but
+ limit it to a small range for verifiability
+ purposes. */
+ /* Generate "movl $imm32, 32-bit-register" and let
+ the default zero-extend rule cause the upper half
+ of the dst to be zeroed out too. This saves 1
+ and sometimes 2 bytes compared to the more
+ obvious encoding in the 'else' branch. */
+ if (1 & iregBit3(i->Ain.Alu64R.dst))
+ *p++ = 0x41;
+ *p++ = 0xB8 + iregBits210(i->Ain.Alu64R.dst);
+ p = emit32(p, i->Ain.Alu64R.src->Armi.Imm.imm32);
+ } else {
+ *p++ = toUChar(0x48 + (1 & iregBit3(i->Ain.Alu64R.dst)));
+ *p++ = 0xC7;
+ *p++ = toUChar(0xC0 + iregBits210(i->Ain.Alu64R.dst));
+ p = emit32(p, i->Ain.Alu64R.src->Armi.Imm.imm32);
+ }
goto done;
case Armi_Reg:
*p++ = rexAMode_R( i->Ain.Alu64R.src->Armi.Reg.reg,
|
|
From: <sv...@va...> - 2009-11-26 17:20:33
|
Author: sewardj
Date: 2009-11-26 17:20:21 +0000 (Thu, 26 Nov 2009)
New Revision: 10957
Log:
Track vex r1930 (Change the IR representation of load linked and store
conditional.) Completes the fix of #215771.
Modified:
trunk/cachegrind/cg_main.c
trunk/callgrind/main.c
trunk/drd/drd_load_store.c
trunk/exp-ptrcheck/h_main.c
trunk/helgrind/hg_main.c
trunk/lackey/lk_main.c
trunk/massif/ms_main.c
trunk/memcheck/mc_translate.c
Modified: trunk/cachegrind/cg_main.c
===================================================================
--- trunk/cachegrind/cg_main.c 2009-11-25 13:57:47 UTC (rev 10956)
+++ trunk/cachegrind/cg_main.c 2009-11-26 17:20:21 UTC (rev 10957)
@@ -1053,6 +1053,22 @@
break;
}
+ case Ist_LLSC: {
+ IRType dataTy;
+ if (st->Ist.LLSC.storedata == NULL) {
+ /* LL */
+ dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
+ addEvent_Dr( &cgs, curr_inode,
+ sizeofIRType(dataTy), st->Ist.LLSC.addr );
+ } else {
+ /* SC */
+ dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
+ addEvent_Dw( &cgs, curr_inode,
+ sizeofIRType(dataTy), st->Ist.LLSC.addr );
+ }
+ break;
+ }
+
case Ist_Exit: {
/* Stuff to widen the guard expression to a host word, so
we can pass it to the branch predictor simulation
Modified: trunk/callgrind/main.c
===================================================================
--- trunk/callgrind/main.c 2009-11-25 13:57:47 UTC (rev 10956)
+++ trunk/callgrind/main.c 2009-11-26 17:20:21 UTC (rev 10957)
@@ -657,14 +657,8 @@
static
void addConstMemStoreStmt( IRSB* bbOut, UWord addr, UInt val, IRType hWordTy)
{
- /* JRS 2009june01: re IRTemp_INVALID, am assuming that this
- function is used only to create instrumentation, and not to
- copy/reconstruct IRStmt_Stores that were in the incoming IR
- superblock. If that is not a correct assumption, then things
- will break badly on PowerPC, esp w/ threaded apps. */
addStmtToIRSB( bbOut,
IRStmt_Store(CLGEndness,
- IRTemp_INVALID,
IRExpr_Const(hWordTy == Ity_I32 ?
IRConst_U32( addr ) :
IRConst_U64( addr )),
@@ -864,8 +858,24 @@
addEvent_Dw( &clgs, curr_inode, dataSize, cas->addr );
break;
}
-
- case Ist_Exit: {
+
+ case Ist_LLSC: {
+ IRType dataTy;
+ if (st->Ist.LLSC.storedata == NULL) {
+ /* LL */
+ dataTy = typeOfIRTemp(sbIn->tyenv, st->Ist.LLSC.result);
+ addEvent_Dr( &clgs, curr_inode,
+ sizeofIRType(dataTy), st->Ist.LLSC.addr );
+ } else {
+ /* SC */
+ dataTy = typeOfIRExpr(sbIn->tyenv, st->Ist.LLSC.storedata);
+ addEvent_Dw( &clgs, curr_inode,
+ sizeofIRType(dataTy), st->Ist.LLSC.addr );
+ }
+ break;
+ }
+
+ case Ist_Exit: {
UInt jmps_passed;
/* We may never reach the next statement, so need to flush
Modified: trunk/drd/drd_load_store.c
===================================================================
--- trunk/drd/drd_load_store.c 2009-11-25 13:57:47 UTC (rev 10956)
+++ trunk/drd/drd_load_store.c 2009-11-26 17:20:21 UTC (rev 10957)
@@ -460,8 +460,7 @@
{
IRStmt* const st = bb_in->stmts[i];
tl_assert(st);
- if (st->tag == Ist_NoOp)
- continue;
+ tl_assert(isFlatIRStmt(st));
switch (st->tag)
{
@@ -489,8 +488,7 @@
break;
case Ist_Store:
- if (instrument && /* ignore stores resulting from st{d,w}cx. */
- st->Ist.Store.resSC == IRTemp_INVALID)
+ if (instrument)
{
instrument_store(bb,
st->Ist.Store.addr,
@@ -576,10 +574,42 @@
addStmtToIRSB(bb, st);
break;
- default:
+ case Ist_LLSC: {
+ /* Ignore store-conditionals, and handle load-linked's
+ exactly like normal loads. */
+ IRType dataTy;
+ if (st->Ist.LLSC.storedata == NULL)
+ {
+ /* LL */
+ dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
+ if (instrument) {
+ instrument_load(bb,
+ st->Ist.LLSC.addr,
+ sizeofIRType(dataTy));
+ }
+ }
+ else
+ {
+ /* SC */
+ /*ignore */
+ }
addStmtToIRSB(bb, st);
break;
}
+
+ case Ist_NoOp:
+ case Ist_AbiHint:
+ case Ist_Put:
+ case Ist_PutI:
+ case Ist_Exit:
+ /* None of these can contain any memory references. */
+ addStmtToIRSB(bb, st);
+ break;
+
+ default:
+ ppIRStmt(st);
+ tl_assert(0);
+ }
}
return bb;
Modified: trunk/exp-ptrcheck/h_main.c
===================================================================
--- trunk/exp-ptrcheck/h_main.c 2009-11-25 13:57:47 UTC (rev 10956)
+++ trunk/exp-ptrcheck/h_main.c 2009-11-26 17:20:21 UTC (rev 10957)
@@ -4332,6 +4332,323 @@
}
}
+
+/* schemeS helper for doing stores, pulled out into a function because
+ it needs to handle both normal stores and store-conditionals.
+ Returns False if we see a case we don't know how to handle.
+*/
+static Bool schemeS_store ( PCEnv* pce,
+ IRExpr* data, IRExpr* addr, IRTemp resSC )
+{
+ /* We have: STle(addr) = data
+ if data is int-word sized, do
+ check_store4(addr, addr#, data, data#)
+ for all other stores
+ check_store{1,2}(addr, addr#, data)
+
+ The helper actually *does* the store, so that it can do the
+ post-hoc ugly hack of inspecting and "improving" the shadow data
+ after the store, in the case where it isn't an aligned word
+ store.
+
+ Only word-sized values are shadowed. If this is a
+ store-conditional, .resSC will denote a non-word-typed temp, and
+ so we don't need to shadow it. Assert about the type, tho.
+ However, since we're not re-emitting the original IRStmt_Store,
+ but rather doing it as part of the helper function, we need to
+ actually do a SC in the helper, and assign the result bit to
+ .resSC. Ugly.
+ */
+ IRType d_ty = typeOfIRExpr(pce->sb->tyenv, data);
+ IRExpr* addrv = schemeEw_Atom( pce, addr );
+ if (resSC != IRTemp_INVALID) {
+ tl_assert(typeOfIRTemp(pce->sb->tyenv, resSC) == Ity_I1);
+ /* viz, not something we want to shadow */
+ /* also, throw out all store-conditional cases that
+ we can't handle */
+ if (pce->gWordTy == Ity_I32 && d_ty != Ity_I32)
+ return False;
+ if (pce->gWordTy == Ity_I64 && d_ty != Ity_I32 && d_ty != Ity_I64)
+ return False;
+ }
+ if (pce->gWordTy == Ity_I32) {
+ /* ------ 32 bit host/guest (cough, cough) ------ */
+ switch (d_ty) {
+ /* Integer word case */
+ case Ity_I32: {
+ IRExpr* datav = schemeEw_Atom( pce, data );
+ if (resSC == IRTemp_INVALID) {
+ /* "normal" store */
+ gen_dirty_v_WWWW( pce,
+ &check_store4_P, "check_store4_P",
+ addr, addrv, data, datav );
+ } else {
+ /* store-conditional; need to snarf the success bit */
+ IRTemp resSC32
+ = gen_dirty_W_WWWW( pce,
+ &check_store4C_P,
+ "check_store4C_P",
+ addr, addrv, data, datav );
+ /* presumably resSC32 will really be Ity_I32. In
+ any case we'll get jumped by the IR sanity
+ checker if it's not, when it sees the
+ following statement. */
+ assign( 'I', pce, resSC, unop(Iop_32to1, mkexpr(resSC32)) );
+ }
+ break;
+ }
+ /* Integer subword cases */
+ case Ity_I16:
+ gen_dirty_v_WWW( pce,
+ &check_store2, "check_store2",
+ addr, addrv,
+ uwiden_to_host_word( pce, data ));
+ break;
+ case Ity_I8:
+ gen_dirty_v_WWW( pce,
+ &check_store1, "check_store1",
+ addr, addrv,
+ uwiden_to_host_word( pce, data ));
+ break;
+ /* 64-bit float. Pass store data in 2 32-bit pieces. */
+ case Ity_F64: {
+ IRAtom* d64 = assignNew( 'I', pce, Ity_I64,
+ unop(Iop_ReinterpF64asI64, data) );
+ IRAtom* dLo32 = assignNew( 'I', pce, Ity_I32,
+ unop(Iop_64to32, d64) );
+ IRAtom* dHi32 = assignNew( 'I', pce, Ity_I32,
+ unop(Iop_64HIto32, d64) );
+ gen_dirty_v_WWWW( pce,
+ &check_store8_ms4B_ls4B,
+ "check_store8_ms4B_ls4B",
+ addr, addrv, dHi32, dLo32 );
+ break;
+ }
+ /* 32-bit float. We can just use _store4, but need
+ to futz with the argument type. */
+ case Ity_F32: {
+ IRAtom* i32 = assignNew( 'I', pce, Ity_I32,
+ unop(Iop_ReinterpF32asI32,
+ data ) );
+ gen_dirty_v_WWW( pce,
+ &check_store4,
+ "check_store4",
+ addr, addrv, i32 );
+ break;
+ }
+ /* 64-bit int. Pass store data in 2 32-bit pieces. */
+ case Ity_I64: {
+ IRAtom* dLo32 = assignNew( 'I', pce, Ity_I32,
+ unop(Iop_64to32, data) );
+ IRAtom* dHi32 = assignNew( 'I', pce, Ity_I32,
+ unop(Iop_64HIto32, data) );
+ gen_dirty_v_WWWW( pce,
+ &check_store8_ms4B_ls4B,
+ "check_store8_ms4B_ls4B",
+ addr, addrv, dHi32, dLo32 );
+ break;
+ }
+ /* 128-bit vector. Pass store data in 4 32-bit pieces.
+ This is all very ugly and inefficient, but it is
+ hard to better without considerably complicating the
+ store-handling schemes. */
+ case Ity_V128: {
+ IRAtom* dHi64 = assignNew( 'I', pce, Ity_I64,
+ unop(Iop_V128HIto64, data) );
+ IRAtom* dLo64 = assignNew( 'I', pce, Ity_I64,
+ unop(Iop_V128to64, data) );
+ IRAtom* w3 = assignNew( 'I', pce, Ity_I32,
+ unop(Iop_64HIto32, dHi64) );
+ IRAtom* w2 = assignNew( 'I', pce, Ity_I32,
+ unop(Iop_64to32, dHi64) );
+ IRAtom* w1 = assignNew( 'I', pce, Ity_I32,
+ unop(Iop_64HIto32, dLo64) );
+ IRAtom* w0 = assignNew( 'I', pce, Ity_I32,
+ unop(Iop_64to32, dLo64) );
+ gen_dirty_v_6W( pce,
+ &check_store16_ms4B_4B_4B_ls4B,
+ "check_store16_ms4B_4B_4B_ls4B",
+ addr, addrv, w3, w2, w1, w0 );
+ break;
+ }
+ default:
+ ppIRType(d_ty); tl_assert(0);
+ }
+ } else {
+ /* ------ 64 bit host/guest (cough, cough) ------ */
+ switch (d_ty) {
+ /* Integer word case */
+ case Ity_I64: {
+ IRExpr* datav = schemeEw_Atom( pce, data );
+ if (resSC == IRTemp_INVALID) {
+ /* "normal" store */
+ gen_dirty_v_WWWW( pce,
+ &check_store8_P, "check_store8_P",
+ addr, addrv, data, datav );
+ } else {
+ IRTemp resSC64
+ = gen_dirty_W_WWWW( pce,
+ &check_store8C_P,
+ "check_store8C_P",
+ addr, addrv, data, datav );
+ assign( 'I', pce, resSC, unop(Iop_64to1, mkexpr(resSC64)) );
+ }
+ break;
+ }
+ /* Integer subword cases */
+ case Ity_I32:
+ if (resSC == IRTemp_INVALID) {
+ /* "normal" store */
+ gen_dirty_v_WWW( pce,
+ &check_store4, "check_store4",
+ addr, addrv,
+ uwiden_to_host_word( pce, data ));
+ } else {
+ /* store-conditional; need to snarf the success bit */
+ IRTemp resSC64
+ = gen_dirty_W_WWW( pce,
+ &check_store4C,
+ "check_store4C",
+ addr, addrv,
+ uwiden_to_host_word( pce, data ));
+ assign( 'I', pce, resSC, unop(Iop_64to1, mkexpr(resSC64)) );
+ }
+ break;
+ case Ity_I16:
+ gen_dirty_v_WWW( pce,
+ &check_store2, "check_store2",
+ addr, addrv,
+ uwiden_to_host_word( pce, data ));
+ break;
+ case Ity_I8:
+ gen_dirty_v_WWW( pce,
+ &check_store1, "check_store1",
+ addr, addrv,
+ uwiden_to_host_word( pce, data ));
+ break;
+ /* 128-bit vector. Pass store data in 2 64-bit pieces. */
+ case Ity_V128: {
+ IRAtom* dHi64 = assignNew( 'I', pce, Ity_I64,
+ unop(Iop_V128HIto64, data) );
+ IRAtom* dLo64 = assignNew( 'I', pce, Ity_I64,
+ unop(Iop_V128to64, data) );
+ gen_dirty_v_WWWW( pce,
+ &check_store16_ms8B_ls8B,
+ "check_store16_ms8B_ls8B",
+ addr, addrv, dHi64, dLo64 );
+ break;
+ }
+ /* 64-bit float. */
+ case Ity_F64: {
+ IRAtom* dI = assignNew( 'I', pce, Ity_I64,
+ unop(Iop_ReinterpF64asI64,
+ data ) );
+ gen_dirty_v_WWW( pce,
+ &check_store8_all8B,
+ "check_store8_all8B",
+ addr, addrv, dI );
+ break;
+ }
+ /* 32-bit float. We can just use _store4, but need
+ to futz with the argument type. */
+ case Ity_F32: {
+ IRAtom* i32 = assignNew( 'I', pce, Ity_I32,
+ unop(Iop_ReinterpF32asI32,
+ data ) );
+ IRAtom* i64 = assignNew( 'I', pce, Ity_I64,
+ unop(Iop_32Uto64,
+ i32 ) );
+ gen_dirty_v_WWW( pce,
+ &check_store4,
+ "check_store4",
+ addr, addrv, i64 );
+ break;
+ }
+ default:
+ ppIRType(d_ty); tl_assert(0);
+ }
+ }
+ /* And don't copy the original, since the helper does the store.
+ Ick. */
+ return True; /* store was successfully instrumented */
+}
+
+
+/* schemeS helper for doing loads, pulled out into a function because
+ it needs to handle both normal loads and load-linked's.
+*/
+static void schemeS_load ( PCEnv* pce, IRExpr* addr, IRType e_ty, IRTemp dstv )
+{
+ HChar* h_nm = NULL;
+ void* h_fn = NULL;
+ IRExpr* addrv = NULL;
+ if (e_ty == pce->gWordTy) {
+ tl_assert(dstv != IRTemp_INVALID);
+ } else {
+ tl_assert(dstv == IRTemp_INVALID);
+ }
+ if (pce->gWordTy == Ity_I32) {
+ /* 32 bit host/guest (cough, cough) */
+ switch (e_ty) {
+ /* Ity_I32: helper returns shadow value. */
+ case Ity_I32: h_fn = &check_load4_P;
+ h_nm = "check_load4_P"; break;
+ /* all others: helper does not return a shadow
+ value. */
+ case Ity_V128: h_fn = &check_load16;
+ h_nm = "check_load16"; break;
+ case Ity_I64:
+ case Ity_F64: h_fn = &check_load8;
+ h_nm = "check_load8"; break;
+ case Ity_F32: h_fn = &check_load4;
+ h_nm = "check_load4"; break;
+ case Ity_I16: h_fn = &check_load2;
+ h_nm = "check_load2"; break;
+ case Ity_I8: h_fn = &check_load1;
+ h_nm = "check_load1"; break;
+ default: ppIRType(e_ty); tl_assert(0);
+ }
+ addrv = schemeEw_Atom( pce, addr );
+ if (e_ty == Ity_I32) {
+ assign( 'I', pce, dstv,
+ mkexpr( gen_dirty_W_WW( pce, h_fn, h_nm,
+ addr, addrv )) );
+ } else {
+ gen_dirty_v_WW( pce, NULL, h_fn, h_nm, addr, addrv );
+ }
+ } else {
+ /* 64 bit host/guest (cough, cough) */
+ switch (e_ty) {
+ /* Ity_I64: helper returns shadow value. */
+ case Ity_I64: h_fn = &check_load8_P;
+ h_nm = "check_load8_P"; break;
+ /* all others: helper does not return a shadow
+ value. */
+ case Ity_V128: h_fn = &check_load16;
+ h_nm = "check_load16"; break;
+ case Ity_F64: h_fn = &check_load8;
+ h_nm = "check_load8"; break;
+ case Ity_F32:
+ case Ity_I32: h_fn = &check_load4;
+ h_nm = "check_load4"; break;
+ case Ity_I16: h_fn = &check_load2;
+ h_nm = "check_load2"; break;
+ case Ity_I8: h_fn = &check_load1;
+ h_nm = "check_load1"; break;
+ default: ppIRType(e_ty); tl_assert(0);
+ }
+ addrv = schemeEw_Atom( pce, addr );
+ if (e_ty == Ity_I64) {
+ assign( 'I', pce, dstv,
+ mkexpr( gen_dirty_W_WW( pce, h_fn, h_nm,
+ addr, addrv )) );
+ } else {
+ gen_dirty_v_WW( pce, NULL, h_fn, h_nm, addr, addrv );
+ }
+ }
+}
+
+
/* Generate into 'pce', instrumentation for 'st'. Also copy 'st'
itself into 'pce' (the caller does not do so). This is somewhat
complex and relies heavily on the assumption that the incoming IR
@@ -4571,6 +4888,29 @@
break;
}
+ case Ist_LLSC: {
+ if (st->Ist.LLSC.storedata == NULL) {
+ /* LL */
+ IRTemp dst = st->Ist.LLSC.result;
+ IRType dataTy = typeOfIRTemp(pce->sb->tyenv, dst);
+ Bool isWord = dataTy == pce->gWordTy;
+ IRTemp dstv = isWord ? newShadowTmp( pce, dst )
+ : IRTemp_INVALID;
+ schemeS_load( pce, st->Ist.LLSC.addr, dataTy, dstv );
+ /* copy the original -- must happen after the helper call */
+ stmt( 'C', pce, st );
+ } else {
+ /* SC */
+ schemeS_store( pce,
+ st->Ist.LLSC.storedata,
+ st->Ist.LLSC.addr,
+ st->Ist.LLSC.result );
+ /* Don't copy the original, since the helper does the
+ store itself. */
+ }
+ break;
+ }
+
case Ist_Dirty: {
Int i;
IRDirty* di;
@@ -4702,244 +5042,15 @@
} /* case Ist_Put */
case Ist_Store: {
- /* We have: STle(addr) = data
- if data is int-word sized, do
- check_store4(addr, addr#, data, data#)
- for all other stores
- check_store{1,2}(addr, addr#, data)
-
- The helper actually *does* the store, so that it can do
- the post-hoc ugly hack of inspecting and "improving" the
- shadow data after the store, in the case where it isn't an
- aligned word store.
-
- Only word-sized values are shadowed. If this is a
- store-conditional, .resSC will denote a non-word-typed
- temp, and so we don't need to shadow it. Assert about the
- type, tho. However, since we're not re-emitting the
- original IRStmt_Store, but rather doing it as part of the
- helper function, we need to actually do a SC in the
- helper, and assign the result bit to .resSC. Ugly.
- */
- IRExpr* data = st->Ist.Store.data;
- IRExpr* addr = st->Ist.Store.addr;
- IRType d_ty = typeOfIRExpr(pce->sb->tyenv, data);
- IRExpr* addrv = schemeEw_Atom( pce, addr );
- IRTemp resSC = st->Ist.Store.resSC;
- if (resSC != IRTemp_INVALID) {
- tl_assert(typeOfIRTemp(pce->sb->tyenv, resSC) == Ity_I1);
- /* viz, not something we want to shadow */
- /* also, throw out all store-conditional cases that
- we can't handle */
- if (pce->gWordTy == Ity_I32 && d_ty != Ity_I32)
- goto unhandled;
- if (pce->gWordTy == Ity_I64 && d_ty != Ity_I32 && d_ty != Ity_I64)
- goto unhandled;
- }
- if (pce->gWordTy == Ity_I32) {
- /* ------ 32 bit host/guest (cough, cough) ------ */
- switch (d_ty) {
- /* Integer word case */
- case Ity_I32: {
- IRExpr* datav = schemeEw_Atom( pce, data );
- if (resSC == IRTemp_INVALID) {
- /* "normal" store */
- gen_dirty_v_WWWW( pce,
- &check_store4_P, "check_store4_P",
- addr, addrv, data, datav );
- } else {
- /* store-conditional; need to snarf the success bit */
- IRTemp resSC32
- = gen_dirty_W_WWWW( pce,
- &check_store4C_P,
- "check_store4C_P",
- addr, addrv, data, datav );
- /* presumably resSC32 will really be Ity_I32. In
- any case we'll get jumped by the IR sanity
- checker if it's not, when it sees the
- following statement. */
- assign( 'I', pce, resSC, unop(Iop_32to1, mkexpr(resSC32)) );
- }
- break;
- }
- /* Integer subword cases */
- case Ity_I16:
- gen_dirty_v_WWW( pce,
- &check_store2, "check_store2",
- addr, addrv,
- uwiden_to_host_word( pce, data ));
- break;
- case Ity_I8:
- gen_dirty_v_WWW( pce,
- &check_store1, "check_store1",
- addr, addrv,
- uwiden_to_host_word( pce, data ));
- break;
- /* 64-bit float. Pass store data in 2 32-bit pieces. */
- case Ity_F64: {
- IRAtom* d64 = assignNew( 'I', pce, Ity_I64,
- unop(Iop_ReinterpF64asI64, data) );
- IRAtom* dLo32 = assignNew( 'I', pce, Ity_I32,
- unop(Iop_64to32, d64) );
- IRAtom* dHi32 = assignNew( 'I', pce, Ity_I32,
- unop(Iop_64HIto32, d64) );
- gen_dirty_v_WWWW( pce,
- &check_store8_ms4B_ls4B,
- "check_store8_ms4B_ls4B",
- addr, addrv, dHi32, dLo32 );
- break;
- }
- /* 32-bit float. We can just use _store4, but need
- to futz with the argument type. */
- case Ity_F32: {
- IRAtom* i32 = assignNew( 'I', pce, Ity_I32,
- unop(Iop_ReinterpF32asI32,
- data ) );
- gen_dirty_v_WWW( pce,
- &check_store4,
- "check_store4",
- addr, addrv, i32 );
- break;
- }
- /* 64-bit int. Pass store data in 2 32-bit pieces. */
- case Ity_I64: {
- IRAtom* dLo32 = assignNew( 'I', pce, Ity_I32,
- unop(Iop_64to32, data) );
- IRAtom* dHi32 = assignNew( 'I', pce, Ity_I32,
- unop(Iop_64HIto32, data) );
- gen_dirty_v_WWWW( pce,
- &check_store8_ms4B_ls4B,
- "check_store8_ms4B_ls4B",
- addr, addrv, dHi32, dLo32 );
- break;
- }
-
- /* 128-bit vector. Pass store data in 4 32-bit pieces.
- This is all very ugly and inefficient, but it is
- hard to better without considerably complicating the
- store-handling schemes. */
- case Ity_V128: {
- IRAtom* dHi64 = assignNew( 'I', pce, Ity_I64,
- unop(Iop_V128HIto64, data) );
- IRAtom* dLo64 = assignNew( 'I', pce, Ity_I64,
- unop(Iop_V128to64, data) );
- IRAtom* w3 = assignNew( 'I', pce, Ity_I32,
- unop(Iop_64HIto32, dHi64) );
- IRAtom* w2 = assignNew( 'I', pce, Ity_I32,
- unop(Iop_64to32, dHi64) );
- IRAtom* w1 = assignNew( 'I', pce, Ity_I32,
- unop(Iop_64HIto32, dLo64) );
- IRAtom* w0 = assignNew( 'I', pce, Ity_I32,
- unop(Iop_64to32, dLo64) );
- gen_dirty_v_6W( pce,
- &check_store16_ms4B_4B_4B_ls4B,
- "check_store16_ms4B_4B_4B_ls4B",
- addr, addrv, w3, w2, w1, w0 );
- break;
- }
-
-
- default:
- ppIRType(d_ty); tl_assert(0);
- }
- } else {
- /* ------ 64 bit host/guest (cough, cough) ------ */
- switch (d_ty) {
- /* Integer word case */
- case Ity_I64: {
- IRExpr* datav = schemeEw_Atom( pce, data );
- if (resSC == IRTemp_INVALID) {
- /* "normal" store */
- gen_dirty_v_WWWW( pce,
- &check_store8_P, "check_store8_P",
- addr, addrv, data, datav );
- } else {
- IRTemp resSC64
- = gen_dirty_W_WWWW( pce,
- &check_store8C_P,
- "check_store8C_P",
- addr, addrv, data, datav );
- assign( 'I', pce, resSC, unop(Iop_64to1, mkexpr(resSC64)) );
- }
- break;
- }
- /* Integer subword cases */
- case Ity_I32:
- if (resSC == IRTemp_INVALID) {
- /* "normal" store */
- gen_dirty_v_WWW( pce,
- &check_store4, "check_store4",
- addr, addrv,
- uwiden_to_host_word( pce, data ));
- } else {
- /* store-conditional; need to snarf the success bit */
- IRTemp resSC64
- = gen_dirty_W_WWW( pce,
- &check_store4C,
- "check_store4C",
- addr, addrv,
- uwiden_to_host_word( pce, data ));
- assign( 'I', pce, resSC, unop(Iop_64to1, mkexpr(resSC64)) );
- }
- break;
- case Ity_I16:
- gen_dirty_v_WWW( pce,
- &check_store2, "check_store2",
- addr, addrv,
- uwiden_to_host_word( pce, data ));
- break;
- case Ity_I8:
- gen_dirty_v_WWW( pce,
- &check_store1, "check_store1",
- addr, addrv,
- uwiden_to_host_word( pce, data ));
- break;
- /* 128-bit vector. Pass store data in 2 64-bit pieces. */
- case Ity_V128: {
- IRAtom* dHi64 = assignNew( 'I', pce, Ity_I64,
- unop(Iop_V128HIto64, data) );
- IRAtom* dLo64 = assignNew( 'I', pce, Ity_I64,
- unop(Iop_V128to64, data) );
- gen_dirty_v_WWWW( pce,
- &check_store16_ms8B_ls8B,
- "check_store16_ms8B_ls8B",
- addr, addrv, dHi64, dLo64 );
- break;
- }
- /* 64-bit float. */
- case Ity_F64: {
- IRAtom* dI = assignNew( 'I', pce, Ity_I64,
- unop(Iop_ReinterpF64asI64,
- data ) );
- gen_dirty_v_WWW( pce,
- &check_store8_all8B,
- "check_store8_all8B",
- addr, addrv, dI );
- break;
- }
- /* 32-bit float. We can just use _store4, but need
- to futz with the argument type. */
- case Ity_F32: {
- IRAtom* i32 = assignNew( 'I', pce, Ity_I32,
- unop(Iop_ReinterpF32asI32,
- data ) );
- IRAtom* i64 = assignNew( 'I', pce, Ity_I64,
- unop(Iop_32Uto64,
- i32 ) );
- gen_dirty_v_WWW( pce,
- &check_store4,
- "check_store4",
- addr, addrv, i64 );
- break;
- }
- default:
- ppIRType(d_ty); tl_assert(0);
- }
- }
- /* And don't copy the original, since the helper does the
- store. Ick. */
+ Bool ok = schemeS_store( pce,
+ st->Ist.Store.data,
+ st->Ist.Store.addr,
+ IRTemp_INVALID/*not a SC*/ );
+ if (!ok) goto unhandled;
+ /* Don't copy the original, since the helper does the store
+ itself. */
break;
- } /* case Ist_Store */
+ }
case Ist_WrTmp: {
/* This is the only place we have to deal with the full
@@ -4992,69 +5103,7 @@
}
case Iex_Load: {
- IRExpr* addr = e->Iex.Load.addr;
- HChar* h_nm = NULL;
- void* h_fn = NULL;
- IRExpr* addrv = NULL;
- if (pce->gWordTy == Ity_I32) {
- /* 32 bit host/guest (cough, cough) */
- switch (e_ty) {
- /* Ity_I32: helper returns shadow value. */
- case Ity_I32: h_fn = &check_load4_P;
- h_nm = "check_load4_P"; break;
- /* all others: helper does not return a shadow
- value. */
- case Ity_V128: h_fn = &check_load16;
- h_nm = "check_load16"; break;
- case Ity_I64:
- case Ity_F64: h_fn = &check_load8;
- h_nm = "check_load8"; break;
- case Ity_F32: h_fn = &check_load4;
- h_nm = "check_load4"; break;
- case Ity_I16: h_fn = &check_load2;
- h_nm = "check_load2"; break;
- case Ity_I8: h_fn = &check_load1;
- h_nm = "check_load1"; break;
- default: ppIRType(e_ty); tl_assert(0);
- }
- addrv = schemeEw_Atom( pce, addr );
- if (e_ty == Ity_I32) {
- assign( 'I', pce, dstv,
- mkexpr( gen_dirty_W_WW( pce, h_fn, h_nm,
- addr, addrv )) );
- } else {
- gen_dirty_v_WW( pce, NULL, h_fn, h_nm, addr, addrv );
- }
- } else {
- /* 64 bit host/guest (cough, cough) */
- switch (e_ty) {
- /* Ity_I64: helper returns shadow value. */
- case Ity_I64: h_fn = &check_load8_P;
- h_nm = "check_load8_P"; break;
- /* all others: helper does not return a shadow
- value. */
- case Ity_V128: h_fn = &check_load16;
- h_nm = "check_load16"; break;
- case Ity_F64: h_fn = &check_load8;
- h_nm = "check_load8"; break;
- case Ity_F32:
- case Ity_I32: h_fn = &check_load4;
- h_nm = "check_load4"; break;
- case Ity_I16: h_fn = &check_load2;
- h_nm = "check_load2"; break;
- case Ity_I8: h_fn = &check_load1;
- h_nm = "check_load1"; break;
- default: ppIRType(e_ty); tl_assert(0);
- }
- addrv = schemeEw_Atom( pce, addr );
- if (e_ty == Ity_I64) {
- assign( 'I', pce, dstv,
- mkexpr( gen_dirty_W_WW( pce, h_fn, h_nm,
- addr, addrv )) );
- } else {
- gen_dirty_v_WW( pce, NULL, h_fn, h_nm, addr, addrv );
- }
- }
+ schemeS_load( pce, e->Iex.Load.addr, e_ty, dstv );
/* copy the original -- must happen after the helper call */
stmt( 'C', pce, st );
break;
Modified: trunk/helgrind/hg_main.c
===================================================================
--- trunk/helgrind/hg_main.c 2009-11-25 13:57:47 UTC (rev 10956)
+++ trunk/helgrind/hg_main.c 2009-11-26 17:20:21 UTC (rev 10957)
@@ -4008,22 +4008,44 @@
break;
}
- case Ist_Store:
- /* It seems we pretend that store-conditionals don't
- exist, viz, just ignore them ... */
- if (st->Ist.Store.resSC == IRTemp_INVALID) {
+ case Ist_LLSC: {
+ /* We pretend store-conditionals don't exist, viz, ignore
+ them. Whereas load-linked's are treated the same as
+ normal loads. */
+ IRType dataTy;
+ if (st->Ist.LLSC.storedata == NULL) {
+ /* LL */
+ dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
if (!inLDSO) {
- instrument_mem_access(
- bbOut,
- st->Ist.Store.addr,
- sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
- True/*isStore*/,
+ instrument_mem_access(
+ bbOut,
+ st->Ist.LLSC.addr,
+ sizeofIRType(dataTy),
+ False/*!isStore*/,
sizeofIRType(hWordTy)
);
}
+ } else {
+ /* SC */
+ /*ignore */
}
break;
+ }
+ case Ist_Store:
+ /* It seems we pretend that store-conditionals don't
+ exist, viz, just ignore them ... */
+ if (!inLDSO) {
+ instrument_mem_access(
+ bbOut,
+ st->Ist.Store.addr,
+ sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
+ True/*isStore*/,
+ sizeofIRType(hWordTy)
+ );
+ }
+ break;
+
case Ist_WrTmp: {
/* ... whereas here we don't care whether a load is a
vanilla one or a load-linked. */
Modified: trunk/lackey/lk_main.c
===================================================================
--- trunk/lackey/lk_main.c 2009-11-25 13:57:47 UTC (rev 10956)
+++ trunk/lackey/lk_main.c 2009-11-26 17:20:21 UTC (rev 10957)
@@ -790,21 +790,54 @@
was introduced, since prior to that point, the Vex
front ends would translate a lock-prefixed instruction
into a (normal) read followed by a (normal) write. */
+ Int dataSize;
+ IRType dataTy;
+ IRCAS* cas = st->Ist.CAS.details;
+ tl_assert(cas->addr != NULL);
+ tl_assert(cas->dataLo != NULL);
+ dataTy = typeOfIRExpr(tyenv, cas->dataLo);
+ dataSize = sizeofIRType(dataTy);
+ if (cas->dataHi != NULL)
+ dataSize *= 2; /* since it's a doubleword-CAS */
if (clo_trace_mem) {
- Int dataSize;
- IRCAS* cas = st->Ist.CAS.details;
- tl_assert(cas->addr != NULL);
- tl_assert(cas->dataLo != NULL);
- dataSize = sizeofIRType(typeOfIRExpr(tyenv, cas->dataLo));
- if (cas->dataHi != NULL)
- dataSize *= 2; /* since it's a doubleword-CAS */
addEvent_Dr( sbOut, cas->addr, dataSize );
addEvent_Dw( sbOut, cas->addr, dataSize );
}
+ if (clo_detailed_counts) {
+ instrument_detail( sbOut, OpLoad, dataTy );
+ if (cas->dataHi != NULL) /* dcas */
+ instrument_detail( sbOut, OpLoad, dataTy );
+ instrument_detail( sbOut, OpStore, dataTy );
+ if (cas->dataHi != NULL) /* dcas */
+ instrument_detail( sbOut, OpStore, dataTy );
+ }
addStmtToIRSB( sbOut, st );
break;
}
+ case Ist_LLSC: {
+ IRType dataTy;
+ if (st->Ist.LLSC.storedata == NULL) {
+ /* LL */
+ dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
+ if (clo_trace_mem)
+ addEvent_Dr( sbOut, st->Ist.LLSC.addr,
+ sizeofIRType(dataTy) );
+ if (clo_detailed_counts)
+ instrument_detail( sbOut, OpLoad, dataTy );
+ } else {
+ /* SC */
+ dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
+ if (clo_trace_mem)
+ addEvent_Dw( sbOut, st->Ist.LLSC.addr,
+ sizeofIRType(dataTy) );
+ if (clo_detailed_counts)
+ instrument_detail( sbOut, OpStore, dataTy );
+ }
+ addStmtToIRSB( sbOut, st );
+ break;
+ }
+
case Ist_Exit:
if (clo_basic_counts) {
// The condition of a branch was inverted by VEX if a taken
@@ -821,7 +854,8 @@
mkIRExprVec_0() );
else
di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc",
- VG_(fnptr_to_fnentry)( &add_one_inverted_Jcc ),
+ VG_(fnptr_to_fnentry)(
+ &add_one_inverted_Jcc ),
mkIRExprVec_0() );
addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
Modified: trunk/massif/ms_main.c
===================================================================
--- trunk/massif/ms_main.c 2009-11-25 13:57:47 UTC (rev 10956)
+++ trunk/massif/ms_main.c 2009-11-26 17:20:21 UTC (rev 10957)
@@ -1903,14 +1903,12 @@
IRTemp t2 = newIRTemp(sbOut->tyenv, Ity_I64);
IRExpr* counter_addr = mkIRExpr_HWord( (HWord)&guest_instrs_executed );
- IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(False/*!isLL*/,
- END, Ity_I64, counter_addr));
+ IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(END, Ity_I64, counter_addr));
IRStmt* st2 =
IRStmt_WrTmp(t2,
IRExpr_Binop(Iop_Add64, IRExpr_RdTmp(t1),
IRExpr_Const(IRConst_U64(n))));
- IRStmt* st3 = IRStmt_Store(END, IRTemp_INVALID/*"not store-conditional"*/,
- counter_addr, IRExpr_RdTmp(t2));
+ IRStmt* st3 = IRStmt_Store(END, counter_addr, IRExpr_RdTmp(t2));
addStmtToIRSB( sbOut, st1 );
addStmtToIRSB( sbOut, st2 );
Modified: trunk/memcheck/mc_translate.c
===================================================================
--- trunk/memcheck/mc_translate.c 2009-11-25 13:57:47 UTC (rev 10956)
+++ trunk/memcheck/mc_translate.c 2009-11-26 17:20:21 UTC (rev 10957)
@@ -3862,6 +3862,68 @@
}
+/* ------ Dealing with LL/SC (not difficult) ------ */
+
+static void do_shadow_LLSC ( MCEnv* mce,
+ IREndness stEnd,
+ IRTemp stResult,
+ IRExpr* stAddr,
+ IRExpr* stStoredata )
+{
+ /* In short: treat a load-linked like a normal load followed by an
+ assignment of the loaded (shadow) data to the result temporary.
+ Treat a store-conditional like a normal store, and mark the
+ result temporary as defined. */
+ IRType resTy = typeOfIRTemp(mce->sb->tyenv, stResult);
+ IRTemp resTmp = findShadowTmpV(mce, stResult);
+
+ tl_assert(isIRAtom(stAddr));
+ if (stStoredata)
+ tl_assert(isIRAtom(stStoredata));
+
+ if (stStoredata == NULL) {
+ /* Load Linked */
+ /* Just treat this as a normal load, followed by an assignment of
+ the value to .result. */
+ /* Stay sane */
+ tl_assert(resTy == Ity_I64 || resTy == Ity_I32
+ || resTy == Ity_I16 || resTy == Ity_I8);
+ assign( 'V', mce, resTmp,
+ expr2vbits_Load(
+ mce, stEnd, resTy, stAddr, 0/*addr bias*/));
+ } else {
+ /* Store Conditional */
+ /* Stay sane */
+ IRType dataTy = typeOfIRExpr(mce->sb->tyenv,
+ stStoredata);
+ tl_assert(dataTy == Ity_I64 || dataTy == Ity_I32
+ || dataTy == Ity_I16 || dataTy == Ity_I8);
+ do_shadow_Store( mce, stEnd,
+ stAddr, 0/* addr bias */,
+ stStoredata,
+ NULL /* shadow data */,
+ NULL/*guard*/ );
+ /* This is a store conditional, so it writes to .result a value
+ indicating whether or not the store succeeded. Just claim
+ this value is always defined. In the PowerPC interpretation
+ of store-conditional, definedness of the success indication
+ depends on whether the address of the store matches the
+ reservation address. But we can't tell that here (and
+ anyway, we're not being PowerPC-specific). At least we are
+ guaranteed that the definedness of the store address, and its
+ addressability, will be checked as per normal. So it seems
+ pretty safe to just say that the success indication is always
+ defined.
+
+ In schemeS, for origin tracking, we must correspondingly set
+ a no-origin value for the origin shadow of .result.
+ */
+ tl_assert(resTy == Ity_I1);
+ assign( 'V', mce, resTmp, definedOfType(resTy) );
+ }
+}
+
+
/*------------------------------------------------------------*/
/*--- Memcheck main ---*/
/*------------------------------------------------------------*/
@@ -3979,6 +4041,11 @@
|| isBogusAtom(cas->expdLo)
|| (cas->dataHi ? isBogusAtom(cas->dataHi) : False)
|| isBogusAtom(cas->dataLo);
+ case Ist_LLSC:
+ return isBogusAtom(st->Ist.LLSC.addr)
+ || (st->Ist.LLSC.storedata
+ ? isBogusAtom(st->Ist.LLSC.storedata)
+ : False);
default:
unhandled:
ppIRStmt(st);
@@ -4182,32 +4249,6 @@
st->Ist.Store.data,
NULL /* shadow data */,
NULL/*guard*/ );
- /* If this is a store conditional, it writes to .resSC a
- value indicating whether or not the store succeeded.
- Just claim this value is always defined. In the
- PowerPC interpretation of store-conditional,
- definedness of the success indication depends on
- whether the address of the store matches the
- reservation address. But we can't tell that here (and
- anyway, we're not being PowerPC-specific). At least we
- are guarantted that the definedness of the store
- address, and its addressibility, will be checked as per
- normal. So it seems pretty safe to just say that the
- success indication is always defined.
-
- In schemeS, for origin tracking, we must
- correspondingly set a no-origin value for the origin
- shadow of resSC.
- */
- if (st->Ist.Store.resSC != IRTemp_INVALID) {
- assign( 'V', &mce,
- findShadowTmpV(&mce, st->Ist.Store.resSC),
- definedOfType(
- shadowTypeV(
- typeOfIRTemp(mce.sb->tyenv,
- st->Ist.Store.resSC)
- )));
- }
break;
case Ist_Exit:
@@ -4241,6 +4282,14 @@
does it all. */
break;
+ case Ist_LLSC:
+ do_shadow_LLSC( &mce,
+ st->Ist.LLSC.end,
+ st->Ist.LLSC.result,
+ st->Ist.LLSC.addr,
+ st->Ist.LLSC.storedata );
+ break;
+
default:
VG_(printf)("\n");
ppIRStmt(st);
@@ -4597,6 +4646,7 @@
tl_assert(0);
}
+
static IRAtom* schemeE ( MCEnv* mce, IRExpr* e )
{
tl_assert(MC_(clo_mc_level) == 3);
@@ -4732,6 +4782,7 @@
}
}
+
static void do_origins_Dirty ( MCEnv* mce, IRDirty* d )
{
// This is a hacked version of do_shadow_Dirty
@@ -4888,6 +4939,26 @@
}
}
+
+static void do_origins_Store ( MCEnv* mce,
+ IREndness stEnd,
+ IRExpr* stAddr,
+ IRExpr* stData )
+{
+ Int dszB;
+ IRAtom* dataB;
+ /* assert that the B value for the address is already available
+ (somewhere), since the call to schemeE will want to see it.
+ XXXX how does this actually ensure that?? */
+ tl_assert(isIRAtom(stAddr));
+ tl_assert(isIRAtom(stData));
+ dszB = sizeofIRType( typeOfIRExpr(mce->sb->tyenv, stData ) );
+ dataB = schemeE( mce, stData );
+ gen_store_b( mce, dszB, stAddr, 0/*offset*/, dataB,
+ NULL/*guard*/ );
+}
+
+
static void schemeS ( MCEnv* mce, IRStmt* st )
{
tl_assert(MC_(clo_mc_level) == 3);
@@ -4928,30 +4999,47 @@
st->Ist.PutI.bias, t4 ));
break;
}
+
case Ist_Dirty:
do_origins_Dirty( mce, st->Ist.Dirty.details );
break;
- case Ist_Store: {
- Int dszB;
- IRAtom* dataB;
- /* assert that the B value for the address is already
- available (somewhere) */
- tl_assert(isIRAtom(st->Ist.Store.addr));
- dszB = sizeofIRType(
- typeOfIRExpr(mce->sb->tyenv, st->Ist.Store.data ));
- dataB = schemeE( mce, st->Ist.Store.data );
- gen_store_b( mce, dszB, st->Ist.Store.addr, 0/*offset*/, dataB,
- NULL/*guard*/ );
- /* For the rationale behind this, see comments at the place
- where the V-shadow for .resSC is constructed, in the main
- loop in MC_(instrument). In short, wee regard .resSc as
- always-defined. */
- if (st->Ist.Store.resSC != IRTemp_INVALID) {
- assign( 'B', mce, findShadowTmpB(mce, st->Ist.Store.resSC),
- mkU32(0) );
+
+ case Ist_Store:
+ do_origins_Store( mce, st->Ist.Store.end,
+ st->Ist.Store.addr,
+ st->Ist.Store.data );
+ break;
+
+ case Ist_LLSC: {
+ /* In short: treat a load-linked like a normal load followed
+ by an assignment of the loaded (shadow) data to the result
+ temporary. Treat a store-conditional like a normal store,
+ and mark the result temporary as defined. */
+ if (st->Ist.LLSC.storedata == NULL) {
+ /* Load Linked */
+ IRType resTy
+ = typeOfIRTemp(mce->sb->tyenv, st->Ist.LLSC.result);
+ IRExpr* vanillaLoad
+ = IRExpr_Load(st->Ist.LLSC.end, resTy, st->Ist.LLSC.addr);
+ tl_assert(resTy == Ity_I64 || resTy == Ity_I32
+ || resTy == Ity_I16 || resTy == Ity_I8);
+ assign( 'B', mce, findShadowTmpB(mce, st->Ist.LLSC.result),
+ schemeE(mce, vanillaLoad));
+ } else {
+ /* Store conditional */
+ do_origins_Store( mce, st->Ist.LLSC.end,
+ st->Ist.LLSC.addr,
+ st->Ist.LLSC.storedata );
+ /* For the rationale behind this, see comments at the
+ place where the V-shadow for .result is constructed, in
+ do_shadow_LLSC. In short, we regard .result as
+ always-defined. */
+ assign( 'B', mce, findShadowTmpB(mce, st->Ist.LLSC.result),
+ mkU32(0) );
}
break;
}
+
case Ist_Put: {
Int b_offset
= MC_(get_otrack_shadow_offset)(
@@ -4965,15 +5053,18 @@
}
break;
}
+
case Ist_WrTmp:
assign( 'B', mce, findShadowTmpB(mce, st->Ist.WrTmp.tmp),
schemeE(mce, st->Ist.WrTmp.data) );
break;
+
case Ist_MBE:
case Ist_NoOp:
case Ist_Exit:
case Ist_IMark:
break;
+
default:
VG_(printf)("mc_translate.c: schemeS: unhandled: ");
ppIRStmt(st);
|
|
From: <sv...@va...> - 2009-11-26 17:17:52
|
Author: sewardj
Date: 2009-11-26 17:17:37 +0000 (Thu, 26 Nov 2009)
New Revision: 1930
Log:
Change the IR representation of load linked and store conditional.
They are now moved out into their own new IRStmt kind (IRStmt_LLSC),
and are not treated merely as variants of standard loads (IRExpr_Load)
or store (IRStmt_Store). This is necessary because load linked is a
load with a side effect (lodging a reservation), hence it cannot be an
IRExpr since IRExprs denote side-effect free value computations.
Fix up all front and back ends accordingly; also iropt.
Fixes #215771.
Modified:
trunk/priv/guest_amd64_toIR.c
trunk/priv/guest_arm_toIR.c
trunk/priv/guest_ppc_toIR.c
trunk/priv/guest_x86_toIR.c
trunk/priv/host_amd64_isel.c
trunk/priv/host_arm_isel.c
trunk/priv/host_ppc_isel.c
trunk/priv/host_x86_isel.c
trunk/priv/ir_defs.c
trunk/priv/ir_match.c
trunk/priv/ir_opt.c
trunk/pub/libvex_ir.h
Modified: trunk/priv/guest_amd64_toIR.c
===================================================================
--- trunk/priv/guest_amd64_toIR.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/guest_amd64_toIR.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -312,12 +312,12 @@
static void storeLE ( IRExpr* addr, IRExpr* data )
{
- stmt( IRStmt_Store(Iend_LE, IRTemp_INVALID, addr, data) );
+ stmt( IRStmt_Store(Iend_LE, addr, data) );
}
-static IRExpr* loadLE ( IRType ty, IRExpr* data )
+static IRExpr* loadLE ( IRType ty, IRExpr* addr )
{
- return IRExpr_Load(False, Iend_LE, ty, data);
+ return IRExpr_Load(Iend_LE, ty, addr);
}
static IROp mkSizedOp ( IRType ty, IROp op8 )
Modified: trunk/priv/guest_arm_toIR.c
===================================================================
--- trunk/priv/guest_arm_toIR.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/guest_arm_toIR.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -495,7 +495,7 @@
static void storeLE ( IRExpr* addr, IRExpr* data )
{
- stmt( IRStmt_Store(Iend_LE, IRTemp_INVALID, addr, data) );
+ stmt( IRStmt_Store(Iend_LE, addr, data) );
}
static IRExpr* unop ( IROp op, IRExpr* a )
@@ -543,9 +543,9 @@
}
#endif
-static IRExpr* loadLE ( IRType ty, IRExpr* data )
+static IRExpr* loadLE ( IRType ty, IRExpr* addr )
{
- return IRExpr_Load(False, Iend_LE, ty, data);
+ return IRExpr_Load(Iend_LE, ty, addr);
}
#if 0
Modified: trunk/priv/guest_ppc_toIR.c
===================================================================
--- trunk/priv/guest_ppc_toIR.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/guest_ppc_toIR.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -467,7 +467,7 @@
{
IRType tyA = typeOfIRExpr(irsb->tyenv, addr);
vassert(tyA == Ity_I32 || tyA == Ity_I64);
- stmt( IRStmt_Store(Iend_BE, IRTemp_INVALID, addr, data) );
+ stmt( IRStmt_Store(Iend_BE, addr, data) );
}
static IRExpr* unop ( IROp op, IRExpr* a )
@@ -517,22 +517,11 @@
}
/* This generates a normal (non load-linked) load. */
-static IRExpr* loadBE ( IRType ty, IRExpr* data )
+static IRExpr* loadBE ( IRType ty, IRExpr* addr )
{
- return IRExpr_Load(False, Iend_BE, ty, data);
+ return IRExpr_Load(Iend_BE, ty, addr);
}
-/* And this, a linked load. */
-static IRExpr* loadlinkedBE ( IRType ty, IRExpr* data )
-{
- if (mode64) {
- vassert(ty == Ity_I32 || ty == Ity_I64);
- } else {
- vassert(ty == Ity_I32);
- }
- return IRExpr_Load(True, Iend_BE, ty, data);
-}
-
static IRExpr* mkOR1 ( IRExpr* arg1, IRExpr* arg2 )
{
vassert(typeOfIRExpr(irsb->tyenv, arg1) == Ity_I1);
@@ -4861,7 +4850,8 @@
stmt( IRStmt_MBE(Imbe_Fence) );
break;
- case 0x014: // lwarx (Load Word and Reserve Indexed, PPC32 p458)
+ case 0x014: { // lwarx (Load Word and Reserve Indexed, PPC32 p458)
+ IRTemp res;
/* According to the PowerPC ISA version 2.05, b0 (called EH
in the documentation) is merely a hint bit to the
hardware, I think as to whether or not contention is
@@ -4872,10 +4862,13 @@
gen_SIGBUS_if_misaligned( EA, 4 );
// and actually do the load
- putIReg( rD_addr, mkWidenFrom32(ty, loadlinkedBE(Ity_I32, mkexpr(EA)),
- False) );
+ res = newTemp(Ity_I32);
+ stmt( IRStmt_LLSC(Iend_BE, res, mkexpr(EA), NULL/*this is a load*/) );
+
+ putIReg( rD_addr, mkWidenFrom32(ty, mkexpr(res), False) );
break;
-
+ }
+
case 0x096: {
// stwcx. (Store Word Conditional Indexed, PPC32 p532)
// Note this has to handle stwcx. in both 32- and 64-bit modes,
@@ -4896,7 +4889,7 @@
// Do the store, and get success/failure bit into resSC
resSC = newTemp(Ity_I1);
- stmt( IRStmt_Store(Iend_BE, resSC, mkexpr(EA), mkexpr(rS)) );
+ stmt( IRStmt_LLSC(Iend_BE, resSC, mkexpr(EA), mkexpr(rS)) );
// Set CR0[LT GT EQ S0] = 0b000 || XER[SO] on failure
// Set CR0[LT GT EQ S0] = 0b001 || XER[SO] on success
@@ -4948,7 +4941,8 @@
break;
/* 64bit Memsync */
- case 0x054: // ldarx (Load DWord and Reserve Indexed, PPC64 p473)
+ case 0x054: { // ldarx (Load DWord and Reserve Indexed, PPC64 p473)
+ IRTemp res;
/* According to the PowerPC ISA version 2.05, b0 (called EH
in the documentation) is merely a hint bit to the
hardware, I think as to whether or not contention is
@@ -4961,9 +4955,13 @@
gen_SIGBUS_if_misaligned( EA, 8 );
// and actually do the load
- putIReg( rD_addr, loadlinkedBE(Ity_I64, mkexpr(EA)) );
+ res = newTemp(Ity_I64);
+ stmt( IRStmt_LLSC(Iend_BE, res, mkexpr(EA), NULL/*this is a load*/) );
+
+ putIReg( rD_addr, mkexpr(res) );
break;
-
+ }
+
case 0x0D6: { // stdcx. (Store DWord Condition Indexd, PPC64 p581)
// A marginally simplified version of the stwcx. case
IRTemp rS = newTemp(Ity_I64);
@@ -4984,7 +4982,7 @@
// Do the store, and get success/failure bit into resSC
resSC = newTemp(Ity_I1);
- stmt( IRStmt_Store(Iend_BE, resSC, mkexpr(EA), mkexpr(rS)) );
+ stmt( IRStmt_LLSC(Iend_BE, resSC, mkexpr(EA), mkexpr(rS)) );
// Set CR0[LT GT EQ S0] = 0b000 || XER[SO] on failure
// Set CR0[LT GT EQ S0] = 0b001 || XER[SO] on success
Modified: trunk/priv/guest_x86_toIR.c
===================================================================
--- trunk/priv/guest_x86_toIR.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/guest_x86_toIR.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -648,7 +648,7 @@
static void storeLE ( IRExpr* addr, IRExpr* data )
{
- stmt( IRStmt_Store(Iend_LE, IRTemp_INVALID, addr, data) );
+ stmt( IRStmt_Store(Iend_LE, addr, data) );
}
static IRExpr* unop ( IROp op, IRExpr* a )
@@ -708,9 +708,9 @@
return IRExpr_Const(IRConst_V128(mask));
}
-static IRExpr* loadLE ( IRType ty, IRExpr* data )
+static IRExpr* loadLE ( IRType ty, IRExpr* addr )
{
- return IRExpr_Load(False, Iend_LE, ty, data);
+ return IRExpr_Load(Iend_LE, ty, addr);
}
static IROp mkSizedOp ( IRType ty, IROp op8 )
Modified: trunk/priv/host_amd64_isel.c
===================================================================
--- trunk/priv/host_amd64_isel.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/host_amd64_isel.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -860,8 +860,6 @@
/* We can't handle big-endian loads, nor load-linked. */
if (e->Iex.Load.end != Iend_LE)
goto irreducible;
- if (e->Iex.Load.isLL)
- goto irreducible;
if (ty == Ity_I64) {
addInstr(env, AMD64Instr_Alu64R(Aalu_MOV,
@@ -1963,7 +1961,7 @@
/* special case: 64-bit load from memory */
if (e->tag == Iex_Load && ty == Ity_I64
- && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ && e->Iex.Load.end == Iend_LE) {
AMD64AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
return AMD64RMI_Mem(am);
}
@@ -2749,7 +2747,7 @@
return lookupIRTemp(env, e->Iex.RdTmp.tmp);
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
AMD64AMode* am;
HReg res = newVRegV(env);
vassert(e->Iex.Load.ty == Ity_F32);
@@ -2873,7 +2871,7 @@
return res;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
AMD64AMode* am;
HReg res = newVRegV(env);
vassert(e->Iex.Load.ty == Ity_F64);
@@ -3178,7 +3176,7 @@
return dst;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
HReg dst = newVRegV(env);
AMD64AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 16, dst, am ));
@@ -3603,9 +3601,8 @@
IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
- IRTemp resSC = stmt->Ist.Store.resSC;
- if (tya != Ity_I64 || end != Iend_LE || resSC != IRTemp_INVALID)
+ if (tya != Ity_I64 || end != Iend_LE)
goto stmt_fail;
if (tyd == Ity_I64) {
Modified: trunk/priv/host_arm_isel.c
===================================================================
--- trunk/priv/host_arm_isel.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/host_arm_isel.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -757,9 +757,8 @@
IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
- IRTemp resSC = stmt->Ist.Store.resSC;
- if (tya != Ity_I32 || end != Iend_LE || resSC != IRTemp_INVALID)
+ if (tya != Ity_I32 || end != Iend_LE)
goto stmt_fail;
reg = iselIntExpr_R(env, stmt->Ist.Store.data);
Modified: trunk/priv/host_ppc_isel.c
===================================================================
--- trunk/priv/host_ppc_isel.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/host_ppc_isel.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -1169,32 +1169,14 @@
/* --------- LOAD --------- */
case Iex_Load: {
- HReg r_dst;
-
+ HReg r_dst;
+ PPCAMode* am_addr;
if (e->Iex.Load.end != Iend_BE)
goto irreducible;
-
- r_dst = newVRegI(env);
-
- if (e->Iex.Load.isLL) {
- /* lwarx or ldarx. Be simple; force address into a register. */
- HReg r_addr = iselWordExpr_R( env, e->Iex.Load.addr );
- if (ty == Ity_I32) {
- addInstr(env, PPCInstr_LoadL( 4, r_dst, r_addr, mode64 ));
- }
- else if (ty == Ity_I64 && mode64) {
- addInstr(env, PPCInstr_LoadL( 8, r_dst, r_addr, mode64 ));
- }
- else
- goto irreducible;
- } else {
- /* Normal load; use whatever amodes we can. */
- PPCAMode* am_addr
- = iselWordExpr_AMode( env, e->Iex.Load.addr, ty/*of xfer*/ );
- addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)),
- r_dst, am_addr, mode64 ));
- }
-
+ r_dst = newVRegI(env);
+ am_addr = iselWordExpr_AMode( env, e->Iex.Load.addr, ty/*of xfer*/ );
+ addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)),
+ r_dst, am_addr, mode64 ));
return r_dst;
/*NOTREACHED*/
}
@@ -1551,7 +1533,7 @@
DECLARE_PATTERN(p_LDbe16_then_16Uto32);
DEFINE_PATTERN(p_LDbe16_then_16Uto32,
unop(Iop_16Uto32,
- IRExpr_Load(False,Iend_BE,Ity_I16,bind(0))) );
+ IRExpr_Load(Iend_BE,Ity_I16,bind(0))) );
if (matchIRExpr(&mi,p_LDbe16_then_16Uto32,e)) {
HReg r_dst = newVRegI(env);
PPCAMode* amode
@@ -2609,7 +2591,7 @@
vassert(typeOfIRExpr(env->type_env,e) == Ity_I64);
/* 64-bit load */
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
HReg r_addr = iselWordExpr_R(env, e->Iex.Load.addr);
@@ -2967,7 +2949,7 @@
return lookupIRTemp(env, e->Iex.RdTmp.tmp);
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
PPCAMode* am_addr;
HReg r_dst = newVRegF(env);
vassert(e->Iex.Load.ty == Ity_F32);
@@ -3115,7 +3097,7 @@
}
/* --------- LOAD --------- */
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
HReg r_dst = newVRegF(env);
PPCAMode* am_addr;
vassert(e->Iex.Load.ty == Ity_F64);
@@ -3366,7 +3348,7 @@
return dst;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
PPCAMode* am_addr;
HReg v_dst = newVRegV(env);
vassert(e->Iex.Load.ty == Ity_V128);
@@ -3770,7 +3752,6 @@
IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
- IRTemp resSC = stmt->Ist.Store.resSC;
if (end != Iend_BE)
goto stmt_fail;
@@ -3779,34 +3760,6 @@
if (mode64 && (tya != Ity_I64))
goto stmt_fail;
- if (resSC != IRTemp_INVALID) {
- /* deal with store-conditional */
- HReg r_res = lookupIRTemp(env, resSC);
- HReg r_a = iselWordExpr_R(env, stmt->Ist.Store.addr);
- HReg r_src = iselWordExpr_R(env, stmt->Ist.Store.data);
- HReg r_tmp = newVRegI(env);
- if (tyd == Ity_I32 || (tyd == Ity_I64 && mode64)) {
- addInstr(env, PPCInstr_StoreC( tyd==Ity_I32 ? 4 : 8,
- r_a, r_src, mode64 ));
- addInstr(env, PPCInstr_MfCR( r_tmp ));
- addInstr(env, PPCInstr_Shft(
- Pshft_SHR,
- env->mode64 ? False : True/*F:64-bit, T:32-bit shift*/,
- r_tmp, r_tmp,
- PPCRH_Imm(False/*unsigned*/, 29)));
- /* Probably unnecessary, since the IR dest type is Ity_I1,
- and so we are entitled to leave whatever junk we like
- drifting round in the upper 31 or 63 bits of r_res.
- However, for the sake of conservativeness .. */
- addInstr(env, PPCInstr_Alu(
- Palu_AND,
- r_res, r_tmp,
- PPCRH_Imm(False/*signed*/, 1)));
- return;
- }
- goto stmt_fail;
- }
-
if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32 ||
(mode64 && (tyd == Ity_I64))) {
PPCAMode* am_addr
@@ -3979,6 +3932,67 @@
break;
}
+ /* --------- Load Linked or Store Conditional --------- */
+ case Ist_LLSC: {
+ IRTemp res = stmt->Ist.LLSC.result;
+ IRType tyRes = typeOfIRTemp(env->type_env, res);
+ IRType tyAddr = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.addr);
+
+ if (stmt->Ist.LLSC.end != Iend_BE)
+ goto stmt_fail;
+ if (!mode64 && (tyAddr != Ity_I32))
+ goto stmt_fail;
+ if (mode64 && (tyAddr != Ity_I64))
+ goto stmt_fail;
+
+ if (stmt->Ist.LLSC.storedata == NULL) {
+ /* LL */
+ HReg r_addr = iselWordExpr_R( env, stmt->Ist.LLSC.addr );
+ HReg r_dst = lookupIRTemp(env, res);
+ if (tyRes == Ity_I32) {
+ addInstr(env, PPCInstr_LoadL( 4, r_dst, r_addr, mode64 ));
+ return;
+ }
+ if (tyRes == Ity_I64 && mode64) {
+ addInstr(env, PPCInstr_LoadL( 8, r_dst, r_addr, mode64 ));
+ return;
+ }
+ /* fallthru */;
+ } else {
+ /* SC */
+ HReg r_res = lookupIRTemp(env, res); /* :: Ity_I1 */
+ HReg r_a = iselWordExpr_R(env, stmt->Ist.LLSC.addr);
+ HReg r_src = iselWordExpr_R(env, stmt->Ist.LLSC.storedata);
+ HReg r_tmp = newVRegI(env);
+ IRType tyData = typeOfIRExpr(env->type_env,
+ stmt->Ist.LLSC.storedata);
+ vassert(tyRes == Ity_I1);
+ if (tyData == Ity_I32 || (tyData == Ity_I64 && mode64)) {
+ addInstr(env, PPCInstr_StoreC( tyData==Ity_I32 ? 4 : 8,
+ r_a, r_src, mode64 ));
+ addInstr(env, PPCInstr_MfCR( r_tmp ));
+ addInstr(env, PPCInstr_Shft(
+ Pshft_SHR,
+ env->mode64 ? False : True
+ /*F:64-bit, T:32-bit shift*/,
+ r_tmp, r_tmp,
+ PPCRH_Imm(False/*unsigned*/, 29)));
+ /* Probably unnecessary, since the IR dest type is Ity_I1,
+ and so we are entitled to leave whatever junk we like
+ drifting round in the upper 31 or 63 bits of r_res.
+ However, for the sake of conservativeness .. */
+ addInstr(env, PPCInstr_Alu(
+ Palu_AND,
+ r_res, r_tmp,
+ PPCRH_Imm(False/*signed*/, 1)));
+ return;
+ }
+ /* fallthru */
+ }
+ goto stmt_fail;
+ /*NOTREACHED*/
+ }
+
/* --------- Call to DIRTY helper --------- */
case Ist_Dirty: {
IRType retty;
Modified: trunk/priv/host_x86_isel.c
===================================================================
--- trunk/priv/host_x86_isel.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/host_x86_isel.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -763,8 +763,6 @@
/* We can't handle big-endian loads, nor load-linked. */
if (e->Iex.Load.end != Iend_LE)
goto irreducible;
- if (e->Iex.Load.isLL)
- goto irreducible;
if (ty == Ity_I32) {
addInstr(env, X86Instr_Alu32R(Xalu_MOV,
@@ -1069,7 +1067,7 @@
DECLARE_PATTERN(p_LDle8_then_8Uto32);
DEFINE_PATTERN(p_LDle8_then_8Uto32,
unop(Iop_8Uto32,
- IRExpr_Load(False,Iend_LE,Ity_I8,bind(0))) );
+ IRExpr_Load(Iend_LE,Ity_I8,bind(0))) );
if (matchIRExpr(&mi,p_LDle8_then_8Uto32,e)) {
HReg dst = newVRegI(env);
X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
@@ -1083,7 +1081,7 @@
DECLARE_PATTERN(p_LDle8_then_8Sto32);
DEFINE_PATTERN(p_LDle8_then_8Sto32,
unop(Iop_8Sto32,
- IRExpr_Load(False,Iend_LE,Ity_I8,bind(0))) );
+ IRExpr_Load(Iend_LE,Ity_I8,bind(0))) );
if (matchIRExpr(&mi,p_LDle8_then_8Sto32,e)) {
HReg dst = newVRegI(env);
X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
@@ -1097,7 +1095,7 @@
DECLARE_PATTERN(p_LDle16_then_16Uto32);
DEFINE_PATTERN(p_LDle16_then_16Uto32,
unop(Iop_16Uto32,
- IRExpr_Load(False,Iend_LE,Ity_I16,bind(0))) );
+ IRExpr_Load(Iend_LE,Ity_I16,bind(0))) );
if (matchIRExpr(&mi,p_LDle16_then_16Uto32,e)) {
HReg dst = newVRegI(env);
X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
@@ -1536,7 +1534,7 @@
/* special case: 32-bit load from memory */
if (e->tag == Iex_Load && ty == Ity_I32
- && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ && e->Iex.Load.end == Iend_LE) {
X86AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
return X86RMI_Mem(am);
}
@@ -1955,7 +1953,7 @@
}
/* 64-bit load */
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
HReg tLo, tHi;
X86AMode *am0, *am4;
vassert(e->Iex.Load.ty == Ity_I64);
@@ -2743,7 +2741,7 @@
return lookupIRTemp(env, e->Iex.RdTmp.tmp);
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
X86AMode* am;
HReg res = newVRegF(env);
vassert(e->Iex.Load.ty == Ity_F32);
@@ -2867,7 +2865,7 @@
return freg;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
X86AMode* am;
HReg res = newVRegF(env);
vassert(e->Iex.Load.ty == Ity_F64);
@@ -3119,7 +3117,7 @@
return dst;
}
- if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE && !e->Iex.Load.isLL) {
+ if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
HReg dst = newVRegV(env);
X86AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
addInstr(env, X86Instr_SseLdSt( True/*load*/, dst, am ));
@@ -3140,7 +3138,7 @@
DECLARE_PATTERN(p_zwiden_load64);
DEFINE_PATTERN(p_zwiden_load64,
unop(Iop_64UtoV128,
- IRExpr_Load(False,Iend_LE,Ity_I64,bind(0))));
+ IRExpr_Load(Iend_LE,Ity_I64,bind(0))));
if (matchIRExpr(&mi, p_zwiden_load64, e)) {
X86AMode* am = iselIntExpr_AMode(env, mi.bindee[0]);
HReg dst = newVRegV(env);
@@ -3608,9 +3606,8 @@
IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
- IRTemp resSC = stmt->Ist.Store.resSC;
- if (tya != Ity_I32 || end != Iend_LE || resSC != IRTemp_INVALID)
+ if (tya != Ity_I32 || end != Iend_LE)
goto stmt_fail;
if (tyd == Ity_I32) {
Modified: trunk/priv/ir_defs.c
===================================================================
--- trunk/priv/ir_defs.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/ir_defs.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -648,8 +648,7 @@
vex_printf( ")" );
break;
case Iex_Load:
- vex_printf( "LD%s%s:", e->Iex.Load.end==Iend_LE ? "le" : "be",
- e->Iex.Load.isLL ? "-LL" : "" );
+ vex_printf( "LD%s:", e->Iex.Load.end==Iend_LE ? "le" : "be" );
ppIRType(e->Iex.Load.ty);
vex_printf( "(" );
ppIRExpr(e->Iex.Load.addr);
@@ -829,20 +828,31 @@
ppIRExpr(s->Ist.WrTmp.data);
break;
case Ist_Store:
- if (s->Ist.Store.resSC != IRTemp_INVALID) {
- ppIRTemp(s->Ist.Store.resSC);
- vex_printf( " = SC( " );
- }
vex_printf( "ST%s(", s->Ist.Store.end==Iend_LE ? "le" : "be" );
ppIRExpr(s->Ist.Store.addr);
vex_printf( ") = ");
ppIRExpr(s->Ist.Store.data);
- if (s->Ist.Store.resSC != IRTemp_INVALID)
- vex_printf( " )" );
break;
case Ist_CAS:
ppIRCAS(s->Ist.CAS.details);
break;
+ case Ist_LLSC:
+ if (s->Ist.LLSC.storedata == NULL) {
+ ppIRTemp(s->Ist.LLSC.result);
+ vex_printf(" = LD%s-Linked(",
+ s->Ist.LLSC.end==Iend_LE ? "le" : "be");
+ ppIRExpr(s->Ist.LLSC.addr);
+ vex_printf(")");
+ } else {
+ ppIRTemp(s->Ist.LLSC.result);
+ vex_printf(" = ( ST%s-Cond(",
+ s->Ist.LLSC.end==Iend_LE ? "le" : "be");
+ ppIRExpr(s->Ist.LLSC.addr);
+ vex_printf(") = ");
+ ppIRExpr(s->Ist.LLSC.storedata);
+ vex_printf(" )");
+ }
+ break;
case Ist_Dirty:
ppIRDirty(s->Ist.Dirty.details);
break;
@@ -1061,10 +1071,9 @@
e->Iex.Unop.arg = arg;
return e;
}
-IRExpr* IRExpr_Load ( Bool isLL, IREndness end, IRType ty, IRExpr* addr ) {
+IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr ) {
IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
e->tag = Iex_Load;
- e->Iex.Load.isLL = isLL;
e->Iex.Load.end = end;
e->Iex.Load.ty = ty;
e->Iex.Load.addr = addr;
@@ -1257,14 +1266,12 @@
s->Ist.WrTmp.data = data;
return s;
}
-IRStmt* IRStmt_Store ( IREndness end,
- IRTemp resSC, IRExpr* addr, IRExpr* data ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
- s->tag = Ist_Store;
- s->Ist.Store.end = end;
- s->Ist.Store.resSC = resSC;
- s->Ist.Store.addr = addr;
- s->Ist.Store.data = data;
+IRStmt* IRStmt_Store ( IREndness end, IRExpr* addr, IRExpr* data ) {
+ IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ s->tag = Ist_Store;
+ s->Ist.Store.end = end;
+ s->Ist.Store.addr = addr;
+ s->Ist.Store.data = data;
vassert(end == Iend_LE || end == Iend_BE);
return s;
}
@@ -1274,6 +1281,16 @@
s->Ist.CAS.details = cas;
return s;
}
+IRStmt* IRStmt_LLSC ( IREndness end,
+ IRTemp result, IRExpr* addr, IRExpr* storedata ) {
+ IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ s->tag = Ist_LLSC;
+ s->Ist.LLSC.end = end;
+ s->Ist.LLSC.result = result;
+ s->Ist.LLSC.addr = addr;
+ s->Ist.LLSC.storedata = storedata;
+ return s;
+}
IRStmt* IRStmt_Dirty ( IRDirty* d )
{
IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
@@ -1418,8 +1435,7 @@
return IRExpr_Unop(e->Iex.Unop.op,
deepCopyIRExpr(e->Iex.Unop.arg));
case Iex_Load:
- return IRExpr_Load(e->Iex.Load.isLL,
- e->Iex.Load.end,
+ return IRExpr_Load(e->Iex.Load.end,
e->Iex.Load.ty,
deepCopyIRExpr(e->Iex.Load.addr));
case Iex_Const:
@@ -1490,11 +1506,17 @@
deepCopyIRExpr(s->Ist.WrTmp.data));
case Ist_Store:
return IRStmt_Store(s->Ist.Store.end,
- s->Ist.Store.resSC,
deepCopyIRExpr(s->Ist.Store.addr),
deepCopyIRExpr(s->Ist.Store.data));
case Ist_CAS:
return IRStmt_CAS(deepCopyIRCAS(s->Ist.CAS.details));
+ case Ist_LLSC:
+ return IRStmt_LLSC(s->Ist.LLSC.end,
+ s->Ist.LLSC.result,
+ deepCopyIRExpr(s->Ist.LLSC.addr),
+ s->Ist.LLSC.storedata
+ ? deepCopyIRExpr(s->Ist.LLSC.storedata)
+ : NULL);
case Ist_Dirty:
return IRStmt_Dirty(deepCopyIRDirty(s->Ist.Dirty.details));
case Ist_MBE:
@@ -2138,6 +2160,10 @@
&& isIRAtom(cas->expdLo)
&& (cas->dataHi ? isIRAtom(cas->dataHi) : True)
&& isIRAtom(cas->dataLo) );
+ case Ist_LLSC:
+ return toBool( isIRAtom(st->Ist.LLSC.addr)
+ && (st->Ist.LLSC.storedata
+ ? isIRAtom(st->Ist.LLSC.storedata) : True) );
case Ist_Dirty:
di = st->Ist.Dirty.details;
if (!isIRAtom(di->guard))
@@ -2329,6 +2355,11 @@
useBeforeDef_Expr(bb,stmt,cas->dataHi,def_counts);
useBeforeDef_Expr(bb,stmt,cas->dataLo,def_counts);
break;
+ case Ist_LLSC:
+ useBeforeDef_Expr(bb,stmt,stmt->Ist.LLSC.addr,def_counts);
+ if (stmt->Ist.LLSC.storedata != NULL)
+ useBeforeDef_Expr(bb,stmt,stmt->Ist.LLSC.storedata,def_counts);
+ break;
case Ist_Dirty:
d = stmt->Ist.Dirty.details;
for (i = 0; d->args[i] != NULL; i++)
@@ -2606,9 +2637,6 @@
sanityCheckFail(bb,stmt,"IRStmt.Store.data: cannot Store :: Ity_I1");
if (stmt->Ist.Store.end != Iend_LE && stmt->Ist.Store.end != Iend_BE)
sanityCheckFail(bb,stmt,"Ist.Store.end: bogus endianness");
- if (stmt->Ist.Store.resSC != IRTemp_INVALID
- && typeOfIRTemp(tyenv, stmt->Ist.Store.resSC) != Ity_I1)
- sanityCheckFail(bb,stmt,"Ist.Store.resSC: not :: Ity_I1");
break;
case Ist_CAS:
cas = stmt->Ist.CAS.details;
@@ -2660,6 +2688,27 @@
bad_cas:
sanityCheckFail(bb,stmt,"IRStmt.CAS: ill-formed");
break;
+ case Ist_LLSC: {
+ IRType tyRes;
+ if (typeOfIRExpr(tyenv, stmt->Ist.LLSC.addr) != gWordTy)
+ sanityCheckFail(bb,stmt,"IRStmt.LLSC.addr: not :: guest word type");
+ if (stmt->Ist.LLSC.end != Iend_LE && stmt->Ist.LLSC.end != Iend_BE)
+ sanityCheckFail(bb,stmt,"Ist.LLSC.end: bogus endianness");
+ tyRes = typeOfIRTemp(tyenv, stmt->Ist.LLSC.result);
+ if (stmt->Ist.LLSC.storedata == NULL) {
+ /* it's a LL */
+ if (tyRes != Ity_I64 && tyRes != Ity_I32)
+ sanityCheckFail(bb,stmt,"Ist.LLSC(LL).result :: bogus");
+ } else {
+ /* it's a SC */
+ if (tyRes != Ity_I1)
+ sanityCheckFail(bb,stmt,"Ist.LLSC(SC).result: not :: Ity_I1");
+ tyData = typeOfIRExpr(tyenv, stmt->Ist.LLSC.storedata);
+ if (tyData != Ity_I64 && tyData != Ity_I32)
+ sanityCheckFail(bb,stmt,"Ist.LLSC(SC).result :: storedata bogus");
+ }
+ break;
+ }
case Ist_Dirty:
/* Mostly check for various kinds of ill-formed dirty calls. */
d = stmt->Ist.Dirty.details;
@@ -2790,17 +2839,6 @@
"IRStmt.Tmp: destination tmp is assigned more than once");
break;
case Ist_Store:
- if (stmt->Ist.Store.resSC != IRTemp_INVALID) {
- IRTemp resSC = stmt->Ist.Store.resSC;
- if (resSC < 0 || resSC >= n_temps)
- sanityCheckFail(bb, stmt,
- "IRStmt.Store.resSC: destination tmp is out of range");
- def_counts[resSC]++;
- if (def_counts[resSC] > 1)
- sanityCheckFail(bb, stmt,
- "IRStmt.Store.resSC: destination tmp "
- "is assigned more than once");
- }
break;
case Ist_Dirty:
if (stmt->Ist.Dirty.details->tmp != IRTemp_INVALID) {
@@ -2816,7 +2854,6 @@
break;
case Ist_CAS:
cas = stmt->Ist.CAS.details;
-
if (cas->oldHi != IRTemp_INVALID) {
if (cas->oldHi < 0 || cas->oldHi >= n_temps)
sanityCheckFail(bb, stmt,
@@ -2827,16 +2864,25 @@
"IRStmt.CAS: destination tmpHi is assigned more than once");
}
if (cas->oldLo < 0 || cas->oldLo >= n_temps)
- sanityCheckFail(bb, stmt,
- "IRStmt.CAS: destination tmpLo is out of range");
- def_counts[cas->oldLo]++;
- if (def_counts[cas->oldLo] > 1)
- sanityCheckFail(bb, stmt,
- "IRStmt.CAS: destination tmpLo is assigned more than once");
- break;
+ sanityCheckFail(bb, stmt,
+ "IRStmt.CAS: destination tmpLo is out of range");
+ def_counts[cas->oldLo]++;
+ if (def_counts[cas->oldLo] > 1)
+ sanityCheckFail(bb, stmt,
+ "IRStmt.CAS: destination tmpLo is assigned more than once");
+ break;
+ case Ist_LLSC:
+ if (stmt->Ist.LLSC.result < 0 || stmt->Ist.LLSC.result >= n_temps)
+ sanityCheckFail(bb, stmt,
+ "IRStmt.LLSC: destination tmp is out of range");
+ def_counts[stmt->Ist.LLSC.result]++;
+ if (def_counts[stmt->Ist.LLSC.result] > 1)
+ sanityCheckFail(bb, stmt,
+ "IRStmt.LLSC: destination tmp is assigned more than once");
+ break;
default:
- /* explicitly handle the rest, so as to keep gcc quiet */
- break;
+ /* explicitly handle the rest, so as to keep gcc quiet */
+ break;
}
}
Modified: trunk/priv/ir_match.c
===================================================================
--- trunk/priv/ir_match.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/ir_match.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -90,7 +90,6 @@
return True;
case Iex_Load:
if (e->tag != Iex_Load) return False;
- if (p->Iex.Load.isLL != e->Iex.Load.isLL) return False;
if (p->Iex.Load.end != e->Iex.Load.end) return False;
if (p->Iex.Load.ty != e->Iex.Load.ty) return False;
if (!matchWrk(mi, p->Iex.Load.addr, e->Iex.Load.addr))
Modified: trunk/priv/ir_opt.c
===================================================================
--- trunk/priv/ir_opt.c 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/priv/ir_opt.c 2009-11-26 17:17:37 UTC (rev 1930)
@@ -334,8 +334,7 @@
case Iex_Load:
t1 = newIRTemp(bb->tyenv, ty);
addStmtToIRSB(bb, IRStmt_WrTmp(t1,
- IRExpr_Load(ex->Iex.Load.isLL,
- ex->Iex.Load.end,
+ IRExpr_Load(ex->Iex.Load.end,
ex->Iex.Load.ty,
flatten_Expr(bb, ex->Iex.Load.addr))));
return IRExpr_RdTmp(t1);
@@ -426,8 +425,7 @@
case Ist_Store:
e1 = flatten_Expr(bb, st->Ist.Store.addr);
e2 = flatten_Expr(bb, st->Ist.Store.data);
- addStmtToIRSB(bb, IRStmt_Store(st->Ist.Store.end,
- st->Ist.Store.resSC, e1,e2));
+ addStmtToIRSB(bb, IRStmt_Store(st->Ist.Store.end, e1,e2));
break;
case Ist_CAS:
cas = st->Ist.CAS.details;
@@ -440,6 +438,14 @@
e1, e2, e3, e4, e5 );
addStmtToIRSB(bb, IRStmt_CAS(cas2));
break;
+ case Ist_LLSC:
+ e1 = flatten_Expr(bb, st->Ist.LLSC.addr);
+ e2 = st->Ist.LLSC.storedata
+ ? flatten_Expr(bb, st->Ist.LLSC.storedata)
+ : NULL;
+ addStmtToIRSB(bb, IRStmt_LLSC(st->Ist.LLSC.end,
+ st->Ist.LLSC.result, e1, e2));
+ break;
case Ist_Dirty:
d = st->Ist.Dirty.details;
d2 = emptyIRDirty();
@@ -724,7 +730,7 @@
enough do a lot better if needed. */
/* Probably also overly-conservative, but also dump everything
if we hit a memory bus event (fence, lock, unlock). Ditto
- AbiHints and CASs. */
+ AbiHints, CASs, LLs and SCs. */
case Ist_AbiHint:
vassert(isIRAtom(st->Ist.AbiHint.base));
vassert(isIRAtom(st->Ist.AbiHint.nia));
@@ -732,6 +738,7 @@
case Ist_MBE:
case Ist_Dirty:
case Ist_CAS:
+ case Ist_LLSC:
for (j = 0; j < env->used; j++)
env->inuse[j] = False;
break;
@@ -1674,7 +1681,6 @@
case Iex_Load:
vassert(isIRAtom(ex->Iex.Load.addr));
return IRExpr_Load(
- ex->Iex.Load.isLL,
ex->Iex.Load.end,
ex->Iex.Load.ty,
subst_Expr(env, ex->Iex.Load.addr)
@@ -1763,7 +1769,6 @@
vassert(isIRAtom(st->Ist.Store.data));
return IRStmt_Store(
st->Ist.Store.end,
- st->Ist.Store.resSC,
fold_Expr(subst_Expr(env, st->Ist.Store.addr)),
fold_Expr(subst_Expr(env, st->Ist.Store.data))
);
@@ -1787,6 +1792,19 @@
return IRStmt_CAS(cas2);
}
+ case Ist_LLSC:
+ vassert(isIRAtom(st->Ist.LLSC.addr));
+ if (st->Ist.LLSC.storedata)
+ vassert(isIRAtom(st->Ist.LLSC.storedata));
+ return IRStmt_LLSC(
+ st->Ist.LLSC.end,
+ st->Ist.LLSC.result,
+ fold_Expr(subst_Expr(env, st->Ist.LLSC.addr)),
+ st->Ist.LLSC.storedata
+ ? fold_Expr(subst_Expr(env, st->Ist.LLSC.storedata))
+ : NULL
+ );
+
case Ist_Dirty: {
Int i;
IRDirty *d, *d2;
@@ -2022,6 +2040,11 @@
addUses_Expr(set, cas->dataHi);
addUses_Expr(set, cas->dataLo);
return;
+ case Ist_LLSC:
+ addUses_Expr(set, st->Ist.LLSC.addr);
+ if (st->Ist.LLSC.storedata)
+ addUses_Expr(set, st->Ist.LLSC.storedata);
+ return;
case Ist_Dirty:
d = st->Ist.Dirty.details;
if (d->mFx != Ifx_None)
@@ -2608,7 +2631,8 @@
to do the no-overlap assessments needed for Put/PutI.
*/
switch (st->tag) {
- case Ist_Dirty: case Ist_Store: case Ist_MBE: case Ist_CAS:
+ case Ist_Dirty: case Ist_Store: case Ist_MBE:
+ case Ist_CAS: case Ist_LLSC:
paranoia = 2; break;
case Ist_Put: case Ist_PutI:
paranoia = 1; break;
@@ -3299,8 +3323,6 @@
deltaIRExpr(st->Ist.Exit.guard, delta);
break;
case Ist_Store:
- if (st->Ist.Store.resSC != IRTemp_INVALID)
- st->Ist.Store.resSC += delta;
deltaIRExpr(st->Ist.Store.addr, delta);
deltaIRExpr(st->Ist.Store.data, delta);
break;
@@ -3316,6 +3338,12 @@
deltaIRExpr(st->Ist.CAS.details->dataHi, delta);
deltaIRExpr(st->Ist.CAS.details->dataLo, delta);
break;
+ case Ist_LLSC:
+ st->Ist.LLSC.result += delta;
+ deltaIRExpr(st->Ist.LLSC.addr, delta);
+ if (st->Ist.LLSC.storedata)
+ deltaIRExpr(st->Ist.LLSC.storedata, delta);
+ break;
case Ist_Dirty:
d = st->Ist.Dirty.details;
deltaIRExpr(d->guard, delta);
@@ -3780,6 +3808,11 @@
aoccCount_Expr(uses, cas->dataHi);
aoccCount_Expr(uses, cas->dataLo);
return;
+ case Ist_LLSC:
+ aoccCount_Expr(uses, st->Ist.LLSC.addr);
+ if (st->Ist.LLSC.storedata)
+ aoccCount_Expr(uses, st->Ist.LLSC.storedata);
+ return;
case Ist_Dirty:
d = st->Ist.Dirty.details;
if (d->mFx != Ifx_None)
@@ -3966,7 +3999,6 @@
);
case Iex_Load:
return IRExpr_Load(
- e->Iex.Load.isLL,
e->Iex.Load.end,
e->Iex.Load.ty,
atbSubst_Expr(env, e->Iex.Load.addr)
@@ -4003,7 +4035,6 @@
case Ist_Store:
return IRStmt_Store(
st->Ist.Store.end,
- st->Ist.Store.resSC,
atbSubst_Expr(env, st->Ist.Store.addr),
atbSubst_Expr(env, st->Ist.Store.data)
);
@@ -4048,6 +4079,14 @@
atbSubst_Expr(env, cas->dataLo)
);
return IRStmt_CAS(cas2);
+ case Ist_LLSC:
+ return IRStmt_LLSC(
+ st->Ist.LLSC.end,
+ st->Ist.LLSC.result,
+ atbSubst_Expr(env, st->Ist.LLSC.addr),
+ st->Ist.LLSC.storedata
+ ? atbSubst_Expr(env, st->Ist.LLSC.storedata) : NULL
+ );
case Ist_Dirty:
d = st->Ist.Dirty.details;
d2 = emptyIRDirty();
@@ -4189,15 +4228,13 @@
/* be True if this stmt writes memory or might do (==> we don't
want to reorder other loads or stores relative to it). Also,
- a load-linked falls under this classification, since we
+ both LL and SC fall under this classification, since we
really ought to be conservative and not reorder any other
- memory transactions relative to it. */
+ memory transactions relative to them. */
stmtStores
= toBool( st->tag == Ist_Store
- || (st->tag == Ist_WrTmp
- && st->Ist.WrTmp.data->tag == Iex_Load
- && st->Ist.WrTmp.data->Iex.Load.isLL)
- || st->tag == Ist_Dirty );
+ || st->tag == Ist_Dirty
+ || st->tag == Ist_LLSC );
for (k = A_NENV-1; k >= 0; k--) {
if (env[k].bindee == NULL)
@@ -4388,6 +4425,11 @@
vassert(cas->dataHi == NULL || isIRAtom(cas->dataHi));
vassert(isIRAtom(cas->dataLo));
break;
+ case Ist_LLSC:
+ vassert(isIRAtom(st->Ist.LLSC.addr));
+ if (st->Ist.LLSC.storedata)
+ vassert(isIRAtom(st->Ist.LLSC.storedata));
+ break;
case Ist_Dirty:
d = st->Ist.Dirty.details;
vassert(isIRAtom(d->guard));
@@ -4406,7 +4448,7 @@
default:
bad:
ppIRStmt(st);
- vpanic("hasGetIorPutI");
+ vpanic("considerExpensives");
}
}
}
Modified: trunk/pub/libvex_ir.h
===================================================================
--- trunk/pub/libvex_ir.h 2009-11-22 23:43:17 UTC (rev 1929)
+++ trunk/pub/libvex_ir.h 2009-11-26 17:17:37 UTC (rev 1930)
@@ -1044,20 +1044,13 @@
IRExpr* arg; /* operand */
} Unop;
- /* A load from memory. If .isLL is True then this load also
- lodges a reservation (ppc-style lwarx/ldarx operation). If
- .isLL is True, then also, the address must be naturally
- aligned - any misaligned addresses should be caught by a
- dominating IR check and side exit. This alignment
- restriction exists because on at least some LL/SC platforms
- (ppc), lwarx etc will trap w/ SIGBUS on misaligned addresses,
- and we have to actually generate lwarx on the host, and we
- don't want it trapping on the host.
-
+ /* A load from memory -- a normal load, not a load-linked.
+ Load-Linkeds (and Store-Conditionals) are instead represented
+ by IRStmt.LLSC since Load-Linkeds have side effects and so
+ are not semantically valid IRExpr's.
ppIRExpr output: LD<end>:<ty>(<addr>), eg. LDle:I32(t1)
*/
struct {
- Bool isLL; /* True iff load makes a reservation */
IREndness end; /* Endian-ness of the load */
IRType ty; /* Type of the loaded value */
IRExpr* addr; /* Address being loaded from */
@@ -1141,8 +1134,7 @@
IRExpr* arg2, IRExpr* arg3 );
extern IRExpr* IRExpr_Binop ( IROp op, IRExpr* arg1, IRExpr* arg2 );
extern IRExpr* IRExpr_Unop ( IROp op, IRExpr* arg );
-extern IRExpr* IRExpr_Load ( Bool isLL, IREndness end,
- IRType ty, IRExpr* addr );
+extern IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr );
extern IRExpr* IRExpr_Const ( IRConst* con );
extern IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args );
extern IRExpr* IRExpr_Mux0X ( IRExpr* cond, IRExpr* expr0, IRExpr* exprX );
@@ -1483,6 +1475,7 @@
Ist_WrTmp,
Ist_Store,
Ist_CAS,
+ Ist_LLSC,
Ist_Dirty,
Ist_MBE, /* META (maybe) */
Ist_Exit
@@ -1578,28 +1571,13 @@
IRExpr* data; /* Expression (RHS of assignment) */
} WrTmp;
- /* Write a value to memory. Normally scRes is
- IRTemp_INVALID, denoting a normal store. If scRes is not
- IRTemp_INVALID, then this is a store-conditional, which
- may fail or succeed depending on the outcome of a
- previously lodged reservation on this address. scRes is
- written 1 if the store succeeds and 0 if it fails, and
- must have type Ity_I1.
-
- If scRes is not IRTemp_INVALID, then also, the address
- must be naturally aligned - any misaligned addresses
- should be caught by a dominating IR check and side exit.
- This alignment restriction exists because on at least some
- LL/SC platforms (ppc), stwcx. etc will trap w/ SIGBUS on
- misaligned addresses, and we have to actually generate
- stwcx. on the host, and we don't want it trapping on the
- host.
-
+ /* Write a value to memory. This is a normal store, not a
+ Store-Conditional. To represent a Store-Conditional,
+ instead use IRStmt.LLSC.
ppIRStmt output: ST<end>(<addr>) = <data>, eg. STle(t1) = t2
*/
struct {
IREndness end; /* Endianness of the store */
- IRTemp resSC; /* result of SC goes here (1 == success) */
IRExpr* addr; /* store address */
IRExpr* data; /* value to write */
} Store;
@@ -1622,6 +1600,57 @@
IRCAS* details;
} CAS;
+ /* Either Load-Linked or Store-Conditional, depending on
+ STOREDATA.
+
+ If STOREDATA is NULL then this is a Load-Linked, meaning
+ that data is loaded from memory as normal, but a
+ 'reservation' for the address is also lodged in the
+ hardware.
+
+ result = Load-Linked(addr, end)
+
+ The data transfer type is the type of RESULT (I32, I64,
+ etc). ppIRStmt output:
+
+ result = LD<end>-Linked(<addr>), eg. LDbe-Linked(t1)
+
+ If STOREDATA is not NULL then this is a Store-Conditional,
+ hence:
+
+ result = Store-Conditional(addr, storedata, end)
+
+ The data transfer type is the type of STOREDATA and RESULT
+ has type Ity_I1. The store may fail or succeed depending
+ on the state of a previously lodged reservation on this
+ address. RESULT is written 1 if the store succeeds and 0
+ if it fails. eg ppIRStmt output:
+
+ result = ( ST<end>-Cond(<addr>) = <storedata> )
+ eg t3 = ( STbe-Cond(t1, t2) )
+
+ In all cases, the address must be naturally aligned for
+ the transfer type -- any misaligned addresses should be
+ caught by a dominating IR check and side exit. This
+ alignment restriction exists because on at least some
+ LL/SC platforms (ppc), stwcx. etc will trap w/ SIGBUS on
+ misaligned addresses, and we have to actually generate
+ stwcx. on the host, and we don't want it trapping on the
+ host.
+
+ Summary of rules for transfer type:
+ STOREDATA == NULL (LL):
+ transfer type = type of RESULT
+ STOREDATA != NULL (SC):
+ transfer type = type of STOREDATA, and RESULT :: Ity_I1
+ */
+ struct {
+ IREndness end;
+ IRTemp result;
+ IRExpr* addr;
+ IRExpr* storedata; /* NULL => LL, non-NULL => SC */
+ } LLSC;
+
/* Call (possibly conditionally) a C function that has side
effects (ie. is "dirty"). See the comments above the
IRDirty type declaration for more information.
@@ -1668,9 +1697,10 @@
extern IRStmt* IRStmt_PutI ( IRRegArray* descr, IRExpr* ix, Int bias,
IRExpr* data );
extern IRStmt* IRStmt_WrTmp ( IRTemp tmp, IRExpr* data );
-extern IRStmt* IRStmt_Store ( IREndness end,
- IRTemp resSC, IRExpr* addr, IRExpr* data );
+extern IRStmt* IRStmt_Store ( IREndness end, IRExpr* addr, IRExpr* data );
extern IRStmt* IRStmt_CAS ( IRCAS* details );
+extern IRStmt* IRStmt_LLSC ( IREndness end, IRTemp result,
+ IRExpr* addr, IRExpr* storedata );
extern IRStmt* IRStmt_Dirty ( IRDirty* details );
extern IRStmt* IRStmt_MBE ( IRMBusEvent event );
extern IRStmt* IRStmt_Exit ( IRExpr* guard, IRJumpKind jk, IRConst* dst );
|
|
From: Nicholas N. <n.n...@gm...> - 2009-11-26 10:25:03
|
Nightly build on ocean ( Ubuntu 9.04, x86_64 )
Started at 2009-11-26 02:00:01 PST
Ended at 2009-11-26 02:24:43 PST
Results differ from 24 hours ago
Checking out valgrind source tree ... done
Configuring valgrind ... done
Building valgrind ... done
Running regression tests ... failed
Regression test results follow
== 537 tests, 3 stderr failures, 0 stdout failures, 0 post failures ==
memcheck/tests/x86-linux/scalar (stderr)
helgrind/tests/pth_spinlock (stderr)
helgrind/tests/tc06_two_races_xml (stderr)
=================================================
== Results from 24 hours ago ==
=================================================
Checking out valgrind source tree ... done
Configuring valgrind ... done
Building valgrind ... done
Running regression tests ... failed
Regression test results follow
== 537 tests, 2 stderr failures, 0 stdout failures, 0 post failures ==
helgrind/tests/pth_spinlock (stderr)
helgrind/tests/tc06_two_races_xml (stderr)
=================================================
== Difference between 24 hours ago and now ==
=================================================
*** old.short 2009-11-26 02:12:28.000000000 -0800
--- new.short 2009-11-26 02:24:43.000000000 -0800
***************
*** 8,10 ****
! == 537 tests, 2 stderr failures, 0 stdout failures, 0 post failures ==
helgrind/tests/pth_spinlock (stderr)
--- 8,11 ----
! == 537 tests, 3 stderr failures, 0 stdout failures, 0 post failures ==
! memcheck/tests/x86-linux/scalar (stderr)
helgrind/tests/pth_spinlock (stderr)
=================================================
./valgrind-new/helgrind/tests/pth_spinlock.stderr.diff
=================================================
--- pth_spinlock.stderr.exp 2009-11-26 02:12:42.000000000 -0800
+++ pth_spinlock.stderr.out 2009-11-26 02:22:08.000000000 -0800
@@ -1,2 +1,13 @@
Start of test.
+Thread #x was created
+ ...
+ by 0x........: pthread_create@* (hg_intercepts.c:...)
+ by 0x........: main (pth_spinlock.c:46)
+
+Thread #x: Bug in libpthread: recursive write lock granted on mutex/wrlock which does not support recursion
+ at 0x........: pthread_spin_lock (hg_intercepts.c:...)
+ by 0x........: thread_func (pth_spinlock.c:27)
+ by 0x........: mythread_wrapper (hg_intercepts.c:...)
+ ...
+
Test successful.
=================================================
./valgrind-new/helgrind/tests/tc06_two_races_xml.stderr.diff
=================================================
--- tc06_two_races_xml.stderr.exp 2009-11-26 02:12:42.000000000 -0800
+++ tc06_two_races_xml.stderr.out 2009-11-26 02:22:14.000000000 -0800
@@ -29,12 +29,12 @@
</status>
<announcethread>
- <hthreadid>1</threadid>
+ <hthreadid>1</hthreadid>
<isrootthread></isrootthread>
</announcethread>
<announcethread>
- <hthreadid>2</threadid>
+ <hthreadid>2</hthreadid>
<stack>
<frame>
<ip>0x........</ip>
@@ -44,11 +44,6 @@
<frame>
<ip>0x........</ip>
<obj>...</obj>
- <fn>do_clone</fn>
- </frame>
- <frame>
- <ip>0x........</ip>
- <obj>...</obj>
<fn>pthread_create@@GLIBC_2.2.5</fn>
</frame>
<frame>
@@ -294,6 +289,7 @@
<xauxwhat><text>declared at tc06_two_races.c:9</text> <file>tc06_two_races.c</file> <line>...</line> </xauxwhat>
</error>
+
<status>
<state>FINISHED</state>
<time>...</time>
=================================================
./valgrind-new/memcheck/tests/x86-linux/scalar.stderr.diff
=================================================
--- scalar.stderr.exp 2009-11-26 02:12:54.000000000 -0800
+++ scalar.stderr.out 2009-11-26 02:20:11.000000000 -0800
@@ -1863,10 +1863,10 @@
Syscall param pread64(count) contains uninitialised byte(s)
...
-Syscall param pread64(offset_low32) contains uninitialised byte(s)
+Syscall param pread64(offset_low) contains uninitialised byte(s)
...
-Syscall param pread64(offset_high32) contains uninitialised byte(s)
+Syscall param pread64(offset_high) contains uninitialised byte(s)
...
Syscall param pread64(buf) points to unaddressable byte(s)
@@ -1885,10 +1885,10 @@
Syscall param pwrite64(count) contains uninitialised byte(s)
...
-Syscall param pwrite64(offset_low32) contains uninitialised byte(s)
+Syscall param pwrite64(offset_low) contains uninitialised byte(s)
...
-Syscall param pwrite64(offset_high32) contains uninitialised byte(s)
+Syscall param pwrite64(offset_high) contains uninitialised byte(s)
...
Syscall param pwrite64(buf) points to unaddressable byte(s)
@@ -2073,10 +2073,10 @@
Syscall param truncate64(path) contains uninitialised byte(s)
...
-Syscall param truncate64(length_low32) contains uninitialised byte(s)
+Syscall param truncate64(length_low) contains uninitialised byte(s)
...
-Syscall param truncate64(length_high32) contains uninitialised byte(s)
+Syscall param truncate64(length_high) contains uninitialised byte(s)
...
Syscall param truncate64(path) points to unaddressable byte(s)
@@ -2089,10 +2089,10 @@
Syscall param ftruncate64(fd) contains uninitialised byte(s)
...
-Syscall param ftruncate64(length_low32) contains uninitialised byte(s)
+Syscall param ftruncate64(length_low) contains uninitialised byte(s)
...
-Syscall param ftruncate64(length_high32) contains uninitialised byte(s)
+Syscall param ftruncate64(length_high) contains uninitialised byte(s)
...
-----------------------------------------------------
@@ -2874,10 +2874,10 @@
-----------------------------------------------------
253: __NR_lookup_dcookie 4s 1m
-----------------------------------------------------
-Syscall param lookup_dcookie(cookie_low32) contains uninitialised byte(s)
+Syscall param lookup_dcookie(cookie_low) contains uninitialised byte(s)
...
-Syscall param lookup_dcookie(cookie_high32) contains uninitialised byte(s)
+Syscall param lookup_dcookie(cookie_high) contains uninitialised byte(s)
...
Syscall param lookup_dcookie(buf) contains uninitialised byte(s)
=================================================
./valgrind-old/helgrind/tests/pth_spinlock.stderr.diff
=================================================
--- pth_spinlock.stderr.exp 2009-11-26 02:00:24.000000000 -0800
+++ pth_spinlock.stderr.out 2009-11-26 02:09:56.000000000 -0800
@@ -1,2 +1,24 @@
Start of test.
+Thread #x was created
+ ...
+ by 0x........: pthread_create@* (hg_intercepts.c:...)
+ by 0x........: main (pth_spinlock.c:46)
+
+Thread #x: Bug in libpthread: recursive write lock granted on mutex/wrlock which does not support recursion
+ at 0x........: pthread_spin_lock (hg_intercepts.c:...)
+ by 0x........: thread_func (pth_spinlock.c:27)
+ by 0x........: mythread_wrapper (hg_intercepts.c:...)
+ ...
+
+Thread #x was created
+ ...
+ by 0x........: pthread_create@* (hg_intercepts.c:...)
+ by 0x........: main (pth_spinlock.c:46)
+
+Thread #x: Bug in libpthread: recursive write lock granted on mutex/wrlock which does not support recursion
+ at 0x........: pthread_spin_lock (hg_intercepts.c:...)
+ by 0x........: thread_func (pth_spinlock.c:27)
+ by 0x........: mythread_wrapper (hg_intercepts.c:...)
+ ...
+
Test successful.
=================================================
./valgrind-old/helgrind/tests/tc06_two_races_xml.stderr.diff
=================================================
--- tc06_two_races_xml.stderr.exp 2009-11-26 02:00:24.000000000 -0800
+++ tc06_two_races_xml.stderr.out 2009-11-26 02:10:01.000000000 -0800
@@ -29,12 +29,12 @@
</status>
<announcethread>
- <hthreadid>1</threadid>
+ <hthreadid>1</hthreadid>
<isrootthread></isrootthread>
</announcethread>
<announcethread>
- <hthreadid>2</threadid>
+ <hthreadid>2</hthreadid>
<stack>
<frame>
<ip>0x........</ip>
@@ -44,11 +44,6 @@
<frame>
<ip>0x........</ip>
<obj>...</obj>
- <fn>do_clone</fn>
- </frame>
- <frame>
- <ip>0x........</ip>
- <obj>...</obj>
<fn>pthread_create@@GLIBC_2.2.5</fn>
</frame>
<frame>
@@ -294,6 +289,7 @@
<xauxwhat><text>declared at tc06_two_races.c:9</text> <file>tc06_two_races.c</file> <line>...</line> </xauxwhat>
</error>
+
<status>
<state>FINISHED</state>
<time>...</time>
|
|
From: Alexander P. <gl...@go...> - 2009-11-26 09:39:26
|
Nightly build on mcgrind ( Darwin 9.7.0 i386 ) Started at 2009-11-26 09:06:01 MSK Ended at 2009-11-26 09:24:58 MSK Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 433 tests, 22 stderr failures, 1 stdout failure, 0 post failures == memcheck/tests/null_socket (stdout) memcheck/tests/origin5-bz2 (stderr) memcheck/tests/varinfo1 (stderr) memcheck/tests/varinfo2 (stderr) memcheck/tests/varinfo3 (stderr) memcheck/tests/varinfo4 (stderr) memcheck/tests/varinfo5 (stderr) memcheck/tests/varinfo6 (stderr) none/tests/async-sigs (stderr) none/tests/faultstatus (stderr) none/tests/pth_blockedsig (stderr) helgrind/tests/hg03_inherit (stderr) helgrind/tests/hg04_race (stderr) helgrind/tests/hg05_race2 (stderr) helgrind/tests/rwlock_race (stderr) helgrind/tests/tc01_simple_race (stderr) helgrind/tests/tc05_simple_race (stderr) helgrind/tests/tc06_two_races (stderr) helgrind/tests/tc06_two_races_xml (stderr) helgrind/tests/tc16_byterace (stderr) helgrind/tests/tc18_semabuse (stderr) helgrind/tests/tc21_pthonce (stderr) helgrind/tests/tc23_bogus_condwait (stderr) -- Alexander Potapenko Software Engineer Google Moscow |
|
From: Bart V. A. <bar...@gm...> - 2009-11-26 08:27:15
|
Nightly build on cellbuzz-native ( cellbuzz, ppc64, Fedora 7, native ) Started at 2009-11-26 02:00:05 EST Ended at 2009-11-26 03:26:55 EST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... done Regression test results follow == 449 tests, 45 stderr failures, 10 stdout failures, 0 post failures == memcheck/tests/deep_templates (stdout) memcheck/tests/leak-cases-full (stderr) memcheck/tests/leak-cases-summary (stderr) memcheck/tests/leak-cycle (stderr) memcheck/tests/linux/timerfd-syscall (stdout) memcheck/tests/linux-syscalls-2007 (stderr) memcheck/tests/origin5-bz2 (stderr) memcheck/tests/partiallydefinedeq (stderr) memcheck/tests/varinfo1 (stderr) memcheck/tests/varinfo2 (stderr) memcheck/tests/varinfo3 (stderr) memcheck/tests/varinfo4 (stderr) memcheck/tests/varinfo5 (stderr) memcheck/tests/varinfo6 (stderr) memcheck/tests/wrap8 (stdout) memcheck/tests/wrap8 (stderr) none/tests/empty-exe (stderr) none/tests/linux/mremap (stderr) none/tests/ppc32/jm-fp (stdout) none/tests/ppc32/jm-vmx (stdout) none/tests/ppc32/round (stdout) none/tests/ppc32/test_gx (stdout) none/tests/ppc64/jm-fp (stdout) none/tests/ppc64/jm-vmx (stdout) none/tests/ppc64/round (stdout) none/tests/shell_valid2 (stderr) none/tests/shell_valid3 (stderr) none/tests/shell_zerolength (stderr) helgrind/tests/hg05_race2 (stderr) helgrind/tests/tc06_two_races_xml (stderr) helgrind/tests/tc22_exit_w_lock (stderr) helgrind/tests/tc23_bogus_condwait (stderr) drd/tests/tc23_bogus_condwait (stderr) exp-ptrcheck/tests/bad_percentify (stderr) exp-ptrcheck/tests/base (stderr) exp-ptrcheck/tests/ccc (stderr) exp-ptrcheck/tests/fp (stderr) exp-ptrcheck/tests/globalerr (stderr) exp-ptrcheck/tests/hackedbz2 (stderr) exp-ptrcheck/tests/hp_bounds (stderr) exp-ptrcheck/tests/hp_dangle (stderr) exp-ptrcheck/tests/hsg (stderr) exp-ptrcheck/tests/justify (stderr) exp-ptrcheck/tests/partial_bad (stderr) 
exp-ptrcheck/tests/partial_good (stderr) exp-ptrcheck/tests/preen_invars (stderr) exp-ptrcheck/tests/pth_create (stderr) exp-ptrcheck/tests/pth_specific (stderr) exp-ptrcheck/tests/realloc (stderr) exp-ptrcheck/tests/stackerr (stderr) exp-ptrcheck/tests/strcpy (stderr) exp-ptrcheck/tests/supp (stderr) exp-ptrcheck/tests/tricky (stderr) exp-ptrcheck/tests/unaligned (stderr) exp-ptrcheck/tests/zero (stderr) |
|
From: Tom H. <th...@cy...> - 2009-11-26 03:49:35
|
Nightly build on lloyd ( x86_64, Fedora 7 ) Started at 2009-11-26 03:05:04 GMT Ended at 2009-11-26 03:49:08 GMT Results differ from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 531 tests, 2 stderr failures, 0 stdout failures, 0 post failures == memcheck/tests/x86-linux/scalar (stderr) helgrind/tests/tc06_two_races_xml (stderr) ================================================= == Results from 24 hours ago == ================================================= Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 531 tests, 1 stderr failure, 0 stdout failures, 0 post failures == helgrind/tests/tc06_two_races_xml (stderr) ================================================= == Difference between 24 hours ago and now == ================================================= *** old.short Thu Nov 26 03:27:21 2009 --- new.short Thu Nov 26 03:49:08 2009 *************** *** 8,10 **** ! == 531 tests, 1 stderr failure, 0 stdout failures, 0 post failures == helgrind/tests/tc06_two_races_xml (stderr) --- 8,11 ---- ! == 531 tests, 2 stderr failures, 0 stdout failures, 0 post failures == ! memcheck/tests/x86-linux/scalar (stderr) helgrind/tests/tc06_two_races_xml (stderr) |
|
From: Tom H. <th...@cy...> - 2009-11-26 03:36:06
|
Nightly build on mg ( x86_64, Fedora 9 ) Started at 2009-11-26 03:10:03 GMT Ended at 2009-11-26 03:35:47 GMT Results differ from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 538 tests, 2 stderr failures, 0 stdout failures, 0 post failures == memcheck/tests/x86-linux/scalar (stderr) helgrind/tests/tc06_two_races_xml (stderr) ================================================= == Results from 24 hours ago == ================================================= Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 538 tests, 1 stderr failure, 0 stdout failures, 0 post failures == helgrind/tests/tc06_two_races_xml (stderr) ================================================= == Difference between 24 hours ago and now == ================================================= *** old.short Thu Nov 26 03:22:56 2009 --- new.short Thu Nov 26 03:35:47 2009 *************** *** 8,10 **** ! == 538 tests, 1 stderr failure, 0 stdout failures, 0 post failures == helgrind/tests/tc06_two_races_xml (stderr) --- 8,11 ---- ! == 538 tests, 2 stderr failures, 0 stdout failures, 0 post failures == ! memcheck/tests/x86-linux/scalar (stderr) helgrind/tests/tc06_two_races_xml (stderr) |