You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
1
(11) |
2
(9) |
3
(11) |
4
(12) |
5
(11) |
|
6
(9) |
7
(13) |
8
(6) |
9
(7) |
10
(7) |
11
(11) |
12
(13) |
|
13
(7) |
14
(6) |
15
(7) |
16
(19) |
17
(20) |
18
(9) |
19
(9) |
|
20
(6) |
21
(7) |
22
(11) |
23
(16) |
24
(14) |
25
(24) |
26
(16) |
|
27
(20) |
28
(58) |
29
(7) |
30
(10) |
31
(15) |
|
|
|
From: <sv...@va...> - 2006-08-28 13:32:50
|
Author: sewardj
Date: 2006-08-28 14:32:48 +0100 (Mon, 28 Aug 2006)
New Revision: 1650
Log:
Merge r1640,1 (ppc cmp reg,reg fix)
Modified:
branches/VEX_3_2_BRANCH/priv/guest-ppc/toIR.c
Modified: branches/VEX_3_2_BRANCH/priv/guest-ppc/toIR.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/guest-ppc/toIR.c 2006-08-28 13:30:48 UTC=
(rev 1649)
+++ branches/VEX_3_2_BRANCH/priv/guest-ppc/toIR.c 2006-08-28 13:32:48 UTC=
(rev 1650)
@@ -2936,6 +2936,13 @@
switch (opc2) {
case 0x000: // cmp (Compare, PPC32 p367)
DIP("cmp cr%u,%u,r%u,r%u\n", crfD, flag_L, rA_addr, rB_addr);
+ /* Comparing a reg with itself produces a result which
+ doesn't depend on the contents of the reg. Therefore
+ remove the false dependency, which has been known to cause
+ memcheck to produce false errors. */
+ if (rA_addr =3D=3D rB_addr)
+ a =3D b =3D typeOfIRExpr(irbb->tyenv,a) =3D=3D Ity_I64
+ ? mkU64(0) : mkU32(0);
if (flag_L =3D=3D 1) {
putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64S, a, b)));
} else {
@@ -2948,6 +2955,13 @@
=20
case 0x020: // cmpl (Compare Logical, PPC32 p369)
DIP("cmpl cr%u,%u,r%u,r%u\n", crfD, flag_L, rA_addr, rB_addr);
+ /* Comparing a reg with itself produces a result which
+ doesn't depend on the contents of the reg. Therefore
+ remove the false dependency, which has been known to cause
+ memcheck to produce false errors. */
+ if (rA_addr =3D=3D rB_addr)
+ a =3D b =3D typeOfIRExpr(irbb->tyenv,a) =3D=3D Ity_I64
+ ? mkU64(0) : mkU32(0);
if (flag_L =3D=3D 1) {
putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64U, a, b)));
} else {
|
|
From: <sv...@va...> - 2006-08-28 13:30:51
|
Author: sewardj
Date: 2006-08-28 14:30:48 +0100 (Mon, 28 Aug 2006)
New Revision: 1649
Log:
Merge r1639 (amd64->IR: 0xF0 0x48 0xF 0xC7 (cmpxchg8b))
Modified:
branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
Modified: branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:28:48 U=
TC (rev 1648)
+++ branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:30:48 U=
TC (rev 1649)
@@ -7136,9 +7136,10 @@
//.. //--=20
=20
static
-ULong dis_cmpxchg_G_E ( Prefix pfx,
- Int size,=20
- Long delta0 )
+ULong dis_cmpxchg_G_E ( /*OUT*/Bool* ok,
+ Prefix pfx,
+ Int size,=20
+ Long delta0 )
{
HChar dis_buf[50];
Int len;
@@ -7154,7 +7155,9 @@
UChar rm =3D getUChar(delta0);
=20
if (epartIsReg(rm)) {
- vassert(0); /* awaiting test case */
+ *ok =3D False;
+ return delta0;
+ /* awaiting test case */
assign( dest, getIRegE(size, pfx, rm) );
delta0++;
DIP("cmpxchg%c %s,%s\n", nameISize(size),
@@ -7182,10 +7185,126 @@
storeLE( mkexpr(addr), mkexpr(dest2) );
}
=20
+ *ok =3D True;
return delta0;
}
=20
+static
+ULong dis_cmpxchg8b ( /*OUT*/Bool* ok,
+ Prefix pfx,
+ Int sz,=20
+ Long delta0 )
+{
+ HChar dis_buf[50];
+ Int len;
=20
+ IRType ty =3D szToITy(sz);
+ IRTemp eq =3D newTemp(Ity_I8);
+ IRTemp olda =3D newTemp(ty);
+ IRTemp oldb =3D newTemp(ty);
+ IRTemp oldc =3D newTemp(ty);
+ IRTemp oldd =3D newTemp(ty);
+ IRTemp newa =3D newTemp(Ity_I64);
+ IRTemp newd =3D newTemp(Ity_I64);
+ IRTemp oldml =3D newTemp(ty);
+ IRTemp oldmh =3D newTemp(ty);
+ IRTemp newml =3D newTemp(ty);
+ IRTemp newmh =3D newTemp(ty);
+ IRTemp addr =3D IRTemp_INVALID;
+ IRTemp oldrf =3D newTemp(Ity_I64);
+ IRTemp newrf =3D newTemp(Ity_I64);
+ UChar rm =3D getUChar(delta0);
+ vassert(sz =3D=3D 4 || sz =3D=3D 8); /* guaranteed by caller */
+
+ if (epartIsReg(rm)) {
+ *ok =3D False;
+ return delta0;
+ }
+
+ addr =3D disAMode ( &len, pfx, delta0, dis_buf, 0 );
+ delta0 +=3D len;
+ DIP("cmpxchg%s %s\n", sz =3D=3D 4 ? "8" : "16", dis_buf);
+
+ if (sz =3D=3D 4) {
+ assign( olda, getIReg32( R_RAX ) );
+ assign( oldb, getIReg32( R_RBX ) );
+ assign( oldc, getIReg32( R_RCX ) );
+ assign( oldd, getIReg32( R_RDX ) );
+ assign( oldml, loadLE( Ity_I32, mkexpr(addr) ));
+ assign( oldmh, loadLE( Ity_I32,=20
+ binop(Iop_Add64,mkexpr(addr),mkU64(4)) ));
+ assign(eq,=20
+ unop(Iop_1Uto8,
+ binop(Iop_CmpEQ32,=20
+ binop(Iop_Or32,
+ binop(Iop_Xor32,mkexpr(olda),mkexpr(oldml)),
+ binop(Iop_Xor32,mkexpr(oldd),mkexpr(oldmh))),
+ mkU32(0))));
+ assign( newml, IRExpr_Mux0X(mkexpr(eq),mkexpr(oldml),mkexpr(oldb))=
);
+ assign( newmh, IRExpr_Mux0X(mkexpr(eq),mkexpr(oldmh),mkexpr(oldc))=
);
+ assign( newa, IRExpr_Mux0X(mkexpr(eq),
+ unop(Iop_32Uto64,mkexpr(oldml)),
+ getIRegRAX(8)) );
+ assign( newd, IRExpr_Mux0X(mkexpr(eq),
+ unop(Iop_32Uto64,mkexpr(oldmh)),
+ getIRegRDX(8)) );
+
+ storeLE( mkexpr(addr), mkexpr(newml) );
+ storeLE( binop(Iop_Add64,mkexpr(addr),mkU64(4)),
+ mkexpr(newmh) );
+ putIRegRAX( 8, mkexpr(newa) );
+ putIRegRDX( 8, mkexpr(newd) );
+ } else {
+ assign( olda, getIReg64( R_RAX ) );
+ assign( oldb, getIReg64( R_RBX ) );
+ assign( oldc, getIReg64( R_RCX ) );
+ assign( oldd, getIReg64( R_RDX ) );
+ assign( oldml, loadLE( Ity_I64, mkexpr(addr) ));
+ assign( oldmh, loadLE( Ity_I64,=20
+ binop(Iop_Add64,mkexpr(addr),mkU64(8)) ));
+ assign(eq,=20
+ unop(Iop_1Uto8,
+ binop(Iop_CmpEQ64,=20
+ binop(Iop_Or64,
+ binop(Iop_Xor64,mkexpr(olda),mkexpr(oldml)),
+ binop(Iop_Xor64,mkexpr(oldd),mkexpr(oldmh))),
+ mkU64(0))));
+ assign( newml, IRExpr_Mux0X(mkexpr(eq),mkexpr(oldml),mkexpr(oldb))=
);
+ assign( newmh, IRExpr_Mux0X(mkexpr(eq),mkexpr(oldmh),mkexpr(oldc))=
);
+ assign( newa, IRExpr_Mux0X(mkexpr(eq),mkexpr(oldml),mkexpr(olda))=
);
+ assign( newd, IRExpr_Mux0X(mkexpr(eq),mkexpr(oldmh),mkexpr(oldd))=
);
+
+ storeLE( mkexpr(addr), mkexpr(newml) );
+ storeLE( binop(Iop_Add64,mkexpr(addr),mkU64(8)),
+ mkexpr(newmh) );
+ putIRegRAX( 8, mkexpr(newa) );
+ putIRegRDX( 8, mkexpr(newd) );
+ }
+
+ /* And set the flags. Z is set if original d:a =3D=3D mem, else
+ cleared. All others unchanged. (This is different from normal
+ cmpxchg which just sets them according to SUB.). */
+ assign( oldrf, binop(Iop_And64,=20
+ mk_amd64g_calculate_rflags_all(),
+ mkU64(~AMD64G_CC_MASK_Z)) );
+ assign( newrf,
+ binop(Iop_Or64,
+ mkexpr(oldrf),
+ binop(Iop_Shl64,=20
+ binop(Iop_And64, unop(Iop_8Uto64, mkexpr(eq)), mkU64(1)),
+ mkU8(AMD64G_CC_SHIFT_Z))
+ ));
+ stmt( IRStmt_Put( OFFB_CC_OP, mkU64(AMD64G_CC_OP_COPY) ));
+ stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+ stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(newrf) ));
+ /* Set NDEP even though it isn't used. This makes redundant-PUT
+ elimination of previous stores to this field work better. */
+ stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+
+ *ok =3D True;
+ return delta0;
+}
+
//.. //-- static
//.. //-- Addr dis_cmpxchg8b ( UCodeBlock* cb,=20
//.. //-- UChar sorb,
@@ -13750,14 +13869,22 @@
//.. case 0xB0: /* CMPXCHG Gb,Eb */
//.. delta =3D dis_cmpxchg_G_E ( sorb, 1, delta );
//.. break;
- case 0xB1: /* CMPXCHG Gv,Ev */
+ case 0xB1: { /* CMPXCHG Gv,Ev (allowed in 16,32,64 bit) */
+ Bool ok =3D True;
if (haveF2orF3(pfx)) goto decode_failure;
- delta =3D dis_cmpxchg_G_E ( pfx, sz, delta );
+ if (sz !=3D 2 && sz !=3D 4 && sz !=3D 8) goto decode_failure;
+ delta =3D dis_cmpxchg_G_E ( &ok, pfx, sz, delta );
+ if (!ok) goto decode_failure;
break;
-//.. //-- case 0xC7: /* CMPXCHG8B Gv */
-//.. //-- eip =3D dis_cmpxchg8b ( cb, sorb, eip );
-//.. //-- break;
-//.. //--=20
+ }
+ case 0xC7: { /* CMPXCHG8B Ev, CMPXCHG16B Ev */
+ Bool ok =3D True;
+ if (have66orF2orF3(pfx)) goto decode_failure;
+ if (sz !=3D 4 && sz !=3D 8) goto decode_failure;
+ delta =3D dis_cmpxchg8b ( &ok, pfx, sz, delta );
+ break;
+ }
+
/* =3D-=3D-=3D-=3D-=3D-=3D-=3D-=3D-=3D- CPUID -=3D-=3D-=3D-=3D-=3D=
-=3D-=3D-=3D-=3D-=3D-=3D */
=20
case 0xA2: { /* CPUID */
|
|
From: <sv...@va...> - 2006-08-28 13:28:54
|
Author: sewardj
Date: 2006-08-28 14:28:48 +0100 (Mon, 28 Aug 2006)
New Revision: 1648
Log:
Merge r1638 (Programs with long sequences of bswap[l,q]s)
Modified:
branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
branches/VEX_3_2_BRANCH/priv/main/vex_util.c
Modified: branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:24:08 U=
TC (rev 1647)
+++ branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:28:48 U=
TC (rev 1648)
@@ -13631,7 +13631,7 @@
binop(Iop_Or32,
binop(Iop_Shl32, mkexpr(t1), mkU8(24)),
binop(Iop_Or32,
- binop(Iop_And32, binop(Iop_Shl32, mkexpr(t1), mkU8(8))=
,=20
+ binop(Iop_And32, binop(Iop_Shl32, mkexpr(t1), mkU8(8))=
,
mkU32(0x00FF0000)),
binop(Iop_Or32,
binop(Iop_And32, binop(Iop_Shr32, mkexpr(t1), mkU8(8))=
,
@@ -13645,33 +13645,50 @@
break;
}
else if (sz =3D=3D 8) {
+ IRTemp m8 =3D newTemp(Ity_I64);
+ IRTemp s8 =3D newTemp(Ity_I64);
+ IRTemp m16 =3D newTemp(Ity_I64);
+ IRTemp s16 =3D newTemp(Ity_I64);
+ IRTemp m32 =3D newTemp(Ity_I64);
t1 =3D newTemp(Ity_I64);
t2 =3D newTemp(Ity_I64);
assign( t1, getIRegRexB(8, pfx, opc-0xC8) );
=20
-# define LANE(_nn) \
- binop( Iop_Shl64, \
- binop( Iop_And64, \
- binop(Iop_Shr64, mkexpr(t1), \
- mkU8(8 * (7 - (_nn)))), \
- mkU64(0xFF)), \
- mkU8(8 * (_nn)))
+ assign( m8, mkU64(0xFF00FF00FF00FF00ULL) );
+ assign( s8,
+ binop(Iop_Or64,
+ binop(Iop_Shr64,
+ binop(Iop_And64,mkexpr(t1),mkexpr(m8)),
+ mkU8(8)),
+ binop(Iop_And64,
+ binop(Iop_Shl64,mkexpr(t1),mkU8(8)),
+ mkexpr(m8))
+ )=20
+ );
=20
- assign(=20
- t2,
- binop(Iop_Or64,
- binop(Iop_Or64,
- binop(Iop_Or64,LANE(0),LANE(1)),
- binop(Iop_Or64,LANE(2),LANE(3))
- ),
- binop(Iop_Or64,
- binop(Iop_Or64,LANE(4),LANE(5)),
- binop(Iop_Or64,LANE(6),LANE(7))
- )
- )
- );
+ assign( m16, mkU64(0xFFFF0000FFFF0000ULL) );
+ assign( s16,
+ binop(Iop_Or64,
+ binop(Iop_Shr64,
+ binop(Iop_And64,mkexpr(s8),mkexpr(m16)),
+ mkU8(16)),
+ binop(Iop_And64,
+ binop(Iop_Shl64,mkexpr(s8),mkU8(16)),
+ mkexpr(m16))
+ )=20
+ );
=20
-# undef LANE
+ assign( m32, mkU64(0xFFFFFFFF00000000ULL) );
+ assign( t2,
+ binop(Iop_Or64,
+ binop(Iop_Shr64,
+ binop(Iop_And64,mkexpr(s16),mkexpr(m32))=
,
+ mkU8(32)),
+ binop(Iop_And64,
+ binop(Iop_Shl64,mkexpr(s16),mkU8(32)),
+ mkexpr(m32))
+ )=20
+ );
=20
putIRegRexB(8, pfx, opc-0xC8, mkexpr(t2));
DIP("bswapq %s\n", nameIRegRexB(8, pfx, opc-0xC8));
Modified: branches/VEX_3_2_BRANCH/priv/main/vex_util.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/main/vex_util.c 2006-08-28 13:24:08 UTC =
(rev 1647)
+++ branches/VEX_3_2_BRANCH/priv/main/vex_util.c 2006-08-28 13:28:48 UTC =
(rev 1648)
@@ -62,7 +62,7 @@
MByte/sec. Once the size increases enough to fall out of the cache
into memory, the rate falls by about a factor of 3.=20
*/
-#define N_TEMPORARY_BYTES 2400000
+#define N_TEMPORARY_BYTES 4000000
=20
static HChar temporary[N_TEMPORARY_BYTES] __attribute__((aligned(8)));
static HChar* temporary_first =3D &temporary[0];
|
|
From: <sv...@va...> - 2006-08-28 13:24:11
|
Author: sewardj
Date: 2006-08-28 14:24:08 +0100 (Mon, 28 Aug 2006)
New Revision: 1647
Log:
Merge r1637 (amd64 insn printing fix)
Modified:
branches/VEX_3_2_BRANCH/priv/host-amd64/isel.c
Modified: branches/VEX_3_2_BRANCH/priv/host-amd64/isel.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/host-amd64/isel.c 2006-08-28 13:22:14 UT=
C (rev 1646)
+++ branches/VEX_3_2_BRANCH/priv/host-amd64/isel.c 2006-08-28 13:24:08 UT=
C (rev 1647)
@@ -172,7 +172,7 @@
{
addHInstr(env->code, instr);
if (vex_traceflags & VEX_TRACE_VCODE) {
- ppAMD64Instr(instr, False);
+ ppAMD64Instr(instr, True);
vex_printf("\n");
}
}
|
|
From: <sv...@va...> - 2006-08-28 13:22:21
|
Author: sewardj
Date: 2006-08-28 14:22:14 +0100 (Mon, 28 Aug 2006)
New Revision: 1646
Log:
Merge r1635,6 (SSE3 support for x86 and amd64)
Modified:
branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
branches/VEX_3_2_BRANCH/priv/guest-x86/toIR.c
Modified: branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:19:06 U=
TC (rev 1645)
+++ branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:22:14 U=
TC (rev 1646)
@@ -5127,6 +5127,13 @@
loadLE(Ity_I32, mkexpr(addr))));
break;
=20
+ case 1: /* FISTTPL m32 (SSE3) */
+ DIP("fisttpl %s\n", dis_buf);
+ storeLE( mkexpr(addr),=20
+ binop(Iop_F64toI32, mkU32(Irrm_ZERO), get_ST(0))=
);
+ fp_pop();
+ break;
+
case 2: /* FIST m32 */
DIP("fistl %s\n", dis_buf);
storeLE( mkexpr(addr),=20
@@ -5444,6 +5451,13 @@
put_ST(0, loadLE(Ity_F64, mkexpr(addr)));
break;
=20
+ case 1: /* FISTTPQ m64 (SSE3) */
+ DIP("fistppll %s\n", dis_buf);
+ storeLE( mkexpr(addr),=20
+ binop(Iop_F64toI64, mkU32(Irrm_ZERO), get_ST(0))=
);
+ fp_pop();
+ break;
+
case 2: /* FST double-real */
DIP("fstl %s\n", dis_buf);
storeLE(mkexpr(addr), get_ST(0));
@@ -5776,6 +5790,14 @@
loadLE(Ity_I16, mkexpr(addr)))));
break;
=20
+ case 1: /* FISTTPS m16 (SSE3) */
+ DIP("fisttps %s\n", dis_buf);
+ storeLE( mkexpr(addr),=20
+ unop(Iop_32to16,
+ binop(Iop_F64toI32, mkU32(Irrm_ZERO), get_S=
T(0))) );
+ fp_pop();
+ break;
+
//.. case 2: /* FIST m16 */
//.. DIP("fistp %s\n", dis_buf);
//.. storeLE( mkexpr(addr),=20
@@ -11745,11 +11767,255 @@
goto decode_success;
}
=20
-
/* ---------------------------------------------------- */
/* --- end of the SSE/SSE2 decoder. --- */
/* ---------------------------------------------------- */
=20
+ /* ---------------------------------------------------- */
+ /* --- start of the SSE3 decoder. --- */
+ /* ---------------------------------------------------- */
+
+ /* F3 0F 12 =3D MOVSLDUP -- move from E (mem or xmm) to G (xmm),
+ duplicating some lanes (2:2:0:0). */
+ /* F3 0F 16 =3D MOVSHDUP -- move from E (mem or xmm) to G (xmm),
+ duplicating some lanes (3:3:1:1). */
+ if (haveF3no66noF2(pfx) && sz =3D=3D 4
+ && insn[0] =3D=3D 0x0F && (insn[1] =3D=3D 0x12 || insn[1] =3D=3D =
0x16)) {
+ IRTemp s3, s2, s1, s0;
+ IRTemp sV =3D newTemp(Ity_V128);
+ Bool isH =3D insn[1] =3D=3D 0x16;
+ s3 =3D s2 =3D s1 =3D s0 =3D IRTemp_INVALID;
+
+ modrm =3D insn[2];
+ if (epartIsReg(modrm)) {
+ assign( sV, getXMMReg( eregOfRexRM(pfx,modrm)) );
+ DIP("movs%cdup %s,%s\n", isH ? 'h' : 'l',
+ nameXMMReg(eregOfRexRM(pfx,modrm)),
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+1;
+ } else {
+ addr =3D disAMode ( &alen, pfx, delta+2, dis_buf, 0 );
+ assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+ DIP("movs%cdup %s,%s\n", isH ? 'h' : 'l',
+ dis_buf,
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+alen;
+ }
+
+ breakup128to32s( sV, &s3, &s2, &s1, &s0 );
+ putXMMReg( gregOfRexRM(pfx,modrm),=20
+ isH ? mk128from32s( s3, s3, s1, s1 )
+ : mk128from32s( s2, s2, s0, s0 ) );
+ goto decode_success;
+ }
+
+ /* F2 0F 12 =3D MOVDDUP -- move from E (mem or xmm) to G (xmm),
+ duplicating some lanes (0:1:0:1). */
+ if (haveF2no66noF3(pfx) && sz =3D=3D 4=20
+ && insn[0] =3D=3D 0x0F && insn[1] =3D=3D 0x12) {
+ IRTemp sV =3D newTemp(Ity_V128);
+ IRTemp d0 =3D newTemp(Ity_I64);
+
+ modrm =3D insn[2];
+ if (epartIsReg(modrm)) {
+ assign( sV, getXMMReg( eregOfRexRM(pfx,modrm)) );
+ DIP("movddup %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+1;
+ assign ( d0, unop(Iop_V128to64, mkexpr(sV)) );
+ } else {
+ addr =3D disAMode ( &alen, pfx, delta+2, dis_buf, 0 );
+ assign( d0, loadLE(Ity_I64, mkexpr(addr)) );
+ DIP("movddup %s,%s\n", dis_buf,
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+alen;
+ }
+
+ putXMMReg( gregOfRexRM(pfx,modrm),=20
+ binop(Iop_64HLtoV128,mkexpr(d0),mkexpr(d0)) );
+ goto decode_success;
+ }
+
+ /* F2 0F D0 =3D ADDSUBPS -- 32x4 +/-/+/- from E (mem or xmm) to G (xm=
m). */
+ if (haveF2no66noF3(pfx) && sz =3D=3D 4=20
+ && insn[0] =3D=3D 0x0F && insn[1] =3D=3D 0xD0) {
+ IRTemp a3, a2, a1, a0, s3, s2, s1, s0;
+ IRTemp eV =3D newTemp(Ity_V128);
+ IRTemp gV =3D newTemp(Ity_V128);
+ IRTemp addV =3D newTemp(Ity_V128);
+ IRTemp subV =3D newTemp(Ity_V128);
+ a3 =3D a2 =3D a1 =3D a0 =3D s3 =3D s2 =3D s1 =3D s0 =3D IRTemp_INV=
ALID;
+
+ modrm =3D insn[2];
+ if (epartIsReg(modrm)) {
+ assign( eV, getXMMReg( eregOfRexRM(pfx,modrm)) );
+ DIP("addsubps %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+1;
+ } else {
+ addr =3D disAMode ( &alen, pfx, delta+2, dis_buf, 0 );
+ assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+ DIP("addsubps %s,%s\n", dis_buf,
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+alen;
+ }
+
+ assign( gV, getXMMReg(gregOfRexRM(pfx,modrm)) );
+
+ assign( addV, binop(Iop_Add32Fx4, mkexpr(gV), mkexpr(eV)) );
+ assign( subV, binop(Iop_Sub32Fx4, mkexpr(gV), mkexpr(eV)) );
+
+ breakup128to32s( addV, &a3, &a2, &a1, &a0 );
+ breakup128to32s( subV, &s3, &s2, &s1, &s0 );
+
+ putXMMReg( gregOfRexRM(pfx,modrm), mk128from32s( a3, s2, a1, s0 ))=
;
+ goto decode_success;
+ }
+
+ /* 66 0F D0 =3D ADDSUBPD -- 64x4 +/- from E (mem or xmm) to G (xmm). =
*/
+ if (have66noF2noF3(pfx) && sz =3D=3D 2=20
+ && insn[0] =3D=3D 0x0F && insn[1] =3D=3D 0xD0) {
+ IRTemp eV =3D newTemp(Ity_V128);
+ IRTemp gV =3D newTemp(Ity_V128);
+ IRTemp addV =3D newTemp(Ity_V128);
+ IRTemp subV =3D newTemp(Ity_V128);
+ IRTemp a1 =3D newTemp(Ity_I64);
+ IRTemp s0 =3D newTemp(Ity_I64);
+
+ modrm =3D insn[2];
+ if (epartIsReg(modrm)) {
+ assign( eV, getXMMReg( eregOfRexRM(pfx,modrm)) );
+ DIP("addsubpd %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+1;
+ } else {
+ addr =3D disAMode ( &alen, pfx, delta+2, dis_buf, 0 );
+ assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+ DIP("addsubpd %s,%s\n", dis_buf,
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+alen;
+ }
+
+ assign( gV, getXMMReg(gregOfRexRM(pfx,modrm)) );
+
+ assign( addV, binop(Iop_Add64Fx2, mkexpr(gV), mkexpr(eV)) );
+ assign( subV, binop(Iop_Sub64Fx2, mkexpr(gV), mkexpr(eV)) );
+
+ assign( a1, unop(Iop_V128HIto64, mkexpr(addV) ));
+ assign( s0, unop(Iop_V128to64, mkexpr(subV) ));
+
+ putXMMReg( gregOfRexRM(pfx,modrm),=20
+ binop(Iop_64HLtoV128, mkexpr(a1), mkexpr(s0)) );
+ goto decode_success;
+ }
+
+ /* F2 0F 7D =3D HSUBPS -- 32x4 sub across from E (mem or xmm) to G (x=
mm). */
+ /* F2 0F 7C =3D HADDPS -- 32x4 add across from E (mem or xmm) to G (x=
mm). */
+ if (haveF2no66noF3(pfx) && sz =3D=3D 4=20
+ && insn[0] =3D=3D 0x0F && (insn[1] =3D=3D 0x7C || insn[1] =3D=3D =
0x7D)) {
+ IRTemp e3, e2, e1, e0, g3, g2, g1, g0;
+ IRTemp eV =3D newTemp(Ity_V128);
+ IRTemp gV =3D newTemp(Ity_V128);
+ IRTemp leftV =3D newTemp(Ity_V128);
+ IRTemp rightV =3D newTemp(Ity_V128);
+ Bool isAdd =3D insn[1] =3D=3D 0x7C;
+ HChar* str =3D isAdd ? "add" : "sub";
+ e3 =3D e2 =3D e1 =3D e0 =3D g3 =3D g2 =3D g1 =3D g0 =3D IRTemp_INV=
ALID;
+
+ modrm =3D insn[2];
+ if (epartIsReg(modrm)) {
+ assign( eV, getXMMReg( eregOfRexRM(pfx,modrm)) );
+ DIP("h%sps %s,%s\n", str, nameXMMReg(eregOfRexRM(pfx,modrm)),
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+1;
+ } else {
+ addr =3D disAMode ( &alen, pfx, delta+2, dis_buf, 0 );
+ assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+ DIP("h%sps %s,%s\n", str, dis_buf,
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+alen;
+ }
+
+ assign( gV, getXMMReg(gregOfRexRM(pfx,modrm)) );
+
+ breakup128to32s( eV, &e3, &e2, &e1, &e0 );
+ breakup128to32s( gV, &g3, &g2, &g1, &g0 );
+
+ assign( leftV, mk128from32s( e2, e0, g2, g0 ) );
+ assign( rightV, mk128from32s( e3, e1, g3, g1 ) );
+
+ putXMMReg( gregOfRexRM(pfx,modrm),=20
+ binop(isAdd ? Iop_Add32Fx4 : Iop_Sub32Fx4,=20
+ mkexpr(leftV), mkexpr(rightV) ) );
+ goto decode_success;
+ }
+
+ /* 66 0F 7D =3D HSUBPD -- 64x2 sub across from E (mem or xmm) to G (x=
mm). */
+ /* 66 0F 7C =3D HADDPD -- 64x2 add across from E (mem or xmm) to G (x=
mm). */
+ if (have66noF2noF3(pfx) && sz =3D=3D 2=20
+ && insn[0] =3D=3D 0x0F && (insn[1] =3D=3D 0x7C || insn[1] =3D=3D =
0x7D)) {
+ IRTemp e1 =3D newTemp(Ity_I64);
+ IRTemp e0 =3D newTemp(Ity_I64);
+ IRTemp g1 =3D newTemp(Ity_I64);
+ IRTemp g0 =3D newTemp(Ity_I64);
+ IRTemp eV =3D newTemp(Ity_V128);
+ IRTemp gV =3D newTemp(Ity_V128);
+ IRTemp leftV =3D newTemp(Ity_V128);
+ IRTemp rightV =3D newTemp(Ity_V128);
+ Bool isAdd =3D insn[1] =3D=3D 0x7C;
+ HChar* str =3D isAdd ? "add" : "sub";
+
+ modrm =3D insn[2];
+ if (epartIsReg(modrm)) {
+ assign( eV, getXMMReg( eregOfRexRM(pfx,modrm)) );
+ DIP("h%spd %s,%s\n", str, nameXMMReg(eregOfRexRM(pfx,modrm)),
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+1;
+ } else {
+ addr =3D disAMode ( &alen, pfx, delta+2, dis_buf, 0 );
+ assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+ DIP("h%spd %s,%s\n", str, dis_buf,
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+alen;
+ }
+
+ assign( gV, getXMMReg(gregOfRexRM(pfx,modrm)) );
+
+ assign( e1, unop(Iop_V128HIto64, mkexpr(eV) ));
+ assign( e0, unop(Iop_V128to64, mkexpr(eV) ));
+ assign( g1, unop(Iop_V128HIto64, mkexpr(gV) ));
+ assign( g0, unop(Iop_V128to64, mkexpr(gV) ));
+
+ assign( leftV, binop(Iop_64HLtoV128, mkexpr(e0),mkexpr(g0)) );
+ assign( rightV, binop(Iop_64HLtoV128, mkexpr(e1),mkexpr(g1)) );
+
+ putXMMReg( gregOfRexRM(pfx,modrm),=20
+ binop(isAdd ? Iop_Add64Fx2 : Iop_Sub64Fx2,=20
+ mkexpr(leftV), mkexpr(rightV) ) );
+ goto decode_success;
+ }
+
+ /* F2 0F F0 =3D LDDQU -- move from E (mem or xmm) to G (xmm). */
+ if (haveF2no66noF3(pfx) && sz =3D=3D 4=20
+ && insn[0] =3D=3D 0x0F && insn[1] =3D=3D 0xF0) {
+ modrm =3D insn[2];
+ if (epartIsReg(modrm)) {
+ goto decode_failure;
+ } else {
+ addr =3D disAMode ( &alen, pfx, delta+2, dis_buf, 0 );
+ putXMMReg( gregOfRexRM(pfx,modrm),=20
+ loadLE(Ity_V128, mkexpr(addr)) );
+ DIP("lddqu %s,%s\n", dis_buf,
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ delta +=3D 2+alen;
+ }
+ goto decode_success;
+ }
+
+ /* ---------------------------------------------------- */
+ /* --- end of the SSE3 decoder. --- */
+ /* ---------------------------------------------------- */
+
/*after_sse_decoders:*/
=20
/* Get the primary opcode. */
Modified: branches/VEX_3_2_BRANCH/priv/guest-x86/toIR.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/guest-x86/toIR.c 2006-08-28 13:19:06 UTC=
(rev 1645)
+++ branches/VEX_3_2_BRANCH/priv/guest-x86/toIR.c 2006-08-28 13:22:14 UTC=
(rev 1646)
@@ -4274,6 +4274,13 @@
loadLE(Ity_I32, mkexpr(addr))));
break;
=20
+ case 1: /* FISTTPL m32 (SSE3) */
+ DIP("fisttpl %s\n", dis_buf);
+ storeLE( mkexpr(addr),=20
+ binop(Iop_F64toI32, mkU32(Irrm_ZERO), get_ST(0))=
);
+ fp_pop();
+ break;
+
case 2: /* FIST m32 */
DIP("fistl %s\n", dis_buf);
storeLE( mkexpr(addr),=20
@@ -4576,6 +4583,13 @@
put_ST(0, loadLE(Ity_F64, mkexpr(addr)));
break;
=20
+ case 1: /* FISTTPQ m64 (SSE3) */
+ DIP("fistppll %s\n", dis_buf);
+ storeLE( mkexpr(addr),=20
+ binop(Iop_F64toI64, mkU32(Irrm_ZERO), get_ST(0))=
);
+ fp_pop();
+ break;
+
case 2: /* FST double-real */
DIP("fstl %s\n", dis_buf);
storeLE(mkexpr(addr), get_ST(0));
@@ -4939,6 +4953,13 @@
loadLE(Ity_I16, mkexpr(addr)))));
break;
=20
+ case 1: /* FISTTPS m16 (SSE3) */
+ DIP("fisttps %s\n", dis_buf);
+ storeLE( mkexpr(addr),=20
+ binop(Iop_F64toI16, mkU32(Irrm_ZERO), get_ST(0))=
);
+ fp_pop();
+ break;
+
case 2: /* FIST m16 */
DIP("fistp %s\n", dis_buf);
storeLE( mkexpr(addr),=20
@@ -10603,6 +10624,31 @@
goto decode_success;
}
=20
+ /* F2 0F 12 =3D MOVDDUP -- move from E (mem or xmm) to G (xmm),
+ duplicating some lanes (0:1:0:1). */
+ if (sz =3D=3D 4 && insn[0] =3D=3D 0xF2 && insn[1] =3D=3D 0x0F && insn=
[2] =3D=3D 0x12) {
+ IRTemp sV =3D newTemp(Ity_V128);
+ IRTemp d0 =3D newTemp(Ity_I64);
+
+ modrm =3D insn[3];
+ if (epartIsReg(modrm)) {
+ assign( sV, getXMMReg( eregOfRM(modrm)) );
+ DIP("movddup %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+ nameXMMReg(gregOfRM(modrm)));
+ delta +=3D 3+1;
+ assign ( d0, unop(Iop_V128to64, mkexpr(sV)) );
+ } else {
+ addr =3D disAMode ( &alen, sorb, delta+3, dis_buf );
+ assign( d0, loadLE(Ity_I64, mkexpr(addr)) );
+ DIP("movddup %s,%s\n", dis_buf,
+ nameXMMReg(gregOfRM(modrm)));
+ delta +=3D 3+alen;
+ }
+
+ putXMMReg( gregOfRM(modrm), binop(Iop_64HLtoV128,mkexpr(d0),mkexpr=
(d0)) );
+ goto decode_success;
+ }
+
/* F2 0F D0 =3D ADDSUBPS -- 32x4 +/-/+/- from E (mem or xmm) to G (xm=
m). */
if (sz =3D=3D 4 && insn[0] =3D=3D 0xF2 && insn[1] =3D=3D 0x0F && insn=
[2] =3D=3D 0xD0) {
IRTemp a3, a2, a1, a0, s3, s2, s1, s0;
@@ -10638,6 +10684,143 @@
goto decode_success;
}
=20
+ /* 66 0F D0 =3D ADDSUBPD -- 64x4 +/- from E (mem or xmm) to G (xmm). =
*/
+ if (sz =3D=3D 2 && insn[0] =3D=3D 0x0F && insn[1] =3D=3D 0xD0) {
+ IRTemp eV =3D newTemp(Ity_V128);
+ IRTemp gV =3D newTemp(Ity_V128);
+ IRTemp addV =3D newTemp(Ity_V128);
+ IRTemp subV =3D newTemp(Ity_V128);
+ IRTemp a1 =3D newTemp(Ity_I64);
+ IRTemp s0 =3D newTemp(Ity_I64);
+
+ modrm =3D insn[2];
+ if (epartIsReg(modrm)) {
+ assign( eV, getXMMReg( eregOfRM(modrm)) );
+ DIP("addsubpd %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+ nameXMMReg(gregOfRM(modrm)));
+ delta +=3D 2+1;
+ } else {
+ addr =3D disAMode ( &alen, sorb, delta+2, dis_buf );
+ assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+ DIP("addsubpd %s,%s\n", dis_buf,
+ nameXMMReg(gregOfRM(modrm)));
+ delta +=3D 2+alen;
+ }
+
+ assign( gV, getXMMReg(gregOfRM(modrm)) );
+
+ assign( addV, binop(Iop_Add64Fx2, mkexpr(gV), mkexpr(eV)) );
+ assign( subV, binop(Iop_Sub64Fx2, mkexpr(gV), mkexpr(eV)) );
+
+ assign( a1, unop(Iop_V128HIto64, mkexpr(addV) ));
+ assign( s0, unop(Iop_V128to64, mkexpr(subV) ));
+
+ putXMMReg( gregOfRM(modrm),=20
+ binop(Iop_64HLtoV128, mkexpr(a1), mkexpr(s0)) );
+ goto decode_success;
+ }
+
+ /* F2 0F 7D =3D HSUBPS -- 32x4 sub across from E (mem or xmm) to G (x=
mm). */
+ /* F2 0F 7C =3D HADDPS -- 32x4 add across from E (mem or xmm) to G (x=
mm). */
+ if (sz =3D=3D 4 && insn[0] =3D=3D 0xF2 && insn[1] =3D=3D 0x0F=20
+ && (insn[2] =3D=3D 0x7C || insn[2] =3D=3D 0x7D)) {
+ IRTemp e3, e2, e1, e0, g3, g2, g1, g0;
+ IRTemp eV =3D newTemp(Ity_V128);
+ IRTemp gV =3D newTemp(Ity_V128);
+ IRTemp leftV =3D newTemp(Ity_V128);
+ IRTemp rightV =3D newTemp(Ity_V128);
+ Bool isAdd =3D insn[2] =3D=3D 0x7C;
+ HChar* str =3D isAdd ? "add" : "sub";
+ e3 =3D e2 =3D e1 =3D e0 =3D g3 =3D g2 =3D g1 =3D g0 =3D IRTemp_INV=
ALID;
+
+ modrm =3D insn[3];
+ if (epartIsReg(modrm)) {
+ assign( eV, getXMMReg( eregOfRM(modrm)) );
+ DIP("h%sps %s,%s\n", str, nameXMMReg(eregOfRM(modrm)),
+ nameXMMReg(gregOfRM(modrm)));
+ delta +=3D 3+1;
+ } else {
+ addr =3D disAMode ( &alen, sorb, delta+3, dis_buf );
+ assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+ DIP("h%sps %s,%s\n", str, dis_buf,
+ nameXMMReg(gregOfRM(modrm)));
+ delta +=3D 3+alen;
+ }
+
+ assign( gV, getXMMReg(gregOfRM(modrm)) );
+
+ breakup128to32s( eV, &e3, &e2, &e1, &e0 );
+ breakup128to32s( gV, &g3, &g2, &g1, &g0 );
+
+ assign( leftV, mk128from32s( e2, e0, g2, g0 ) );
+ assign( rightV, mk128from32s( e3, e1, g3, g1 ) );
+
+ putXMMReg( gregOfRM(modrm),=20
+ binop(isAdd ? Iop_Add32Fx4 : Iop_Sub32Fx4,=20
+ mkexpr(leftV), mkexpr(rightV) ) );
+ goto decode_success;
+ }
+
+ /* 66 0F 7D =3D HSUBPD -- 64x2 sub across from E (mem or xmm) to G (x=
mm). */
+ /* 66 0F 7C =3D HADDPD -- 64x2 add across from E (mem or xmm) to G (x=
mm). */
+ if (sz =3D=3D 2 && insn[0] =3D=3D 0x0F && (insn[1] =3D=3D 0x7C || ins=
n[1] =3D=3D 0x7D)) {
+ IRTemp e1 =3D newTemp(Ity_I64);
+ IRTemp e0 =3D newTemp(Ity_I64);
+ IRTemp g1 =3D newTemp(Ity_I64);
+ IRTemp g0 =3D newTemp(Ity_I64);
+ IRTemp eV =3D newTemp(Ity_V128);
+ IRTemp gV =3D newTemp(Ity_V128);
+ IRTemp leftV =3D newTemp(Ity_V128);
+ IRTemp rightV =3D newTemp(Ity_V128);
+ Bool isAdd =3D insn[1] =3D=3D 0x7C;
+ HChar* str =3D isAdd ? "add" : "sub";
+
+ modrm =3D insn[2];
+ if (epartIsReg(modrm)) {
+ assign( eV, getXMMReg( eregOfRM(modrm)) );
+ DIP("h%spd %s,%s\n", str, nameXMMReg(eregOfRM(modrm)),
+ nameXMMReg(gregOfRM(modrm)));
+ delta +=3D 2+1;
+ } else {
+ addr =3D disAMode ( &alen, sorb, delta+2, dis_buf );
+ assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+ DIP("h%spd %s,%s\n", str, dis_buf,
+ nameXMMReg(gregOfRM(modrm)));
+ delta +=3D 2+alen;
+ }
+
+ assign( gV, getXMMReg(gregOfRM(modrm)) );
+
+ assign( e1, unop(Iop_V128HIto64, mkexpr(eV) ));
+ assign( e0, unop(Iop_V128to64, mkexpr(eV) ));
+ assign( g1, unop(Iop_V128HIto64, mkexpr(gV) ));
+ assign( g0, unop(Iop_V128to64, mkexpr(gV) ));
+
+ assign( leftV, binop(Iop_64HLtoV128, mkexpr(e0),mkexpr(g0)) );
+ assign( rightV, binop(Iop_64HLtoV128, mkexpr(e1),mkexpr(g1)) );
+
+ putXMMReg( gregOfRM(modrm),=20
+ binop(isAdd ? Iop_Add64Fx2 : Iop_Sub64Fx2,=20
+ mkexpr(leftV), mkexpr(rightV) ) );
+ goto decode_success;
+ }
+
+ /* F2 0F F0 =3D LDDQU -- move from E (mem or xmm) to G (xmm). */
+ if (sz =3D=3D 4 && insn[0] =3D=3D 0xF2 && insn[1] =3D=3D 0x0F && insn=
[2] =3D=3D 0xF0) {
+ modrm =3D getIByte(delta+3);
+ if (epartIsReg(modrm)) {
+ goto decode_failure;
+ } else {
+ addr =3D disAMode ( &alen, sorb, delta+3, dis_buf );
+ putXMMReg( gregOfRM(modrm),=20
+ loadLE(Ity_V128, mkexpr(addr)) );
+ DIP("lddqu %s,%s\n", dis_buf,
+ nameXMMReg(gregOfRM(modrm)));
+ delta +=3D 3+alen;
+ }
+ goto decode_success;
+ }
+
/* ---------------------------------------------------- */
/* --- end of the SSE3 decoder. --- */
/* ---------------------------------------------------- */
|
|
From: <sv...@va...> - 2006-08-28 13:19:11
|
Author: sewardj
Date: 2006-08-28 14:19:06 +0100 (Mon, 28 Aug 2006)
New Revision: 1645
Log:
Merge r1634 (fix for: (HINT_NOP) vex x86->IR: 0xF 0x1F 0x0 0xF)
Modified:
branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
branches/VEX_3_2_BRANCH/priv/guest-x86/toIR.c
Modified: branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:17:08 U=
TC (rev 1644)
+++ branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:19:06 U=
TC (rev 1645)
@@ -13570,6 +13570,17 @@
delta =3D dis_mul_E_G ( pfx, sz, delta );
break;
=20
+ /* =3D-=3D-=3D-=3D-=3D-=3D-=3D-=3D-=3D- NOPs =3D-=3D-=3D-=3D-=3D-=3D=
-=3D-=3D-=3D-=3D-=3D-=3D */
+
+ case 0x1F:
+ if (haveF2orF3(pfx)) goto decode_failure;
+ modrm =3D getUChar(delta);
+ if (epartIsReg(modrm)) goto decode_failure;
+ addr =3D disAMode ( &alen, pfx, delta, dis_buf, 0 );
+ delta +=3D alen;
+ DIP("nop%c %s\n", nameISize(sz), dis_buf);
+ break;
+
/* =3D-=3D-=3D-=3D-=3D-=3D-=3D-=3D-=3D- Jcond d32 -=3D-=3D-=3D-=3D=
-=3D-=3D-=3D-=3D-=3D */
case 0x80:
case 0x81:
Modified: branches/VEX_3_2_BRANCH/priv/guest-x86/toIR.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/guest-x86/toIR.c 2006-08-28 13:17:08 UTC=
(rev 1644)
+++ branches/VEX_3_2_BRANCH/priv/guest-x86/toIR.c 2006-08-28 13:19:06 UTC=
(rev 1645)
@@ -12388,6 +12388,16 @@
delta =3D dis_mul_E_G ( sorb, sz, delta );
break;
=20
+ /* =3D-=3D-=3D-=3D-=3D-=3D-=3D-=3D-=3D- NOPs =3D-=3D-=3D-=3D-=3D-=3D=
-=3D-=3D-=3D-=3D-=3D-=3D */
+
+ case 0x1F:
+ modrm =3D getUChar(delta);
+ if (epartIsReg(modrm)) goto decode_failure;
+ addr =3D disAMode ( &alen, sorb, delta, dis_buf );
+ delta +=3D alen;
+ DIP("nop%c %s\n", nameISize(sz), dis_buf);
+ break;
+
/* =3D-=3D-=3D-=3D-=3D-=3D-=3D-=3D-=3D- Jcond d32 -=3D-=3D-=3D-=3D=
-=3D-=3D-=3D-=3D-=3D */
case 0x80:
case 0x81:
|
|
From: <sv...@va...> - 2006-08-28 13:17:11
|
Author: sewardj
Date: 2006-08-28 14:17:08 +0100 (Mon, 28 Aug 2006)
New Revision: 1644
Log:
Merge r1633 (fix for: amd64->IR: unhandled instruction "pushfq")
Modified:
branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
Modified: branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:15:19 U=
TC (rev 1643)
+++ branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:17:08 U=
TC (rev 1644)
@@ -12756,8 +12756,10 @@
case 0x9C: /* PUSHF */ {
/* Note. There is no encoding for a 32-bit pushf in 64-bit
mode. So sz=3D=3D4 actually means sz=3D=3D8. */
+ /* 24 July 06: has also been seen with a redundant REX prefix,
+ so must also allow sz=3D=3D8. */
if (haveF2orF3(pfx)) goto decode_failure;
- vassert(sz =3D=3D 2 || sz =3D=3D 4);
+ vassert(sz =3D=3D 2 || sz =3D=3D 4 || sz =3D=3D 8);
if (sz =3D=3D 4) sz =3D 8;
if (sz !=3D 8) goto decode_failure; // until we know a sz=3D=3D2 t=
est case exists
=20
|
|
From: <sv...@va...> - 2006-08-28 13:15:25
|
Author: sewardj
Date: 2006-08-28 14:15:19 +0100 (Mon, 28 Aug 2006)
New Revision: 1643
Log:
Merge r1632 (fix for: amd64->IR: 0x66 0xF 0xF6 0xC4 (psadbw,SSE2))
Modified:
branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
Modified: branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-25 12:52:19 U=
TC (rev 1642)
+++ branches/VEX_3_2_BRANCH/priv/guest-amd64/toIR.c 2006-08-28 13:15:19 U=
TC (rev 1643)
@@ -11111,6 +11111,54 @@
goto decode_success;
}
=20
+ /* 66 0F F6 =3D PSADBW -- 2 x (8x8 -> 48 zeroes ++ u16) Sum Abs Diffs
+ from E(xmm or mem) to G(xmm) */
+ if (have66noF2noF3(pfx) && sz =3D=3D 2=20
+ && insn[0] =3D=3D 0x0F && insn[1] =3D=3D 0xF6) {
+ IRTemp s1V =3D newTemp(Ity_V128);
+ IRTemp s2V =3D newTemp(Ity_V128);
+ IRTemp dV =3D newTemp(Ity_V128);
+ IRTemp s1Hi =3D newTemp(Ity_I64);
+ IRTemp s1Lo =3D newTemp(Ity_I64);
+ IRTemp s2Hi =3D newTemp(Ity_I64);
+ IRTemp s2Lo =3D newTemp(Ity_I64);
+ IRTemp dHi =3D newTemp(Ity_I64);
+ IRTemp dLo =3D newTemp(Ity_I64);
+ modrm =3D insn[2];
+ if (epartIsReg(modrm)) {
+ assign( s1V, getXMMReg(eregOfRexRM(pfx,modrm)) );
+ delta +=3D 2+1;
+ DIP("psadbw %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ } else {
+ addr =3D disAMode ( &alen, pfx, delta+2, dis_buf, 0 );
+ assign( s1V, loadLE(Ity_V128, mkexpr(addr)) );
+ delta +=3D 2+alen;
+ DIP("psadbw %s,%s\n", dis_buf,
+ nameXMMReg(gregOfRexRM(pfx,modrm)));
+ }
+ assign( s2V, getXMMReg(gregOfRexRM(pfx,modrm)) );
+ assign( s1Hi, unop(Iop_V128HIto64, mkexpr(s1V)) );
+ assign( s1Lo, unop(Iop_V128to64, mkexpr(s1V)) );
+ assign( s2Hi, unop(Iop_V128HIto64, mkexpr(s2V)) );
+ assign( s2Lo, unop(Iop_V128to64, mkexpr(s2V)) );
+ assign( dHi, mkIRExprCCall(
+ Ity_I64, 0/*regparms*/,
+ "amd64g_calculate_mmx_psadbw",=20
+ &amd64g_calculate_mmx_psadbw,
+ mkIRExprVec_2( mkexpr(s1Hi), mkexpr(s2Hi))
+ ));
+ assign( dLo, mkIRExprCCall(
+ Ity_I64, 0/*regparms*/,
+ "amd64g_calculate_mmx_psadbw",=20
+ &amd64g_calculate_mmx_psadbw,
+ mkIRExprVec_2( mkexpr(s1Lo), mkexpr(s2Lo))
+ ));
+ assign( dV, binop(Iop_64HLtoV128, mkexpr(dHi), mkexpr(dLo))) ;
+ putXMMReg(gregOfRexRM(pfx,modrm), mkexpr(dV));
+ goto decode_success;
+ }
+
/* 66 0F 70 =3D PSHUFD -- rearrange 4x32 from E(xmm or mem) to G(xmm)=
*/
if (have66noF2noF3(pfx) && sz =3D=3D 2=20
&& insn[0] =3D=3D 0x0F && insn[1] =3D=3D 0x70) {
|
|
From: <sv...@va...> - 2006-08-28 13:13:05
|
Author: sewardj Date: 2006-08-28 14:13:01 +0100 (Mon, 28 Aug 2006) New Revision: 6032 Log: Merge r5987 (regtest for: amd64->IR: 0x66 0xF 0xF6 0xC4 (psadbw,SSE2)) Modified: branches/VALGRIND_3_2_BRANCH/none/tests/amd64/insn_sse2.def branches/VALGRIND_3_2_BRANCH/none/tests/amd64/insn_sse2.stdout.exp Modified: branches/VALGRIND_3_2_BRANCH/none/tests/amd64/insn_sse2.def =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- branches/VALGRIND_3_2_BRANCH/none/tests/amd64/insn_sse2.def 2006-08-2= 8 13:08:16 UTC (rev 6031) +++ branches/VALGRIND_3_2_BRANCH/none/tests/amd64/insn_sse2.def 2006-08-2= 8 13:13:01 UTC (rev 6032) @@ -217,8 +217,8 @@ pmuludq m128.ud[12345678,0,87654321,0] xmm.ud[87654321,0,12345678,0] =3D= > 1.uq[1082152022374638,1082152022374638] por xmm.uq[0xfdb97531eca86420,0x0123456789abcdef] xmm.uq[0x0123456789abc= def,0xfdb97531eca86420] =3D> 1.uq[0xfdbb7577edabedef,0xfdbb7577edabedef] por m128.uq[0xfdb97531eca86420,0x0123456789abcdef] xmm.uq[0x0123456789ab= cdef,0xfdb97531eca86420] =3D> 1.uq[0xfdbb7577edabedef,0xfdbb7577edabedef] -#####psadbw xmm.ub[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] xmm.ub[16,15,= 14,13,12,11,10,9,8,7,6,5,4,3,2,1] =3D> 1.sw[64,0,0,0,64,0,0,0] -#####psadbw m128.ub[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] xmm.ub[16,15= ,14,13,12,11,10,9,8,7,6,5,4,3,2,1] =3D> 1.sw[64,0,0,0,64,0,0,0] +psadbw xmm.ub[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] xmm.ub[16,15,14,13= ,12,11,10,9,8,7,6,5,4,3,2,1] =3D> 1.sw[64,0,0,0,64,0,0,0] +psadbw m128.ub[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] xmm.ub[16,15,14,1= 3,12,11,10,9,8,7,6,5,4,3,2,1] =3D> 1.sw[64,0,0,0,64,0,0,0] pshufd imm8[0x1b] xmm.sd[1122,3344,5566,7788] xmm.sd[0,0,0,0] =3D> 2.sd[= 7788,5566,3344,1122] pshufd imm8[0x1b] m128.sd[1122,3344,5566,7788] xmm.sd[0,0,0,0] =3D> 2.sd= [7788,5566,3344,1122] pshufhw imm8[0x1b] xmm.sw[11,22,33,44,55,66,77,88] 
xmm.sw[0,0,0,0,0,0,0,= 0] =3D> 2.sw[11,22,33,44,88,77,66,55] Modified: branches/VALGRIND_3_2_BRANCH/none/tests/amd64/insn_sse2.stdout.= exp =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- branches/VALGRIND_3_2_BRANCH/none/tests/amd64/insn_sse2.stdout.exp 20= 06-08-28 13:08:16 UTC (rev 6031) +++ branches/VALGRIND_3_2_BRANCH/none/tests/amd64/insn_sse2.stdout.exp 20= 06-08-28 13:13:01 UTC (rev 6032) @@ -217,6 +217,8 @@ pmuludq_4 ... ok por_1 ... ok por_2 ... ok +psadbw_1 ... ok +psadbw_2 ... ok pshufd_1 ... ok pshufd_2 ... ok pshufhw_1 ... ok |
|
From: <sv...@va...> - 2006-08-28 13:08:19
|
Author: sewardj Date: 2006-08-28 14:08:16 +0100 (Mon, 28 Aug 2006) New Revision: 6031 Log: Merge r6030 (fix for: VG_N_SEGNAMES too low) Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c 2006-0= 8-28 13:05:48 UTC (rev 6030) +++ branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c 2006-0= 8-28 13:08:16 UTC (rev 6031) @@ -278,7 +278,7 @@ #define VG_N_SEGMENTS 5000 =20 /* Max number of segment file names we can track. */ -#define VG_N_SEGNAMES 400 +#define VG_N_SEGNAMES 1000 =20 /* Max length of a segment file name. */ #define VG_MAX_SEGNAMELEN 1000 |
|
From: <sv...@va...> - 2006-08-28 13:05:52
|
Author: sewardj Date: 2006-08-28 14:05:48 +0100 (Mon, 28 Aug 2006) New Revision: 6030 Log: Increase number of segnames available (Stu Robinson). Modified: trunk/coregrind/m_aspacemgr/aspacemgr.c Modified: trunk/coregrind/m_aspacemgr/aspacemgr.c =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- trunk/coregrind/m_aspacemgr/aspacemgr.c 2006-08-28 12:40:05 UTC (rev = 6029) +++ trunk/coregrind/m_aspacemgr/aspacemgr.c 2006-08-28 13:05:48 UTC (rev = 6030) @@ -278,7 +278,7 @@ #define VG_N_SEGMENTS 5000 =20 /* Max number of segment file names we can track. */ -#define VG_N_SEGNAMES 400 +#define VG_N_SEGNAMES 1000 =20 /* Max length of a segment file name. */ #define VG_MAX_SEGNAMELEN 1000 |
|
From: Bryan M. <om...@br...> - 2006-08-28 13:03:10
|
Josef Weidendorfer wrote: > Hi Bryan, > > On Monday 28 August 2006 13:09, Bryan Meredith wrote: >> What versions of what compilers are we all using for x86 and x86-64? > > I would say this very much depends on the user ;-) > Of course I most often use the GCC which was installed with my distribution > (GCC 4.1 on OpenSuse 10.1), but I also have the Intel compiler installed. > This way, the binary is produced by ICC and the library code by GCC. > > But of course, when I want to use a given Valgrind tool, and there is > a suggestion for the best compiler/options to use, I can arrange it > when I want the tool to work perfectly. > >> Would it be acceptable to determine the compiler version at compile time >> and insert/remove code sections based upon it? > > Why can't you detect the instrumentation you need at runtime? > I would say that especially with valgrind, instrumentation > can be much more dynamic depending on the environment than with any other > instrumentation method. > Josef, I agree that would be the best solution but how does one work out the compiler, given the IR (or is it buried in the debug info somewhere). The other issue that you have highlighted is people having more than one compiler on their system, such that there are mixed code bases. See the reply to Julian as on reflection, this was truly a lousy idea and I need to dig further with what I have before I start doing anything really weird. Bryan |
|
From: Bryan M. <om...@br...> - 2006-08-28 12:58:34
|
Julian Seward wrote: >> Would it be acceptable to determine the compiler version at compile time >> and insert/remove code sections based upon it? > > I don't think that would be a robust assumption in practice. Not only > that, but gcc isn't the only compiler in the universe; icc is quite > widely used for one. > > What is it about the particular stack handling of different gccs that > makes a difference for you? > > J > Julian, I am aware of icc but don't have access to it - it would also have to be taken into account though. The main difference is the amount of stack manipulation that occurs: the old version tends to move the stack pointer around piecemeal as it needs to. The newer version moves the sp at the top of the function then typically leaves it alone. This is also evident in the use of push in the old verses mov (%esp) in the new. Looking at the two programs side by side, I think the real crux of it is the differing epilog code. I think I am falling over trying to detect when there is a value being returned through the accumulator. I need to know this as the accumulator should be ignored if it isn't being used to return anything, possibly generating a leak report at function exit. Is there a robust method of determining if a function returns a value (and in which register(s))? Bryan |
|
From: <sv...@va...> - 2006-08-28 12:40:11
|
Author: sewardj
Date: 2006-08-28 13:40:05 +0100 (Mon, 28 Aug 2006)
New Revision: 6029
Log:
Merge r6011 (amd64 padding suppressions)
Modified:
branches/VALGRIND_3_2_BRANCH/glibc-2.4.supp
Modified: branches/VALGRIND_3_2_BRANCH/glibc-2.4.supp
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/glibc-2.4.supp 2006-08-28 12:38:31 UTC (=
rev 6028)
+++ branches/VALGRIND_3_2_BRANCH/glibc-2.4.supp 2006-08-28 12:40:05 UTC (=
rev 6029)
@@ -124,3 +124,114 @@
fun:index
fun:expand_dynamic_string_token
}
+
+
+##----------------------------------------------------------------------=
##
+## Various structure padding things on amd64 SuSE 10.1
+##
+{
+ X11-64bit-padding-1a
+ Memcheck:Param
+ write(buf)
+ fun:__write_nocancel
+ obj:/usr/X*/libX11.so*
+ obj:/usr/X*/libX11.so*
+ fun:X*
+}
+{
+ X11-64bit-padding-1b
+ Memcheck:Param
+ write(buf)
+ fun:__write_nocancel
+ obj:/usr/X*/libX11.so*
+ obj:/usr/X*/libX11.so*
+ fun:_X*
+}
+{
+ X11-64bit-padding-1c
+ Memcheck:Param
+ write(buf)
+ fun:__write_nocancel
+ obj:/usr/X*/libX11.so*
+ obj:/usr/X*/libX11.so*
+ obj:/usr/X*/libX11.so*
+}
+{
+ X11-64bit-padding-1d
+ Memcheck:Param
+ write(buf)
+ fun:__write_nocancel
+ obj:/usr/X*/libICE.so*
+ obj:/usr/X*/libICE.so*
+ obj:/usr/X*/libICE.so*
+}
+
+
+{
+ X11-64bit-padding-2a
+ Memcheck:Param
+ writev(vector[...])
+ fun:do_writev
+ obj:/usr/X*/libX11.so*
+ obj:/usr/X*/libX11.so*
+ obj:/usr/X*/libX11.so*
+}
+{
+ X11-64bit-padding-2b
+ Memcheck:Param
+ writev(vector[...])
+ fun:do_writev
+ fun:writev
+ obj:/usr/X*/libX11.so*
+ obj:/usr/X*/libX11.so*
+}
+
+{
+ glibc24-64bit-padding-1a
+ Memcheck:Param
+ socketcall.sendto(msg)
+ fun:send
+ fun:get_mapping
+ fun:__nscd_get_map_ref
+ fun:nscd*
+}
+{
+ glibc24-64bit-padding-1b
+ Memcheck:Param
+ socketcall.sendto(msg)
+ fun:__sendto_nocancel
+ obj:/*libc-2.4.so
+ obj:/*libc-2.4.so
+ obj:/*libc-2.4.so
+}
+{
+ glibc24-64bit-padding-1c
+ Memcheck:Param
+ socketcall.send(msg)
+ fun:send
+ fun:__nscd_get_map_ref
+ fun:nscd_get*_r
+ fun:*nscd*
+ obj:/*libc-2.4.so
+}
+
+
+{
+ X11-64bit-padding-3a
+ Memcheck:Param
+ write(buf)
+ obj:/*libpthread-2.4.so*
+ obj:/usr/X*/libX11.so*
+ obj:/usr/X*/libX11.so*
+ obj:/usr/X*/libX11.so*
+}
+
+{
+ X11-64bit-padding-4a
+ Memcheck:Param
+ socketcall.sendto(msg)
+ fun:send
+ obj:/*libc-2.4.so
+ obj:/*libc-2.4.so
+ obj:/*libc-2.4.so
+}
|
|
From: <sv...@va...> - 2006-08-28 12:38:35
|
Author: sewardj
Date: 2006-08-28 13:38:31 +0100 (Mon, 28 Aug 2006)
New Revision: 6028
Log:
Merge r6010 (ppc32 SuSE 10.1 redir)
Modified:
branches/VALGRIND_3_2_BRANCH/coregrind/m_redir.c
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_redir.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_redir.c 2006-08-28 12:36:11 =
UTC (rev 6027)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_redir.c 2006-08-28 12:38:31 =
UTC (rev 6028)
@@ -762,6 +762,10 @@
"ld.so.1", "strcmp",
(Addr)&VG_(ppc32_linux_REDIR_FOR_strcmp)
);
+ add_hardwired_spec(
+ "ld.so.1", "index",
+ (Addr)&VG_(ppc32_linux_REDIR_FOR_strchr)
+ );
}
=20
# elif defined(VGP_ppc64_linux)
|
|
From: <sv...@va...> - 2006-08-28 12:36:14
|
Author: sewardj
Date: 2006-08-28 13:36:11 +0100 (Mon, 28 Aug 2006)
New Revision: 6027
Log:
Merge r5999 (BartV: Don't print more lines of a stack-trace than were
obtained.)
Modified:
branches/VALGRIND_3_2_BRANCH/coregrind/m_stacktrace.c
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_stacktrace.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_stacktrace.c 2006-08-28 12:3=
2:43 UTC (rev 6026)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_stacktrace.c 2006-08-28 12:3=
6:11 UTC (rev 6027)
@@ -412,8 +412,8 @@
void VG_(get_and_pp_StackTrace) ( ThreadId tid, UInt n_ips )
{
Addr ips[n_ips];
- VG_(get_StackTrace)(tid, ips, n_ips);
- VG_(pp_StackTrace) ( ips, n_ips);
+ UInt n_ips_obtained =3D VG_(get_StackTrace)(tid, ips, n_ips);
+ VG_(pp_StackTrace)(ips, n_ips_obtained);
}
=20
=20
|
|
From: <sv...@va...> - 2006-08-28 12:32:46
|
Author: sewardj
Date: 2006-08-28 13:32:43 +0100 (Mon, 28 Aug 2006)
New Revision: 6026
Log:
Merge r6001 (fix for: Alex Bennee mmap problem (9 Aug))
Modified:
branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c 2006-0=
8-28 12:28:51 UTC (rev 6025)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c 2006-0=
8-28 12:32:43 UTC (rev 6026)
@@ -994,6 +994,12 @@
case SkShmC:
return False;
=20
+ case SkResvn:
+ if (s1->smode =3D=3D SmFixed && s2->smode =3D=3D SmFixed) {
+ s1->end =3D s2->end;
+ return True;
+ }
+
default:
break;
=20
@@ -2232,9 +2238,22 @@
needDiscard =3D any_Ts_in_range( start, len );
=20
init_nsegment( &seg );
- seg.kind =3D SkFree;
seg.start =3D start;
seg.end =3D start + len - 1;
+
+ /* The segment becomes unused (free). Segments from above
+ aspacem_maxAddr were originally SkResvn and so we make them so
+ again. Note, this isn't really right when the segment straddles
+ the aspacem_maxAddr boundary - then really it should be split in
+ two, the lower part marked as SkFree and the upper part as
+ SkResvn. Ah well. */
+ if (start > aspacem_maxAddr=20
+ && /* check previous comparison is meaningful */
+ aspacem_maxAddr < Addr_MAX)
+ seg.kind =3D SkResvn;
+ else
+ seg.kind =3D SkFree;
+
add_segment( &seg );
=20
/* Unmapping could create two adjacent free segments, so a preen is
@@ -2995,9 +3014,17 @@
=20
/* Create a free hole in the old location. */
init_nsegment( &seg );
- seg.kind =3D SkFree;
seg.start =3D old_addr;
seg.end =3D old_addr + old_len - 1;
+ /* See comments in VG_(am_notify_munmap) about this SkResvn vs
+ SkFree thing. */
+ if (old_addr > aspacem_maxAddr=20
+ && /* check previous comparison is meaningful */
+ aspacem_maxAddr < Addr_MAX)
+ seg.kind =3D SkResvn;
+ else
+ seg.kind =3D SkFree;
+
add_segment( &seg );
=20
AM_SANITY_CHECK;
|
|
From: <sv...@va...> - 2006-08-28 12:28:54
|
Author: sewardj
Date: 2006-08-28 13:28:51 +0100 (Mon, 28 Aug 2006)
New Revision: 6025
Log:
Merge r6003,4 (fix for: cachegrind/callgrind causes executable to die)
Modified:
branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c
branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-generic.c
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c 2006-0=
8-28 12:24:48 UTC (rev 6024)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_aspacemgr/aspacemgr.c 2006-0=
8-28 12:28:51 UTC (rev 6025)
@@ -2884,6 +2884,9 @@
NSegment seg_copy =3D *seg;
SizeT seg_old_len =3D seg->end + 1 - seg->start;
=20
+ if (0)
+ VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
+
if (seg->kind !=3D SkFileC && seg->kind !=3D SkAnonC)
return False;
=20
@@ -2905,6 +2908,9 @@
if (sres.isError) {
AM_SANITY_CHECK;
return False;
+ } else {
+ /* the area must not have moved */
+ aspacem_assert(sres.val =3D=3D seg->start);
}
=20
*need_discard =3D any_Ts_in_range( seg_copy.end+1, delta );
@@ -2912,6 +2918,9 @@
seg_copy.end +=3D delta;
add_segment( &seg_copy );
=20
+ if (0)
+ VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
+
AM_SANITY_CHECK;
return True;
}
@@ -2964,6 +2973,8 @@
if (sres.isError) {
AM_SANITY_CHECK;
return False;
+ } else {
+ aspacem_assert(sres.val =3D=3D new_addr);
}
=20
*need_discard =3D any_Ts_in_range( old_addr, old_len )
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-generi=
c.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-generic.c 20=
06-08-28 12:24:48 UTC (rev 6024)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-generic.c 20=
06-08-28 12:28:51 UTC (rev 6025)
@@ -187,6 +187,8 @@
old_addr,old_len,new_addr,new_len,=20
flags & VKI_MREMAP_MAYMOVE ? "MAYMOVE" : "",
flags & VKI_MREMAP_FIXED ? "FIXED" : "");
+ if (0)
+ VG_(am_show_nsegments)(0, "do_remap: before");
=20
if (flags & ~(VKI_MREMAP_FIXED | VKI_MREMAP_MAYMOVE))
goto eINVAL;
@@ -327,6 +329,18 @@
/* VG_(am_get_advisory_client_simple) interprets zero to mean
non-fixed, which is not what we want */
advised =3D VG_(am_get_advisory_client_simple)( needA, needL, &ok );
+ if (ok) {
+ /* VG_(am_get_advisory_client_simple) (first arg =3D=3D 0, meaning
+ this-or-nothing) is too lenient, and may allow us to trash
+ the next segment along. So make very sure that the proposed
+ new area really is free. This is perhaps overly
+ conservative, but it fixes #129866. */
+ NSegment* segLo =3D VG_(am_find_nsegment)( needA );
+ NSegment* segHi =3D VG_(am_find_nsegment)( needA + needL - 1 );
+ if (segLo =3D=3D NULL || segHi =3D=3D NULL=20
+ || segLo !=3D segHi || segLo->kind !=3D SkFree)
+ ok =3D False;
+ }
if (ok && advised =3D=3D needA) {
ok =3D VG_(am_extend_map_client)( &d, old_seg, needL );
if (ok) {
@@ -374,6 +388,17 @@
/* VG_(am_get_advisory_client_simple) interprets zero to mean
non-fixed, which is not what we want */
advised =3D VG_(am_get_advisory_client_simple)( needA, needL, &ok );
+ if (ok) {
+ /* VG_(am_get_advisory_client_simple) (first arg =3D=3D 0, meaning
+ this-or-nothing) is too lenient, and may allow us to trash
+ the next segment along. So make very sure that the proposed
+ new area really is free. */
+ NSegment* segLo =3D VG_(am_find_nsegment)( needA );
+ NSegment* segHi =3D VG_(am_find_nsegment)( needA + needL - 1 );
+ if (segLo =3D=3D NULL || segHi =3D=3D NULL=20
+ || segLo !=3D segHi || segLo->kind !=3D SkFree)
+ ok =3D False;
+ }
if (!ok || advised !=3D needA)
goto eNOMEM;
ok =3D VG_(am_extend_map_client)( &d, old_seg, needL );
|
|
From: <sv...@va...> - 2006-08-28 12:24:50
|
Author: sewardj
Date: 2006-08-28 13:24:48 +0100 (Mon, 28 Aug 2006)
New Revision: 6024
Log:
Merge r5991,4,6 (GraydonH leak checking fix)
Modified:
branches/VALGRIND_3_2_BRANCH/memcheck/mc_leakcheck.c
branches/VALGRIND_3_2_BRANCH/memcheck/tests/mempool.stderr.exp
Modified: branches/VALGRIND_3_2_BRANCH/memcheck/mc_leakcheck.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/memcheck/mc_leakcheck.c 2006-08-28 12:21=
:19 UTC (rev 6023)
+++ branches/VALGRIND_3_2_BRANCH/memcheck/mc_leakcheck.c 2006-08-28 12:24=
:48 UTC (rev 6024)
@@ -175,7 +175,7 @@
PROF_EVENT(71, "find_shadow_for_OLD(loop)");
a_lo =3D shadows[i]->data;
a_hi =3D ((Addr)shadows[i]->data) + shadows[i]->size;
- if (a_lo <=3D ptr && ptr <=3D a_hi)
+ if (a_lo <=3D ptr && ptr < a_hi)
return i;
}
return -1;
@@ -201,16 +201,24 @@
mid =3D (lo + hi) / 2;
a_mid_lo =3D shadows[mid]->data;
a_mid_hi =3D shadows[mid]->data + shadows[mid]->size;
+ /* Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
+ Special-case zero-sized blocks - treat them as if they had
+ size 1. Not doing so causes them to not cover any address
+ range at all and so will never be identified as the target of
+ any pointer, which causes them to be incorrectly reported as
+ definitely leaked. */
+ if (shadows[mid]->size =3D=3D 0)
+ a_mid_hi++;
=20
if (ptr < a_mid_lo) {
hi =3D mid-1;
continue;
}=20
- if (ptr > a_mid_hi) {
+ if (ptr >=3D a_mid_hi) {
lo =3D mid+1;
continue;
}
- tl_assert(ptr >=3D a_mid_lo && ptr <=3D a_mid_hi);
+ tl_assert(ptr >=3D a_mid_lo && ptr < a_mid_hi);
retVal =3D mid;
break;
}
@@ -345,7 +353,10 @@
return;
=20
tl_assert(sh_no >=3D 0 && sh_no < lc_n_shadows);
- tl_assert(ptr <=3D lc_shadows[sh_no]->data + lc_shadows[sh_no]->size)=
;
+ tl_assert(ptr >=3D lc_shadows[sh_no]->data);
+ tl_assert(ptr < lc_shadows[sh_no]->data=20
+ + lc_shadows[sh_no]->size
+ + (lc_shadows[sh_no]->size=3D=3D0 ? 1 : 0));
=20
if (lc_markstack[sh_no].state =3D=3D Unreached) {
if (0)
@@ -673,6 +684,107 @@
}
}
=20
+static MC_Chunk**
+find_active_shadows(UInt* n_shadows)
+{
+ /* Our goal is to construct a set of shadows that includes every
+ * mempool chunk, and every malloc region that *doesn't* contain a
+ * mempool chunk. We do this in several phases.
+ *
+ * First we collect all the malloc chunks into an array and sort it.
+ * We do this because we want to query the chunks by interior
+ * pointers, requiring binary search.
+ *
+ * Second we build an array containing a Bool for each malloc chunk,
+ * indicating whether it contains any mempools.
+ *
+ * Third we loop over the mempool tables. For each chunk in each
+ * pool, we set the entry in the Bool array corresponding to the
+ * malloc chunk containing the mempool chunk.
+ *
+ * Finally we copy the mempool chunks and the non-marked malloc
+ * chunks into a combined array of shadows, free our temporaries,
+ * and return the combined array.
+ */
+
+ MC_Mempool *mp;
+ MC_Chunk **mallocs, **shadows, *mc;
+ UInt n_mallocs, m, s;
+ Bool *malloc_chunk_holds_a_pool_chunk;
+
+ mallocs =3D (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallo=
cs );
+
+ if (n_mallocs =3D=3D 0) {
+ tl_assert(mallocs =3D=3D NULL);
+ *n_shadows =3D 0;
+ return NULL;
+ }
+
+ VG_(ssort)((void*)mallocs, n_mallocs,=20
+ sizeof(VgHashNode*), lc_compar);
+
+ malloc_chunk_holds_a_pool_chunk =3D VG_(calloc)( n_mallocs, sizeof(Bo=
ol) );
+
+ *n_shadows =3D n_mallocs;
+
+ VG_(HT_ResetIter)(MC_(mempool_list));
+ while ( (mp =3D VG_(HT_Next)(MC_(mempool_list))) ) {
+ VG_(HT_ResetIter)(mp->chunks);
+ while ( (mc =3D VG_(HT_Next)(mp->chunks)) ) {
+
+ /* We'll need a shadow for this chunk. */
+ ++(*n_shadows);
+
+ /* Possibly invalidate the malloc holding the beginning of this=
chunk. */
+ m =3D find_shadow_for(mc->data, mallocs, n_mallocs);
+ if (m !=3D -1 && malloc_chunk_holds_a_pool_chunk[m] =3D=3D Fals=
e) {
+ tl_assert(*n_shadows > 0);
+ --(*n_shadows);
+ malloc_chunk_holds_a_pool_chunk[m] =3D True;
+ }
+
+ /* Possibly invalidate the malloc holding the end of this chunk=
. */
+ if (mc->size > 1) {
+ m =3D find_shadow_for(mc->data + (mc->size - 1), mallocs, n_=
mallocs);
+ if (m !=3D -1 && malloc_chunk_holds_a_pool_chunk[m] =3D=3D F=
alse) {
+ tl_assert(*n_shadows > 0);
+ --(*n_shadows);
+ malloc_chunk_holds_a_pool_chunk[m] =3D True;
+ }
+ }
+ }
+ }
+
+ tl_assert(*n_shadows > 0);
+ shadows =3D VG_(malloc)(sizeof(VgHashNode*) * (*n_shadows));
+ s =3D 0;
+
+ /* Copy the mempool chunks into the final array. */
+ VG_(HT_ResetIter)(MC_(mempool_list));
+ while ( (mp =3D VG_(HT_Next)(MC_(mempool_list))) ) {
+ VG_(HT_ResetIter)(mp->chunks);
+ while ( (mc =3D VG_(HT_Next)(mp->chunks)) ) {
+ tl_assert(s < *n_shadows);
+ shadows[s++] =3D mc;
+ }
+ }
+
+ /* Copy the malloc chunks into the final array. */
+ for (m =3D 0; m < n_mallocs; ++m) {
+ if (!malloc_chunk_holds_a_pool_chunk[m]) {
+ tl_assert(s < *n_shadows);
+ shadows[s++] =3D mallocs[m];
+ }
+ }
+
+ tl_assert(s =3D=3D *n_shadows);
+ VG_(free)(mallocs);
+ VG_(free)(malloc_chunk_holds_a_pool_chunk);
+
+ return shadows;
+}
+
+
/* Top level entry point to leak detector. Call here, passing in
suitable address-validating functions (see comment at top of
scan_all_valid_memory above). These functions used to encapsulate th=
e
@@ -689,9 +801,7 @@
=20
tl_assert(mode !=3D LC_Off);
=20
- /* VG_(HT_to_array) allocates storage for shadows */
- lc_shadows =3D (MC_Chunk**)VG_(HT_to_array)( MC_(malloc_list),
- &lc_n_shadows );
+ lc_shadows =3D find_active_shadows(&lc_n_shadows);
=20
/* Sort the array. */
VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_c=
ompar);
Modified: branches/VALGRIND_3_2_BRANCH/memcheck/tests/mempool.stderr.exp
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/memcheck/tests/mempool.stderr.exp 2006-0=
8-28 12:21:19 UTC (rev 6023)
+++ branches/VALGRIND_3_2_BRANCH/memcheck/tests/mempool.stderr.exp 2006-0=
8-28 12:24:48 UTC (rev 6024)
@@ -35,7 +35,25 @@
by 0x........: main (mempool.c:148)
=20
=20
-100,028 (20 direct, 100,008 indirect) bytes in 1 blocks are definitely l=
ost in loss record 2 of 3
+10 bytes in 1 blocks are definitely lost in loss record 2 of 5
+ at 0x........: allocate (mempool.c:99)
+ by 0x........: test (mempool.c:135)
+ by 0x........: main (mempool.c:148)
+
+
+10 bytes in 1 blocks are definitely lost in loss record 3 of 5
+ at 0x........: allocate (mempool.c:99)
+ by 0x........: test (mempool.c:115)
+ by 0x........: main (mempool.c:148)
+
+
+20 bytes in 1 blocks are definitely lost in loss record 4 of 5
+ at 0x........: allocate (mempool.c:99)
+ by 0x........: test (mempool.c:116)
+ by 0x........: main (mempool.c:148)
+
+
+28 (20 direct, 8 indirect) bytes in 1 blocks are definitely lost in loss=
record 5 of 5
at 0x........: malloc (vg_replace_malloc.c:...)
by 0x........: make_pool (mempool.c:37)
by 0x........: test (mempool.c:111)
|
|
From: Josef W. <Jos...@gm...> - 2006-08-28 12:23:09
|
Hi Bryan, On Monday 28 August 2006 13:09, Bryan Meredith wrote: > What versions of what compilers are we all using for x86 and x86-64? I would say this very much depends on the user ;-) Of course I most often use the GCC which was installed with my distribution (GCC 4.1 on OpenSuse 10.1), but I also have the Intel compiler installed. This way, the binary is produced by ICC and the library code by GCC. But of course, when I want to use a given Valgrind tool, and there is a suggestion for the best compiler/options to use, I can arrange it when I want the tool to work perfectly. > Would it be acceptable to determine the compiler version at compile time > and insert/remove code sections based upon it? Why can't you detect the instrumentation you need at runtime? I would say that especially with valgrind, instrumentation can be much more dynamic depending on the environment than with any other instrumentation method. Josef |
|
From: <sv...@va...> - 2006-08-28 12:21:24
|
Author: sewardj
Date: 2006-08-28 13:21:19 +0100 (Mon, 28 Aug 2006)
New Revision: 6023
Log:
Merge r5990 (fix for: Livelocks entire machine)
Modified:
branches/VALGRIND_3_2_BRANCH/memcheck/mc_leakcheck.c
Modified: branches/VALGRIND_3_2_BRANCH/memcheck/mc_leakcheck.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/memcheck/mc_leakcheck.c 2006-08-28 12:19=
:40 UTC (rev 6022)
+++ branches/VALGRIND_3_2_BRANCH/memcheck/mc_leakcheck.c 2006-08-28 12:21=
:19 UTC (rev 6023)
@@ -763,6 +763,21 @@
continue;
if (seg->isCH)
continue;
+
+ /* Don't poke around in device segments as this may cause
+ hangs. Exclude /dev/zero just in case someone allocated
+ memory by explicitly mapping /dev/zero. */
+ if (seg->kind =3D=3D SkFileC=20
+ && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
+ HChar* dev_name =3D VG_(am_get_filename)( seg );
+ if (dev_name && 0 =3D=3D VG_(strcmp)(dev_name, "/dev/zero")) =
{
+ /* don't skip /dev/zero */
+ } else {
+ /* skip this device mapping */
+ continue;
+ }
+ }
+
if (0)
VG_(printf)("ACCEPT %2d %p %p\n", i, seg->start, seg->end);
lc_scan_memory(seg->start, seg->end+1 - seg->start);
|
|
From: <sv...@va...> - 2006-08-28 12:19:43
|
Author: sewardj
Date: 2006-08-28 13:19:40 +0100 (Mon, 28 Aug 2006)
New Revision: 6022
Log:
Merge r5983 (fix for: Valgrind aborts when process calls malloc_trim())
Modified:
branches/VALGRIND_3_2_BRANCH/coregrind/m_replacemalloc/vg_replace_mall=
oc.c
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_replacemalloc/vg_repla=
ce_malloc.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_replacemalloc/vg_replace_mal=
loc.c 2006-08-28 12:17:41 UTC (rev 6021)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_replacemalloc/vg_replace_mal=
loc.c 2006-08-28 12:19:40 UTC (rev 6022)
@@ -358,6 +358,41 @@
MALLOPT(m_libc_dot_so_star, mallopt);
=20
=20
+// Documentation says:
+// malloc_trim(size_t pad);
+//=20
+// If possible, gives memory back to the system (via negative argument=
s to
+// sbrk) if there is unused memory at the `high' end of the malloc poo=
l.
+// You can call this after freeing large blocks of memory to potential=
ly
+// reduce the system-level memory requirements of a program. However, =
it
+// cannot guarantee to reduce memory. Under some allocation patterns,
+// some large free blocks of memory will be locked between two used
+// chunks, so they cannot be given back to the system.
+//=20
+// The `pad' argument to malloc_trim represents the amount of free
+// trailing space to leave untrimmed. If this argument is zero, only t=
he
+// minimum amount of memory to maintain internal data structures will =
be
+// left (one page or less). Non-zero arguments can be supplied to main=
tain
+// enough trailing space to service future expected allocations withou=
t
+// having to re-obtain memory from the system.
+//=20
+// Malloc_trim returns 1 if it actually released any memory, else 0. O=
n
+// systems that do not support "negative sbrks", it will always return=
0.=20
+//
+// For simplicity, we always return 0.
+#define MALLOC_TRIM(soname, fnname) \
+ \
+ int VG_REPLACE_FUNCTION_ZU(soname, fnname) ( SizeT pad ); \
+ int VG_REPLACE_FUNCTION_ZU(soname, fnname) ( SizeT pad ) \
+ { \
+ /* 0 denotes that malloc_trim() either wasn't able \
+ to do anything, or was not implemented */ \
+ return 0; \
+ }
+
+MALLOC_TRIM(m_libc_dot_so_star, malloc_trim);
+
+
#define POSIX_MEMALIGN(soname, fnname) \
\
int VG_REPLACE_FUNCTION_ZU(soname, fnname) ( void **memptr, \
@@ -427,7 +462,6 @@
=20
PANIC(m_libc_dot_so_star, pvalloc);
PANIC(m_libc_dot_so_star, malloc_stats);
-PANIC(m_libc_dot_so_star, malloc_trim);
PANIC(m_libc_dot_so_star, malloc_get_state);
PANIC(m_libc_dot_so_star, malloc_set_state);
=20
|
|
From: <sv...@va...> - 2006-08-28 12:17:45
|
Author: sewardj
Date: 2006-08-28 13:17:41 +0100 (Mon, 28 Aug 2006)
New Revision: 6021
Log:
Merge r5979 (fix for: Can't stat .so/.exe error while reading symbols)
Modified:
branches/VALGRIND_3_2_BRANCH/coregrind/m_debuginfo/readelf.c
branches/VALGRIND_3_2_BRANCH/coregrind/m_libcfile.c
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_debuginfo/readelf.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_debuginfo/readelf.c 2006-08-=
28 12:13:19 UTC (rev 6020)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_debuginfo/readelf.c 2006-08-=
28 12:17:41 UTC (rev 6021)
@@ -821,7 +821,6 @@
Addr dimage =3D 0;
UInt n_dimage =3D 0;
OffT offset_dimage =3D 0;
- struct vki_stat stat_buf;
=20
oimage =3D (Addr)NULL;
if (VG_(clo_verbosity) > 1 || VG_(clo_trace_redir))
@@ -832,16 +831,16 @@
line number info out of it. It will be munmapped immediately
thereafter; it is only aboard transiently. */
=20
- fd =3D VG_(stat)(si->filename, &stat_buf);
+ fd =3D VG_(open)(si->filename, VKI_O_RDONLY, 0);
if (fd.isError) {
- ML_(symerr)("Can't stat .so/.exe (to determine its size)?!");
+ ML_(symerr)("Can't open .so/.exe to read symbols?!");
return False;
}
- n_oimage =3D stat_buf.st_size;
=20
- fd =3D VG_(open)(si->filename, VKI_O_RDONLY, 0);
- if (fd.isError) {
- ML_(symerr)("Can't open .so/.exe to read symbols?!");
+ n_oimage =3D VG_(fsize)(fd.val);
+ if (n_oimage < 0) {
+ ML_(symerr)("Can't stat .so/.exe (to determine its size)?!");
+ VG_(close)(fd.val);
return False;
}
=20
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_libcfile.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_libcfile.c 2006-08-28 12:13:=
19 UTC (rev 6020)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_libcfile.c 2006-08-28 12:17:=
41 UTC (rev 6021)
@@ -133,8 +133,13 @@
=20
Int VG_(fsize) ( Int fd )
{
+#ifdef __NR_fstat64
+ struct vki_stat64 buf;
+ SysRes res =3D VG_(do_syscall2)(__NR_fstat64, fd, (UWord)&buf);
+#else
struct vki_stat buf;
SysRes res =3D VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
+#endif
return res.isError ? (-1) : buf.st_size;
}
=20
|
|
From: <sv...@va...> - 2006-08-28 12:13:24
|
Author: sewardj
Date: 2006-08-28 13:13:19 +0100 (Mon, 28 Aug 2006)
New Revision: 6020
Log:
Merge r5989 (fix for: PATCH: ppc32 missing system calls)
Modified:
branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc32-linux.c
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc32-=
linux.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc32-linux.=
c 2006-08-28 12:11:25 UTC (rev 6019)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc32-linux.=
c 2006-08-28 12:13:19 UTC (rev 6020)
@@ -1598,7 +1598,7 @@
GENXY(__NR_fstatfs, sys_fstatfs), // 100
//.. LINX_(__NR_ioperm, sys_ioperm), // 101
PLAXY(__NR_socketcall, sys_socketcall), // 102
-//.. LINXY(__NR_syslog, sys_syslog), // 103
+ LINXY(__NR_syslog, sys_syslog), // 103
GENXY(__NR_setitimer, sys_setitimer), // 104
//..=20
//.. GENXY(__NR_getitimer, sys_getitimer), // 105
@@ -1614,7 +1614,7 @@
GENXY(__NR_wait4, sys_wait4), // 114
//..=20
//.. // (__NR_swapoff, sys_swapoff), // 115 */Linux
-//.. LINXY(__NR_sysinfo, sys_sysinfo), // 116
+ LINXY(__NR_sysinfo, sys_sysinfo), // 116
PLAXY(__NR_ipc, sys_ipc), // 117
GENX_(__NR_fsync, sys_fsync), // 118
PLAX_(__NR_sigreturn, sys_sigreturn), // 119 ?/Linux
|
|
From: <sv...@va...> - 2006-08-28 12:11:29
|
Author: sewardj
Date: 2006-08-28 13:11:25 +0100 (Mon, 28 Aug 2006)
New Revision: 6019
Log:
Merge r5988 (fix for: JJ: ppc32/ppc64 syscalls, w/ patch)
Modified:
branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc32-linux.c
branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc64-linux.c
branches/VALGRIND_3_2_BRANCH/coregrind/vki_unistd-ppc32-linux.h
branches/VALGRIND_3_2_BRANCH/coregrind/vki_unistd-ppc64-linux.h
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc32-=
linux.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc32-linux.=
c 2006-08-28 12:08:10 UTC (rev 6018)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc32-linux.=
c 2006-08-28 12:11:25 UTC (rev 6019)
@@ -386,6 +386,7 @@
DECL_TEMPLATE(ppc32_linux, sys_mmap2);
DECL_TEMPLATE(ppc32_linux, sys_stat64);
DECL_TEMPLATE(ppc32_linux, sys_lstat64);
+DECL_TEMPLATE(ppc32_linux, sys_fstatat64);
DECL_TEMPLATE(ppc32_linux, sys_fstat64);
DECL_TEMPLATE(ppc32_linux, sys_ipc);
DECL_TEMPLATE(ppc32_linux, sys_clone);
@@ -719,6 +720,20 @@
}
}
=20
+PRE(sys_fstatat64)
+{
+ PRINT("sys_fstatat64 ( %d, %p(%s), %p )",ARG1,ARG2,ARG2,ARG3);
+ PRE_REG_READ3(long, "fstatat64",
+ int, dfd, char *, file_name, struct stat64 *, buf);
+ PRE_MEM_RASCIIZ( "fstatat64(file_name)", ARG2 );
+ PRE_MEM_WRITE( "fstatat64(buf)", ARG3, sizeof(struct vki_stat64) );
+}
+
+POST(sys_fstatat64)
+{
+ POST_MEM_WRITE( ARG3, sizeof(struct vki_stat64) );
+}
+
PRE(sys_fstat64)
{
PRINT("sys_fstat64 ( %d, %p )",ARG1,ARG2);
@@ -1798,6 +1813,22 @@
LINX_(__NR_inotify_init, sys_inotify_init), // 275
LINX_(__NR_inotify_add_watch, sys_inotify_add_watch), // 276
LINX_(__NR_inotify_rm_watch, sys_inotify_rm_watch), // 277
+
+ LINXY(__NR_openat, sys_openat), // 286
+ LINX_(__NR_mkdirat, sys_mkdirat), // 287
+ LINX_(__NR_mknodat, sys_mknodat), // 288
+ LINX_(__NR_fchownat, sys_fchownat), // 289
+ LINX_(__NR_futimesat, sys_futimesat), // 290
+ PLAXY(__NR_fstatat64, sys_fstatat64), // 291
+ LINX_(__NR_unlinkat, sys_unlinkat), // 292
+ LINX_(__NR_renameat, sys_renameat), // 293
+ LINX_(__NR_linkat, sys_linkat), // 294
+ LINX_(__NR_symlinkat, sys_symlinkat), // 295
+ LINX_(__NR_readlinkat, sys_readlinkat), // 296
+ LINX_(__NR_fchmodat, sys_fchmodat), // 297
+ LINX_(__NR_faccessat, sys_faccessat), // 298
+ LINX_(__NR_set_robust_list, sys_set_robust_list), // 299
+ LINXY(__NR_get_robust_list, sys_get_robust_list), // 300
};
=20
const UInt ML_(syscall_table_size) =3D=20
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc64-=
linux.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc64-linux.=
c 2006-08-28 12:08:10 UTC (rev 6018)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/m_syswrap/syswrap-ppc64-linux.=
c 2006-08-28 12:11:25 UTC (rev 6019)
@@ -1174,7 +1174,7 @@
const SyscallTableEntry ML_(syscall_table)[] =3D {
// _____(__NR_restart_syscall, sys_restart_syscall), // 0
GENX_(__NR_exit, sys_exit), // 1
-// _____(__NR_fork, sys_fork), // 2
+ GENX_(__NR_fork, sys_fork), // 2
GENXY(__NR_read, sys_read), // 3
GENX_(__NR_write, sys_write), // 4
=20
@@ -1182,25 +1182,25 @@
GENXY(__NR_close, sys_close), // 6
GENXY(__NR_waitpid, sys_waitpid), // 7
GENXY(__NR_creat, sys_creat), // 8
-// _____(__NR_link, sys_link), // 9
+ GENX_(__NR_link, sys_link), // 9
=20
GENX_(__NR_unlink, sys_unlink), // 10
GENX_(__NR_execve, sys_execve), // 11
GENX_(__NR_chdir, sys_chdir), // 12
GENXY(__NR_time, sys_time), // 13
-// _____(__NR_mknod, sys_mknod), // 14
+ GENX_(__NR_mknod, sys_mknod), // 14
=20
GENX_(__NR_chmod, sys_chmod), // 15
-// _____(__NR_lchown, sys_lchown), // 16
+ GENX_(__NR_lchown, sys_lchown), // 16
// _____(__NR_break, sys_break), // 17
// _____(__NR_oldstat, sys_oldstat), // 18
LINX_(__NR_lseek, sys_lseek), // 19
=20
GENX_(__NR_getpid, sys_getpid), // 20
-// _____(__NR_mount, sys_mount), // 21
+ LINX_(__NR_mount, sys_mount), // 21
// _____(__NR_umount, sys_umount), // 22
-// _____(__NR_setuid, sys_setuid), // 23
-// _____(__NR_getuid, sys_getuid), // 24
+ GENX_(__NR_setuid, sys_setuid), // 23
+ GENX_(__NR_getuid, sys_getuid), // 24
=20
// _____(__NR_stime, sys_stime), // 25
// _____(__NR_ptrace, sys_ptrace), // 26
@@ -1217,47 +1217,47 @@
// _____(__NR_ftime, sys_ftime), // 35
// _____(__NR_sync, sys_sync), // 36
GENX_(__NR_kill, sys_kill), // 37
-// _____(__NR_rename, sys_rename), // 38
+ GENX_(__NR_rename, sys_rename), // 38
GENX_(__NR_mkdir, sys_mkdir), // 39
=20
// _____(__NR_rmdir, sys_rmdir), // 40
GENXY(__NR_dup, sys_dup), // 41
LINXY(__NR_pipe, sys_pipe), // 42
-// _____(__NR_times, sys_times), // 43
+ GENXY(__NR_times, sys_times), // 43
// _____(__NR_prof, sys_prof), // 44
=20
GENX_(__NR_brk, sys_brk), // 45
-// _____(__NR_setgid, sys_setgid), // 46
-// _____(__NR_getgid, sys_getgid), // 47
+ GENX_(__NR_setgid, sys_setgid), // 46
+ GENX_(__NR_getgid, sys_getgid), // 47
// _____(__NR_signal, sys_signal), // 48
GENX_(__NR_geteuid, sys_geteuid), // 49
=20
-// _____(__NR_getegid, sys_getegid), // 50
+ GENX_(__NR_getegid, sys_getegid), // 50
// _____(__NR_acct, sys_acct), // 51
-// _____(__NR_umount2, sys_umount2), // 52
+ LINX_(__NR_umount2, sys_umount), // 52
// _____(__NR_lock, sys_lock), // 53
GENXY(__NR_ioctl, sys_ioctl), // 54
=20
GENXY(__NR_fcntl, sys_fcntl), // 55
// _____(__NR_mpx, sys_mpx), // 56
-// _____(__NR_setpgid, sys_setpgid), // 57
+ GENX_(__NR_setpgid, sys_setpgid), // 57
// _____(__NR_ulimit, sys_ulimit), // 58
// _____(__NR_oldolduname, sys_oldolduname), // 59
=20
-// _____(__NR_umask, sys_umask), // 60
-// _____(__NR_chroot, sys_chroot), // 61
+ GENX_(__NR_umask, sys_umask), // 60
+ GENX_(__NR_chroot, sys_chroot), // 61
// _____(__NR_ustat, sys_ustat), // 62
GENXY(__NR_dup2, sys_dup2), // 63
GENX_(__NR_getppid, sys_getppid), // 64
=20
-// _____(__NR_getpgrp, sys_getpgrp), // 65
-// _____(__NR_setsid, sys_setsid), // 66
+ GENX_(__NR_getpgrp, sys_getpgrp), // 65
+ GENX_(__NR_setsid, sys_setsid), // 66
// _____(__NR_sigaction, sys_sigaction), // 67
// _____(__NR_sgetmask, sys_sgetmask), // 68
// _____(__NR_ssetmask, sys_ssetmask), // 69
=20
-// _____(__NR_setreuid, sys_setreuid), // 70
-// _____(__NR_setregid, sys_setregid), // 71
+ GENX_(__NR_setreuid, sys_setreuid), // 70
+ GENX_(__NR_setregid, sys_setregid), // 71
// _____(__NR_sigsuspend, sys_sigsuspend), // 72
// _____(__NR_sigpending, sys_sigpending), // 73
// _____(__NR_sethostname, sys_sethostname), // 74
@@ -1268,10 +1268,10 @@
GENXY(__NR_gettimeofday, sys_gettimeofday), // 78
// _____(__NR_settimeofday, sys_settimeofday), // 79
=20
-// _____(__NR_getgroups, sys_getgroups), // 80
-// _____(__NR_setgroups, sys_setgroups), // 81
+ GENXY(__NR_getgroups, sys_getgroups), // 80
+ GENX_(__NR_setgroups, sys_setgroups), // 81
// _____(__NR_select, sys_select), // 82
-// _____(__NR_symlink, sys_symlink), // 83
+ GENX_(__NR_symlink, sys_symlink), // 83
// _____(__NR_oldlstat, sys_oldlstat), // 84
=20
GENX_(__NR_readlink, sys_readlink), // 85
@@ -1282,11 +1282,11 @@
=20
PLAX_(__NR_mmap, sys_mmap), // 90
GENXY(__NR_munmap, sys_munmap), // 91
-// _____(__NR_truncate, sys_truncate), // 92
+ GENX_(__NR_truncate, sys_truncate), // 92
GENX_(__NR_ftruncate, sys_ftruncate), // 93
-// _____(__NR_fchmod, sys_fchmod), // 94
-
-// _____(__NR_fchown, sys_fchown), // 95
+ GENX_(__NR_fchmod, sys_fchmod), // 94
+ =20
+ GENX_(__NR_fchown, sys_fchown), // 95
// _____(__NR_getpriority, sys_getpriority), // 96
// _____(__NR_setpriority, sys_setpriority), // 97
// _____(__NR_profil, sys_profil), // 98
@@ -1296,11 +1296,11 @@
// _____(__NR_ioperm, sys_ioperm), // 101
PLAXY(__NR_socketcall, sys_socketcall), // 102
// _____(__NR_syslog, sys_syslog), // 103
-// _____(__NR_setitimer, sys_setitimer), // 104
+ GENXY(__NR_setitimer, sys_setitimer), // 104
=20
// _____(__NR_getitimer, sys_getitimer), // 105
GENXY(__NR_stat, sys_newstat), // 106
-// _____(__NR_lstat, sys_lstat), // 107
+ GENXY(__NR_lstat, sys_newlstat), // 107
GENXY(__NR_fstat, sys_newfstat), // 108
// _____(__NR_olduname, sys_olduname), // 109
=20
@@ -1313,7 +1313,7 @@
// _____(__NR_swapoff, sys_swapoff), // 115
// _____(__NR_sysinfo, sys_sysinfo), // 116
PLAXY(__NR_ipc, sys_ipc), // 117
-// _____(__NR_fsync, sys_fsync), // 118
+ GENX_(__NR_fsync, sys_fsync), // 118
// _____(__NR_sigreturn, sys_sigreturn), // 119
=20
PLAX_(__NR_clone, sys_clone), // 120
@@ -1330,41 +1330,41 @@
=20
// _____(__NR_get_kernel_syms, sys_get_kernel_syms), // 130
// _____(__NR_quotactl, sys_quotactl), // 131
-// _____(__NR_getpgid, sys_getpgid), // 132
-// _____(__NR_fchdir, sys_fchdir), // 133
+ GENX_(__NR_getpgid, sys_getpgid), // 132
+ GENX_(__NR_fchdir, sys_fchdir), // 133
// _____(__NR_bdflush, sys_bdflush), // 134
=20
// _____(__NR_sysfs, sys_sysfs), // 135
-// _____(__NR_personality, sys_personality), // 136
+ LINX_(__NR_personality, sys_personality), // 136
// _____(__NR_afs_syscall, sys_afs_syscall), // 137
-// _____(__NR_setfsuid, sys_setfsuid), // 138
-// _____(__NR_setfsgid, sys_setfsgid), // 139
+ LINX_(__NR_setfsuid, sys_setfsuid), // 138
+ LINX_(__NR_setfsgid, sys_setfsgid), // 139
=20
LINXY(__NR__llseek, sys_llseek), // 140
GENXY(__NR_getdents, sys_getdents), // 141
-// _____(__NR__newselect, sys__newselect), // 142
-// _____(__NR_flock, sys_flock), // 143
-// _____(__NR_msync, sys_msync), // 144
+ GENX_(__NR__newselect, sys_select), // 142
+ GENX_(__NR_flock, sys_flock), // 143
+ GENX_(__NR_msync, sys_msync), // 144
=20
GENXY(__NR_readv, sys_readv), // 145
GENX_(__NR_writev, sys_writev), // 146
// _____(__NR_getsid, sys_getsid), // 147
-// _____(__NR_fdatasync, sys_fdatasync), // 148
+ GENX_(__NR_fdatasync, sys_fdatasync), // 148
LINXY(__NR__sysctl, sys_sysctl), // 149
=20
-// _____(__NR_mlock, sys_mlock), // 150
-// _____(__NR_munlock, sys_munlock), // 151
-// _____(__NR_mlockall, sys_mlockall), // 152
-// _____(__NR_munlockall, sys_munlockall), // 153
+ GENX_(__NR_mlock, sys_mlock), // 150
+ GENX_(__NR_munlock, sys_munlock), // 151
+ GENX_(__NR_mlockall, sys_mlockall), // 152
+ LINX_(__NR_munlockall, sys_munlockall), // 153
// _____(__NR_sched_setparam, sys_sched_setparam), // 154
=20
-// _____(__NR_sched_getparam, sys_sched_getparam), // 155
-// _____(__NR_sched_setscheduler, sys_sched_setscheduler), // 156
-// _____(__NR_sched_getscheduler, sys_sched_getscheduler), // 157
-// _____(__NR_sched_yield, sys_sched_yield), // 158
-// _____(__NR_sched_get_priority_max, sys_sched_get_priority_max), // 159
+ LINXY(__NR_sched_getparam, sys_sched_getparam), // 155
+ LINX_(__NR_sched_setscheduler, sys_sched_setscheduler), // 156
+ LINX_(__NR_sched_getscheduler, sys_sched_getscheduler), // 157
+ LINX_(__NR_sched_yield, sys_sched_yield), // 158
+ LINX_(__NR_sched_get_priority_max, sys_sched_get_priority_max),// 159
=20
-// _____(__NR_sched_get_priority_min, sys_sched_get_priority_min), // 160
+ LINX_(__NR_sched_get_priority_min, sys_sched_get_priority_min),// 160
// _____(__NR_sched_rr_get_interval, sys_sched_rr_get_interval), // 161
GENXY(__NR_nanosleep, sys_nanosleep), // 162
GENX_(__NR_mremap, sys_mremap), // 163
@@ -1395,7 +1395,7 @@
// _____(__NR_capset, sys_capset), // 184
=20
GENXY(__NR_sigaltstack, sys_sigaltstack), // 185
-// _____(__NR_sendfile, sys_sendfile), // 186
+ LINXY(__NR_sendfile, sys_sendfile), // 186
// _____(__NR_getpmsg, sys_getpmsg), // 187
// _____(__NR_putpmsg, sys_putpmsg), // 188
GENX_(__NR_vfork, sys_fork), // 189 treat a=
s fork
@@ -1426,50 +1426,48 @@
=20
// _____(__NR_lsetxattr, sys_lsetxattr), // 210
// _____(__NR_fsetxattr, sys_fsetxattr), // 211
-// _____(__NR_getxattr, sys_getxattr), // 212
-// _____(__NR_lgetxattr, sys_lgetxattr), // 213
-// _____(__NR_fgetxattr, sys_fgetxattr), // 214
+ LINXY(__NR_getxattr, sys_getxattr), // 212
+ LINXY(__NR_lgetxattr, sys_lgetxattr), // 213
+ LINXY(__NR_fgetxattr, sys_fgetxattr), // 214
+ LINXY(__NR_listxattr, sys_listxattr), // 215
+ LINXY(__NR_llistxattr, sys_llistxattr), // 216
+ LINXY(__NR_flistxattr, sys_flistxattr), // 217
+ LINX_(__NR_removexattr, sys_removexattr), // 218
+ LINX_(__NR_lremovexattr, sys_lremovexattr), // 219
+ LINX_(__NR_fremovexattr, sys_fremovexattr), // 220
=20
-// _____(__NR_listxattr, sys_listxattr), // 215
-// _____(__NR_llistxattr, sys_llistxattr), // 216
-// _____(__NR_flistxattr, sys_flistxattr), // 217
-// _____(__NR_removexattr, sys_removexattr), // 218
-// _____(__NR_lremovexattr, sys_lremovexattr), // 219
-
-// _____(__NR_fremovexattr, sys_fremovexattr), // 220
LINXY(__NR_futex, sys_futex), // 221
-// _____(__NR_sched_setaffinity, sys_sched_setaffinity), // 222
-// _____(__NR_sched_getaffinity, sys_sched_getaffinity), // 223
+ LINX_(__NR_sched_setaffinity, sys_sched_setaffinity), // 222
+ LINXY(__NR_sched_getaffinity, sys_sched_getaffinity), // 223
// /* 224 currently unused */
=20
// _____(__NR_tuxcall, sys_tuxcall), // 225
// /* #define __NR_sendfile64 226 32bit only */
-// _____(__NR_io_setup, sys_io_setup), // 227
-// _____(__NR_io_destroy, sys_io_destroy), // 228
-// _____(__NR_io_getevents, sys_io_getevents), // 229
-
-// _____(__NR_io_submit, sys_io_submit), // 230
-// _____(__NR_io_cancel, sys_io_cancel), // 231
+ LINX_(__NR_io_setup, sys_io_setup), // 227
+ LINX_(__NR_io_destroy, sys_io_destroy), // 228
+ LINXY(__NR_io_getevents, sys_io_getevents), // 229
+ LINX_(__NR_io_submit, sys_io_submit), // 230
+ LINXY(__NR_io_cancel, sys_io_cancel), // 231
LINX_(__NR_set_tid_address, sys_set_tid_address), // 232
// _____(__NR_fadvise64, sys_fadvise64), // 233
LINX_(__NR_exit_group, sys_exit_group), // 234
=20
// _____(__NR_lookup_dcookie, sys_lookup_dcookie), // 235
-// _____(__NR_epoll_create, sys_epoll_create), // 236
-// _____(__NR_epoll_ctl, sys_epoll_ctl), // 237
-// _____(__NR_epoll_wait, sys_epoll_wait), // 238
+ LINXY(__NR_epoll_create, sys_epoll_create), // 236
+ LINX_(__NR_epoll_ctl, sys_epoll_ctl), // 237
+ LINXY(__NR_epoll_wait, sys_epoll_wait), // 238
// _____(__NR_remap_file_pages, sys_remap_file_pages), // 239
=20
-// _____(__NR_timer_create, sys_timer_create), // 240
-// _____(__NR_timer_settime, sys_timer_settime), // 241
-// _____(__NR_timer_gettime, sys_timer_gettime), // 242
-// _____(__NR_timer_getoverrun, sys_timer_getoverrun), // 243
-// _____(__NR_timer_delete, sys_timer_delete), // 244
+ LINXY(__NR_timer_create, sys_timer_create), // 240
+ LINXY(__NR_timer_settime, sys_timer_settime), // 241
+ LINXY(__NR_timer_gettime, sys_timer_gettime), // 242
+ LINX_(__NR_timer_getoverrun, sys_timer_getoverrun), // 243
+ LINX_(__NR_timer_delete, sys_timer_delete), // 244
+ LINX_(__NR_clock_settime, sys_clock_settime), // 245
+ LINXY(__NR_clock_gettime, sys_clock_gettime), // 246
+ LINXY(__NR_clock_getres, sys_clock_getres), // 247
+ LINXY(__NR_clock_nanosleep, sys_clock_nanosleep), // 248
=20
-// _____(__NR_clock_settime, sys_clock_settime), // 245
-// _____(__NR_clock_gettime, sys_clock_gettime), // 246
-// _____(__NR_clock_getres, sys_clock_getres), // 247
-// _____(__NR_clock_nanosleep, sys_clock_nanosleep), // 248
// _____(__NR_swapcontext, sys_swapcontext), // 249
=20
LINXY(__NR_tgkill, sys_tgkill), // 250
@@ -1502,9 +1500,26 @@
// _____(__NR_ioprio_set, sys_ioprio_set), // 273
// _____(__NR_ioprio_get, sys_ioprio_get), // 274
=20
-// _____(__NR_inotify_init, sys_inotify_init), // 275
-// _____(__NR_inotify_add_watch, sys_inotify_add_watch), // 276
-// _____(__NR_inotify_rm_watch, sys_inotify_rm_watch) // 277
+ LINX_(__NR_inotify_init, sys_inotify_init), // 275
+ LINX_(__NR_inotify_add_watch, sys_inotify_add_watch), // 276
+ LINX_(__NR_inotify_rm_watch, sys_inotify_rm_watch), // 277
+
+ LINXY(__NR_openat, sys_openat), // 286
+ LINX_(__NR_mkdirat, sys_mkdirat), // 287
+ LINX_(__NR_mknodat, sys_mknodat), // 288
+ LINX_(__NR_fchownat, sys_fchownat), // 289
+ LINX_(__NR_futimesat, sys_futimesat), // 290
+ LINXY(__NR_newfstatat, sys_newfstatat), // 291
+ LINX_(__NR_unlinkat, sys_unlinkat), // 292
+ LINX_(__NR_renameat, sys_renameat), // 293
+ LINX_(__NR_linkat, sys_linkat), // 294
+ LINX_(__NR_symlinkat, sys_symlinkat), // 295
+ LINX_(__NR_readlinkat, sys_readlinkat), // 296
+ LINX_(__NR_fchmodat, sys_fchmodat), // 297
+ LINX_(__NR_faccessat, sys_faccessat), // 298
+ LINX_(__NR_set_robust_list, sys_set_robust_list), // 299
+ LINXY(__NR_get_robust_list, sys_get_robust_list), // 300
+
};
=20
const UInt ML_(syscall_table_size) =3D=20
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/vki_unistd-ppc32-linux.h
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/vki_unistd-ppc32-linux.h 2006-=
08-28 12:08:10 UTC (rev 6018)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/vki_unistd-ppc32-linux.h 2006-=
08-28 12:11:25 UTC (rev 6019)
@@ -335,7 +335,9 @@
#define __NR_readlinkat 296
#define __NR_fchmodat 297
#define __NR_faccessat 298
+#define __NR_get_robust_list 299
+#define __NR_set_robust_list 300
=20
-#define __NR_syscalls 299
+#define __NR_syscalls 301
=20
#endif /* __VKI_UNISTD_PPC32_LINUX_H */
Modified: branches/VALGRIND_3_2_BRANCH/coregrind/vki_unistd-ppc64-linux.h
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/VALGRIND_3_2_BRANCH/coregrind/vki_unistd-ppc64-linux.h 2006-=
08-28 12:08:10 UTC (rev 6018)
+++ branches/VALGRIND_3_2_BRANCH/coregrind/vki_unistd-ppc64-linux.h 2006-=
08-28 12:11:25 UTC (rev 6019)
@@ -328,6 +328,7 @@
#define __NR_readlinkat 296
#define __NR_fchmodat 297
#define __NR_faccessat 298
+#define __NR_get_robust_list 299
+#define __NR_set_robust_list 300
=20
-
#endif /* __VKI_UNISTD_PPC64_LINUX_H */
|