You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
|
|
|
1
(36) |
2
(30) |
|
3
(17) |
4
(21) |
5
(18) |
6
(14) |
7
(23) |
8
(12) |
9
(11) |
|
10
(11) |
11
(12) |
12
(11) |
13
(12) |
14
(11) |
15
(11) |
16
(15) |
|
17
(12) |
18
(15) |
19
(15) |
20
(25) |
21
(26) |
22
(21) |
23
(18) |
|
24
(25) |
25
(28) |
26
(27) |
27
(32) |
28
(13) |
29
(12) |
30
(10) |
|
From: <sv...@va...> - 2005-04-20 22:57:14
|
Author: sewardj
Date: 2005-04-20 23:57:11 +0100 (Wed, 20 Apr 2005)
New Revision: 1132
Modified:
trunk/priv/host-amd64/isel.c
Log:
Fix some more insn selection cases required by Memcheck.
Modified: trunk/priv/host-amd64/isel.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/priv/host-amd64/isel.c 2005-04-19 23:06:11 UTC (rev 1131)
+++ trunk/priv/host-amd64/isel.c 2005-04-20 22:57:11 UTC (rev 1132)
@@ -87,12 +87,12 @@
//.. {
//.. return IRExpr_Const(IRConst_U64(i));
//.. }
-//..=20
-//.. static IRExpr* mkU32 ( UInt i )
-//.. {
-//.. return IRExpr_Const(IRConst_U32(i));
-//.. }
=20
+static IRExpr* mkU32 ( UInt i )
+{
+ return IRExpr_Const(IRConst_U32(i));
+}
+
static IRExpr* bind ( Int binder )
{
return IRExpr_Binder(binder);
@@ -1284,15 +1284,16 @@
}
//.. case Iop_1Sto8:
//.. case Iop_1Sto16:
-//.. case Iop_1Sto32: {
-//.. /* could do better than this, but for now ... */
-//.. HReg dst =3D newVRegI(env);
-//.. X86CondCode cond =3D iselCondCode(env, e->Iex.Unop.arg)=
;
-//.. addInstr(env, X86Instr_Set32(cond,dst));
-//.. addInstr(env, X86Instr_Sh32(Xsh_SHL, 31, X86RM_Reg(dst)=
));
-//.. addInstr(env, X86Instr_Sh32(Xsh_SAR, 31, X86RM_Reg(dst)=
));
-//.. return dst;
-//.. }
+//.. case Iop_1Sto32:
+ case Iop_1Sto64: {
+ /* could do better than this, but for now ... */
+ HReg dst =3D newVRegI(env);
+ AMD64CondCode cond =3D iselCondCode(env, e->Iex.Unop.arg);
+ addInstr(env, AMD64Instr_Set64(cond,dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 63, AMD64RM_Reg(dst))=
);
+ addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, AMD64RM_Reg(dst))=
);
+ return dst;
+ }
case Iop_Ctz64: {
/* Count trailing zeroes, implemented by amd64 'bsfq' */
HReg dst =3D newVRegI(env);
@@ -1798,23 +1799,23 @@
return Acc_NZ;
}
=20
-//.. /* CmpEQ8 / CmpNE8 */
-//.. if (e->tag =3D=3D Iex_Binop=20
-//.. && (e->Iex.Binop.op =3D=3D Iop_CmpEQ8
-//.. || e->Iex.Binop.op =3D=3D Iop_CmpNE8)) {
-//.. HReg r1 =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
-//.. X86RMI* rmi2 =3D iselIntExpr_RMI(env, e->Iex.Binop.arg2);
-//.. HReg r =3D newVRegI(env);
-//.. addInstr(env, mk_iMOVsd_RR(r1,r));
-//.. addInstr(env, X86Instr_Alu32R(Xalu_XOR,rmi2,r));
-//.. addInstr(env, X86Instr_Alu32R(Xalu_AND,X86RMI_Imm(0xFF),r));
-//.. switch (e->Iex.Binop.op) {
-//.. case Iop_CmpEQ8: return Xcc_Z;
-//.. case Iop_CmpNE8: return Xcc_NZ;
-//.. default: vpanic("iselCondCode(x86): CmpXX8");
-//.. }
-//.. }
-//..=20
+ /* CmpEQ8 / CmpNE8 */
+ if (e->tag =3D=3D Iex_Binop=20
+ && (e->Iex.Binop.op =3D=3D Iop_CmpEQ8
+ || e->Iex.Binop.op =3D=3D Iop_CmpNE8)) {
+ HReg r1 =3D iselIntExpr_R(env, e->Iex.Binop.arg1);
+ AMD64RMI* rmi2 =3D iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+ HReg r =3D newVRegI(env);
+ addInstr(env, mk_iMOVsd_RR(r1,r));
+ addInstr(env, AMD64Instr_Alu64R(Aalu_XOR,rmi2,r));
+ addInstr(env, AMD64Instr_Alu64R(Aalu_AND,AMD64RMI_Imm(0xFF),r));
+ switch (e->Iex.Binop.op) {
+ case Iop_CmpEQ8: return Acc_Z;
+ case Iop_CmpNE8: return Acc_NZ;
+ default: vpanic("iselCondCode(amd64): CmpXX8");
+ }
+ }
+
//.. /* CmpEQ16 / CmpNE16 */
//.. if (e->tag =3D=3D Iex_Binop=20
//.. && (e->Iex.Binop.op =3D=3D Iop_CmpEQ16
@@ -1831,18 +1832,18 @@
//.. default: vpanic("iselCondCode(x86): CmpXX16");
//.. }
//.. }
-//..=20
-//.. /* CmpNE32(1Sto32(b), 0) =3D=3D> b */
-//.. {
-//.. DECLARE_PATTERN(p_CmpNE32_1Sto32);
-//.. DEFINE_PATTERN(
-//.. p_CmpNE32_1Sto32,
-//.. binop(Iop_CmpNE32, unop(Iop_1Sto32,bind(0)), mkU32(0)));
-//.. if (matchIRExpr(&mi, p_CmpNE32_1Sto32, e)) {
-//.. return iselCondCode(env, mi.bindee[0]);
-//.. }
-//.. }
=20
+ /* CmpNE32(1Sto32(b), 0) =3D=3D> b */
+ {
+ DECLARE_PATTERN(p_CmpNE32_1Sto32);
+ DEFINE_PATTERN(
+ p_CmpNE32_1Sto32,
+ binop(Iop_CmpNE32, unop(Iop_1Sto32,bind(0)), mkU32(0)));
+ if (matchIRExpr(&mi, p_CmpNE32_1Sto32, e)) {
+ return iselCondCode(env, mi.bindee[0]);
+ }
+ }
+
/* Cmp*64*(x,y) */
if (e->tag =3D=3D Iex_Binop=20
&& (e->Iex.Binop.op =3D=3D Iop_CmpEQ64
@@ -3612,7 +3613,7 @@
return;
=20
retty =3D typeOfIRTemp(env->type_env, d->tmp);
- if (retty =3D=3D Ity_I64) {
+ if (retty =3D=3D Ity_I64 || retty =3D=3D Ity_I32) {
/* The returned value is in %rax. Park it in the register
associated with tmp. */
HReg dst =3D lookupIRTemp(env, d->tmp);
|
|
From: <sv...@va...> - 2005-04-20 22:31:31
|
Author: sewardj
Date: 2005-04-20 23:31:26 +0100 (Wed, 20 Apr 2005)
New Revision: 3536
Modified:
trunk/memcheck/mc_include.h
trunk/memcheck/mc_main.c
trunk/memcheck/mc_translate.c
Log:
Fix a bunch of 64-bit cases required by amd64. Stop to ponder whether
there is a better way to handle the 'pessimising cast' family of
operations in such a way that Vex's back-end instruction selectors can
generate better code than they do now, with less verbosity and general
confusingness in the insn selectors.
Modified: trunk/memcheck/mc_include.h
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/memcheck/mc_include.h 2005-04-20 14:44:11 UTC (rev 3535)
+++ trunk/memcheck/mc_include.h 2005-04-20 22:31:26 UTC (rev 3536)
@@ -61,13 +61,13 @@
extern void MC_(helperc_value_check0_fail) ( void );
=20
extern VGA_REGPARM(1) void MC_(helperc_STOREV8) ( Addr, ULong );
-extern VGA_REGPARM(2) void MC_(helperc_STOREV4) ( Addr, UInt );
-extern VGA_REGPARM(2) void MC_(helperc_STOREV2) ( Addr, UInt );
-extern VGA_REGPARM(2) void MC_(helperc_STOREV1) ( Addr, UInt );
+extern VGA_REGPARM(2) void MC_(helperc_STOREV4) ( Addr, UWord );
+extern VGA_REGPARM(2) void MC_(helperc_STOREV2) ( Addr, UWord );
+extern VGA_REGPARM(2) void MC_(helperc_STOREV1) ( Addr, UWord );
=20
-extern VGA_REGPARM(1) UInt MC_(helperc_LOADV1) ( Addr );
-extern VGA_REGPARM(1) UInt MC_(helperc_LOADV2) ( Addr );
-extern VGA_REGPARM(1) UInt MC_(helperc_LOADV4) ( Addr );
+extern VGA_REGPARM(1) UWord MC_(helperc_LOADV1) ( Addr );
+extern VGA_REGPARM(1) UWord MC_(helperc_LOADV2) ( Addr );
+extern VGA_REGPARM(1) UWord MC_(helperc_LOADV4) ( Addr );
extern VGA_REGPARM(1) ULong MC_(helperc_LOADV8) ( Addr );
=20
/* Functions defined in mc_errcontext.c */
Modified: trunk/memcheck/mc_main.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/memcheck/mc_main.c 2005-04-20 14:44:11 UTC (rev 3535)
+++ trunk/memcheck/mc_main.c 2005-04-20 22:31:26 UTC (rev 3536)
@@ -1193,8 +1193,8 @@
static void mc_post_reg_write ( CorePart part, ThreadId tid,=20
OffT offset, SizeT size)
{
- UChar area[512];
- tl_assert(size <=3D 512);
+ UChar area[1024];
+ tl_assert(size <=3D 1024);
VG_(memset)(area, VGM_BYTE_VALID, size);
VG_(set_shadow_regs_area)( tid, offset, size, area );
}
@@ -1235,10 +1235,21 @@
}
=20
=20
-//zz /*------------------------------------------------------------*/
-//zz /*--- Functions called directly from generated code. ---*/
-//zz /*------------------------------------------------------------*/
-//zz=20
+/*------------------------------------------------------------*/
+/*--- Functions called directly from generated code. ---*/
+/*------------------------------------------------------------*/
+
+/* Types: LOADV4, LOADV2, LOADV1 are:
+ UWord fn ( Addr a )
+ so they return 32-bits on 32-bit machines and 64-bits on
+ 64-bit machines. Addr has the same size as a host word.
+
+ LOADV8 is always ULong fn ( Addr a )
+
+ Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
+ are a UWord, and for STOREV8 they are a ULong.
+*/
+
//zz static __inline__ UInt rotateRight16 ( UInt x )
//zz {
//zz /* Amazingly, gcc turns this into a single rotate insn. */
@@ -1338,9 +1349,9 @@
/* ------------------------ Size =3D 4 ------------------------ */
=20
VGA_REGPARM(1)
-UInt MC_(helperc_LOADV4) ( Addr a )
+UWord MC_(helperc_LOADV4) ( Addr a )
{
- return (UInt)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
+ return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
//zz # ifdef VG_DEBUG_MEMORY
//zz return mc_rd_V4_SLOWLY(a);
//zz # else
@@ -1364,9 +1375,9 @@
}
=20
VGA_REGPARM(2)
-void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
+void MC_(helperc_STOREV4) ( Addr a, UWord vbytes )
{
- mc_STOREVn_slow( a, 4, vbytes, False/*littleendian*/ );
+ mc_STOREVn_slow( a, 4, (ULong)vbytes, False/*littleendian*/ );
//zz # ifdef VG_DEBUG_MEMORY
//zz mc_wr_V4_SLOWLY(a, vbytes);
//zz # else
@@ -1392,9 +1403,9 @@
/* ------------------------ Size =3D 2 ------------------------ */
=20
VGA_REGPARM(1)
-UInt MC_(helperc_LOADV2) ( Addr a )
+UWord MC_(helperc_LOADV2) ( Addr a )
{
- return (UInt)mc_LOADVn_slow( a, 2, False/*littleendian*/ );
+ return (UWord)mc_LOADVn_slow( a, 2, False/*littleendian*/ );
//zz # ifdef VG_DEBUG_MEMORY
//zz return mc_rd_V2_SLOWLY(a);
//zz # else
@@ -1416,9 +1427,9 @@
}
=20
VGA_REGPARM(2)
-void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
+void MC_(helperc_STOREV2) ( Addr a, UWord vbytes )
{
- mc_STOREVn_slow( a, 2, vbytes, False/*littleendian*/ );
+ mc_STOREVn_slow( a, 2, (ULong)vbytes, False/*littleendian*/ );
//zz # ifdef VG_DEBUG_MEMORY
//zz mc_wr_V2_SLOWLY(a, vbytes);
//zz # else
@@ -1440,9 +1451,9 @@
/* ------------------------ Size =3D 1 ------------------------ */
=20
VGA_REGPARM(1)
-UInt MC_(helperc_LOADV1) ( Addr a )
+UWord MC_(helperc_LOADV1) ( Addr a )
{
- return (UInt)mc_LOADVn_slow( a, 1, False/*littleendian*/ );
+ return (UWord)mc_LOADVn_slow( a, 1, False/*littleendian*/ );
//zz # ifdef VG_DEBUG_MEMORY
//zz return mc_rd_V1_SLOWLY(a);
//zz # else
@@ -1464,9 +1475,9 @@
}
=20
VGA_REGPARM(2)
-void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
+void MC_(helperc_STOREV1) ( Addr a, UWord vbytes )
{
- mc_STOREVn_slow( a, 1, vbytes, False/*littleendian*/ );
+ mc_STOREVn_slow( a, 1, (ULong)vbytes, False/*littleendian*/ );
//zz # ifdef VG_DEBUG_MEMORY
//zz mc_wr_V1_SLOWLY(a, vbytes);
//zz # else
Modified: trunk/memcheck/mc_translate.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/memcheck/mc_translate.c 2005-04-20 14:44:11 UTC (rev 3535)
+++ trunk/memcheck/mc_translate.c 2005-04-20 22:31:26 UTC (rev 3536)
@@ -192,7 +192,8 @@
case Ity_I8:
case Ity_I16:
case Ity_I32:=20
- case Ity_I64: return ty;
+ case Ity_I64:=20
+ case Ity_I128: return ty;
case Ity_F32: return Ity_I32;
case Ity_F64: return Ity_I64;
case Ity_V128: return Ity_V128;
@@ -1554,6 +1555,13 @@
case Iop_32HLto64:
return assignNew(mce, Ity_I64, binop(op, vatom1, vatom2));
=20
+ case Iop_MullS64:
+ case Iop_MullU64: {
+ IRAtom* vLo64 =3D mkLeft64(mce, mkUifU64(mce, vatom1,vatom2));
+ IRAtom* vHi64 =3D mkPCastTo(mce, Ity_I64, vLo64);
+ return assignNew(mce, Ity_I128, binop(Iop_64HLto128, vHi64, vLo=
64));
+ }
+
case Iop_MullS32:
case Iop_MullU32: {
IRAtom* vLo32 =3D mkLeft32(mce, mkUifU32(mce, vatom1,vatom2));
@@ -1750,6 +1758,8 @@
case Iop_32Uto64:
case Iop_V128to64:
case Iop_V128HIto64:
+ case Iop_128HIto64:
+ case Iop_128to64:
return assignNew(mce, Ity_I64, unop(op, vatom));
=20
case Iop_64to32:
@@ -1977,6 +1987,12 @@
case Ity_I8: return assignNew(mce, tyH, unop(Iop_8Uto32, vatom=
));
default: goto unhandled;
}
+ } else
+ if (tyH =3D=3D Ity_I64) {
+ switch (ty) {
+ case Ity_I32: return assignNew(mce, tyH, unop(Iop_32Uto64, vato=
m));
+ default: goto unhandled;
+ }
} else {
goto unhandled;
}
@@ -2399,6 +2415,14 @@
VG_(tool_panic)("host/guest word size mismatch");
}
=20
+ /* Check we're not completely nuts */
+ tl_assert(sizeof(UWord) =3D=3D sizeof(void*));
+ tl_assert(sizeof(Word) =3D=3D sizeof(void*));
+ tl_assert(sizeof(ULong) =3D=3D 8);
+ tl_assert(sizeof(Long) =3D=3D 8);
+ tl_assert(sizeof(UInt) =3D=3D 4);
+ tl_assert(sizeof(Int) =3D=3D 4);
+
/* Set up BB */
bb =3D emptyIRBB();
bb->tyenv =3D dopyIRTypeEnv(bb_in->tyenv);
|
|
From: Rex W. <wa...@gm...> - 2005-04-20 21:25:27
|
Hi So Dullard gives out addresses which have been read from, written to or modified. I want to map a particular address to its corresponding variable or symbol. Can I do that ? Could someone give me a hint as to how I would do that. I have read vg_symtab2.c and know that a symbol can be mapped to an address. But I think dullard works with UCode and UCode is independent of symbols. Am I right in thinking this ? I have been reading through the valgrind code for 2 days now and I have read the user manual as well. My main aim is to count how many times a particular variable is accessed (read from, written to, modified etc.). So I started with dullard, which seems to tell me partly what I want. I am looking for some guidance. Thanks. Rex. |
|
From: Rex W. <wa...@gm...> - 2005-04-20 20:49:10
|
Thanks.=20 I was able to execute the tool dullard compiled with Valgrind-2.4.0 fine after I removed the calls to VG_(register_compact_helper) in dl_main.c. -- Rex. On 4/20/05, Nicholas Nethercote <nj...@cs...> wrote: > On Wed, 20 Apr 2005, Rex Walburn wrote: >=20 > > I was trying to build the source code for the tool dullard and I get > > many errors related to stage2.lds which is apparently missing. So I > > just wanted to know if I can build the dullard tool with valgrind-2.4 > > . I do not really know the differences between versions 2.1.2 and > > 2.4.0 of Valgrind but I dont wanna end up breaking my head over > > something which is obvious to the developers of Valgrind. So if I > > build dullard (dl_main.c) with valgrind 2.4.0 will it break something > > else ? Does dl_main.c access some features of Valgrind which existed > > in 2.1.2 and are deprecated in 2.4.0 ? >=20 > I think the problem is just that the way the Makefiles are organised > changed from 2.1.2 to 2.4.0. Copy the Makefile.in from the "none" > directory, and change "SUBDIRS" to not include "docs" and "tests", and > then replace "none" with "dullard" and "nl_" with "dl_" and it should wor= k > out ok. You'll need to rerun configure.in. >=20 > (Or if you're working from the repository, make the corresponding changes > to the Makefile.am file, which is a bit easier.) >=20 > N > |
|
From: Nicholas N. <nj...@cs...> - 2005-04-20 19:54:33
|
On Wed, 20 Apr 2005, Rex Walburn wrote: > I was trying to build the source code for the tool dullard and I get > many errors related to stage2.lds which is apparently missing. So I > just wanted to know if I can build the dullard tool with valgrind-2.4 > . I do not really know the differences between versions 2.1.2 and > 2.4.0 of Valgrind but I dont wanna end up breaking my head over > something which is obvious to the developers of Valgrind. So if I > build dullard (dl_main.c) with valgrind 2.4.0 will it break something > else ? Does dl_main.c access some features of Valgrind which existed > in 2.1.2 and are deprecated in 2.4.0 ? I think the problem is just that the way the Makefiles are organised changed from 2.1.2 to 2.4.0. Copy the Makefile.in from the "none" directory, and change "SUBDIRS" to not include "docs" and "tests", and then replace "none" with "dullard" and "nl_" with "dl_" and it should work out ok. You'll need to rerun configure.in. (Or if you're working from the repository, make the corresponding changes to the Makefile.am file, which is a bit easier.) N |
|
From: Rex W. <wa...@gm...> - 2005-04-20 19:39:04
|
Hi This mail might sound stupid, but I have to ask the question. I was trying to build the source code for the tool dullard and I get many errors related to stage2.lds which is apparently missing. So I just wanted to know if I can build the dullard tool with valgrind-2.4 . I do not really know the differences between versions 2.1.2 and 2.4.0 of Valgrind but I dont wanna end up breaking my head over something which is obvious to the developers of Valgrind. So if I build dullard (dl_main.c) with valgrind 2.4.0 will it break something else ? Does dl_main.c access some features of Valgrind which existed in 2.1.2 and are deprecated in 2.4.0 ? Thanks. Rex |
|
From: Josef W. <Jos...@gm...> - 2005-04-20 16:26:46
|
On Wednesday 20 April 2005 16:02, Julian Seward wrote: > Sorry, no silver bullet. For memcheck/addrcheck/helgrind, Valgrind > needs a complete picture of the state of memory, and that can only > be achieved by running the program right from the start. > > For cachegrind, in principle it would be possible to start collecting > data part way through the run. That would give you some misleading > cache-cold-start effects, but it would work in general. However, we > have no mechanism to achieve that at present. In Callgrind, this is implemented: Start with "instrumentation mode" switched off: "callgrind --instr-atstart=no ..." and either switch real instrumentation on dynamically with "callgrind_control -i on", or via Macro CALLGRIND_START_INSTRUMENTATION, see /usr/include/valgrind/callgrind.h. This is very good to "fast forward" to the interesting place. Josef > > J > > On Wednesday 20 April 2005 14:49, Rex Walburn wrote: > > Hi All > > > > I wanted to know if there exists a macro such that, I can insert it in > > my source code and the valgrind tool (like memcheck) will check for > > memory errors or other cache misses only from that point onwards. For > > example, if I can insert a macro like VALGRIND_TOOL_START and > > VALGRIND_TOOL_END in 2 areas in my source code, then the tool like > > memcheck or cachegrind will do their checking and stuff only for that > > part of the code that lies between the two macros. Do these macros > > exist? If not, can I write them using the VALGRIND_MAGIC_SEQUENCE > > macro, or will I have to do something tricky ? > > > > Thanks. > > ------------------------------------------------------- > This SF.Net email is sponsored by: New Crystal Reports XI. > Version 11 adds new functionality designed to reduce time involved in > creating, integrating, and deploying reporting solutions. 
Free runtime > info, new features, or free trial, at: > http://www.businessobjects.com/devxi/728 > _______________________________________________ > Valgrind-developers mailing list > Val...@li... > https://lists.sourceforge.net/lists/listinfo/valgrind-developers |
|
From: Naveen K. <g_n...@ya...> - 2005-04-20 15:48:48
|
Hi Leonard There is ongoing work to port valgrind 2.4.0(and 3.0) to solaris-8 x86. This is a stage where valgrind can successfully load and instrument static executables. There are some problems though when instrumenting dynamic executables which is being looked at. Once that is done porting to solaris 10 shouldnt be too much of a task. Naveen >Hi, > >Do you plan to port Valgrind to Solaris 10 on x86 >machines? > >Thanks, >-Leonard __________________________________________________ Do You Yahoo!? Tired of spam? Yahoo! Mail has the best spam protection around http://mail.yahoo.com |
|
From: <sv...@va...> - 2005-04-20 14:44:16
|
Author: sewardj
Date: 2005-04-20 15:44:11 +0100 (Wed, 20 Apr 2005)
New Revision: 3535
Modified:
trunk/memcheck/mac_leakcheck.c
trunk/memcheck/mac_shared.h
trunk/memcheck/mc_main.c
Log:
Initial rehash of Memcheck's shadow-space management to support both
32- and 64-bit targets, little- and big-endian. It does more or less
work on x86 as-is, although is unusably slow since I have knocked out
all the fast-path cases and am concentrating on getting the baseline
functionality correct. The fast cases will go back in in due course.
The fundamental idea is to retain the old 2-level indexing for speed,
even on a 64-bit target. Since that's clearly unviable on a 64-bit
target, the primary map handles only first N gigabytes of address
space (probably to be set to 16, 32 or 64G). Addresses above that are
handled slowly using an auxiliary primary map which explicitly lists
(base, &-of-secondary-map) pairs. The goal is to have the
address-space-manager try and put everything below the 16/32/64G
boundary, so we hit the fast cases almost all the time.
Performance of the 32-bit case should be unaffected since the fast map
will always cover at least the lowest 4G of address space.
There are many word-size and endianness cleanups.
Jeremy's distinguished-map space-compression scheme is retained, in
modified form, as it is simple and seems effective at reducing
Memcheck's space use.
Note this is all subject to rapid change.
Modified: trunk/memcheck/mac_leakcheck.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/memcheck/mac_leakcheck.c 2005-04-20 14:32:32 UTC (rev 3534)
+++ trunk/memcheck/mac_leakcheck.c 2005-04-20 14:44:11 UTC (rev 3535)
@@ -316,6 +316,7 @@
return ret;
}
=20
+
/* Scan a block of memory between [start, start+len). This range may
be bogus, inaccessable, or otherwise strange; we deal with it.
=20
@@ -323,6 +324,7 @@
cliques, and clique is the index of the current clique leader. */
static void _lc_scan_memory(Addr start, SizeT len, Int clique)
{
+#if 0
Addr ptr =3D ROUNDUP(start, sizeof(Addr));
Addr end =3D ROUNDDN(start+len, sizeof(Addr));
vki_sigset_t sigmask;
@@ -372,8 +374,10 @@
=20
VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
VG_(set_fault_catcher)(NULL);
+#endif
}
=20
+
static void lc_scan_memory(Addr start, SizeT len)
{
_lc_scan_memory(start, len, -1);
Modified: trunk/memcheck/mac_shared.h
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/memcheck/mac_shared.h 2005-04-20 14:32:32 UTC (rev 3534)
+++ trunk/memcheck/mac_shared.h 2005-04-20 14:44:11 UTC (rev 3535)
@@ -189,22 +189,23 @@
=20
#endif /* MAC_PROFILE_MEMORY */
=20
-/*------------------------------------------------------------*/
-/*--- V and A bits ---*/
-/*------------------------------------------------------------*/
=20
-/* expand 1 bit -> 8 */
-#define BIT_EXPAND(b) ((~(((UChar)(b) & 1) - 1)) & 0xFF)
-
-#define SECONDARY_SHIFT 16
-#define SECONDARY_SIZE (1 << SECONDARY_SHIFT)
-#define SECONDARY_MASK (SECONDARY_SIZE - 1)
-
-#define PRIMARY_SIZE (1 << (32 - SECONDARY_SHIFT))
-
-#define SM_OFF(addr) ((addr) & SECONDARY_MASK)
-#define PM_IDX(addr) ((addr) >> SECONDARY_SHIFT)
-
+//zz /*------------------------------------------------------------*/
+//zz /*--- V and A bits (Victoria & Albert ?) ---*/
+//zz /*------------------------------------------------------------*/
+//zz=20
+//zz /* expand 1 bit -> 8 */
+//zz #define BIT_EXPAND(b) ((~(((UChar)(b) & 1) - 1)) & 0xFF)
+//zz=20
+//zz #define SECONDARY_SHIFT 16
+//zz #define SECONDARY_SIZE (1 << SECONDARY_SHIFT)
+//zz #define SECONDARY_MASK (SECONDARY_SIZE - 1)
+//zz=20
+//zz #define PRIMARY_SIZE (1 << (32 - SECONDARY_SHIFT))
+//zz=20
+//zz #define SM_OFF(addr) ((addr) & SECONDARY_MASK)
+//zz #define PM_IDX(addr) ((addr) >> SECONDARY_SHIFT)
+/*
#define IS_DISTINGUISHED_SM(smap) \
((smap) >=3D &distinguished_secondary_maps[0] && \
(smap) < &distinguished_secondary_maps[N_SECONDARY_MAPS])
@@ -215,44 +216,63 @@
do { \
if (IS_DISTINGUISHED(addr)) { \
primary_map[PM_IDX(addr)] =3D alloc_secondary_map(caller, primary_map[=
PM_IDX(addr)]); \
- /* VG_(printf)("new 2map because of %p\n", addr); */ \
+ if (0) VG_(printf)("new 2map because of %p\n", addr); \
} \
- } while(0)
+ } while(0)
+*/
=20
#define BITARR_SET(aaa_p,iii_p) \
do { \
- UInt iii =3D (UInt)iii_p; \
- UChar* aaa =3D (UChar*)aaa_p; \
+ UWord iii =3D (UWord)iii_p; \
+ UChar* aaa =3D (UChar*)aaa_p; \
aaa[iii >> 3] |=3D (1 << (iii & 7)); \
} while (0)
=20
#define BITARR_CLEAR(aaa_p,iii_p) \
do { \
- UInt iii =3D (UInt)iii_p; \
- UChar* aaa =3D (UChar*)aaa_p; \
+ UWord iii =3D (UWord)iii_p; \
+ UChar* aaa =3D (UChar*)aaa_p; \
aaa[iii >> 3] &=3D ~(1 << (iii & 7)); \
} while (0)
=20
#define BITARR_TEST(aaa_p,iii_p) \
- (0 !=3D (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ] \
- & (1 << (((UInt)iii_p) & 7)))) \
+ (0 !=3D (((UChar*)aaa_p)[ ((UWord)iii_p) >> 3 ] \
+ & (1 << (((UWord)iii_p) & 7)))) \
=20
+static inline=20
+void write_bit_array ( UChar* arr, UWord idx, UWord bit )=20
+{
+ UWord shift =3D idx & 7;
+ idx >>=3D 3;
+ bit &=3D 1;
+ arr[idx] =3D (arr[idx] & ~(1<<shift)) | (bit << shift);
+}
=20
-#define VGM_BIT_VALID 0
-#define VGM_BIT_INVALID 1
+static inline
+UWord read_bit_array ( UChar* arr, UWord idx )
+{
+ UWord shift =3D idx & 7;
+ idx >>=3D 3;
+ return 1 & (arr[idx] >> shift);
+}
=20
-#define VGM_NIBBLE_VALID 0
-#define VGM_NIBBLE_INVALID 0xF
=20
-#define VGM_BYTE_VALID 0
-#define VGM_BYTE_INVALID 0xFF
+#define VGM_BIT_VALID 0
+#define VGM_BIT_INVALID 1
=20
-#define VGM_WORD_VALID 0
-#define VGM_WORD_INVALID 0xFFFFFFFF
+#define VGM_NIBBLE_VALID 0
+#define VGM_NIBBLE_INVALID 0xF
=20
-#define VGM_WORD64_VALID 0x0ULL
-#define VGM_WORD64_INVALID 0xFFFFFFFFFFFFFFFFULL
+#define VGM_BYTE_VALID 0
+#define VGM_BYTE_INVALID 0xFF
=20
+#define VGM_WORD32_VALID 0
+#define VGM_WORD32_INVALID 0xFFFFFFFF
+
+#define VGM_WORD64_VALID 0ULL
+#define VGM_WORD64_INVALID 0xFFFFFFFFFFFFFFFFULL
+
+
/*------------------------------------------------------------*/
/*--- Command line options + defaults ---*/
/*------------------------------------------------------------*/
@@ -408,166 +428,166 @@
=20
Note that this code is executed very frequently and must be highly
optimised, which is why I resort to the preprocessor to achieve the
- factoring, rather than eg. using function pointers.
+ factoring, rather than eg. using function pointers. =20
*/
=20
-#define ESP_UPDATE_HANDLERS(ALIGNED4_NEW, ALIGNED4_DIE, =
\
- ALIGNED8_NEW, ALIGNED8_DIE, =
\
- UNALIGNED_NEW, UNALIGNED_DIE) =
\
- =
\
-void VGA_REGPARM(1) MAC_(new_mem_stack_4)(Addr new_ESP) =
\
-{ =
\
- PROF_EVENT(110); =
\
- if (VG_IS_4_ALIGNED(new_ESP)) { =
\
- ALIGNED4_NEW ( new_ESP ); =
\
- } else { =
\
- UNALIGNED_NEW ( new_ESP, 4 ); =
\
- } =
\
-} =
\
- =
\
-void VGA_REGPARM(1) MAC_(die_mem_stack_4)(Addr new_ESP) =
\
-{ =
\
- PROF_EVENT(120); =
\
- if (VG_IS_4_ALIGNED(new_ESP)) { =
\
- ALIGNED4_DIE ( new_ESP-4 ); =
\
- } else { =
\
- UNALIGNED_DIE ( new_ESP-4, 4 ); =
\
- } =
\
-} =
\
- =
\
-void VGA_REGPARM(1) MAC_(new_mem_stack_8)(Addr new_ESP) =
\
-{ =
\
- PROF_EVENT(111); =
\
- if (VG_IS_8_ALIGNED(new_ESP)) { =
\
- ALIGNED8_NEW ( new_ESP ); =
\
- } else if (VG_IS_4_ALIGNED(new_ESP)) { =
\
- ALIGNED4_NEW ( new_ESP ); =
\
- ALIGNED4_NEW ( new_ESP+4 ); =
\
- } else { =
\
- UNALIGNED_NEW ( new_ESP, 8 ); =
\
- } =
\
-} =
\
- =
\
-void VGA_REGPARM(1) MAC_(die_mem_stack_8)(Addr new_ESP) =
\
-{ =
\
- PROF_EVENT(121); =
\
- if (VG_IS_8_ALIGNED(new_ESP)) { =
\
- ALIGNED8_DIE ( new_ESP-8 ); =
\
- } else if (VG_IS_4_ALIGNED(new_ESP)) { =
\
- ALIGNED4_DIE ( new_ESP-8 ); =
\
- ALIGNED4_DIE ( new_ESP-4 ); =
\
- } else { =
\
- UNALIGNED_DIE ( new_ESP-8, 8 ); =
\
- } =
\
-} =
\
- =
\
-void VGA_REGPARM(1) MAC_(new_mem_stack_12)(Addr new_ESP) =
\
-{ =
\
- PROF_EVENT(112); =
\
- if (VG_IS_8_ALIGNED(new_ESP)) { =
\
- ALIGNED8_NEW ( new_ESP ); =
\
- ALIGNED4_NEW ( new_ESP+8 ); =
\
- } else if (VG_IS_4_ALIGNED(new_ESP)) { =
\
- ALIGNED4_NEW ( new_ESP ); =
\
- ALIGNED8_NEW ( new_ESP+4 ); =
\
- } else { =
\
- UNALIGNED_NEW ( new_ESP, 12 ); =
\
- } =
\
-} =
\
- =
\
-void VGA_REGPARM(1) MAC_(die_mem_stack_12)(Addr new_ESP) =
\
-{ =
\
- PROF_EVENT(122); =
\
- /* Note the -12 in the test */ =
\
- if (VG_IS_8_ALIGNED(new_ESP-12)) { =
\
- ALIGNED8_DIE ( new_ESP-12 ); =
\
- ALIGNED4_DIE ( new_ESP-4 ); =
\
- } else if (VG_IS_4_ALIGNED(new_ESP)) { =
\
- ALIGNED4_DIE ( new_ESP-12 ); =
\
- ALIGNED8_DIE ( new_ESP-8 ); =
\
- } else { =
\
- UNALIGNED_DIE ( new_ESP-12, 12 ); =
\
- } =
\
-} =
\
- =
\
-void VGA_REGPARM(1) MAC_(new_mem_stack_16)(Addr new_ESP) =
\
-{ =
\
- PROF_EVENT(113); =
\
- if (VG_IS_8_ALIGNED(new_ESP)) { =
\
- ALIGNED8_NEW ( new_ESP ); =
\
- ALIGNED8_NEW ( new_ESP+8 ); =
\
- } else if (VG_IS_4_ALIGNED(new_ESP)) { =
\
- ALIGNED4_NEW ( new_ESP ); =
\
- ALIGNED8_NEW ( new_ESP+4 ); =
\
- ALIGNED4_NEW ( new_ESP+12 ); =
\
- } else { =
\
- UNALIGNED_NEW ( new_ESP, 16 ); =
\
- } =
\
-} =
\
- =
\
-void VGA_REGPARM(1) MAC_(die_mem_stack_16)(Addr new_ESP) =
\
-{ =
\
- PROF_EVENT(123); =
\
- if (VG_IS_8_ALIGNED(new_ESP)) { =
\
- ALIGNED8_DIE ( new_ESP-16 ); =
\
- ALIGNED8_DIE ( new_ESP-8 ); =
\
- } else if (VG_IS_4_ALIGNED(new_ESP)) { =
\
- ALIGNED4_DIE ( new_ESP-16 ); =
\
- ALIGNED8_DIE ( new_ESP-12 ); =
\
- ALIGNED4_DIE ( new_ESP-4 ); =
\
- } else { =
\
- UNALIGNED_DIE ( new_ESP-16, 16 ); =
\
- } =
\
-} =
\
- =
\
-void VGA_REGPARM(1) MAC_(new_mem_stack_32)(Addr new_ESP) =
\
-{ =
\
- PROF_EVENT(114); =
\
- if (VG_IS_8_ALIGNED(new_ESP)) { =
\
- ALIGNED8_NEW ( new_ESP ); =
\
- ALIGNED8_NEW ( new_ESP+8 ); =
\
- ALIGNED8_NEW ( new_ESP+16 ); =
\
- ALIGNED8_NEW ( new_ESP+24 ); =
\
- } else if (VG_IS_4_ALIGNED(new_ESP)) { =
\
- ALIGNED4_NEW ( new_ESP ); =
\
- ALIGNED8_NEW ( new_ESP+4 ); =
\
- ALIGNED8_NEW ( new_ESP+12 ); =
\
- ALIGNED8_NEW ( new_ESP+20 ); =
\
- ALIGNED4_NEW ( new_ESP+28 ); =
\
- } else { =
\
- UNALIGNED_NEW ( new_ESP, 32 ); =
\
- } =
\
-} =
\
- =
\
-void VGA_REGPARM(1) MAC_(die_mem_stack_32)(Addr new_ESP) =
\
-{ =
\
- PROF_EVENT(124); =
\
- if (VG_IS_8_ALIGNED(new_ESP)) { =
\
- ALIGNED8_DIE ( new_ESP-32 ); =
\
- ALIGNED8_DIE ( new_ESP-24 ); =
\
- ALIGNED8_DIE ( new_ESP-16 ); =
\
- ALIGNED8_DIE ( new_ESP- 8 ); =
\
- } else if (VG_IS_4_ALIGNED(new_ESP)) { =
\
- ALIGNED4_DIE ( new_ESP-32 ); =
\
- ALIGNED8_DIE ( new_ESP-28 ); =
\
- ALIGNED8_DIE ( new_ESP-20 ); =
\
- ALIGNED8_DIE ( new_ESP-12 ); =
\
- ALIGNED4_DIE ( new_ESP-4 ); =
\
- } else { =
\
- UNALIGNED_DIE ( new_ESP-32, 32 ); =
\
- } =
\
-} =
\
- =
\
-void MAC_(new_mem_stack) ( Addr a, SizeT len ) =
\
-{ =
\
- PROF_EVENT(115); =
\
- UNALIGNED_NEW ( a, len ); =
\
-} =
\
- =
\
-void MAC_(die_mem_stack) ( Addr a, SizeT len ) =
\
-{ =
\
- PROF_EVENT(125); =
\
- UNALIGNED_DIE ( a, len ); =
\
+#define SP_UPDATE_HANDLERS(ALIGNED4_NEW, ALIGNED4_DIE, \
+ ALIGNED8_NEW, ALIGNED8_DIE, \
+ UNALIGNED_NEW, UNALIGNED_DIE) \
+ \
+void VGA_REGPARM(1) MAC_(new_mem_stack_4)(Addr new_SP) \
+{ \
+ PROF_EVENT(110); \
+ if (VG_IS_4_ALIGNED(new_SP)) { \
+ ALIGNED4_NEW ( new_SP ); \
+ } else { \
+ UNALIGNED_NEW ( new_SP, 4 ); \
+ } \
+} \
+ \
+void VGA_REGPARM(1) MAC_(die_mem_stack_4)(Addr new_SP) \
+{ \
+ PROF_EVENT(120); \
+ if (VG_IS_4_ALIGNED(new_SP)) { \
+ ALIGNED4_DIE ( new_SP-4 ); \
+ } else { \
+ UNALIGNED_DIE ( new_SP-4, 4 ); \
+ } \
+} \
+ \
+void VGA_REGPARM(1) MAC_(new_mem_stack_8)(Addr new_SP) \
+{ \
+ PROF_EVENT(111); \
+ if (VG_IS_8_ALIGNED(new_SP)) { \
+ ALIGNED8_NEW ( new_SP ); \
+ } else if (VG_IS_4_ALIGNED(new_SP)) { \
+ ALIGNED4_NEW ( new_SP ); \
+ ALIGNED4_NEW ( new_SP+4 ); \
+ } else { \
+ UNALIGNED_NEW ( new_SP, 8 ); \
+ } \
+} \
+ \
+void VGA_REGPARM(1) MAC_(die_mem_stack_8)(Addr new_SP) \
+{ \
+ PROF_EVENT(121); \
+ if (VG_IS_8_ALIGNED(new_SP)) { \
+ ALIGNED8_DIE ( new_SP-8 ); \
+ } else if (VG_IS_4_ALIGNED(new_SP)) { \
+ ALIGNED4_DIE ( new_SP-8 ); \
+ ALIGNED4_DIE ( new_SP-4 ); \
+ } else { \
+ UNALIGNED_DIE ( new_SP-8, 8 ); \
+ } \
+} \
+ \
+void VGA_REGPARM(1) MAC_(new_mem_stack_12)(Addr new_SP) \
+{ \
+ PROF_EVENT(112); \
+ if (VG_IS_8_ALIGNED(new_SP)) { \
+ ALIGNED8_NEW ( new_SP ); \
+ ALIGNED4_NEW ( new_SP+8 ); \
+ } else if (VG_IS_4_ALIGNED(new_SP)) { \
+ ALIGNED4_NEW ( new_SP ); \
+ ALIGNED8_NEW ( new_SP+4 ); \
+ } else { \
+ UNALIGNED_NEW ( new_SP, 12 ); \
+ } \
+} \
+ \
+void VGA_REGPARM(1) MAC_(die_mem_stack_12)(Addr new_SP) \
+{ \
+ PROF_EVENT(122); \
+ /* Note the -12 in the test */ \
+ if (VG_IS_8_ALIGNED(new_SP-12)) { \
+ ALIGNED8_DIE ( new_SP-12 ); \
+ ALIGNED4_DIE ( new_SP-4 ); \
+ } else if (VG_IS_4_ALIGNED(new_SP)) { \
+ ALIGNED4_DIE ( new_SP-12 ); \
+ ALIGNED8_DIE ( new_SP-8 ); \
+ } else { \
+ UNALIGNED_DIE ( new_SP-12, 12 ); \
+ } \
+} \
+ \
+void VGA_REGPARM(1) MAC_(new_mem_stack_16)(Addr new_SP) \
+{ \
+ PROF_EVENT(113); \
+ if (VG_IS_8_ALIGNED(new_SP)) { \
+ ALIGNED8_NEW ( new_SP ); \
+ ALIGNED8_NEW ( new_SP+8 ); \
+ } else if (VG_IS_4_ALIGNED(new_SP)) { \
+ ALIGNED4_NEW ( new_SP ); \
+ ALIGNED8_NEW ( new_SP+4 ); \
+ ALIGNED4_NEW ( new_SP+12 ); \
+ } else { \
+ UNALIGNED_NEW ( new_SP, 16 ); \
+ } \
+} \
+ \
+void VGA_REGPARM(1) MAC_(die_mem_stack_16)(Addr new_SP) \
+{ \
+ PROF_EVENT(123); \
+ if (VG_IS_8_ALIGNED(new_SP)) { \
+ ALIGNED8_DIE ( new_SP-16 ); \
+ ALIGNED8_DIE ( new_SP-8 ); \
+ } else if (VG_IS_4_ALIGNED(new_SP)) { \
+ ALIGNED4_DIE ( new_SP-16 ); \
+ ALIGNED8_DIE ( new_SP-12 ); \
+ ALIGNED4_DIE ( new_SP-4 ); \
+ } else { \
+ UNALIGNED_DIE ( new_SP-16, 16 ); \
+ } \
+} \
+ \
+void VGA_REGPARM(1) MAC_(new_mem_stack_32)(Addr new_SP) \
+{ \
+ PROF_EVENT(114); \
+ if (VG_IS_8_ALIGNED(new_SP)) { \
+ ALIGNED8_NEW ( new_SP ); \
+ ALIGNED8_NEW ( new_SP+8 ); \
+ ALIGNED8_NEW ( new_SP+16 ); \
+ ALIGNED8_NEW ( new_SP+24 ); \
+ } else if (VG_IS_4_ALIGNED(new_SP)) { \
+ ALIGNED4_NEW ( new_SP ); \
+ ALIGNED8_NEW ( new_SP+4 ); \
+ ALIGNED8_NEW ( new_SP+12 ); \
+ ALIGNED8_NEW ( new_SP+20 ); \
+ ALIGNED4_NEW ( new_SP+28 ); \
+ } else { \
+ UNALIGNED_NEW ( new_SP, 32 ); \
+ } \
+} \
+ \
+void VGA_REGPARM(1) MAC_(die_mem_stack_32)(Addr new_SP) \
+{ \
+ PROF_EVENT(124); \
+ if (VG_IS_8_ALIGNED(new_SP)) { \
+ ALIGNED8_DIE ( new_SP-32 ); \
+ ALIGNED8_DIE ( new_SP-24 ); \
+ ALIGNED8_DIE ( new_SP-16 ); \
+ ALIGNED8_DIE ( new_SP- 8 ); \
+ } else if (VG_IS_4_ALIGNED(new_SP)) { \
+ ALIGNED4_DIE ( new_SP-32 ); \
+ ALIGNED8_DIE ( new_SP-28 ); \
+ ALIGNED8_DIE ( new_SP-20 ); \
+ ALIGNED8_DIE ( new_SP-12 ); \
+ ALIGNED4_DIE ( new_SP-4 ); \
+ } else { \
+ UNALIGNED_DIE ( new_SP-32, 32 ); \
+ } \
+} \
+ \
+void MAC_(new_mem_stack) ( Addr a, SizeT len ) \
+{ \
+ PROF_EVENT(115); \
+ UNALIGNED_NEW ( a, len ); \
+} \
+ \
+void MAC_(die_mem_stack) ( Addr a, SizeT len ) \
+{ \
+ PROF_EVENT(125); \
+ UNALIGNED_DIE ( a, len ); \
}
=20
#endif /* __MAC_SHARED_H */
Modified: trunk/memcheck/mc_main.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/memcheck/mc_main.c 2005-04-20 14:32:32 UTC (rev 3534)
+++ trunk/memcheck/mc_main.c 2005-04-20 14:44:11 UTC (rev 3535)
@@ -30,424 +30,800 @@
The GNU General Public License is contained in the file COPYING.
*/
=20
+/* TODO urgently
+
+ sanity check:=20
+ auxmap only covers address space that the primary doesn't
+ auxmap entries non-duplicated (expensive)
+
+ types of helper functions
+
+ set_address_range_perms to notice when a distinguished secondary
+ will work, and use that (viz, re-implement compression scheme)
+
+ profile
+
+ reinstate fast-path cases
+*/
+
+
#include "mc_include.h"
#include "memcheck.h" /* for client requests */
//#include "vg_profile.c"
=20
-/* Define to debug the mem audit system. */
-/* #define VG_DEBUG_MEMORY */
=20
+typedef enum {
+ MC_Ok =3D 5, MC_AddrErr =3D 6, MC_ValueErr =3D 7
+} MC_ReadResult;
+
#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
=20
+
/*------------------------------------------------------------*/
-/*--- Low-level support for memory checking. ---*/
+/*--- Basic A/V bitmap representation. ---*/
/*------------------------------------------------------------*/
=20
-/* All reads and writes are checked against a memory map, which
- records the state of all memory in the process. The memory map is
- organised like this:
+/* --------------- Basic configuration --------------- */
=20
- The top 16 bits of an address are used to index into a top-level
- map table, containing 65536 entries. Each entry is a pointer to a
- second-level map, which records the accesibililty and validity
- permissions for the 65536 bytes indexed by the lower 16 bits of the
- address. Each byte is represented by nine bits, one indicating
- accessibility, the other eight validity. So each second-level map
- contains 73728 bytes. This two-level arrangement conveniently
- divides the 4G address space into 64k lumps, each size 64k bytes.
+/* The number of entries in the primary map can be altered. However
+ we hardwire the assumption that each secondary map covers precisely
+ 64k of address space. */
=20
- All entries in the primary (top-level) map must point to a valid
- secondary (second-level) map. Since most of the 4G of address
- space will not be in use -- ie, not mapped at all -- there is a
- distinguished secondary map, which indicates `not addressible and
- not valid' writeable for all bytes. Entries in the primary map for
- which the entire 64k is not in use at all point at this
- distinguished map.
+#define N_PRIMARY_BITS 16
+#define N_PRIMARY_MAPS ((1 << N_PRIMARY_BITS)-1)
=20
- There are actually 4 distinguished secondaries. These are used to
- represent a memory range which is either not addressable (validity
- doesn't matter), addressable+not valid, addressable+valid.
+#define MAX_PRIMARY_ADDRESS (Addr)(((Addr)65536) * N_PRIMARY_MAPS)
=20
- [...] lots of stuff deleted due to out of date-ness
=20
- As a final optimisation, the alignment and address checks for
- 4-byte loads and stores are combined in a neat way. The primary
- map is extended to have 262144 entries (2^18), rather than 2^16.
- The top 3/4 of these entries are permanently set to the
- distinguished secondary map. For a 4-byte load/store, the
- top-level map is indexed not with (addr >> 16) but instead f(addr),
- where
+/* --------------- Secondary maps --------------- */
=20
- f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
- =3D ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or=20
- =3D ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
+typedef=20
+ struct {
+ UChar abits[8192];
+ UChar vbyte[65536];
+ }
+ SecMap;
=20
- ie the lowest two bits are placed above the 16 high address bits.
- If either of these two bits are nonzero, the address is misaligned;
- this will select a secondary map from the upper 3/4 of the primary
- map. Because this is always the distinguished secondary map, a
- (bogus) address check failure will result. The failure handling
- code can then figure out whether this is a genuine addr check
- failure or whether it is a possibly-legitimate access at a
- misaligned address. =20
+/* 3 distinguished secondary maps, one for no-access, one for
+ accessible but undefined, and one for accessible and defined.
+ Distinguished secondaries may never be modified.
*/
+#define SM_DIST_NOACCESS 0
+#define SM_DIST_ACCESS_UNDEFINED 1
+#define SM_DIST_ACCESS_DEFINED 2
=20
-/*------------------------------------------------------------*/
-/*--- Function declarations. ---*/
-/*------------------------------------------------------------*/
+static SecMap sm_distinguished[3];
=20
-static ULong mc_rd_V8_SLOWLY ( Addr a );
-static UInt mc_rd_V4_SLOWLY ( Addr a );
-static UInt mc_rd_V2_SLOWLY ( Addr a );
-static UInt mc_rd_V1_SLOWLY ( Addr a );
+static inline Bool is_distinguished_sm ( SecMap* sm ) {
+ return sm >=3D &sm_distinguished[0] && sm <=3D &sm_distinguished[2];
+}
=20
-static void mc_wr_V8_SLOWLY ( Addr a, ULong vbytes );
-static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
-static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
-static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
+/* dist_sm points to one of our three distinguished secondaries. Make
+ a copy of it so that we can write to it.
+*/
+static SecMap* copy_for_writing ( SecMap* dist_sm )
+{
+ SecMap* new_sm;
+ tl_assert(dist_sm =3D=3D &sm_distinguished[0]
+ || dist_sm =3D=3D &sm_distinguished[1]
+ || dist_sm =3D=3D &sm_distinguished[2]);
=20
-/*------------------------------------------------------------*/
-/*--- Data defns. ---*/
-/*------------------------------------------------------------*/
+ new_sm =3D VG_(shadow_alloc)(sizeof(SecMap));
+ VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
+ return new_sm;
+}
=20
-typedef=20
- struct {
- UChar abits[SECONDARY_SIZE/8];
- UChar vbyte[SECONDARY_SIZE];
- }
- SecMap;
=20
+/* --------------- Primary maps --------------- */
=20
-static SecMap* primary_map[ /*PRIMARY_SIZE*/ PRIMARY_SIZE*4 ];
+/* The main primary map. This covers some initial part of the address
+ space, addresses 0 .. (N_PRIMARY_MAPS << 16)-1. The rest of it is
+ handled using the auxiliary primary map. =20
+*/
+static SecMap* primary_map[N_PRIMARY_MAPS];
=20
-#define DSM_IDX(a, v) ((((a)&1) << 1) + ((v)&1))
=20
-/* 4 secondary maps, but one is redundant (because the !addressable &&
- valid state is meaningless) */
-static const SecMap distinguished_secondary_maps[4] =3D {
-#define INIT(a, v) \
- [ DSM_IDX(a, v) ] =3D { { [0 ... (SECONDARY_SIZE/8)-1] =3D BIT_EXPAND=
(a) }, \
- { [0 ... SECONDARY_SIZE-1] =3D BIT_EXPAND(a|v) } }
- INIT(VGM_BIT_VALID, VGM_BIT_VALID),
- INIT(VGM_BIT_VALID, VGM_BIT_INVALID),
- INIT(VGM_BIT_INVALID, VGM_BIT_VALID),
- INIT(VGM_BIT_INVALID, VGM_BIT_INVALID),
-#undef INIT
-};
-#define N_SECONDARY_MAPS (sizeof(distinguished_secondary_maps)/sizeof(*d=
istinguished_secondary_maps))
+/* An entry in the auxiliary primary map. base must be a 64k-aligned
+ value, and sm points at the relevant secondary map. As with the
+ main primary map, the secondary may be either a real secondary, or
+ one of the three distinguished secondaries.
+*/
+typedef
+ struct {=20
+ Addr base;
+ SecMap* sm;
+ }
+ AuxMapEnt;
=20
-#define DSM(a,v) ((SecMap *)&distinguished_secondary_maps[DSM_IDX(a, v)=
])
+/* An expanding array of AuxMapEnts. */
+#define N_AUXMAPS 500 /* HACK */
+static AuxMapEnt hacky_auxmaps[N_AUXMAPS];
+static Int auxmap_size =3D N_AUXMAPS;
+static Int auxmap_used =3D 0;
+static AuxMapEnt* auxmap =3D &hacky_auxmaps[0];
=20
-#define DSM_NOTADDR DSM(VGM_BIT_INVALID, VGM_BIT_INVALID)
-#define DSM_ADDR_NOTVALID DSM(VGM_BIT_VALID, VGM_BIT_INVALID)
-#define DSM_ADDR_VALID DSM(VGM_BIT_VALID, VGM_BIT_VALID)
+/* Auxmap statistics */
+static ULong n_auxmap_searches =3D 0;
+static ULong n_auxmap_cmps =3D 0;
=20
-static void init_shadow_memory ( void )
+
+/* Find an entry in the auxiliary map. If an entry is found, move it
+ one step closer to the front of the array, then return its address.
+ If an entry is not found, allocate one. Note carefully that
+ because a each call potentially rearranges the entries, each call
+ to this function invalidates ALL AuxMapEnt*s previously obtained by
+ calling this fn. =20
+*/
+static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
- Int i, a, v;
+ UWord i;
+ tl_assert(a > MAX_PRIMARY_ADDRESS);
=20
- /* check construction of the 4 distinguished secondaries */
- tl_assert(VGM_BIT_INVALID =3D=3D 1);
- tl_assert(VGM_BIT_VALID =3D=3D 0);
+ a &=3D ~(Addr)0xFFFF;
=20
- for (a =3D 0; a <=3D 1; a++) {
- for (v =3D 0; v <=3D 1; v++) {
- if (DSM(a,v)->abits[0] !=3D BIT_EXPAND(a))
- VG_(printf)("DSM(%d,%d)[%d]->abits[0] =3D=3D %x not %x\n",
- a,v,DSM_IDX(a,v),DSM(a,v)->abits[0], BIT_EXPAND(a));
- if (DSM(a,v)->vbyte[0] !=3D BIT_EXPAND(a|v))
- VG_(printf)("DSM(%d,%d)[%d]->vbyte[0] =3D=3D %x not %x\n",
- a,v,DSM_IDX(a,v),DSM(a,v)->vbyte[0], BIT_EXPAND(a|v));
+ /* Search .. */
+ n_auxmap_searches++;
+ for (i =3D 0; i < auxmap_used; i++) {
+ if (auxmap[i].base =3D=3D a)
+ break;
+ }
+ n_auxmap_cmps +=3D (ULong)(i+1);
=20
- tl_assert(DSM(a,v)->abits[0] =3D=3D BIT_EXPAND(a));
- tl_assert(DSM(a,v)->vbyte[0] =3D=3D BIT_EXPAND(v|a));
+ if (i < auxmap_used) {
+ /* Found it. Nudge it a bit closer to the front. */
+ if (i > 0) {
+ AuxMapEnt tmp =3D auxmap[i-1];
+ auxmap[i-1] =3D auxmap[i];
+ auxmap[i] =3D tmp;
+ i--;
}
+ return &auxmap[i];
}
-=20
- /* These entries gradually get overwritten as the used address
- space expands. */
- for (i =3D 0; i < PRIMARY_SIZE; i++)
- primary_map[i] =3D DSM_NOTADDR;
=20
- /* These ones should never change; it's a bug in Valgrind if they do.=
*/
- for (i =3D PRIMARY_SIZE; i < PRIMARY_SIZE*4; i++)
- primary_map[i] =3D DSM_NOTADDR;
+ /* We didn't find it. Hmm. This is a new piece of address space.
+ We'll need to allocate a new AuxMap entry for it. */
+ if (auxmap_used >=3D auxmap_size) {
+ tl_assert(auxmap_used =3D=3D auxmap_size);
+ /* Out of auxmap entries. */
+ tl_assert2(0, "failed to expand the auxmap table");
+ }
+
+ tl_assert(auxmap_used < auxmap_size);
+
+ auxmap[auxmap_used].base =3D a & ~(Addr)0xFFFF;
+ auxmap[auxmap_used].sm =3D &sm_distinguished[SM_DIST_NOACCESS];
+
+ if (0)
+ VG_(printf)("new auxmap, base =3D 0x%llx\n",=20
+ (ULong)auxmap[auxmap_used].base );
+
+ auxmap_used++;
+ return &auxmap[auxmap_used-1];
}
=20
-/*------------------------------------------------------------*/
-/*--- Basic bitmap management, reading and writing. ---*/
-/*------------------------------------------------------------*/
=20
-/* Allocate and initialise a secondary map. */
+/* --------------- SecMap fundamentals --------------- */
=20
-static SecMap* alloc_secondary_map ( __attribute__ ((unused))=20
- Char* caller,
- const SecMap *prototype)
+/* Produce the secmap for 'a', either from the primary map or by
+ ensuring there is an entry for it in the aux primary map. The
+ secmap may be a distinguished one as the caller will only want to
+ be able to read it.=20
+*/
+static SecMap* get_secmap_readable ( Addr a )
{
- SecMap* map;
- PROF_EVENT(10);
+ if (a <=3D MAX_PRIMARY_ADDRESS) {
+ UWord pm_off =3D a >> 16;
+ return primary_map[ pm_off ];
+ } else {
+ AuxMapEnt* am =3D find_or_alloc_in_auxmap(a);
+ return am->sm;
+ }
+}
=20
- map =3D (SecMap *)VG_(shadow_alloc)(sizeof(SecMap));
+/* Produce the secmap for 'a', either from the primary map or by
+ ensuring there is an entry for it in the aux primary map. The
+ secmap may not be a distinguished one, since the caller will want
+ to be able to write it. If it is a distinguished secondary, make a
+ writable copy of it, install it, and return the copy instead. (COW
+ semantics).
+*/
+static SecMap* get_secmap_writable ( Addr a )
+{
+ if (a <=3D MAX_PRIMARY_ADDRESS) {
+ UWord pm_off =3D a >> 16;
+ if (is_distinguished_sm(primary_map[ pm_off ]))
+ primary_map[pm_off] =3D copy_for_writing(primary_map[pm_off]);
+ return primary_map[pm_off];
+ } else {
+ AuxMapEnt* am =3D find_or_alloc_in_auxmap(a);
+ if (is_distinguished_sm(am->sm))
+ am->sm =3D copy_for_writing(am->sm);
+ return am->sm;
+ }
+}
=20
- VG_(memcpy)(map, prototype, sizeof(*map));
=20
- /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
- return map;
+/* --------------- Endianness helpers --------------- */
+
+/* Returns the offset in memory of the byteno-th most significant byte
+ in a wordszB-sized word, given the specified endianness. */
+static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,=20
+ UWord byteno ) {
+ return bigendian ? (wordszB-1-byteno) : byteno;
}
=20
=20
-/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
+/* --------------- Fundamental functions --------------- */
=20
-static __inline__ UChar get_abit ( Addr a )
+static=20
+void get_abit_and_vbyte ( /*OUT*/UWord* abit,=20
+ /*OUT*/UWord* vbyte,
+ Addr a )
{
- SecMap* sm =3D primary_map[PM_IDX(a)];
- UInt sm_off =3D SM_OFF(a);
- PROF_EVENT(20);
-# if 0
- if (IS_DISTINGUISHED_SM(sm))
- VG_(message)(Vg_DebugMsg,=20
- "accessed distinguished 2ndary (A)map! 0x%x\n", a)=
;
-# endif
- return BITARR_TEST(sm->abits, sm_off)=20
- ? VGM_BIT_INVALID : VGM_BIT_VALID;
-}
+ SecMap* sm =3D get_secmap_readable(a);
+ *vbyte =3D 0xFF & sm->vbyte[a & 0xFFFF];
+ *abit =3D read_bit_array(sm->abits, a & 0xFFFF);
+}=20
=20
-static __inline__ UChar get_vbyte ( Addr a )
+static=20
+UWord get_abit ( Addr a )
{
- SecMap* sm =3D primary_map[PM_IDX(a)];
- UInt sm_off =3D SM_OFF(a);
- PROF_EVENT(21);
-# if 0
- if (IS_DISTINGUISHED_SM(sm))
- VG_(message)(Vg_DebugMsg,=20
- "accessed distinguished 2ndary (V)map! 0x%x\n", a)=
;
-# endif
- return sm->vbyte[sm_off];
+ SecMap* sm =3D get_secmap_readable(a);
+ return read_bit_array(sm->abits, a & 0xFFFF);
}
=20
-static /* __inline__ */ void set_abit ( Addr a, UChar abit )
+static
+void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
- SecMap* sm;
- UInt sm_off;
- PROF_EVENT(22);
- ENSURE_MAPPABLE(a, "set_abit");
- sm =3D primary_map[PM_IDX(a)];
- sm_off =3D SM_OFF(a);
- if (abit)=20
- BITARR_SET(sm->abits, sm_off);
- else
- BITARR_CLEAR(sm->abits, sm_off);
+ SecMap* sm =3D get_secmap_writable(a);
+ sm->vbyte[a & 0xFFFF] =3D 0xFF & vbyte;
+ write_bit_array(sm->abits, a & 0xFFFF, abit); =20
}
=20
-static __inline__ void set_vbyte ( Addr a, UChar vbyte )
+static
+void set_vbyte ( Addr a, UWord vbyte )
{
- SecMap* sm;
- UInt sm_off;
- PROF_EVENT(23);
- ENSURE_MAPPABLE(a, "set_vbyte");
- sm =3D primary_map[PM_IDX(a)];
- sm_off =3D SM_OFF(a);
- sm->vbyte[sm_off] =3D vbyte;
+ SecMap* sm =3D get_secmap_writable(a);
+ sm->vbyte[a & 0xFFFF] =3D 0xFF & vbyte;
}
=20
=20
-/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
+/* --------------- Load/store slow cases. --------------- */
=20
-static __inline__ UChar get_abits4_ALIGNED ( Addr a )
+static
+ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
- SecMap* sm;
- UInt sm_off;
- UChar abits8;
- PROF_EVENT(24);
-# ifdef VG_DEBUG_MEMORY
- tl_assert(VG_IS_4_ALIGNED(a));
-# endif
- sm =3D primary_map[PM_IDX(a)];
- sm_off =3D SM_OFF(a);
- abits8 =3D sm->abits[sm_off >> 3];
- abits8 >>=3D (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
- abits8 &=3D 0x0F;
- return abits8;
+ /* Make up a result V word, which contains the loaded data for
+ valid addresses and Undefined for invalid addresses. Iterate
+ over the bytes in the word, from the most significant down to
+ the least. */
+ ULong vw =3D VGM_WORD64_INVALID;
+ SizeT i =3D szB-1;
+ SizeT n_addrs_bad =3D 0;
+ Addr ai;
+ Bool aok;
+ UWord abit, vbyte;
+
+ PROF_EVENT(70);
+ tl_assert(szB =3D=3D 8 || szB =3D=3D 4 || szB =3D=3D 2 || szB =3D=3D =
1);
+
+ while (True) {
+ ai =3D a+byte_offset_w(szB,bigendian,i);
+ get_abit_and_vbyte(&abit, &vbyte, ai);
+ aok =3D abit =3D=3D VGM_BIT_VALID;
+ if (!aok)
+ n_addrs_bad++;
+ vw <<=3D 8;=20
+ vw |=3D 0xFF & (aok ? vbyte : VGM_BYTE_INVALID);
+ if (i =3D=3D 0) break;
+ i--;
+ }
+
+ if (n_addrs_bad > 0)
+ MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False =
);
+
+ //if (n_addrs_bad =3D=3D n)
+ // vw =3D VGM_WORD64_VALID;
+ return vw;
}
=20
-static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
+
+static=20
+void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
{
- SecMap* sm =3D primary_map[PM_IDX(a)];
- UInt sm_off =3D SM_OFF(a);
- PROF_EVENT(25);
-# ifdef VG_DEBUG_MEMORY
- tl_assert(VG_IS_4_ALIGNED(a));
-# endif
- return ((UInt*)(sm->vbyte))[sm_off >> 2];
-}
+ SizeT i;
+ SizeT n_addrs_bad =3D 0;
+ UWord abit;
+ Bool aok;
+ Addr ai;
=20
+ PROF_EVENT(71);
+ tl_assert(szB =3D=3D 8 || szB =3D=3D 4 || szB =3D=3D 2 || szB =3D=3D =
1);
=20
-static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
-{
- SecMap* sm;
- UInt sm_off;
- ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
- sm =3D primary_map[PM_IDX(a)];
- sm_off =3D SM_OFF(a);
- PROF_EVENT(23);
-# ifdef VG_DEBUG_MEMORY
- tl_assert(VG_IS_4_ALIGNED(a));
-# endif
- ((UInt*)(sm->vbyte))[sm_off >> 2] =3D vbytes;
+ /* Dump vbytes in memory, iterating from least to most significant
+ byte. At the same time establish addressibility of the
+ location. */
+ for (i =3D 0; i < szB; i++) {
+ ai =3D a+byte_offset_w(szB,bigendian,i);
+ abit =3D get_abit(ai);
+ aok =3D abit =3D=3D VGM_BIT_VALID;
+ if (!aok)
+ n_addrs_bad++;
+ set_vbyte(ai, vbytes & 0xFF );=20
+ vbytes >>=3D 8;
+ }
+
+ /* If an address error has happened, report it. */
+ if (n_addrs_bad > 0)
+ MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True )=
;
}
=20
=20
-/*------------------------------------------------------------*/
-/*--- Setting permissions over address ranges. ---*/
-/*------------------------------------------------------------*/
+///////////////////////////////////////////////////////////////
=20
-static void set_address_range_perms ( Addr a, SizeT len,=20
- UInt example_a_bit,
- UInt example_v_bit )
-{
- UChar vbyte, abyte8;
- UInt vword4, sm_off;
- SecMap* sm;
=20
- PROF_EVENT(30);
=20
- if (len =3D=3D 0)
- return;
=20
- if (VG_(clo_verbosity) > 0) {
- if (len > 100 * 1000 * 1000) {
- VG_(message)(Vg_UserMsg,=20
- "Warning: set address range perms: "
- "large range %u, a %d, v %d",
- len, example_a_bit, example_v_bit );
- }
- }
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
=20
- VGP_PUSHCC(VgpSetMem);
+//zz #if 0 /* this is the old implementation */
+//zz=20
+//zz /* Define to debug the mem audit system. */
+//zz /* #define VG_DEBUG_MEMORY */
+//zz=20
+//zz=20
+//zz /*------------------------------------------------------------*/
+//zz /*--- Low-level support for memory checking. ---*/
+//zz /*------------------------------------------------------------*/
+//zz=20
+//zz /* All reads and writes are checked against a memory map, which
+//zz records the state of all memory in the process. The memory map =
is
+//zz organised like this:
+//zz=20
+//zz The top 16 bits of an address are used to index into a top-level
+//zz map table, containing 65536 entries. Each entry is a pointer to=
a
+//zz second-level map, which records the accesibililty and validity
+//zz permissions for the 65536 bytes indexed by the lower 16 bits of =
the
+//zz address. Each byte is represented by nine bits, one indicating
+//zz accessibility, the other eight validity. So each second-level m=
ap
+//zz contains 73728 bytes. This two-level arrangement conveniently
+//zz divides the 4G address space into 64k lumps, each size 64k bytes=
.
+//zz=20
+//zz All entries in the primary (top-level) map must point to a valid
+//zz secondary (second-level) map. Since most of the 4G of address
+//zz space will not be in use -- ie, not mapped at all -- there is a
+//zz distinguished secondary map, which indicates `not addressible an=
d
+//zz not valid' writeable for all bytes. Entries in the primary map =
for
+//zz which the entire 64k is not in use at all point at this
+//zz distinguished map.
+//zz=20
+//zz There are actually 4 distinguished secondaries. These are used =
to
+//zz represent a memory range which is either not addressable (validi=
ty
+//zz doesn't matter), addressable+not valid, addressable+valid.
+//zz=20
+//zz [...] lots of stuff deleted due to out of date-ness
+//zz=20
+//zz As a final optimisation, the alignment and address checks for
+//zz 4-byte loads and stores are combined in a neat way. The primary
+//zz map is extended to have 262144 entries (2^18), rather than 2^16.
+//zz The top 3/4 of these entries are permanently set to the
+//zz distinguished secondary map. For a 4-byte load/store, the
+//zz top-level map is indexed not with (addr >> 16) but instead f(add=
r),
+//zz where
+//zz=20
+//zz f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
+//zz =3D ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or=20
+//zz =3D ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
+//zz=20
+//zz ie the lowest two bits are placed above the 16 high address bits=
.
+//zz If either of these two bits are nonzero, the address is misalign=
ed;
+//zz this will select a secondary map from the upper 3/4 of the prima=
ry
+//zz map. Because this is always the distinguished secondary map, a
+//zz (bogus) address check failure will result. The failure handling
+//zz code can then figure out whether this is a genuine addr check
+//zz failure or whether it is a possibly-legitimate access at a
+//zz misaligned address. =20
+//zz */
+//zz=20
+//zz /*------------------------------------------------------------*/
+//zz /*--- Function declarations. ---*/
+//zz /*------------------------------------------------------------*/
+//zz=20
+//zz static ULong mc_rd_V8_SLOWLY ( Addr a );
+//zz static UInt mc_rd_V4_SLOWLY ( Addr a );
+//zz static UInt mc_rd_V2_SLOWLY ( Addr a );
+//zz static UInt mc_rd_V1_SLOWLY ( Addr a );
+//zz=20
+//zz static void mc_wr_V8_SLOWLY ( Addr a, ULong vbytes );
+//zz static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
+//zz static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
+//zz static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
+//zz=20
+//zz /*------------------------------------------------------------*/
+//zz /*--- Data defns. ---*/
+//zz /*------------------------------------------------------------*/
+//zz=20
+//zz typedef=20
+//zz struct {
+//zz UChar abits[SECONDARY_SIZE/8];
+//zz UChar vbyte[SECONDARY_SIZE];
+//zz }
+//zz SecMap;
+//zz=20
+//zz=20
+//zz static SecMap* primary_map[ /*PRIMARY_SIZE*/ PRIMARY_SIZE*4 ];
+//zz=20
+//zz #define DSM_IDX(a, v) ((((a)&1) << 1) + ((v)&1))
+//zz=20
+//zz /* 4 secondary maps, but one is redundant (because the !addressable=
&&
+//zz valid state is meaningless) */
+//zz static const SecMap distinguished_secondary_maps[4] =3D {
+//zz #define INIT(a, v) \
+//zz [ DSM_IDX(a, v) ] =3D { { [0 ... (SECONDARY_SIZE/8)-1] =3D BIT_E=
XPAND(a) }, \
+//zz { [0 ... SECONDARY_SIZE-1] =3D BIT_EXPAND(a|v) } }
+//zz INIT(VGM_BIT_VALID, VGM_BIT_VALID),
+//zz INIT(VGM_BIT_VALID, VGM_BIT_INVALID),
+//zz INIT(VGM_BIT_INVALID, VGM_BIT_VALID),
+//zz INIT(VGM_BIT_INVALID, VGM_BIT_INVALID),
+//zz #undef INIT
+//zz };
+//zz #define N_SECONDARY_MAPS (sizeof(distinguished_secondary_maps)/size=
of(*distinguished_secondary_maps))
+//zz=20
+//zz #define DSM(a,v) ((SecMap *)&distinguished_secondary_maps[DSM_IDX(=
a, v)])
+//zz=20
+//zz #define DSM_NOTADDR DSM(VGM_BIT_INVALID, VGM_BIT_INVALID)
+//zz #define DSM_ADDR_NOTVALID DSM(VGM_BIT_VALID, VGM_BIT_INVALID)
+//zz #define DSM_ADDR_VALID DSM(VGM_BIT_VALID, VGM_BIT_VALID)
=20
- /* Requests to change permissions of huge address ranges may
- indicate bugs in our machinery. 30,000,000 is arbitrary, but so
- far all legitimate requests have fallen beneath that size. */
- /* 4 Mar 02: this is just stupid; get rid of it. */
- /* tl_assert(len < 30000000); */
+static void init_shadow_memory ( void )
+{
+ Int i;
+ SecMap* sm;
=20
- /* Check the permissions make sense. */
- tl_assert(example_a_bit =3D=3D VGM_BIT_VALID=20
- || example_a_bit =3D=3D VGM_BIT_INVALID);
- tl_assert(example_v_bit =3D=3D VGM_BIT_VALID=20
- || example_v_bit =3D=3D VGM_BIT_INVALID);
- if (example_a_bit =3D=3D VGM_BIT_INVALID)
- tl_assert(example_v_bit =3D=3D VGM_BIT_INVALID);
+ /* Build the 3 distinguished secondaries */
+ tl_assert(VGM_BIT_INVALID =3D=3D 1);
+ tl_assert(VGM_BIT_VALID =3D=3D 0);
+ tl_assert(VGM_BYTE_INVALID =3D=3D 0xFF);
+ tl_assert(VGM_BYTE_VALID =3D=3D 0);
=20
- /* The validity bits to write. */
- vbyte =3D example_v_bit=3D=3DVGM_BIT_VALID=20
- ? VGM_BYTE_VALID : VGM_BYTE_INVALID;
+ /* Set A invalid, V invalid. */
+ sm =3D &sm_distinguished[SM_DIST_NOACCESS];
+ for (i =3D 0; i < 65536; i++)
+ sm->vbyte[i] =3D VGM_BYTE_INVALID;
+ for (i =3D 0; i < 8192; i++)
+ sm->abits[i] =3D VGM_BYTE_INVALID;
=20
- /* In order that we can charge through the address space at 8
- bytes/main-loop iteration, make up some perms. */
- abyte8 =3D BIT_EXPAND(example_a_bit);
- vword4 =3D (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
+ /* Set A valid, V invalid. */
+ sm =3D &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
+ for (i =3D 0; i < 65536; i++)
+ sm->vbyte[i] =3D VGM_BYTE_INVALID;
+ for (i =3D 0; i < 8192; i++)
+ sm->abits[i] =3D VGM_BYTE_VALID;
=20
-# ifdef VG_DEBUG_MEMORY
- /* Do it ... */
- while (True) {
- PROF_EVENT(31);
- if (len =3D=3D 0) break;
- set_abit ( a, example_a_bit );
- set_vbyte ( a, vbyte );
- a++;
- len--;
- }
+ /* Set A valid, V valid. */
+ sm =3D &sm_distinguished[SM_DIST_ACCESS_DEFINED];
+ for (i =3D 0; i < 65536; i++)
+ sm->vbyte[i] =3D VGM_BYTE_VALID;
+ for (i =3D 0; i < 8192; i++)
+ sm->abits[i] =3D VGM_BYTE_VALID;
=20
-# else
- /* Slowly do parts preceding 8-byte alignment. */
- while (True) {
- PROF_EVENT(31);
- if (len =3D=3D 0) break;
- if ((a % 8) =3D=3D 0) break;
- set_abit ( a, example_a_bit );
- set_vbyte ( a, vbyte );
- a++;
- len--;
- } =20
+ /* Set up the primary map. */
+ /* These entries gradually get overwritten as the used address
+ space expands. */
+ for (i =3D 0; i < N_PRIMARY_MAPS; i++)
+ primary_map[i] =3D &sm_distinguished[SM_DIST_NOACCESS];
=20
- if (len =3D=3D 0) {
- VGP_POPCC(VgpSetMem);
- return;
- }
- tl_assert((a % 8) =3D=3D 0 && len > 0);
+ /* auxmap_size =3D auxmap_used =3D 0;=20
+ no ... these are statically initialised */
=20
- /* Now align to the next primary_map entry */
- for (; (a & SECONDARY_MASK) && len >=3D 8; a +=3D 8, len -=3D 8) {
+ tl_assert( TL_(expensive_sanity_check)() );
+}
=20
- PROF_EVENT(32);
- /* If the primary is already pointing to a distinguished map
- with the same properties as we're trying to set, then leave
- it that way. */
- if (primary_map[PM_IDX(a)] =3D=3D DSM(example_a_bit, example_v_bit=
))
- continue;
=20
- ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
- sm =3D primary_map[PM_IDX(a)];
- sm_off =3D SM_OFF(a);
- sm->abits[sm_off >> 3] =3D abyte8;
- ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] =3D vword4;
- ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] =3D vword4;
- }
+//zz /*------------------------------------------------------------*/
+//zz /*--- Basic bitmap management, reading and writing. ---*/
+//zz /*------------------------------------------------------------*/
+//zz=20
+//zz /* Allocate and initialise a secondary m...
[truncated message content] |
|
From: <sv...@va...> - 2005-04-20 14:32:35
|
Author: sewardj
Date: 2005-04-20 15:32:32 +0100 (Wed, 20 Apr 2005)
New Revision: 3534
Modified:
trunk/coregrind/vg_redir.c
Log:
Add another redirect that we need. This has no effect at present
because the redirect syms are set up only after the initial read of
/proc/self/maps and by then ld-linux.so.2 is already aboard. Fixing
this properly requires fixing the address space management stuff
properly.
Modified: trunk/coregrind/vg_redir.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/coregrind/vg_redir.c 2005-04-20 14:30:19 UTC (rev 3533)
+++ trunk/coregrind/vg_redir.c 2005-04-20 14:32:32 UTC (rev 3534)
@@ -426,6 +426,8 @@
add_redirect_sym_to_sym("soname:libc.so.6", "stpcpy",
"*vgpreload_memcheck.so*", "stpcpy");
 
+ add_redirect_sym_to_sym("soname:ld-linux.so.2", "strlen",
+ "*vgpreload_memcheck.so*", "strlen");
add_redirect_sym_to_sym("soname:libc.so.6", "strlen",
"*vgpreload_memcheck.so*", "strlen");
 
|
|
From: <sv...@va...> - 2005-04-20 14:30:25
|
Author: sewardj Date: 2005-04-20 15:30:19 +0100 (Wed, 20 Apr 2005) New Revision: 3533 Modified: trunk/Makefile.am Log: Disable Addrcheck builds whilst I'm doing bull-in-a-china-shop stuff with Memcheck. Modified: trunk/Makefile.am =================================================================== --- trunk/Makefile.am 2005-04-19 04:10:25 UTC (rev 3532) +++ trunk/Makefile.am 2005-04-20 14:30:19 UTC (rev 3533) @@ -6,13 +6,14 @@ ## include must be first for tool.h ## addrcheck must come after memcheck, for mac_*.o TOOLS = memcheck \ - addrcheck \ cachegrind \ corecheck \ massif \ lackey \ none +## addrcheck \ + # Temporary: we want to compile Helgrind, but not regtest it. SUBDIRS = include coregrind . docs tests auxprogs $(TOOLS) helgrind ##DIST_SUBDIRS = $(SUBDIRS) helgrind |
|
From: Julian S. <js...@ac...> - 2005-04-20 14:03:09
|
Sorry, no silver bullet. For memcheck/addrcheck/helgrind, Valgrind needs a complete picture of the state of memory, and that can only be achieved by running the program right from the start. For cachegrind, in principle it would be possible to start collecting data part way through the run. That would give you some misleading cache-cold-start effects, but it would work in general. However, we have no mechanism to achieve that at present. J On Wednesday 20 April 2005 14:49, Rex Walburn wrote: > Hi All > > I wanted to know if there exists a macro such that, I can insert it in > my source code and the valgrind tool (like memcheck) will check for > memory errors or other cache misses only from that point onwards. For > example, if I can insert a macro like VALGRIND_TOOL_START and > VALGRIND_TOOL_END in 2 areas in my source code, then the tool like > memcheck or cachegrind will do their checking and stuff only for that > part of the code that lies between the two macros. Do these macros > exist? If not, can I write them using the VALGRIND_MAGIC_SEQUENCE > macro, or will I have to do something tricky ? > > Thanks. |
|
From: Rex W. <wa...@gm...> - 2005-04-20 13:49:50
|
Hi All I wanted to know if there exists a macro such that, I can insert it in my source code and the valgrind tool (like memcheck) will check for memory errors or other cache misses only from that point onwards. For example, if I can insert a macro like VALGRIND_TOOL_START and VALGRIND_TOOL_END in 2 areas in my source code, then the tool like memcheck or cachegrind will do their checking and stuff only for that part of the code that lies between the two macros. Do these macros exist? If not, can I write them using the VALGRIND_MAGIC_SEQUENCE macro, or will I have to do something tricky ? Thanks. --=20 Rex Walburn |
|
From: Julian S. <js...@ac...> - 2005-04-20 13:34:33
|
> I'm working on an arm926-based MontaVista Linux for a multimedia phone. I > want to know whether there is any version of Valgrind which works for the > arm. Not at the moment. Running on arm is something that would be good to do in the long run, though. Can you describe the environment in which you want to run Valgrind? In particular, do you want to run valgrind itself on the arm target, or are you developing using some kind of cross-debugging arrangement (eg, running arm code on a simulator on x86 ?) J |
|
From: <js...@ac...> - 2005-04-20 03:04:38
|
Nightly build on phoenix ( SuSE 9.1 ) started at 2005-04-20 03:50:01 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow insn_mmx: valgrind ./insn_mmx insn_mmxext: (skipping, prereq failed: ../../../tests/cputest x86-mmxext) insn_sse: valgrind ./insn_sse insn_sse2: (skipping, prereq failed: ../../../tests/cputest x86-sse2) int: valgrind ./int pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert seg_override: valgrind ./seg_override -- Finished tests in none/tests/x86 ------------------------------------ yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 201 tests, 5 stderr failures, 0 stdout failures ================= memcheck/tests/pth_once (stderr) memcheck/tests/scalar (stderr) memcheck/tests/threadederrno (stderr) memcheck/tests/writev (stderr) corecheck/tests/fdleak_fcntl (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <to...@co...> - 2005-04-20 02:36:21
|
Nightly build on dunsmere ( athlon, Fedora Core 3 ) started at 2005-04-20 03:30:05 BST Checking out vex source tree ... done Building vex ... done Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 185 tests, 78 stderr failures, 2 stdout failures ================= memcheck/tests/addressable (stderr) memcheck/tests/badaddrvalue (stderr) memcheck/tests/badfree-2trace (stderr) memcheck/tests/badfree (stderr) memcheck/tests/badjump (stderr) memcheck/tests/badjump2 (stderr) memcheck/tests/badloop (stderr) memcheck/tests/badpoll (stderr) memcheck/tests/badrw (stderr) memcheck/tests/brk (stderr) memcheck/tests/brk2 (stderr) memcheck/tests/buflen_check (stderr) memcheck/tests/clientperm (stderr) memcheck/tests/custom_alloc (stderr) memcheck/tests/describe-block (stderr) memcheck/tests/doublefree (stderr) memcheck/tests/error_counts (stdout) memcheck/tests/errs1 (stderr) memcheck/tests/execve (stderr) memcheck/tests/execve2 (stderr) memcheck/tests/exitprog (stderr) memcheck/tests/fprw (stderr) memcheck/tests/fwrite (stderr) memcheck/tests/inits (stderr) memcheck/tests/inline (stderr) memcheck/tests/leak-0 (stderr) memcheck/tests/leak-cycle (stderr) memcheck/tests/leak-regroot (stderr) memcheck/tests/leak-tree (stderr) memcheck/tests/malloc1 (stderr) memcheck/tests/malloc2 (stderr) memcheck/tests/malloc3 (stderr) memcheck/tests/manuel1 (stderr) memcheck/tests/manuel2 (stderr) memcheck/tests/manuel3 (stderr) memcheck/tests/match-overrun (stderr) memcheck/tests/memalign2 (stderr) memcheck/tests/memalign_test (stderr) memcheck/tests/memcmptest (stderr) memcheck/tests/mempool (stderr) memcheck/tests/metadata (stderr) memcheck/tests/mismatches (stderr) memcheck/tests/mmaptest (stderr) memcheck/tests/nanoleak (stderr) memcheck/tests/nanoleak_supp (stderr) memcheck/tests/new_nothrow (stderr) memcheck/tests/new_override (stderr) memcheck/tests/null_socket (stderr) 
memcheck/tests/overlap (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/post-syscall (stderr) memcheck/tests/realloc1 (stderr) memcheck/tests/realloc2 (stderr) memcheck/tests/realloc3 (stderr) memcheck/tests/scalar (stderr) memcheck/tests/scalar_exit_group (stderr) memcheck/tests/scalar_fork (stderr) memcheck/tests/scalar_supp (stderr) memcheck/tests/scalar_vfork (stderr) memcheck/tests/sigaltstack (stderr) memcheck/tests/signal2 (stderr) memcheck/tests/sigprocmask (stderr) memcheck/tests/str_tester (stderr) memcheck/tests/supp1 (stderr) memcheck/tests/supp2 (stderr) memcheck/tests/suppfree (stderr) memcheck/tests/threadederrno (stderr) memcheck/tests/toobig-allocs (stderr) memcheck/tests/trivialleak (stderr) memcheck/tests/vgtest_ume (stderr) memcheck/tests/weirdioctl (stderr) memcheck/tests/writev (stderr) memcheck/tests/x86/fpeflags (stderr) memcheck/tests/x86/pushfpopf (stderr) memcheck/tests/x86/tronical (stderr) memcheck/tests/zeropage (stderr) none/tests/faultstatus (stderr) none/tests/selfrun (stdout) none/tests/selfrun (stderr) none/tests/x86/int (stderr) |
|
From: Tom H. <th...@cy...> - 2005-04-20 02:30:57
|
Nightly build on audi ( i686, Red Hat 9 ) started at 2005-04-20 03:25:03 BST Checking out vex source tree ... done Building vex ... done Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 184 tests, 3 stderr failures, 0 stdout failures ================= memcheck/tests/scalar (stderr) none/tests/faultstatus (stderr) none/tests/x86/int (stderr) |
|
From: Tom H. <th...@cy...> - 2005-04-20 02:30:47
|
Nightly build on honda ( x86_64, Fedora Core 3 ) started at 2005-04-20 03:10:09 BST Checking out vex source tree ... done Building vex ... done Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 165 tests, 82 stderr failures, 22 stdout failures ================= memcheck/tests/addressable (stdout) memcheck/tests/addressable (stderr) memcheck/tests/badaddrvalue (stdout) memcheck/tests/badaddrvalue (stderr) memcheck/tests/badfree-2trace (stderr) memcheck/tests/badfree (stderr) memcheck/tests/badjump (stderr) memcheck/tests/badjump2 (stderr) memcheck/tests/badloop (stderr) memcheck/tests/badpoll (stderr) memcheck/tests/badrw (stderr) memcheck/tests/brk (stderr) memcheck/tests/brk2 (stderr) memcheck/tests/buflen_check (stderr) memcheck/tests/clientperm (stdout) memcheck/tests/clientperm (stderr) memcheck/tests/custom_alloc (stderr) memcheck/tests/describe-block (stderr) memcheck/tests/doublefree (stderr) memcheck/tests/error_counts (stdout) memcheck/tests/errs1 (stderr) memcheck/tests/execve (stderr) memcheck/tests/execve2 (stderr) memcheck/tests/exitprog (stderr) memcheck/tests/fprw (stderr) memcheck/tests/fwrite (stdout) memcheck/tests/fwrite (stderr) memcheck/tests/inits (stderr) memcheck/tests/inline (stdout) memcheck/tests/inline (stderr) memcheck/tests/leak-0 (stderr) memcheck/tests/leak-cycle (stderr) memcheck/tests/leak-regroot (stderr) memcheck/tests/leak-tree (stderr) memcheck/tests/leakotron (stdout) memcheck/tests/malloc1 (stderr) memcheck/tests/malloc2 (stderr) memcheck/tests/malloc3 (stdout) memcheck/tests/malloc3 (stderr) memcheck/tests/manuel1 (stdout) memcheck/tests/manuel1 (stderr) memcheck/tests/manuel2 (stdout) memcheck/tests/manuel2 (stderr) memcheck/tests/manuel3 (stderr) memcheck/tests/match-overrun (stderr) memcheck/tests/memalign_test (stderr) memcheck/tests/memcmptest (stdout) memcheck/tests/memcmptest (stderr) 
memcheck/tests/mempool (stderr) memcheck/tests/metadata (stdout) memcheck/tests/metadata (stderr) memcheck/tests/mismatches (stderr) memcheck/tests/nanoleak (stderr) memcheck/tests/new_override (stdout) memcheck/tests/new_override (stderr) memcheck/tests/overlap (stdout) memcheck/tests/overlap (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/post-syscall (stdout) memcheck/tests/post-syscall (stderr) memcheck/tests/realloc3 (stderr) memcheck/tests/scalar (stderr) memcheck/tests/scalar_exit_group (stderr) memcheck/tests/scalar_fork (stderr) memcheck/tests/scalar_supp (stderr) memcheck/tests/scalar_vfork (stderr) memcheck/tests/sigaltstack (stderr) memcheck/tests/signal2 (stdout) memcheck/tests/signal2 (stderr) memcheck/tests/sigprocmask (stderr) memcheck/tests/supp2 (stderr) memcheck/tests/suppfree (stderr) memcheck/tests/threadederrno (stdout) memcheck/tests/toobig-allocs (stderr) memcheck/tests/trivialleak (stderr) memcheck/tests/vgtest_ume (stderr) memcheck/tests/weirdioctl (stdout) memcheck/tests/weirdioctl (stderr) memcheck/tests/writev (stderr) memcheck/tests/zeropage (stdout) addrcheck/tests/addressable (stdout) addrcheck/tests/addressable (stderr) addrcheck/tests/badrw (stderr) addrcheck/tests/fprw (stderr) addrcheck/tests/leak-0 (stderr) addrcheck/tests/leak-cycle (stderr) addrcheck/tests/leak-regroot (stderr) addrcheck/tests/leak-tree (stderr) addrcheck/tests/overlap (stdout) addrcheck/tests/overlap (stderr) addrcheck/tests/toobig-allocs (stderr) corecheck/tests/fdleak_cmsg (stderr) corecheck/tests/fdleak_creat (stderr) corecheck/tests/fdleak_dup (stderr) corecheck/tests/fdleak_dup2 (stderr) corecheck/tests/fdleak_fcntl (stderr) corecheck/tests/fdleak_ipv4 (stderr) corecheck/tests/fdleak_open (stderr) corecheck/tests/fdleak_pipe (stderr) corecheck/tests/fdleak_socketpair (stderr) massif/tests/toobig-allocs (stderr) none/tests/faultstatus (stderr) none/tests/selfrun (stdout) none/tests/selfrun (stderr) |
|
From: Tom H. <to...@co...> - 2005-04-20 02:26:25
|
Nightly build on dunsmere ( Fedora Core 3 ) started at 2005-04-20 03:20:03 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow insn_mmx: valgrind ./insn_mmx insn_mmxext: valgrind ./insn_mmxext insn_sse: valgrind ./insn_sse insn_sse2: (skipping, prereq failed: ../../../tests/cputest x86-sse2) int: valgrind ./int sh: line 1: 30071 Segmentation fault VALGRINDLIB=/tmp/valgrind.4392/valgrind/.in_place /tmp/valgrind.4392/valgrind/./coregrind/valgrind --command-line-only=yes --memcheck:leak-check=no --addrcheck:leak-check=no --tool=none ./int >int.stdout.out 2>int.stderr.out pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert seg_override: valgrind ./seg_override -- Finished tests in none/tests/x86 ------------------------------------ yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 207 tests, 4 stderr failures, 0 stdout failures ================= memcheck/tests/execve (stderr) memcheck/tests/execve2 (stderr) memcheck/tests/scalar (stderr) memcheck/tests/scalar_supp (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2005-04-20 02:26:11
|
Nightly build on ginetta ( i686, Red Hat 8.0 ) started at 2005-04-20 03:20:02 BST Checking out vex source tree ... done Building vex ... done Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 183 tests, 4 stderr failures, 0 stdout failures ================= memcheck/tests/scalar (stderr) memcheck/tests/threadederrno (stderr) none/tests/faultstatus (stderr) none/tests/x86/int (stderr) |
|
From: Tom H. <th...@cy...> - 2005-04-20 02:22:34
|
Nightly build on audi ( Red Hat 9 ) started at 2005-04-20 03:15:02 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow fpu_lazy_eflags: valgrind ./fpu_lazy_eflags insn_basic: valgrind ./insn_basic insn_cmov: valgrind ./insn_cmov insn_fpu: valgrind ./insn_fpu insn_mmx: valgrind ./insn_mmx insn_mmxext: valgrind ./insn_mmxext insn_sse: valgrind ./insn_sse insn_sse2: (skipping, prereq failed: ../../../tests/cputest x86-sse2) int: valgrind ./int pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert seg_override: valgrind ./seg_override -- Finished tests in none/tests/x86 ------------------------------------ yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 206 tests, 1 stderr failure, 0 stdout failures ================= memcheck/tests/scalar (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2005-04-20 02:22:06
|
Nightly build on alvis ( i686, Red Hat 7.3 ) started at 2005-04-20 03:15:02 BST Checking out vex source tree ... done Building vex ... done Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 183 tests, 17 stderr failures, 0 stdout failures ================= memcheck/tests/addressable (stderr) memcheck/tests/describe-block (stderr) memcheck/tests/leak-0 (stderr) memcheck/tests/leak-cycle (stderr) memcheck/tests/leak-regroot (stderr) memcheck/tests/leak-tree (stderr) memcheck/tests/match-overrun (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/scalar (stderr) memcheck/tests/threadederrno (stderr) memcheck/tests/vgtest_ume (stderr) addrcheck/tests/leak-0 (stderr) addrcheck/tests/leak-cycle (stderr) addrcheck/tests/leak-regroot (stderr) addrcheck/tests/leak-tree (stderr) none/tests/faultstatus (stderr) none/tests/x86/int (stderr) |
|
From: Tom H. <th...@cy...> - 2005-04-20 02:16:45
|
Nightly build on ginetta ( Red Hat 8.0 ) started at 2005-04-20 03:10:02 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow insn_cmov: valgrind ./insn_cmov insn_fpu: valgrind ./insn_fpu insn_mmx: valgrind ./insn_mmx insn_mmxext: valgrind ./insn_mmxext insn_sse: valgrind ./insn_sse insn_sse2: (skipping, prereq failed: ../../../tests/cputest x86-sse2) int: valgrind ./int pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert seg_override: valgrind ./seg_override -- Finished tests in none/tests/x86 ------------------------------------ yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 205 tests, 3 stderr failures, 0 stdout failures ================= memcheck/tests/pth_once (stderr) memcheck/tests/scalar (stderr) memcheck/tests/threadederrno (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2005-04-20 02:11:54
|
Nightly build on alvis ( Red Hat 7.3 ) started at 2005-04-20 03:05:02 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow == 205 tests, 17 stderr failures, 0 stdout failures ================= memcheck/tests/addressable (stderr) memcheck/tests/describe-block (stderr) memcheck/tests/distinguished-writes (stderr) memcheck/tests/leak-0 (stderr) memcheck/tests/leak-cycle (stderr) memcheck/tests/leak-regroot (stderr) memcheck/tests/leak-tree (stderr) memcheck/tests/match-overrun (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/pth_once (stderr) memcheck/tests/scalar (stderr) memcheck/tests/threadederrno (stderr) memcheck/tests/vgtest_ume (stderr) addrcheck/tests/leak-0 (stderr) addrcheck/tests/leak-cycle (stderr) addrcheck/tests/leak-regroot (stderr) addrcheck/tests/leak-tree (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2005-04-20 02:03:11
|
Nightly build on gill ( x86_64, Fedora Core 2 ) started at 2005-04-20 03:00:03 BST Checking out vex source tree ... done Building vex ... done Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 165 tests, 82 stderr failures, 22 stdout failures ================= memcheck/tests/addressable (stdout) memcheck/tests/addressable (stderr) memcheck/tests/badaddrvalue (stdout) memcheck/tests/badaddrvalue (stderr) memcheck/tests/badfree-2trace (stderr) memcheck/tests/badfree (stderr) memcheck/tests/badjump (stderr) memcheck/tests/badjump2 (stderr) memcheck/tests/badloop (stderr) memcheck/tests/badpoll (stderr) memcheck/tests/badrw (stderr) memcheck/tests/brk (stderr) memcheck/tests/brk2 (stderr) memcheck/tests/buflen_check (stderr) memcheck/tests/clientperm (stdout) memcheck/tests/clientperm (stderr) memcheck/tests/custom_alloc (stderr) memcheck/tests/describe-block (stderr) memcheck/tests/doublefree (stderr) memcheck/tests/error_counts (stdout) memcheck/tests/errs1 (stderr) memcheck/tests/execve (stderr) memcheck/tests/execve2 (stderr) memcheck/tests/exitprog (stderr) memcheck/tests/fprw (stderr) memcheck/tests/fwrite (stdout) memcheck/tests/fwrite (stderr) memcheck/tests/inits (stderr) memcheck/tests/inline (stdout) memcheck/tests/inline (stderr) memcheck/tests/leak-0 (stderr) memcheck/tests/leak-cycle (stderr) memcheck/tests/leak-regroot (stderr) memcheck/tests/leak-tree (stderr) memcheck/tests/leakotron (stdout) memcheck/tests/malloc1 (stderr) memcheck/tests/malloc2 (stderr) memcheck/tests/malloc3 (stdout) memcheck/tests/malloc3 (stderr) memcheck/tests/manuel1 (stdout) memcheck/tests/manuel1 (stderr) memcheck/tests/manuel2 (stdout) memcheck/tests/manuel2 (stderr) memcheck/tests/manuel3 (stderr) memcheck/tests/match-overrun (stderr) memcheck/tests/memalign_test (stderr) memcheck/tests/memcmptest (stdout) memcheck/tests/memcmptest (stderr) 
memcheck/tests/mempool (stderr) memcheck/tests/metadata (stdout) memcheck/tests/metadata (stderr) memcheck/tests/mismatches (stderr) memcheck/tests/nanoleak (stderr) memcheck/tests/new_override (stdout) memcheck/tests/new_override (stderr) memcheck/tests/overlap (stdout) memcheck/tests/overlap (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/post-syscall (stdout) memcheck/tests/post-syscall (stderr) memcheck/tests/realloc3 (stderr) memcheck/tests/scalar (stderr) memcheck/tests/scalar_exit_group (stderr) memcheck/tests/scalar_fork (stderr) memcheck/tests/scalar_supp (stderr) memcheck/tests/scalar_vfork (stderr) memcheck/tests/sigaltstack (stderr) memcheck/tests/signal2 (stdout) memcheck/tests/signal2 (stderr) memcheck/tests/sigprocmask (stderr) memcheck/tests/supp2 (stderr) memcheck/tests/suppfree (stderr) memcheck/tests/threadederrno (stdout) memcheck/tests/toobig-allocs (stderr) memcheck/tests/trivialleak (stderr) memcheck/tests/vgtest_ume (stderr) memcheck/tests/weirdioctl (stdout) memcheck/tests/weirdioctl (stderr) memcheck/tests/writev (stderr) memcheck/tests/zeropage (stdout) addrcheck/tests/addressable (stdout) addrcheck/tests/addressable (stderr) addrcheck/tests/badrw (stderr) addrcheck/tests/fprw (stderr) addrcheck/tests/leak-0 (stderr) addrcheck/tests/leak-cycle (stderr) addrcheck/tests/leak-regroot (stderr) addrcheck/tests/leak-tree (stderr) addrcheck/tests/overlap (stdout) addrcheck/tests/overlap (stderr) addrcheck/tests/toobig-allocs (stderr) corecheck/tests/fdleak_cmsg (stderr) corecheck/tests/fdleak_creat (stderr) corecheck/tests/fdleak_dup (stderr) corecheck/tests/fdleak_dup2 (stderr) corecheck/tests/fdleak_fcntl (stderr) corecheck/tests/fdleak_ipv4 (stderr) corecheck/tests/fdleak_open (stderr) corecheck/tests/fdleak_pipe (stderr) corecheck/tests/fdleak_socketpair (stderr) massif/tests/toobig-allocs (stderr) none/tests/faultstatus (stderr) none/tests/selfrun (stdout) none/tests/selfrun (stderr) |