You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
|
|
|
1
(9) |
2
(19) |
|
3
(5) |
4
(20) |
5
(5) |
6
(9) |
7
(6) |
8
(8) |
9
(5) |
|
10
(5) |
11
(5) |
12
(7) |
13
(7) |
14
(6) |
15
(5) |
16
(5) |
|
17
(5) |
18
(7) |
19
(21) |
20
(9) |
21
(6) |
22
(8) |
23
(6) |
|
24
(9) |
25
(9) |
26
(8) |
27
(19) |
28
(15) |
29
(8) |
30
(5) |
|
31
(6) |
|
|
|
|
|
|
|
From: <sv...@va...> - 2008-08-29 23:34:00
|
Author: sewardj
Date: 2008-08-30 00:34:06 +0100 (Sat, 30 Aug 2008)
New Revision: 8561
Log:
First implementation of a conflicting-access backtrace cache, that
uses an LRU scheme to throw away "old" history and so keep memory usage
for retaining conflicting-access history within some fixed bounds.
Modified:
branches/YARD/coregrind/m_execontext.c
branches/YARD/helgrind/hg_main.c
branches/YARD/helgrind/libhb.h
branches/YARD/helgrind/libhb_core.c
branches/YARD/helgrind/libhb_sa.c
branches/YARD/helgrind/libhb_vg.c
branches/YARD/include/pub_tool_execontext.h
Modified: branches/YARD/coregrind/m_execontext.c
===================================================================
--- branches/YARD/coregrind/m_execontext.c 2008-08-28 20:35:29 UTC (rev 8560)
+++ branches/YARD/coregrind/m_execontext.c 2008-08-29 23:34:06 UTC (rev 8561)
@@ -469,6 +469,11 @@
return NULL;
}
+ExeContext* VG_(make_ExeContext_from_StackTrace)( Addr* ips, UInt n_ips )
+{
+ return record_ExeContext_wrk2(ips, n_ips);
+}
+
/*--------------------------------------------------------------------*/
/*--- end m_execontext.c ---*/
/*--------------------------------------------------------------------*/
Modified: branches/YARD/helgrind/hg_main.c
===================================================================
--- branches/YARD/helgrind/hg_main.c 2008-08-28 20:35:29 UTC (rev 8560)
+++ branches/YARD/helgrind/hg_main.c 2008-08-29 23:34:06 UTC (rev 8561)
@@ -4947,11 +4947,36 @@
}
}
-/* FIXME: move this somewhere sane */
-static struct EC_* get_EC_for_libhb ( Thr* hbt )
+/* FIXME: move these somewhere sane */
+
+static
+void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
Thread* thr;
ThreadId tid;
+ UWord nActual;
+ tl_assert(hbt);
+ thr = libhb_get_Thr_opaque( hbt );
+ tl_assert(thr);
+ tid = map_threads_maybe_reverse_lookup_SLOW(thr);
+ nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
+ NULL, NULL, 0 );
+ tl_assert(nActual <= nRequest);
+ for (; nActual < nRequest; nActual++)
+ frames[nActual] = 0;
+}
+
+static
+struct EC_* for_libhb__stacktrace_to_EC ( Addr* frames, UWord nFrames )
+{
+ return VG_(make_ExeContext_from_StackTrace)( frames, (UInt)nFrames );
+}
+
+static
+struct EC_* for_libhb__get_EC ( Thr* hbt )
+{
+ Thread* thr;
+ ThreadId tid;
ExeContext* ec;
tl_assert(hbt);
thr = libhb_get_Thr_opaque( hbt );
@@ -5046,7 +5071,9 @@
/////////////////////////////////////////////
hbthr_root = libhb_init( hg_zalloc, hg_free, shmem__bigchunk_alloc,
- get_EC_for_libhb );
+ for_libhb__get_stacktrace,
+ for_libhb__stacktrace_to_EC,
+ for_libhb__get_EC );
/////////////////////////////////////////////
initialise_data_structures(hbthr_root);
Modified: branches/YARD/helgrind/libhb.h
===================================================================
--- branches/YARD/helgrind/libhb.h 2008-08-28 20:35:29 UTC (rev 8560)
+++ branches/YARD/helgrind/libhb.h 2008-08-29 23:34:06 UTC (rev 8561)
@@ -51,13 +51,17 @@
/* Initialise library; returns Thr* for root thread. 'alloc' and
'shadow_alloc' should never return NULL, instead they should simply
not return if they encounter an out-of-memory condition. */
-Thr* libhb_init ( void* (*zalloc)( SizeT ),
- void (*dealloc)( void* ),
- void* (*shadow_alloc)( SizeT ),
- struct EC_* (*get_EC)( Thr* ) );
+Thr* libhb_init (
+ void* (*zalloc)( SizeT ),
+ void (*dealloc)( void* ),
+ void* (*shadow_alloc)( SizeT ),
+ void (*get_stacktrace)( Thr*, Addr*, UWord ),
+ struct EC_* (*stacktrace_to_EC)( Addr*, UWord ),
+ struct EC_* (*get_EC)( Thr* )
+ );
/* Shut down the library, and print stats (in fact that's _all_
- this is for. */
+ this is for.) */
void libhb_shutdown ( Bool show_stats );
/* Thread creation: returns Thr* for new thread */
Modified: branches/YARD/helgrind/libhb_core.c
===================================================================
--- branches/YARD/helgrind/libhb_core.c 2008-08-28 20:35:29 UTC (rev 8560)
+++ branches/YARD/helgrind/libhb_core.c 2008-08-29 23:34:06 UTC (rev 8561)
@@ -50,10 +50,12 @@
/* fwds for
Globals needed by other parts of the library. These are set
once at startup and then never changed. */
-void* (*main_zalloc_P)(SizeT);
-void (*main_dealloc_P)(void*);
-void* (*main_shadow_alloc_P)(SizeT);
-struct EC_* (*main_get_EC)(Thr*);
+static void* (*main_zalloc_P)( SizeT ) = NULL;
+static void (*main_dealloc_P)( void* ) = NULL;
+static void* (*main_shadow_alloc_P)( SizeT ) = NULL;
+static void (*main_get_stacktrace)( Thr*, Addr*, UWord ) = NULL;
+static struct EC_* (*main_stacktrace_to_EC)( Addr*, UWord ) = NULL;
+static struct EC_* (*main_get_EC)( Thr* ) = NULL;
static ULong stats__zallocd = 0;
static ULong stats__freed = 0;
@@ -2314,7 +2316,7 @@
}
SecMap;
-#define SecMap_MAGIC 0x571e58cb
+#define SecMap_MAGIC 0x571e58cbU
static inline Bool is_sane_SecMap ( SecMap* sm ) {
return sm != NULL && sm->magic == SecMap_MAGIC;
@@ -5165,6 +5167,7 @@
// Change-event map //
// //
/////////////////////////////////////////////////////////
+#if 0
#define N_EVENT_MAPS 4
#define N_EVENTS_PER_MAP (1*1000*1000)
@@ -5252,33 +5255,67 @@
return False;
}
+#else
-#if 0
/////////////////////////////////////////////////////////
// //
// Change-event map2 //
// //
/////////////////////////////////////////////////////////
+#define EVENT_MAP_GC_AT 500000
+#define EVENT_MAP_GC_DISCARD_FRACTION 0.5
+
+/* This is in two parts:
+
+ 1. An OSet of RCECs. This is a set of reference-counted stack
+ traces. When the reference count of a stack trace becomes zero,
+ it is removed from the set and freed up. The intent is to have
+ a set of stack traces which can be referred to from (2), but to
+ only represent each one once. The set is indexed/searched by
+ ordering on the stack trace vectors.
+
+ 2. An OSet of OldRefs. These store information about each old ref
+ that we need to record. It is indexed by address (of the
+ location for which the information is recorded), and contains a
+ pointer to a RCEC in (1). Each OldRef also contains a
+ generation number, indicating when it was most recently
+ accessed.
+
+ When we this set becomes too big, we can throw away the subset
+ of this set whose generation numbers are below some threshold;
+ hence doing approximate LRU discarding. For each discarded
+ OldRef we must of course decrement the reference count on the
+ ECEC it refers to, in order that entries from (1) eventually get
+ discarded too.
+*/
+
+///////////////////////////////////////////////////////
+//// Part (1): An OSet of RCECs
+///
+
#define N_FRAMES 8
-#define N_TREES 4
+// (UInt) `echo "Reference Counted Execution Context" | md5sum`
+#define RCEC_MAGIC 0xab88abb2UL
+
typedef
struct {
- /* Put .frames first so that it has offset zero within the
- element. This allows us to use RCEC__cmp as the comparison
- fn in the oset. */
+ UWord magic;
+ UWord rc;
+ UWord rcX; /* used for crosschecking */
UWord frames[N_FRAMES];
- UWord rc;
}
RCEC;
-static OSet* contextTree = NULL; /* OSet* of RC_EC */
+static OSet* contextTree = NULL; /* OSet* of RCEC */
/* Gives an arbitrary total order on RCEC .frames fields */
-static Word RCEC__cmp ( RCEC* ec1, RCEC* ec2 ) {
+static Word RCEC__cmp_by_frames ( RCEC* ec1, RCEC* ec2 ) {
Word i;
+ tl_assert(ec1 && ec1->magic == RCEC_MAGIC);
+ tl_assert(ec2 && ec2->magic == RCEC_MAGIC);
for (i = 0; i < N_FRAMES; i++) {
if (ec1->frames[i] < ec2->frames[i]) return -1;
if (ec1->frames[i] > ec2->frames[i]) return 1;
@@ -5289,93 +5326,310 @@
/* Dec the ref of this EC_RC, and if it becomes zero,
delete it from the contextTree. */
-static void ctxt__rcdec ( RC_EC* ec )
+static void ctxt__rcdec ( RCEC* ec )
{
+ tl_assert(ec && ec->magic == RCEC_MAGIC);
tl_assert(ec->rc > 0);
ec->rc--;
if (ec->rc == 0) {
- void* nd = OSetGen_Remove( contextTree, ec );
+ void* nd = VG_(OSetGen_Remove)( contextTree, ec );
tl_assert(nd); /* must be in the tree */
+ tl_assert(nd == ec);
+ tl_assert( ((RCEC*)nd)->magic == RCEC_MAGIC );
VG_(OSetGen_FreeNode)( contextTree, nd );
}
}
-static void ctxt__rcinc ( RC_EC* ec )
+static void ctxt__rcinc ( RCEC* ec )
{
+ tl_assert(ec && ec->magic == RCEC_MAGIC);
ec->rc++;
}
/* Find the given RCEC in the tree, and return a pointer to it. Or,
if not present, add the given one to the tree (by making a copy of
it, so the caller can immediately deallocate the original) and
- return a pointer to the copy. Note that the inserted node will
- have .rc of zero and so the caller must immediatly increment it. */
+ return a pointer to the copy. The caller can safely have 'example'
+ on its stack, since we will always return a pointer to a copy of
+ it, not to the original. Note that the inserted node will have .rc
+ of zero and so the caller must immediatly increment it. */
static RCEC* ctxt__find_or_add ( RCEC* example )
{
+ RCEC* copy;
+ tl_assert(example && example->magic == RCEC_MAGIC);
tl_assert(example->rc == 0);
- RCEC* copy = VG_(OSetGen_Lookup)( contextTree, example );
- if (!copy) {
+ copy = VG_(OSetGen_Lookup)( contextTree, example );
+ if (copy) {
+ tl_assert(copy != example);
+ } else {
copy = VG_(OSetGen_AllocNode)( contextTree, sizeof(RCEC) );
- VG_(memcpy)(copy, example, sizeof(RCEC));
+ tl_assert(copy != example);
+ *copy = *example;
VG_(OSetGen_Insert)( contextTree, copy );
}
return copy;
}
-/////////////////////////////////////////////////////////
+static RCEC* get_RCEC ( Thr* thr )
+{
+ RCEC example;
+ example.magic = RCEC_MAGIC;
+ example.rc = 0;
+ example.rcX = 0;
+ main_get_stacktrace( thr, &example.frames[0], N_FRAMES );
+ return ctxt__find_or_add( &example );
+}
+///////////////////////////////////////////////////////
+//// Part (2): An OSet of OldRefs, that refer to (1)
+///
+
+// (UInt) `echo "Old Reference Information" | md5sum`
+#define OldRef_MAGIC 0x30b1f075UL
+
typedef
struct {
+ UWord magic;
+ UWord gen; /* when most recently accessed */
Addr ea;
RCEC* rcec;
Thr* thr;
}
OldRef;
-static Word OldRef__cmp ( OldRef* r1, OldRef* r2 ) {
+static Word OldRef__cmp_by_EA ( OldRef* r1, OldRef* r2 ) {
+ tl_assert(r1 && r1->magic == OldRef_MAGIC);
+ tl_assert(r2 && r2->magic == OldRef_MAGIC);
if (r1->ea < r2->ea) return -1;
if (r1->ea > r2->ea) return 1;
return 0;
}
-static OSet* oldrefTrees[N_TREES]; /* OSet* of OldRef */
-static Word oldrefTSizes[N_TREES]; /* # elements in corresponding OSet */
-static Word oldrefCurr;
+static OSet* oldrefTree = NULL; /* OSet* of OldRef */
+static UWord oldrefGen = 0; /* current LRU generation # */
+static UWord oldrefTreeN = 0; /* # elems in oldrefTree */
+static UWord oldrefGenIncAt = 0; /* inc gen # when size hits this */
-static void event_map_bind ( Addr a, struct EC_* ec, Thr* thr ) {
- OldRef* prev = VG_(OSetGen_Lookup)( oldrefTrees[oldrefCurr],
- &a );
- if (prev) {
+static void event_map_bind ( Addr a, struct EC_* ec, Thr* thr )
+{
+ OldRef key, *ref;
+ RCEC* here;
- }
+ key.ea = a;
+ key.magic = OldRef_MAGIC;
+
+ ref = VG_(OSetGen_Lookup)( oldrefTree, &key );
+ if (ref) {
+ tl_assert(ref->magic == OldRef_MAGIC);
+ here = get_RCEC( thr );
+ ctxt__rcinc( here );
+ ctxt__rcdec( ref->rcec );
+ ref->gen = oldrefGen;
+ ref->ea = a;
+ ref->rcec = here;
+ ref->thr = thr;
+ } else {
+ if (oldrefTreeN >= oldrefGenIncAt) {
+ oldrefGen++;
+ oldrefGenIncAt = oldrefTreeN + 50000;
+ VG_(printf)("oldrefTree: new gen %lu at size %lu\n",
+ oldrefGen, oldrefTreeN );
+ }
+
+ here = get_RCEC( thr );
+ ctxt__rcinc(here);
+ ref = VG_(OSetGen_AllocNode)( oldrefTree, sizeof(OldRef) );
+ ref->magic = OldRef_MAGIC;
+ ref->gen = oldrefGen;
+ ref->ea = a;
+ ref->rcec = here;
+ ref->thr = thr;
+ VG_(OSetGen_Insert)( oldrefTree, ref );
+ oldrefTreeN++;
+ }
}
-/////////////////////////////////////////////////////////
-static void event_map2_init ( void )
+static
+Bool event_map_lookup ( /*OUT*/struct EC_** resEC,
+ /*OUT*/Thr** resThr, Addr a )
{
- Word i;
- tl_assert(offsetof(RCEC,frames) == 0);
+ OldRef key, *ref;
+ key.ea = a;
+ key.magic = OldRef_MAGIC;
+
+ ref = VG_(OSetGen_Lookup)( oldrefTree, &key );
+ if (ref) {
+ tl_assert(ref->magic == OldRef_MAGIC);
+ tl_assert(ref->rcec);
+ tl_assert(ref->rcec->magic == RCEC_MAGIC);
+ *resEC = main_stacktrace_to_EC(&ref->rcec->frames[0], N_FRAMES);
+ *resThr = ref->thr;
+ return True;
+ } else {
+ return False;
+ }
+}
+
+static void event_map_init ( void )
+{
tl_assert(!contextTree);
- contextTree = OSetGen_Create( offsetof(RCEC,frames), RCEC__cmp,
- main_zalloc, main_free );
+ contextTree = VG_(OSetGen_Create)(
+ 0,
+ (Word(*)(const void *, const void*))RCEC__cmp_by_frames,
+ main_zalloc, main_dealloc
+ );
tl_assert(contextTree);
- for (i = 0; i < N_TREES; i++) {
- oldrefTrees[i] = NULL;
- oldrefTSizes[i] = 0;
+ tl_assert(!oldrefTree);
+ oldrefTree = VG_(OSetGen_Create)(
+ 0,
+ (Word(*)(const void *, const void*))OldRef__cmp_by_EA,
+ main_zalloc, main_dealloc
+ );
+ tl_assert(oldrefTree);
+
+ oldrefGen = 0;
+ oldrefGenIncAt = 0;
+ oldrefTreeN = 0;
+}
+
+static void event_map__check_reference_counts ( void )
+{
+ RCEC* rcec;
+ OldRef* oldref;
+
+ /* Set the 'check' reference counts to zero */
+ VG_(OSetGen_ResetIter)( contextTree );
+ while ( (rcec = VG_(OSetGen_Next)( contextTree )) ) {
+ tl_assert(rcec->magic == RCEC_MAGIC);
+ tl_assert(rcec->rc > 0); /* unrefd nodes should be immediately rm'd */
+ rcec->rcX = 0;
}
- tl_assert(offsetof(OldRef,ea) == 0);
- oldrefCurr = 0;
- oldrefTrees[oldrefCurr] = VG_(OSetGen_Create)( offsetof(OldRef,ea),
- OldRef__cmp,
- main_zalloc, main_free );
- tl_assert(oldrefTrees[oldrefCurr]);
+ /* visit all the referencing points, inc check ref counts */
+ VG_(OSetGen_ResetIter)( oldrefTree );
+ while ( (oldref = VG_(OSetGen_Next)( oldrefTree )) ) {
+ tl_assert(oldref->magic == OldRef_MAGIC);
+ tl_assert(oldref->rcec);
+ tl_assert(oldref->rcec->magic == RCEC_MAGIC);
+ oldref->rcec->rcX++;
+ }
+
+ /* compare check ref counts with actual */
+ VG_(OSetGen_ResetIter)( contextTree );
+ while ( (rcec = VG_(OSetGen_Next)( contextTree )) ) {
+ tl_assert(rcec->rc == rcec->rcX);
+ }
}
+
+static void event_map_maybe_GC ( void )
+{
+ RCEC* rcec;
+ OldRef* oldref;
+ UWord keyW, valW;
+ WordFM* genMap;
+
+ if (LIKELY(oldrefTreeN < EVENT_MAP_GC_AT))
+ return;
+ VG_(printf)("libhb: event_map GC at size %lu\n", oldrefTreeN);
+
+ /* Check our counting is sane */
+ tl_assert(oldrefTreeN == (UWord) VG_(OSetGen_Size)( oldrefTree ));
+
+ /* Check the reference counts */
+ event_map__check_reference_counts();
+
+ /* Compute the distribution of generation values in the ref tree */
+ /* genMap :: generation-number -> count-of-nodes-with-that-number */
+ genMap = HG_(newFM)( main_zalloc, main_dealloc, NULL );
+
+ VG_(OSetGen_ResetIter)( oldrefTree );
+ while ( (oldref = VG_(OSetGen_Next)( oldrefTree )) ) {
+ UWord key = oldref->gen;
+ keyW = valW = 0;
+ if (HG_(lookupFM)(genMap, &keyW, &valW, NULL, key )) {
+ tl_assert(keyW == key);
+ tl_assert(valW > 0);
+ }
+ /* now valW is the old count for generation 'key' */
+ HG_(addToFM)(genMap, key, valW+1, 0);
+ }
+
+ tl_assert(HG_(sizeFM)(genMap) > 0);
+
+ UWord retained = oldrefTreeN, maxGen = 0;
+ HG_(initIterFM)( genMap );
+ while (HG_(nextIterFM)( genMap, &keyW, &valW, NULL )) {
+ tl_assert(keyW > 0); /* can't allow a generation # 0 */
+ VG_(printf)(" XXX: gen %lu has %lu\n", keyW, valW );
+ tl_assert(keyW >= maxGen);
+ tl_assert(retained >= valW);
+ if (retained - valW
+ > (UWord)(EVENT_MAP_GC_AT * EVENT_MAP_GC_DISCARD_FRACTION)) {
+ retained -= valW;
+ maxGen = keyW;
+ } else {
+ break;
+ }
+ }
+ HG_(doneIterFM)( genMap );
+
+ VG_(printf)(
+ " XXX: delete generations %lu and below, retaining %lu entries\n",
+ maxGen, retained );
+
+ HG_(deleteFM)( genMap, NULL, NULL, NULL );
+
+ /* If this fails, it means there's only one generation in the
+ entire tree. So we're kind of in a bad situation, and need to
+ do some stop-gap measure, such as randomly deleting half the
+ entires. */
+ tl_assert(retained < oldrefTreeN);
+
+ /* Now make up a big list of the oldrefTree entries we want to
+ delete. We can't simultaneously traverse the tree and delete
+ stuff from it, so first we need to copy them off somewhere
+ else. (sigh) */
+ XArray* refs2del;
+ refs2del = VG_(newXA)( main_zalloc, main_dealloc, sizeof(OldRef*) );
+
+ VG_(OSetGen_ResetIter)( oldrefTree );
+ while ( (oldref = VG_(OSetGen_Next)( oldrefTree )) ) {
+ if (oldref->gen <= maxGen)
+ VG_(addToXA)( refs2del, &oldref );
+ }
+
+ Word i, n2del = VG_(sizeXA)( refs2del );
+ tl_assert(n2del == (Word)(oldrefTreeN - retained));
+
+ VG_(printf)("%s","deleting entries\n");
+ for (i = 0; i < n2del; i++) {
+ OldRef* ref = *(OldRef**)VG_(indexXA)( refs2del, i );
+ tl_assert(ref);
+ tl_assert(ref->magic == OldRef_MAGIC);
+ void* nd = VG_(OSetGen_Remove)( oldrefTree, ref );
+ ctxt__rcdec( ref->rcec );
+ VG_(OSetGen_FreeNode)( oldrefTree, nd );
+ }
+
+ VG_(deleteXA)( refs2del );
+
+ tl_assert( VG_(OSetGen_Size)( oldrefTree ) == retained );
+
+ oldrefTreeN = retained;
+ oldrefGenIncAt = oldrefTreeN; /* start new gen right away */
+
+ /* Check the reference counts */
+ event_map__check_reference_counts();
+
+ VG_(printf)("XXXX final sizes: oldrefTree %ld, contextTree %ld\n\n",
+ VG_(OSetGen_Size)(oldrefTree), VG_(OSetGen_Size)(contextTree));
+
+}
+
#endif
-
/////////////////////////////////////////////////////////
// //
// Core MSM //
@@ -5575,7 +5829,7 @@
/////////////////////////////////////////////////////////
// (UInt) `echo "Synchronisation object" | md5sum`
-#define SO_MAGIC 0x56b3c5b0
+#define SO_MAGIC 0x56b3c5b0U
struct _SO {
VtsID viR; /* r-clock of sender */
@@ -5628,28 +5882,29 @@
}
-/* Globals needed by other parts of the library. These are set
- once at startup and then never changed. */
-void* (*main_zalloc_P)(SizeT) = NULL;
-void (*main_dealloc_P)(void*) = NULL;
-void* (*main_shadow_alloc_P)(SizeT) = NULL;
-struct EC_* (*main_get_EC)(Thr*) = NULL;
-
-Thr* libhb_init ( void* (*zalloc)( SizeT ),
- void (*dealloc)( void* ),
- void* (*shadow_alloc)( SizeT ),
- struct EC_* (*get_EC)( Thr* ) )
+Thr* libhb_init (
+ void* (*zalloc)( SizeT ),
+ void (*dealloc)( void* ),
+ void* (*shadow_alloc)( SizeT ),
+ void (*get_stacktrace)( Thr*, Addr*, UWord ),
+ struct EC_* (*stacktrace_to_EC)( Addr*, UWord ),
+ struct EC_* (*get_EC)( Thr* )
+ )
{
Thr* thr;
VtsID vi;
tl_assert(zalloc);
tl_assert(dealloc);
tl_assert(shadow_alloc);
+ tl_assert(get_stacktrace);
+ tl_assert(stacktrace_to_EC);
tl_assert(get_EC);
- main_zalloc_P = zalloc;
- main_dealloc_P = dealloc;
- main_shadow_alloc_P = shadow_alloc;
- main_get_EC = get_EC;
+ main_zalloc_P = zalloc;
+ main_dealloc_P = dealloc;
+ main_shadow_alloc_P = shadow_alloc;
+ main_get_stacktrace = get_stacktrace;
+ main_stacktrace_to_EC = stacktrace_to_EC;
+ main_get_EC = get_EC;
// No need to initialise hg_wordfm.
// No need to initialise hg_wordset.
@@ -6059,6 +6314,7 @@
void libhb_maybe_GC ( void )
{
+ event_map_maybe_GC();
/* If there are still freelist entries available, no need for a
GC. */
if (vts_tab_freelist != VtsID_INVALID)
Modified: branches/YARD/helgrind/libhb_sa.c
===================================================================
--- branches/YARD/helgrind/libhb_sa.c 2008-08-28 20:35:29 UTC (rev 8560)
+++ branches/YARD/helgrind/libhb_sa.c 2008-08-29 23:34:06 UTC (rev 8561)
@@ -88,6 +88,14 @@
return memalign(16,n);
}
+static void get_stacktrace ( Thr* thr, Addr* frames, UWord nFrames ) {
+ memset(frames, 0, nFrames * sizeof(frames[0]));
+}
+
+static struct EC_* stacktrace_to_EC ( Addr* frames, UWord nFrames ) {
+ return NULL;
+}
+
static struct EC_* get_EC ( Thr* thr ) {
return NULL;
}
@@ -121,17 +129,22 @@
libhb_so_recv(t, so, False/*weak*/);
}
-static void unlock ( Thr* t, SO* so ) {
- libhb_so_send(t, so);
+static void wrunlk ( Thr* t, SO* so ) {
+ libhb_so_send(t, so, True/*strong*/);
}
+static void rdunlk ( Thr* t, SO* so ) {
+ libhb_so_send(t, so, False/*weak*/);
+}
+
int main ( void )
{
// Addr a;
printf("\ndriver, blah\n");
Thr* t1
- = libhb_init( zalloc_nofail, dealloc, shadow_alloc_nofail, get_EC );
+ = libhb_init( zalloc_nofail, dealloc, shadow_alloc_nofail,
+ get_stacktrace, stacktrace_to_EC, get_EC );
SO* lk = libhb_so_alloc();
libhb_range_new( t1, 100, 8 );
@@ -140,7 +153,7 @@
Int do_test_RW1 = 1;
Int do_test_RW2 = 1;
- Int do_test_RW3 = 0;
+ Int do_test_RW3 = 1;
Int do_test0 = 1;
Int do_test1 = 1;
Int do_test2 = 1;
@@ -155,12 +168,12 @@
rdlock(t2, lk);
rd(t1, 100, 4);
rd(t2, 100, 4);
- unlock(t1, lk);
- unlock(t2, lk);
+ rdunlk(t1, lk);
+ rdunlk(t2, lk);
wrlock(t2, lk);
wr(t2, 100, 4);
- unlock(t2, lk);
+ wrunlk(t2, lk);
}
if (do_test_RW2) {
@@ -170,12 +183,12 @@
rdlock(t2, lk);
rd(t1, 100, 4);
rd(t2, 100, 4);
- unlock(t1, lk);
- unlock(t2, lk);
+ rdunlk(t1, lk);
+ rdunlk(t2, lk);
wrlock(t1, lk);
wr(t1, 100, 4);
- unlock(t1, lk);
+ wrunlk(t1, lk);
}
if (do_test0) {
@@ -196,10 +209,10 @@
wrlock(t1, lk);
wr(t1, 100, 4);
- unlock(t1, lk);
+ wrunlk(t1, lk);
wrlock(t2, lk);
wr(t2, 100, 4);
- unlock(t2, lk);
+ wrunlk(t2, lk);
}
if (do_test2)
@@ -208,10 +221,10 @@
wrlock(t1, lk);
wr(t1, 100, 4);
- unlock(t1, lk);
+ wrunlk(t1, lk);
// wrlock(t2, lk);
wr(t2, 100, 4); // race
- // unlock(t2, lk);
+ // wrunlk(t2, lk);
}
if (do_test3)
@@ -220,10 +233,10 @@
wrlock(t1, lk);
wr(t1, 100, 4);
- unlock(t1, lk);
+ wrunlk(t1, lk);
// wrlock(t2, lk);
rd(t2, 100, 4); // race
- // unlock(t2, lk);
+ // wrunlk(t2, lk);
}
// no race
@@ -244,10 +257,10 @@
for (i = 0; i < 1000; i++) {
wrlock(t1, lk);
wr(t1, 100, 4);
- unlock(t1, lk);
+ wrunlk(t1, lk);
wrlock(t2, lk);
wr(t2, 100, 4);
- unlock(t2, lk);
+ wrunlk(t2, lk);
}
}
Modified: branches/YARD/helgrind/libhb_vg.c
===================================================================
--- branches/YARD/helgrind/libhb_vg.c 2008-08-28 20:35:29 UTC (rev 8560)
+++ branches/YARD/helgrind/libhb_vg.c 2008-08-29 23:34:06 UTC (rev 8561)
@@ -9,6 +9,7 @@
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
+#include "pub_tool_oset.h"
static void* libhbPlainVG_memset ( void *s, Int c, SizeT sz ) {
return VG_(memset)(s,c,sz);
@@ -44,6 +45,35 @@
vgPlain_sprintf(_str, _format, _args)
+#define libhbPlainVG_OSetGen_Remove(_arg1, _arg2) \
+ vgPlain_OSetGen_Remove((_arg1),(_arg2))
+
+#define libhbPlainVG_OSetGen_FreeNode(_arg1, _arg2) \
+ vgPlain_OSetGen_FreeNode((_arg1),(_arg2))
+
+#define libhbPlainVG_OSetGen_Lookup(_arg1, _arg2) \
+ vgPlain_OSetGen_Lookup((_arg1),(_arg2))
+
+#define libhbPlainVG_OSetGen_AllocNode(_arg1, _arg2) \
+ vgPlain_OSetGen_AllocNode((_arg1),(_arg2))
+
+#define libhbPlainVG_OSetGen_Insert(_arg1, _arg2) \
+ vgPlain_OSetGen_Insert((_arg1),(_arg2))
+
+#define libhbPlainVG_OSetGen_Create(_arg1, _arg2, _arg3, _arg4) \
+ vgPlain_OSetGen_Create((_arg1),(_arg2),(_arg3),(_arg4))
+
+#define libhbPlainVG_OSetGen_Size(_arg1) \
+ vgPlain_OSetGen_Size((_arg1))
+
+#define libhbPlainVG_OSetGen_ResetIter(_arg1) \
+ vgPlain_OSetGen_ResetIter((_arg1))
+
+#define libhbPlainVG_OSetGen_Next(_arg1) \
+ vgPlain_OSetGen_Next((_arg1))
+
+
+
//////////////////////////////////////
#include "libhb_core.c"
//////////////////////////////////////
Modified: branches/YARD/include/pub_tool_execontext.h
===================================================================
--- branches/YARD/include/pub_tool_execontext.h 2008-08-28 20:35:29 UTC (rev 8560)
+++ branches/YARD/include/pub_tool_execontext.h 2008-08-29 23:34:06 UTC (rev 8561)
@@ -104,6 +104,8 @@
return (ecu > 0) && ((ecu & 3) == 0);
}
+// Make an ExeContext containing exactly the specified stack frames.
+ExeContext* VG_(make_ExeContext_from_StackTrace)( Addr* ips, UInt n_ips );
#endif // __PUB_TOOL_EXECONTEXT_H
|
|
From: Steve V.
|
First try was missing a user-visible define for CLONE_CHILD_LETGO.
This patch implements a valgrind-specific clone flag (VKI_CLONE_CHILD_LETGO),
which instructs valgrind to run the child natively. It is only implemented
for fork-like clones and on x86 (currently).
Index: valgrind/coregrind/m_syswrap/syswrap-linux.c
===================================================================
--- valgrind.orig/coregrind/m_syswrap/syswrap-linux.c 2008-08-29 15:39:51.000000000 -0700
+++ valgrind/coregrind/m_syswrap/syswrap-linux.c 2008-08-29 15:39:59.000000000 -0700
@@ -339,6 +339,14 @@
VG_(do_atfork_child)(tid);
+ if (flags & VKI_CLONE_CHILD_LETGO) {
+#if defined(VGP_x86_linux)
+ letgo_vex_x86_linux(&ctst->arch.vex, tid);
+ /* NORETURN */
+#else
+ VG_(message)(Vg_UserMsg, "CLONE_CHILD_LETGO is not yet implmented on this platform; ignoring.");
+#endif
+ }
/* restore signal mask */
VG_(sigprocmask)(VKI_SIG_SETMASK, &fork_saved_mask, NULL);
Index: valgrind/coregrind/m_syswrap/syswrap-x86-linux.c
===================================================================
--- valgrind.orig/coregrind/m_syswrap/syswrap-x86-linux.c 2008-08-29 15:39:51.000000000 -0700
+++ valgrind/coregrind/m_syswrap/syswrap-x86-linux.c 2008-08-29 15:39:59.000000000 -0700
@@ -190,6 +190,83 @@
// forward declarations
static void setup_child ( ThreadArchState*, ThreadArchState*, Bool );
static SysRes sys_set_thread_area ( ThreadId, vki_modify_ldt_t* );
+static SysRes propagate_thread_area(ThreadId tid, VexGuestArchState *vex);
+SysRes letgo_vex_x86_linux(VexGuestArchState *vex, ThreadId tid);
+
+static SysRes propagate_thread_area(ThreadId tid, VexGuestArchState *vex)
+{
+ VexGuestX86SegDescr * sdp;
+ vki_modify_ldt_t ud;
+ SysRes res;
+
+ if (! vex->guest_GS)
+ return VG_(mk_SysRes_Success)( 0 );
+
+ VG_(memset)(&ud, 0, sizeof(ud));
+ sdp = (vex->guest_GS >> 3) + ((VexGuestX86SegDescr*)vex->guest_GDT);
+
+ ud.entry_number = -1;
+ ud.base_addr = sdp->LdtEnt.Bits.BaseLow | (sdp->LdtEnt.Bits.BaseMid << 16)
+ | (sdp->LdtEnt.Bits.BaseHi << 24);
+ ud.limit = sdp->LdtEnt.Bits.LimitLow | (sdp->LdtEnt.Bits.LimitHi << 16);
+ ud.seg_32bit = sdp->LdtEnt.Bits.Default_Big;
+ ud.contents = sdp->LdtEnt.Bits.Type >> 2;
+ ud.read_exec_only = 1 ^ (1 & (sdp->LdtEnt.Bits.Type >> 1));
+ ud.limit_in_pages = sdp->LdtEnt.Bits.Granularity;
+ ud.seg_not_present = 0;
+ ud.useable = 1;
+
+ res = VG_(do_syscall1)( __NR_set_thread_area, (UWord)&ud);
+ if (res.isError) {
+ VG_(printf)("propagate_thread_area set_thread_area failed\n");
+ } else {
+ vex->guest_GS = 3 | (ud.entry_number << 3);
+ }
+ return res;
+}
+
+SysRes letgo_vex_x86_linux(VexGuestArchState *vex, ThreadId tid)
+{
+ vki_sigset_t clear;
+ unsigned long * vex_stack;
+ unsigned short es;
+ SysRes res;
+
+ res = propagate_thread_area(tid, vex);
+ if (res.isError)
+ return res;
+
+ /* Put EIP and a PUSHAD frame on the vex stack */
+ vex->guest_ESP -= sizeof(unsigned long *);
+ vex_stack = ((unsigned long *) vex->guest_ESP) - 8;
+ vex_stack[8] = vex->guest_EIP;
+ vex_stack[7] = 0; /* return value of clone*/
+ vex_stack[6] = vex->guest_ECX;
+ vex_stack[5] = vex->guest_EDX;
+ vex_stack[4] = vex->guest_EBX;
+ vex_stack[3] = vex->guest_ESP;
+ vex_stack[2] = vex->guest_EBP;
+ vex_stack[1] = vex->guest_ESI;
+ vex_stack[0] = vex->guest_EDI;
+
+ es = vex->guest_ES;
+ if (es == 0)
+ asm("mov %%ds, %0\n" : "=r"(es));
+
+ VG_(sigemptyset)(&clear);
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &clear, NULL);
+
+ /* Set the segment regs, change to the vex stack, restore regs and return */
+ asm("mov %0, %%es\n"
+ "mov %1, %%fs\n"
+ "mov %2, %%gs\n"
+ "movl %3, %%esp\n"
+ "popa\n"
+ "ret\n"
+ :: "r"(es), "r"(vex->guest_FS), "r"(vex->guest_GS), "r"(vex_stack) );
+
+ return VG_(mk_SysRes_Error)(-1); /* Make gcc happy */
+}
/*
When a client clones, we need to keep track of the new thread. This means:
@@ -901,6 +978,11 @@
goto reject;
}
+ /* Can't let a child run wild in a shared address space */
+ if ((cloneflags & (VKI_CLONE_VM | VKI_CLONE_VFORK | VKI_CLONE_CHILD_LETGO))
+ == (VKI_CLONE_VM | VKI_CLONE_CHILD_LETGO))
+ goto reject;
+
/* Only look at the flags we really care about */
switch (cloneflags & (VKI_CLONE_VM | VKI_CLONE_FS
| VKI_CLONE_FILES | VKI_CLONE_VFORK)) {
Index: valgrind/include/vki/vki-linux.h
===================================================================
--- valgrind.orig/include/vki/vki-linux.h 2008-08-29 15:39:51.000000000 -0700
+++ valgrind/include/vki/vki-linux.h 2008-08-29 15:39:59.000000000 -0700
@@ -322,6 +322,7 @@
#define VKI_CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */
#define VKI_CLONE_DETACHED 0x00400000 /* Unused, ignored */
#define VKI_CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
+#define VKI_CLONE_CHILD_LETGO 0x80000000 /* do not track fork like children*/
struct vki_sched_param {
int sched_priority;
Index: valgrind/memcheck/tests/x86/clone-fork-child-letgo.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ valgrind/memcheck/tests/x86/clone-fork-child-letgo.c 2008-08-29 15:39:59.000000000 -0700
@@ -0,0 +1,62 @@
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+/* Child entry point passed to clone().  Prints the address of its own
+ * argument slot (a location on the child's stack, so the two clones in
+ * main() print different addresses), then branches on p[1].  main()
+ * deliberately leaves p[1] (x[1]) uninitialised, so memcheck should
+ * report "Conditional jump or move depends on uninitialised value(s)"
+ * at this branch — that report is the point of the test.
+ */
+int fn_child(void *argp)
+{
+ int *const p = (int *)argp;
+ printf("child stack=%p\n", &argp);
+ fflush(stdout);
+ /* p[1] is intentionally uninitialised — do not "fix" this branch. */
+ if (p[1]) {
+ return p[0];
+ }
+ else {
+ return p[1];
+ }
+}
+
+/* NOTE(review): sighandler_t is declared but not referenced in this file. */
+typedef void (*sighandler_t)(int);
+
+/* SIGCHLD handler: just reports that the signal was delivered when a
+ * cloned child exits.  Installed by main() via signal().
+ */
+void handler(int signo)
+{
+ printf("handler signo=%d\n", signo);
+}
+
+/* Wrapper around clone(2).
+ *
+ * fn     - child entry point
+ * stack  - child stack pointer; on x86 the stack grows downwards, so
+ *          callers pass the HIGH end of the region (see main())
+ * xflags - extra clone flags OR'ed with SIGCHLD (e.g. CLONE_CHILD_LETGO)
+ * arg    - argument forwarded to fn
+ *
+ * Always returns 0; the child pid is discarded.  NOTE(review): failure
+ * is only reported via perror(), not propagated to the caller, and the
+ * commented-out waitpid() means the parent does not reap explicitly —
+ * presumably relying on SIGCHLD/process exit; confirm this is intended
+ * for the test's expected output ordering.
+ */
+int my_clone(int (*fn)(void *), void *stack, unsigned xflags, void *arg)
+{
+/* The flags lack CLONE_VM, so the clone() is like a fork().
+ * A new process gets created, not a new thread.
+ */
+ int pid = clone(fn, stack,
+ xflags | SIGCHLD, arg, 0,0,0);
+ if (pid < 0)
+ perror("clone failed");
+ //waitpid(pid, &status, 0);
+ //printf("status= %x\n", status);
+ return 0;
+}
+
+#define L_STACKS (1024*1024)
+
+#define CLONE_CHILD_LETGO 0x80000000
+
+/* Test driver: clones fn_child twice from the same address space image,
+ * once without and once with CLONE_CHILD_LETGO, each child on its own
+ * half of a fixed-address mapping so the stdout.exp addresses are
+ * deterministic.  x[1] is left uninitialised on purpose so that
+ * memcheck flags the branch in fn_child.
+ */
+int main(int argc, char *argv[])
+{
+ int x[2];
+/* Space for two stacks. */
+ void *const stack = mmap((void *)0x300000, L_STACKS, PROT_READ|PROT_WRITE,
+ MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+
+ /* NOTE(review): mmap() result is not checked; MAP_FIXED at 0x300000
+  * could fail (MAP_FAILED) and the clones would then use a bad stack. */
+
+ x[0]= 12345; /* x[1] undefined */
+ signal(SIGCHLD, handler);
+
+/* Stack uses second half of mmap()ed space. */
+ my_clone(fn_child, L_STACKS + stack, 0, x);
+
+/* Stack uses first half of mmap()ed space. */
+ my_clone(fn_child, (L_STACKS>>1) + stack, CLONE_CHILD_LETGO, x);
+ return 0;
+}
Index: valgrind/memcheck/tests/x86/clone-fork-child-letgo.stderr.exp
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ valgrind/memcheck/tests/x86/clone-fork-child-letgo.stderr.exp 2008-08-29 15:39:59.000000000 -0700
@@ -0,0 +1,9 @@
+
+Conditional jump or move depends on uninitialised value(s)
+ at 0x........: fn_child (clone-fork-child-letgo.c:13)
+
+ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0)
+malloc/free: in use at exit: 0 bytes in 0 blocks.
+malloc/free: 0 allocs, 0 frees, 0 bytes allocated.
+For a detailed leak analysis, rerun with: --leak-check=yes
+For counts of detected errors, rerun with: -v
Index: valgrind/memcheck/tests/x86/clone-fork-child-letgo.stdout.exp
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ valgrind/memcheck/tests/x86/clone-fork-child-letgo.stdout.exp 2008-08-29 15:39:59.000000000 -0700
@@ -0,0 +1,2 @@
+child stack=0x37fff0
+child stack=0x3ffff0
Index: valgrind/memcheck/tests/x86/clone-fork-child-letgo.vgtest
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ valgrind/memcheck/tests/x86/clone-fork-child-letgo.vgtest 2008-08-29 15:39:59.000000000 -0700
@@ -0,0 +1 @@
+prog: clone-fork-child-letgo
Index: valgrind/memcheck/tests/x86/Makefile.am
===================================================================
--- valgrind.orig/memcheck/tests/x86/Makefile.am 2008-08-29 15:39:51.000000000 -0700
+++ valgrind/memcheck/tests/x86/Makefile.am 2008-08-29 15:39:59.000000000 -0700
@@ -7,6 +7,7 @@
EXTRA_DIST = $(noinst_SCRIPTS) \
bug133694.vgtest bug133694.stderr.exp bug133694.stdout.exp \
bug152022.vgtest bug152022.stderr.exp bug152022.stdout.exp \
+ clone-fork-child-letgo.vgtest clone-fork-child-letgo.stderr.exp clone-fork-child-letgo.stdout.exp \
espindola2.vgtest espindola2.stderr.exp \
fpeflags.stderr.exp fpeflags.vgtest \
fxsave.vgtest fxsave.stdout.exp fxsave.stderr.exp \
@@ -35,6 +36,7 @@
check_PROGRAMS = \
bug133694 \
bug152022 \
+ clone-fork-child-letgo \
espindola2 \
fxsave \
int3-x86 \
Index: valgrind/coregrind/m_syswrap/priv_syswrap-linux.h
===================================================================
--- valgrind.orig/coregrind/m_syswrap/priv_syswrap-linux.h 2008-08-29 15:39:51.000000000 -0700
+++ valgrind/coregrind/m_syswrap/priv_syswrap-linux.h 2008-08-29 15:39:59.000000000 -0700
@@ -40,6 +40,10 @@
void (*f)(Word), Word arg1 );
extern SysRes ML_(do_fork_clone) ( ThreadId tid, UInt flags, Addr child_esp,
Int* parent_tidptr, Int* child_tidptr );
+#if defined(VGP_x86_linux)
+extern SysRes letgo_vex_x86_linux(VexGuestArchState *vex, ThreadId tid);
+#endif
+
// Linux-specific (but non-arch-specific) syscalls
Index: valgrind/include/valgrind.h
===================================================================
--- valgrind.orig/include/valgrind.h 2008-08-29 15:39:51.000000000 -0700
+++ valgrind/include/valgrind.h 2008-08-29 15:40:20.000000000 -0700
@@ -3913,6 +3913,16 @@
id, start, end, 0, 0); \
}
+#if defined(NVALGRIND)
+
+#define VALGRIND_CLONE_LETGO 0
+
+#else /* ! NVALGRIND */
+
+#define VALGRIND_CLONE_LETGO 0x80000000 /* do not track fork-like children */
+
+#endif /* NVALGRIND */
+
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
|
|
From: Cerion Armour-B. <ce...@va...> - 2008-08-29 20:18:26
|
Evan, Ivan, (& anyone else) If you've come across any code in valgrind that you think is broken, we'd appreciate your feedback... Cerion (CAB) On Thursday 28 Aug 2008, Evan Geller wrote: ... > I'm not even sure any more. It took me a good week or so to get the > flags right, they were a total pain haha. It wasn't your code that was > broken I don't think, I think it was CAB's code that you converted > into IR (some of the stuff you commented "this doesn't look right...") ... |
|
From: Tom H. <th...@cy...> - 2008-08-29 03:20:39
|
Nightly build on alvis ( i686, Red Hat 7.3 ) started at 2008-08-29 03:14:57 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 346 tests, 60 stderr failures, 1 stdout failure, 29 post failures == memcheck/tests/file_locking (stderr) memcheck/tests/leak-0 (stderr) memcheck/tests/leak-cycle (stderr) memcheck/tests/leak-regroot (stderr) memcheck/tests/leak-tree (stderr) memcheck/tests/long_namespace_xml (stderr) memcheck/tests/malloc_free_fill (stderr) memcheck/tests/origin1-yes (stderr) memcheck/tests/origin4-many (stderr) memcheck/tests/origin5-bz2 (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/stack_changes (stderr) memcheck/tests/varinfo1 (stderr) memcheck/tests/varinfo2 (stderr) memcheck/tests/varinfo3 (stderr) memcheck/tests/varinfo4 (stderr) memcheck/tests/varinfo5 (stderr) memcheck/tests/varinfo6 (stderr) memcheck/tests/x86/bug152022 (stderr) memcheck/tests/x86/scalar (stderr) memcheck/tests/x86/scalar_supp (stderr) memcheck/tests/x86/xor-undef-x86 (stderr) memcheck/tests/xml1 (stderr) massif/tests/alloc-fns-A (post) massif/tests/alloc-fns-B (post) massif/tests/basic (post) massif/tests/basic2 (post) massif/tests/big-alloc (post) massif/tests/culling1 (stderr) massif/tests/culling2 (stderr) massif/tests/custom_alloc (post) massif/tests/deep-A (post) massif/tests/deep-B (stderr) massif/tests/deep-B (post) massif/tests/deep-C (stderr) massif/tests/deep-C (post) massif/tests/deep-D (post) massif/tests/ignoring (post) massif/tests/insig (post) massif/tests/long-names (post) massif/tests/long-time (post) massif/tests/new-cpp (post) massif/tests/null (post) massif/tests/one (post) massif/tests/overloaded-new (post) massif/tests/peak (post) massif/tests/peak2 (stderr) massif/tests/peak2 (post) massif/tests/realloc (stderr) massif/tests/realloc (post) massif/tests/thresholds_0_0 (post) 
massif/tests/thresholds_0_10 (post) massif/tests/thresholds_10_0 (post) massif/tests/thresholds_10_10 (post) massif/tests/thresholds_5_0 (post) massif/tests/thresholds_5_10 (post) massif/tests/zero1 (post) massif/tests/zero2 (post) none/tests/blockfault (stderr) none/tests/mremap2 (stdout) none/tests/shell (stderr) none/tests/shell_valid1 (stderr) none/tests/shell_valid2 (stderr) none/tests/shell_valid3 (stderr) helgrind/tests/hg01_all_ok (stderr) helgrind/tests/hg02_deadlock (stderr) helgrind/tests/hg03_inherit (stderr) helgrind/tests/hg04_race (stderr) helgrind/tests/hg05_race2 (stderr) helgrind/tests/hg06_readshared (stderr) helgrind/tests/tc01_simple_race (stderr) helgrind/tests/tc02_simple_tls (stderr) helgrind/tests/tc03_re_excl (stderr) helgrind/tests/tc05_simple_race (stderr) helgrind/tests/tc06_two_races (stderr) helgrind/tests/tc07_hbl1 (stderr) helgrind/tests/tc08_hbl2 (stderr) helgrind/tests/tc09_bad_unlock (stderr) helgrind/tests/tc11_XCHG (stderr) helgrind/tests/tc12_rwl_trivial (stderr) helgrind/tests/tc14_laog_dinphils (stderr) helgrind/tests/tc16_byterace (stderr) helgrind/tests/tc17_sembar (stderr) helgrind/tests/tc18_semabuse (stderr) helgrind/tests/tc19_shadowmem (stderr) helgrind/tests/tc20_verifywrap (stderr) helgrind/tests/tc21_pthonce (stderr) helgrind/tests/tc22_exit_w_lock (stderr) helgrind/tests/tc23_bogus_condwait (stderr) helgrind/tests/tc24_nonzero_sem (stderr) |
|
From: Tom H. <th...@cy...> - 2008-08-29 02:55:49
|
Nightly build on aston ( x86_64, Fedora Core 5 ) started at 2008-08-29 03:20:05 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 444 tests, 8 stderr failures, 1 stdout failure, 0 post failures == memcheck/tests/file_locking (stderr) memcheck/tests/malloc_free_fill (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/x86/scalar (stderr) none/tests/blockfault (stderr) none/tests/mremap2 (stdout) helgrind/tests/tc20_verifywrap (stderr) helgrind/tests/tc21_pthonce (stderr) helgrind/tests/tc22_exit_w_lock (stderr) |
|
From: Tom H. <th...@cy...> - 2008-08-29 02:48:01
|
Nightly build on lloyd ( x86_64, Fedora 7 ) started at 2008-08-29 03:05:05 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 438 tests, 6 stderr failures, 2 stdout failures, 0 post failures == memcheck/tests/file_locking (stderr) memcheck/tests/malloc_free_fill (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/vcpu_fnfns (stdout) memcheck/tests/x86/scalar (stderr) none/tests/mremap2 (stdout) helgrind/tests/tc20_verifywrap (stderr) helgrind/tests/tc22_exit_w_lock (stderr) |
|
From: Tom H. <th...@cy...> - 2008-08-29 02:42:22
|
Nightly build on trojan ( x86_64, Fedora Core 6 ) started at 2008-08-29 03:25:03 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 442 tests, 9 stderr failures, 5 stdout failures, 0 post failures == memcheck/tests/file_locking (stderr) memcheck/tests/malloc_free_fill (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/vcpu_fnfns (stdout) memcheck/tests/x86/bug133694 (stdout) memcheck/tests/x86/bug133694 (stderr) memcheck/tests/x86/scalar (stderr) none/tests/cmdline1 (stdout) none/tests/cmdline2 (stdout) none/tests/mremap2 (stdout) helgrind/tests/tc17_sembar (stderr) helgrind/tests/tc20_verifywrap (stderr) helgrind/tests/tc21_pthonce (stderr) helgrind/tests/tc22_exit_w_lock (stderr) |
|
From: Tom H. <th...@cy...> - 2008-08-29 02:23:52
|
Nightly build on gill ( x86_64, Fedora Core 2 ) started at 2008-08-29 03:00:03 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 444 tests, 31 stderr failures, 3 stdout failures, 0 post failures == memcheck/tests/file_locking (stderr) memcheck/tests/malloc_free_fill (stderr) memcheck/tests/origin5-bz2 (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/stack_switch (stderr) memcheck/tests/varinfo6 (stderr) memcheck/tests/x86/scalar (stderr) memcheck/tests/x86/scalar_supp (stderr) none/tests/amd64/insn_ssse3 (stdout) none/tests/amd64/insn_ssse3 (stderr) none/tests/amd64/ssse3_misaligned (stderr) none/tests/blockfault (stderr) none/tests/fdleak_fcntl (stderr) none/tests/mremap2 (stdout) none/tests/x86/insn_ssse3 (stdout) none/tests/x86/insn_ssse3 (stderr) none/tests/x86/ssse3_misaligned (stderr) helgrind/tests/hg01_all_ok (stderr) helgrind/tests/hg02_deadlock (stderr) helgrind/tests/hg03_inherit (stderr) helgrind/tests/hg04_race (stderr) helgrind/tests/hg05_race2 (stderr) helgrind/tests/tc01_simple_race (stderr) helgrind/tests/tc05_simple_race (stderr) helgrind/tests/tc06_two_races (stderr) helgrind/tests/tc09_bad_unlock (stderr) helgrind/tests/tc14_laog_dinphils (stderr) helgrind/tests/tc16_byterace (stderr) helgrind/tests/tc17_sembar (stderr) helgrind/tests/tc19_shadowmem (stderr) helgrind/tests/tc20_verifywrap (stderr) helgrind/tests/tc21_pthonce (stderr) helgrind/tests/tc22_exit_w_lock (stderr) helgrind/tests/tc23_bogus_condwait (stderr) |