|
From: Jeremy F. <je...@go...> - 2005-02-25 01:54:13
|
CVS commit by fitzhardinge:
Group leaked blocks, so that only the "head" of each chain of linked
blocks is reported. For each such head, the amount of indirectly leaked
memory is also reported, so you can tell the total amount of memory
leaked due to a particular allocation site. --show-reachable=yes will
show both reachable allocated blocks and indirectly leaked blocks.
M +109 -48 mac_leakcheck.c 1.20
--- valgrind/memcheck/mac_leakcheck.c #1.19:1.20
@@ -35,5 +35,6 @@
/* Define to debug the memory-leak-detector. */
-/* #define VG_DEBUG_LEAKCHECK */
+#define VG_DEBUG_LEAKCHECK 0
+#define VG_DEBUG_CLIQUE 0
#define ROUNDDN(p, a) ((Addr)(p) & ~((a)-1))
@@ -104,5 +105,5 @@ typedef
has been sorted on the ->data field. */
-#ifdef VG_DEBUG_LEAKCHECK
+#if VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism. */
static
@@ -159,5 +160,5 @@ Int find_shadow_for ( Addr ptr,
}
-# ifdef VG_DEBUG_LEAKCHECK
+# if VG_DEBUG_LEAKCHECK
sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
# endif
@@ -173,16 +174,14 @@ static Int lc_markstack_top;
static Addr lc_min_mallocd_addr;
static Addr lc_max_mallocd_addr;
+static SizeT lc_scanned;
static Bool (*lc_is_valid_chunk) (UInt chunk);
static Bool (*lc_is_valid_address)(Addr addr);
-/* Used for printing leak errors, avoids exposing the LossRecord type (which
- comes in as void*, requiring a cast. */
-void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records)
+static const Char *pp_lossmode(Reachedness lossmode)
{
- LossRecord* l = (LossRecord*)vl;
const Char *loss = "?";
- switch(l->loss_mode) {
+ switch(lossmode) {
case Unreached: loss = "definitely lost"; break;
case IndirectLeak: loss = "indirectly lost"; break;
@@ -191,9 +190,27 @@ void MAC_(pp_LeakError)(void* vl, UInt n
}
+ return loss;
+}
+
+/* Used for printing leak errors, avoids exposing the LossRecord type (which
+ comes in as void*, requiring a cast. */
+void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records)
+{
+ LossRecord* l = (LossRecord*)vl;
+ const Char *loss = pp_lossmode(l->loss_mode);
+
VG_(message)(Vg_UserMsg, "");
+ if (l->indirect_bytes) {
VG_(message)(Vg_UserMsg,
- "%d+%d bytes in %d blocks are %s in loss record %d of %d",
+ "%d (%d+%d) bytes in %d blocks are %s in loss record %d of %d",
+ l->total_bytes + l->indirect_bytes,
l->total_bytes, l->indirect_bytes, l->num_blocks,
loss, n_this_record, n_total_records);
+ } else {
+ VG_(message)(Vg_UserMsg,
+ "%d bytes in %d blocks are %s in loss record %d of %d",
+ l->total_bytes, l->num_blocks,
+ loss, n_this_record, n_total_records);
+ }
VG_(pp_ExeContext)(l->allocated_at);
}
@@ -213,8 +230,17 @@ static Int lc_compar(void* n1, void* n2)
/* If ptr is pointing to a heap-allocated block which hasn't been seen
- before, push it onto the mark stack. */
-static void _lc_markstack_push(Addr ptr, Bool mopup)
+ before, push it onto the mark stack. Clique is the index of the
+ clique leader; -1 if none. */
+static void _lc_markstack_push(Addr ptr, Int clique)
{
- Int sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);
+ Int sh_no;
+
+ if (!VG_(is_client_addr)(ptr)) /* quick filter */
+ return;
+
+ sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);
+
+ if (VG_DEBUG_LEAKCHECK)
+ VG_(printf)("ptr=%p -> block %d\n", ptr, sh_no);
if (sh_no == -1)
@@ -234,11 +260,35 @@ static void _lc_markstack_push(Addr ptr,
}
- if (mopup) {
+ if (clique != -1) {
if (0)
VG_(printf)("mopup: %d: %p is %d\n",
sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);
- if (lc_markstack[sh_no].state == Unreached)
+ /* An unmarked block - add it to the clique. Add its size to
+ the clique-leader's indirect size. If the new block was
+ itself a clique leader, it isn't any more, so add its
+ indirect to the new clique leader.
+
+ If this block *is* the clique leader, it means this is a
+ cyclic structure, so none of this applies. */
+ if (lc_markstack[sh_no].state == Unreached) {
lc_markstack[sh_no].state = IndirectLeak;
+
+ if (sh_no != clique) {
+ if (VG_DEBUG_CLIQUE) {
+ if (lc_markstack[sh_no].indirect)
+ VG_(printf)(" clique %d joining clique %d adding %d+%d bytes\n",
+ sh_no, clique,
+ lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
+ else
+ VG_(printf)(" %d joining %d adding %d\n",
+ sh_no, clique, lc_shadows[sh_no]->size);
+ }
+
+ lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
+ lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
+ lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
+ }
+ }
} else if (ptr == lc_shadows[sh_no]->data) {
lc_markstack[sh_no].state = Proper;
@@ -251,5 +301,5 @@ static void _lc_markstack_push(Addr ptr,
static void lc_markstack_push(Addr ptr)
{
- _lc_markstack_push(ptr, False);
+ _lc_markstack_push(ptr, -1);
}
@@ -268,6 +318,9 @@ static Int lc_markstack_pop(void)
/* Scan a block of memory between [start, start+len). This range may
- be bogus, inaccessable, or otherwise strange; we deal with it. */
-static void _lc_scan_memory(Addr start, SizeT len, Bool mopup)
+ be bogus, inaccessable, or otherwise strange; we deal with it.
+
+ If clique != -1, it means we're gathering leaked memory into
+ cliques, and clique is the index of the current clique leader. */
+static void _lc_scan_memory(Addr start, SizeT len, Int clique)
{
Addr ptr = ROUNDUP(start, sizeof(Addr));
@@ -275,7 +328,11 @@ static void _lc_scan_memory(Addr start,
vki_sigset_t sigmask;
+ if (VG_DEBUG_LEAKCHECK)
+ VG_(printf)("scan %p-%p\n", start, len);
VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
VG_(set_fault_catcher)(vg_scan_all_valid_memory_catcher);
+ lc_scanned += end-ptr;
+
if (!VG_(is_client_addr)(ptr) ||
!VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
@@ -301,6 +358,7 @@ static void _lc_scan_memory(Addr start,
if ((*lc_is_valid_address)(ptr)) {
addr = *(Addr *)ptr;
- _lc_markstack_push(addr, mopup);
- }
+ _lc_markstack_push(addr, clique);
+ } else if (0 && VG_DEBUG_LEAKCHECK)
+ VG_(printf)("%p not valid\n", ptr);
ptr += sizeof(Addr);
} else {
@@ -309,5 +367,5 @@ static void _lc_scan_memory(Addr start,
VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
- ptr = PGROUNDUP(ptr); /* bad page - skip it */
+ ptr = PGROUNDUP(ptr+1); /* bad page - skip it */
}
}
@@ -319,5 +377,5 @@ static void _lc_scan_memory(Addr start,
static void lc_scan_memory(Addr start, SizeT len)
{
- _lc_scan_memory(start, len, False);
+ _lc_scan_memory(start, len, -1);
}
@@ -325,7 +383,6 @@ static void lc_scan_memory(Addr start, S
actually gathering leaked blocks, so they should be marked
IndirectLeak. */
-static SizeT lc_do_leakcheck(Bool mopup)
+static void lc_do_leakcheck(Int clique)
{
- Int scanned = 0;
Int top;
@@ -334,10 +391,6 @@ static SizeT lc_do_leakcheck(Bool mopup)
sk_assert(lc_markstack[top].state != Unreached);
- scanned += lc_shadows[top]->size;
-
- _lc_scan_memory(lc_shadows[top]->data, lc_shadows[top]->size, mopup);
+ _lc_scan_memory(lc_shadows[top]->data, lc_shadows[top]->size, clique);
}
-
- return scanned;
}
@@ -363,5 +416,4 @@ void MAC_(do_detect_memory_leaks) (
Int blocks_suppressed;
Int n_lossrecords;
- UInt bytes_notified;
Bool is_suppressed;
@@ -416,4 +468,6 @@ void MAC_(do_detect_memory_leaks) (
lc_is_valid_address = is_valid_address;
+ lc_scanned = 0;
+
/* Do the scan of memory, pushing any pointers onto the mark stack */
VG_(find_root_memory)(lc_scan_memory);
@@ -423,14 +477,19 @@ void MAC_(do_detect_memory_leaks) (
/* Keep walking the heap until everything is found */
- bytes_notified = lc_do_leakcheck(False);
+ lc_do_leakcheck(-1);
if (VG_(clo_verbosity) > 0)
- VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);
+ VG_(message)(Vg_UserMsg, "checked %d bytes.", lc_scanned);
- /* Go through and find the heads of lost data-structures; we don't
- want to report every single lost block individually. */
+ /* Go through and group lost structures into cliques. For each
+ Unreached block, push it onto the mark stack, and find all the
+ blocks linked to it. These are marked IndirectLeak, and their
+ size is added to the clique leader's indirect size. If one of
+ the found blocks was itself a clique leader (from a previous
+ pass), then the cliques are merged. */
for (i = 0; i < lc_n_shadows; i++) {
- SizeT indirect = 0;
-
+ if (VG_DEBUG_CLIQUE)
+ VG_(printf)("cliques: %d at %p -> %s\n",
+ i, lc_shadows[i]->data, pp_lossmode(lc_markstack[i].state));
if (lc_markstack[i].state != Unreached)
continue;
@@ -438,16 +497,17 @@ void MAC_(do_detect_memory_leaks) (
sk_assert(lc_markstack_top == -1);
- if (0)
- VG_(printf)("%d: mopping up from %p\n", i, lc_shadows[i]->data);
+ if (VG_DEBUG_CLIQUE)
+ VG_(printf)("%d: gathering clique %p\n", i, lc_shadows[i]->data);
- _lc_markstack_push(lc_shadows[i]->data, True);
+ _lc_markstack_push(lc_shadows[i]->data, i);
- indirect = lc_do_leakcheck(True);
+ lc_do_leakcheck(i);
sk_assert(lc_markstack_top == -1);
sk_assert(lc_markstack[i].state == IndirectLeak);
- lc_markstack[i].state = Unreached; /* return to unreached state */
- lc_markstack[i].indirect = indirect;
+ lc_markstack[i].state = Unreached; /* Return to unreached state,
+ to indicate its a clique
+ leader */
}
@@ -496,5 +556,5 @@ void MAC_(do_detect_memory_leaks) (
for (p = errlist; p != NULL; p = p->next) {
if (p->num_blocks > 0 && p->total_bytes < n_min) {
- n_min = p->total_bytes;
+ n_min = p->total_bytes + p->indirect_bytes;
p_min = p;
}
@@ -505,8 +565,9 @@ void MAC_(do_detect_memory_leaks) (
we disallow that when --leak-check=yes.
- Prints the error if not suppressed, unless it's reachable (Proper)
+ Prints the error if not suppressed, unless it's reachable (Proper or IndirectLeak)
and --show-reachable=no */
- print_record = ( MAC_(clo_show_reachable) || Proper != p_min->loss_mode );
+ print_record = ( MAC_(clo_show_reachable) ||
+ Unreached == p_min->loss_mode || Interior == p_min->loss_mode );
is_suppressed =
VG_(unique_error) ( VG_(get_VCPU_tid)(), LeakErr, (UInt)i+1,
|