Index: valgrind/memcheck/mac_leakcheck.c
===================================================================
--- valgrind.orig/memcheck/mac_leakcheck.c	2005-02-23 18:50:10.000000000 -0800
+++ valgrind/memcheck/mac_leakcheck.c	2005-02-24 00:26:44.000000000 -0800
@@ -34,7 +34,8 @@
 #include "mac_shared.h"
 
 /* Define to debug the memory-leak-detector. */
-/* #define VG_DEBUG_LEAKCHECK */
+#define VG_DEBUG_LEAKCHECK 0
+#define VG_DEBUG_CLIQUE	   0
 
 #define ROUNDDN(p, a)	((Addr)(p) & ~((a)-1))
 #define ROUNDUP(p, a)	ROUNDDN((p)+(a)-1, (a))
@@ -103,7 +104,7 @@ typedef
    shadows[i].  Return -1 if none found.  This assumes that shadows[]
    has been sorted on the ->data field. */
 
-#ifdef VG_DEBUG_LEAKCHECK
+#if VG_DEBUG_LEAKCHECK
 /* Used to sanity-check the fast binary-search mechanism. */
 static 
 Int find_shadow_for_OLD ( Addr        ptr, 
@@ -158,7 +159,7 @@ Int find_shadow_for ( Addr        ptr, 
       break;
    }
 
-#  ifdef VG_DEBUG_LEAKCHECK
+#  if VG_DEBUG_LEAKCHECK
    sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
 #  endif
    /* VG_(printf)("%d\n", retVal); */
@@ -176,20 +177,27 @@ static Addr         lc_max_mallocd_addr;
 static Bool	  (*lc_is_valid_chunk)  (UInt chunk);
 static Bool	  (*lc_is_valid_address)(Addr addr);
 
-/* Used for printing leak errors, avoids exposing the LossRecord type (which
-   comes in as void*, requiring a cast. */
-void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records)
+static const Char *pp_lossmode(Reachedness lossmode)
 {
-   LossRecord* l = (LossRecord*)vl;
    const Char *loss = "?";
 
-   switch(l->loss_mode) {
+   switch(lossmode) {
    case Unreached:	loss = "definitely lost"; break;
    case IndirectLeak:	loss = "indirectly lost"; break;
    case Interior:	loss = "possibly lost"; break;
    case Proper:		loss = "still reachable"; break;
    }
 
+   return loss;
+}
+
+/* Used for printing leak errors, avoids exposing the LossRecord type (which
+   comes in as void*, requiring a cast). */
+void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records)
+{
+   LossRecord* l = (LossRecord*)vl;
+   const Char *loss = pp_lossmode(l->loss_mode);
+
    VG_(message)(Vg_UserMsg, "");
    VG_(message)(Vg_UserMsg, 
                 "%d+%d bytes in %d blocks are %s in loss record %d of %d",
@@ -212,8 +220,9 @@ static Int lc_compar(void* n1, void* n2)
 }
 
 /* If ptr is pointing to a heap-allocated block which hasn't been seen
-   before, push it onto the mark stack. */
-static void _lc_markstack_push(Addr ptr, Bool mopup)
+   before, push it onto the mark stack.  Clique is the index of the
+   clique leader; -1 if none. */
+static void _lc_markstack_push(Addr ptr, Int clique)
 {
    Int sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);
 
@@ -233,13 +242,37 @@ static void _lc_markstack_push(Addr ptr,
       lc_markstack_top = sh_no;
    }
 
-   if (mopup) {
+   if (clique != -1) {
       if (0)
 	 VG_(printf)("mopup: %d: %p is %d\n", 
 		     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);
 
-      if (lc_markstack[sh_no].state == Unreached)
+      /* An unmarked block - add it to the clique.  Add its size to
+	 the clique-leader's indirect size.  If the new block was
+	 itself a clique leader, it isn't any more, so add its
+	 indirect to the new clique leader.
+
+	 If this block *is* the clique leader, it means this is a
+	 cyclic structure, so none of this applies. */
+      if (lc_markstack[sh_no].state == Unreached) {
 	 lc_markstack[sh_no].state = IndirectLeak;
+
+	 if (sh_no != clique) {
+	    if (VG_DEBUG_CLIQUE) {
+	       if (lc_markstack[sh_no].indirect)
+		  VG_(printf)("  clique %d joining clique %d adding %d+%d bytes\n", 
+			      sh_no, clique, 
+			      lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
+	       else
+		  VG_(printf)("  %d joining %d adding %d\n", 
+			      sh_no, clique, lc_shadows[sh_no]->size);
+	    }
+
+	    lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
+	    lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
+	    lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
+	 }
+      }
    } else if (ptr == lc_shadows[sh_no]->data) {
       lc_markstack[sh_no].state = Proper;
    } else {
@@ -250,7 +283,7 @@ static void _lc_markstack_push(Addr ptr,
 
 static void lc_markstack_push(Addr ptr)
 {
-   _lc_markstack_push(ptr, False);
+   _lc_markstack_push(ptr, -1);
 }
 
 /* Return the top of the mark stack, if any. */
@@ -267,8 +300,11 @@ static Int lc_markstack_pop(void)
 }
 
 /* Scan a block of memory between [start, start+len).  This range may
-   be bogus, inaccessable, or otherwise strange; we deal with it. */
-static void _lc_scan_memory(Addr start, SizeT len, Bool mopup)
+   be bogus, inaccessible, or otherwise strange; we deal with it.
+
+   If clique != -1, it means we're gathering leaked memory into
+   cliques, and clique is the index of the current clique leader. */
+static void _lc_scan_memory(Addr start, SizeT len, Int clique)
 {
    Addr ptr = ROUNDUP(start, sizeof(Addr));
    Addr end = ROUNDDN(start+len, sizeof(Addr));
@@ -300,7 +336,7 @@ static void _lc_scan_memory(Addr start, 
       if (__builtin_setjmp(memscan_jmpbuf) == 0) {
 	 if ((*lc_is_valid_address)(ptr)) {
 	    addr = *(Addr *)ptr;
-	    _lc_markstack_push(addr, mopup);
+	    _lc_markstack_push(addr, clique);
 	 }
 	 ptr += sizeof(Addr);
       } else {
@@ -318,13 +354,13 @@ static void _lc_scan_memory(Addr start, 
 
 static void lc_scan_memory(Addr start, SizeT len)
 {
-   _lc_scan_memory(start, len, False);
+   _lc_scan_memory(start, len, -1);
 }
 
-/* Process the mark stack until empty.  If mopup is true, then we're
-   actually gathering leaked blocks, so they should be marked
-   IndirectLeak. */
-static SizeT lc_do_leakcheck(Bool mopup)
+/* Process the mark stack until empty.  If clique is not -1, then we're
+   gathering leaked blocks into that clique leader's clique, so they
+   should be marked IndirectLeak. */
+static SizeT lc_do_leakcheck(Int clique)
 {
    Int scanned = 0;
    Int top;
@@ -335,7 +371,7 @@ static SizeT lc_do_leakcheck(Bool mopup)
 
       scanned += lc_shadows[top]->size;
 
-      _lc_scan_memory(lc_shadows[top]->data, lc_shadows[top]->size, mopup);
+      _lc_scan_memory(lc_shadows[top]->data, lc_shadows[top]->size, clique);
    }
 
    return scanned;
@@ -422,33 +458,39 @@ void MAC_(do_detect_memory_leaks) (
    VG_(mark_from_registers)(lc_markstack_push);
 
    /* Keep walking the heap until everything is found */
-   bytes_notified = lc_do_leakcheck(False);
+   bytes_notified = lc_do_leakcheck(-1);
 
    if (VG_(clo_verbosity) > 0)
       VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);
 
-   /* Go through and find the heads of lost data-structures; we don't
-      want to report every single lost block individually. */
+   /* Go through and group lost structures into cliques.  For each
+      Unreached block, push it onto the mark stack, and find all the
+      blocks linked to it.  These are marked IndirectLeak, and their
+      size is added to the clique leader's indirect size.  If one of
+      the found blocks was itself a clique leader (from a previous
+      pass), then the cliques are merged. */
    for (i = 0; i < lc_n_shadows; i++) {
-      SizeT indirect = 0;
-
+      if (VG_DEBUG_CLIQUE)
+	 VG_(printf)("cliques: %d at %p -> %s\n",
+		     i, lc_shadows[i]->data, pp_lossmode(lc_markstack[i].state));
       if (lc_markstack[i].state != Unreached)
 	 continue;
 
       sk_assert(lc_markstack_top == -1);
 
-      if (0)
-	 VG_(printf)("%d: mopping up from %p\n", i, lc_shadows[i]->data);
-
-      _lc_markstack_push(lc_shadows[i]->data, True);
+      if (VG_DEBUG_CLIQUE)
+	 VG_(printf)("%d: gathering clique %p\n", i, lc_shadows[i]->data);
+      
+      _lc_markstack_push(lc_shadows[i]->data, i);
 
-      indirect = lc_do_leakcheck(True);
+      lc_do_leakcheck(i);
 
       sk_assert(lc_markstack_top == -1);
       sk_assert(lc_markstack[i].state == IndirectLeak);
 
-      lc_markstack[i].state = Unreached; /* return to unreached state */
-      lc_markstack[i].indirect = indirect;
+      lc_markstack[i].state = Unreached; /* Return to unreached state,
+					    to indicate it's a clique
+					    leader */
    }
       
    /* Common up the lost blocks so we can print sensible error messages. */
@@ -495,7 +537,7 @@ void MAC_(do_detect_memory_leaks) (
       UInt        n_min = 0xFFFFFFFF;
       for (p = errlist; p != NULL; p = p->next) {
-         if (p->num_blocks > 0 && p->total_bytes < n_min) {
-            n_min = p->total_bytes;
+         if (p->num_blocks > 0 && p->total_bytes + p->indirect_bytes < n_min) {
+            n_min = p->total_bytes + p->indirect_bytes;
             p_min = p;
          }
       }
@@ -504,10 +546,11 @@ void MAC_(do_detect_memory_leaks) (
       /* Ok to have tst==NULL;  it's only used if --gdb-attach=yes, and
          we disallow that when --leak-check=yes.  
          
-         Prints the error if not suppressed, unless it's reachable (Proper)
+         Prints the error if not suppressed, unless it's reachable (Proper or IndirectLeak)
          and --show-reachable=no */
 
-      print_record = ( MAC_(clo_show_reachable) || Proper != p_min->loss_mode );
+      print_record = ( MAC_(clo_show_reachable) || 
+		       Unreached == p_min->loss_mode || Interior == p_min->loss_mode );
       is_suppressed = 
          VG_(unique_error) ( VG_(get_VCPU_tid)(), LeakErr, (UInt)i+1,
                              (Char*)n_lossrecords, (void*) p_min,
