|
From: <sv...@va...> - 2017-01-11 22:13:58
|
Author: philippe
Date: Wed Jan 11 22:13:52 2017
New Revision: 16199
Log:
Do not capture the free stacktrace in memcheck, unless we have
to keep the free stacktrace and/or to compute full xtree memory.
Also, properly compute avg nr of IP per execontext: the avg must
be computed using the real nr of execontext stored, not the hash
table size.
Modified:
trunk/coregrind/m_execontext.c
trunk/memcheck/mc_malloc_wrappers.c
Modified: trunk/coregrind/m_execontext.c
==============================================================================
--- trunk/coregrind/m_execontext.c (original)
+++ trunk/coregrind/m_execontext.c Wed Jan 11 22:13:52 2017
@@ -186,7 +186,7 @@
" exectx: %'lu lists, %'llu contexts (avg %3.2f per list)"
" (avg %3.2f IP per context)\n",
ec_htab_size, ec_totstored, (Double)ec_totstored / (Double)ec_htab_size,
- (Double)total_n_ips / (Double)ec_htab_size
+ (Double)total_n_ips / (Double)ec_totstored
);
VG_(message)(Vg_DebugMsg,
" exectx: %'llu searches, %'llu full compares (%'llu per 1000)\n",
Modified: trunk/memcheck/mc_malloc_wrappers.c
==============================================================================
--- trunk/memcheck/mc_malloc_wrappers.c (original)
+++ trunk/memcheck/mc_malloc_wrappers.c Wed Jan 11 22:13:52 2017
@@ -318,7 +318,11 @@
switch (MC_(clo_keep_stacktraces)) {
case KS_none: return;
- case KS_alloc: pos = -1; break;
+ case KS_alloc:
+ if (LIKELY(VG_(clo_xtree_memory)
+ != Vg_XTMemory_Full))
+ return;
+ pos = -1; break;
case KS_free: pos = 0; break;
case KS_alloc_then_free: pos = 0; break;
case KS_alloc_and_free: pos = 1; break;
@@ -332,7 +336,7 @@
ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
- if (pos >= 0)
+ if (LIKELY(pos >= 0))
mc->where[pos] = ec_free;
}
|