Index: docs/xml/manual-core.xml
===================================================================
--- docs/xml/manual-core.xml (revision 12798)
+++ docs/xml/manual-core.xml (working copy)
@@ -1411,8 +1411,8 @@
For tools that use their own version of
-malloc (e.g. Memcheck and
-Massif), the following options apply.
+malloc (e.g. Memcheck,
+Massif, Helgrind, DRD), the following options apply.
@@ -1431,6 +1431,25 @@
+
+
+
+
+
+ Valgrind's malloc, realloc, etc, add padding
+ blocks before and after each block allocated for the client. Such padding
+ blocks are called redzones.
+ The default value for the redzone size depends on the tool.
+ For example, Memcheck adds and protects 16 bytes before and after
+ each block allocated by the client to detect block overrun or underrun.
+
+ Increasing the redzone size makes it possible to detect more cases of
+ block overrun or underrun. Decreasing the redzone size will
+ reduce the memory needed by Valgrind but reduces the chance of
+ detecting block overruns/underruns.
+
+
+
@@ -2156,7 +2175,7 @@
If you get an assertion failure
in m_mallocfree.c, this may have happened because
your program wrote off the end of a heap block, or before its
-beginning, thus corrupting head metadata. Valgrind hopefully will have
+beginning, thus corrupting heap metadata. Valgrind hopefully will have
emitted a message to that effect before dying in this way.Read the for more advice about common problems,
Index: helgrind/hg_main.c
===================================================================
--- helgrind/hg_main.c (revision 12798)
+++ helgrind/hg_main.c (working copy)
@@ -102,7 +102,7 @@
static void all__sanity_check ( Char* who ); /* fwds */
-#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */
+#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0
@@ -5159,7 +5159,7 @@
hg_cli____builtin_vec_delete,
hg_cli__realloc,
hg_cli_malloc_usable_size,
- HG_CLI__MALLOC_REDZONE_SZB );
+ HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );
/* 21 Dec 08: disabled this; it mostly causes H to start more
slowly and use significantly more memory, without very often
Index: memcheck/tests/clo_redzone_128.vgtest
===================================================================
--- memcheck/tests/clo_redzone_128.vgtest (revision 0)
+++ memcheck/tests/clo_redzone_128.vgtest (revision 0)
@@ -0,0 +1,2 @@
+vgopts: --leak-check=no -q --client-redzone=128
+prog: clo_redzone
Index: memcheck/tests/clo_redzone.c
===================================================================
--- memcheck/tests/clo_redzone.c (revision 0)
+++ memcheck/tests/clo_redzone.c (revision 0)
@@ -0,0 +1,17 @@
+#include
+#include
+int main()
+{
+ __attribute__((unused)) char *p = malloc (1);
+ char *b1 = malloc (128);
+ char *b2 = malloc (128);
+ fprintf (stderr, "b1 %p b2 %p\n", b1, b2);
+
+ // Try to land in b2 from b1, causing no error
+ // with the default client-redzone, but having
+ // an error with a bigger client-redzone.
+ // We need to choose a value which lands in b2
+ // on 32 bits and 64 bits.
+ b1[127 + 70] = 'a';
+ return 0;
+}
Index: memcheck/tests/clo_redzone_default.stderr.exp
===================================================================
--- memcheck/tests/clo_redzone_default.stderr.exp (revision 0)
+++ memcheck/tests/clo_redzone_default.stderr.exp (revision 0)
@@ -0,0 +1 @@
+b1 0x........ b2 0x........
Index: memcheck/tests/clo_redzone_128.stderr.exp
===================================================================
--- memcheck/tests/clo_redzone_128.stderr.exp (revision 0)
+++ memcheck/tests/clo_redzone_128.stderr.exp (revision 0)
@@ -0,0 +1,7 @@
+b1 0x........ b2 0x........
+Invalid write of size 1
+ ...
+ Address 0x........ is 69 bytes after a block of size 128 alloc'd
+ at 0x........: malloc (vg_replace_malloc.c:...)
+ ...
+
Index: memcheck/tests/Makefile.am
===================================================================
--- memcheck/tests/Makefile.am (revision 12798)
+++ memcheck/tests/Makefile.am (working copy)
@@ -74,6 +74,8 @@
clientperm.stdout.exp clientperm.vgtest \
clireq_nofill.stderr.exp \
clireq_nofill.stdout.exp clireq_nofill.vgtest \
+ clo_redzone_default.vgtest clo_redzone_128.vgtest \
+ clo_redzone_default.stderr.exp clo_redzone_128.stderr.exp \
custom_alloc.stderr.exp custom_alloc.vgtest custom_alloc.stderr.exp-s390x-mvc \
custom-overlap.stderr.exp custom-overlap.vgtest \
deep-backtrace.vgtest deep-backtrace.stderr.exp \
@@ -240,6 +242,7 @@
calloc-overflow \
clientperm \
clireq_nofill \
+ clo_redzone \
custom_alloc \
custom-overlap \
deep-backtrace \
Index: memcheck/tests/clo_redzone_default.vgtest
===================================================================
--- memcheck/tests/clo_redzone_default.vgtest (revision 0)
+++ memcheck/tests/clo_redzone_default.vgtest (revision 0)
@@ -0,0 +1,2 @@
+vgopts: --leak-check=no -q
+prog: clo_redzone
Index: memcheck/mc_main.c
===================================================================
--- memcheck/mc_main.c (revision 12798)
+++ memcheck/mc_main.c (working copy)
@@ -6335,7 +6335,8 @@
MC_(__builtin_vec_delete),
MC_(realloc),
MC_(malloc_usable_size),
- MC_MALLOC_REDZONE_SZB );
+ MC_MALLOC_DEFAULT_REDZONE_SZB );
+ MC_(Malloc_Redzone_Szb) = VG_(malloc_effective_client_redzone)();
VG_(needs_xml_output) ();
Index: memcheck/mc_include.h
===================================================================
--- memcheck/mc_include.h (revision 12798)
+++ memcheck/mc_include.h (working copy)
@@ -42,8 +42,12 @@
/*--- Tracking the heap ---*/
/*------------------------------------------------------------*/
-/* We want at least a 16B redzone on client heap blocks for Memcheck */
-#define MC_MALLOC_REDZONE_SZB 16
+/* By default, we want at least a 16B redzone on client heap blocks
+ for Memcheck.
+ The default can be modified by --client-redzone. */
+#define MC_MALLOC_DEFAULT_REDZONE_SZB 16
+// effective redzone, as (possibly) modified by --client-redzone:
+extern SizeT MC_(Malloc_Redzone_Szb);
/* For malloc()/new/new[] vs. free()/delete/delete[] mismatch checking. */
typedef
Index: memcheck/mc_malloc_wrappers.c
===================================================================
--- memcheck/mc_malloc_wrappers.c (revision 12798)
+++ memcheck/mc_malloc_wrappers.c (working copy)
@@ -63,6 +63,8 @@
/*--- Tracking malloc'd and free'd blocks ---*/
/*------------------------------------------------------------*/
+SizeT MC_(Malloc_Redzone_Szb) = -10000000; // If used before set, should BOMB
+
/* Record malloc'd blocks. */
VgHashTable MC_(malloc_list) = NULL;
@@ -174,7 +176,7 @@
mc = freed_list_start[i];
while (mc) {
if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
- MC_MALLOC_REDZONE_SZB ))
+ MC_(Malloc_Redzone_Szb) ))
return mc;
mc = mc->next;
}
@@ -387,19 +389,19 @@
void MC_(free) ( ThreadId tid, void* p )
{
MC_(handle_free)(
- tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc );
+ tid, (Addr)p, MC_(Malloc_Redzone_Szb), MC_AllocMalloc );
}
void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
MC_(handle_free)(
- tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew);
+ tid, (Addr)p, MC_(Malloc_Redzone_Szb), MC_AllocNew);
}
void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
MC_(handle_free)(
- tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec);
+ tid, (Addr)p, MC_(Malloc_Redzone_Szb), MC_AllocNewVec);
}
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
@@ -454,10 +456,10 @@
tl_assert(ec);
/* Retained part is copied, red zones set as normal */
- MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
- MC_MALLOC_REDZONE_SZB );
+ MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_Szb),
+ MC_(Malloc_Redzone_Szb) );
MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
- MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );
+ MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_Szb) );
/* Copy from old to new */
VG_(memcpy)((void*)a_new, p_old, new_szB);
@@ -472,7 +474,7 @@
/* Nb: we have to allocate a new MC_Chunk for the new memory rather
than recycling the old one, so that any erroneous accesses to the
old memory are reported. */
- die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );
+ die_and_free_mem ( tid, mc, MC_(Malloc_Redzone_Szb) );
// Allocate a new chunk.
mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
@@ -497,12 +499,12 @@
tl_assert(VG_(is_plausible_ECU)(ecu));
/* First half kept and copied, second half new, red zones as normal */
- MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
- MC_MALLOC_REDZONE_SZB );
+ MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_Szb),
+ MC_(Malloc_Redzone_Szb) );
MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
ecu | MC_OKIND_HEAP );
- MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );
+ MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_Szb) );
/* Possibly fill new area with specified junk */
if (MC_(clo_malloc_fill) != -1) {
@@ -525,7 +527,7 @@
/* Nb: we have to allocate a new MC_Chunk for the new memory rather
than recycling the old one, so that any erroneous accesses to the
old memory are reported. */
- die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );
+ die_and_free_mem ( tid, mc, MC_(Malloc_Redzone_Szb) );
// Allocate a new chunk.
mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
Index: memcheck/mc_errors.c
===================================================================
--- memcheck/mc_errors.c (revision 12798)
+++ memcheck/mc_errors.c (working copy)
@@ -1082,7 +1082,7 @@
Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
{
return VG_(addr_is_in_block)( a, mc->data, mc->szB,
- MC_MALLOC_REDZONE_SZB );
+ MC_(Malloc_Redzone_Szb) );
}
static
Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
Index: NEWS
===================================================================
--- NEWS (revision 12798)
+++ NEWS (working copy)
@@ -61,6 +61,12 @@
* ==================== OTHER CHANGES ====================
+* For tools replacing the malloc library (e.g. Memcheck, Helgrind, ...),
+ the option --client-redzone= makes it possible to control the padding
+ blocks (redzones) added before and after each client-allocated block.
+ Smaller redzones decrease the memory needed by Valgrind. Bigger
+ redzones increase the chance of detecting block overruns or underruns.
+
* The C++ demangler has been updated so as to work well with C++
compiled by up to at least g++ 4.6.
Index: include/pub_tool_mallocfree.h
===================================================================
--- include/pub_tool_mallocfree.h (revision 12798)
+++ include/pub_tool_mallocfree.h (working copy)
@@ -47,6 +47,14 @@
// possibly some more due to rounding up.
extern SizeT VG_(malloc_usable_size)( void* p );
+// If tool is replacing malloc for the client, the below returns
+// the effective client redzone as derived from the default
+// provided by the tool, VG_(clo_client_redzone) and the minimum
+// redzone required by m_mallocfree.c.
+// It is an error to call this before VG_(needs_malloc_replacement) has
+// been called.
+extern SizeT VG_(malloc_effective_client_redzone)(void);
+
// TODO: move somewhere else
// Call here to bomb the system when out of memory (mmap anon fails)
__attribute__((noreturn))
Index: none/tests/cmdline2.stdout.exp
===================================================================
--- none/tests/cmdline2.stdout.exp (revision 12798)
+++ none/tests/cmdline2.stdout.exp (working copy)
@@ -51,6 +51,8 @@
user options for Valgrind tools that replace malloc:
--alignment= set minimum alignment of heap allocations [...]
+ --client-redzone= set minimum size of redzones added before/after
+ heap blocks (in bytes). [default depends on the tool]
uncommon user options for all Valgrind tools:
--fullpath-after= (with nothing after the '=')
@@ -106,6 +108,8 @@
--trace-redir=no|yes show redirection details? [no]
--trace-sched=no|yes show thread scheduler details? [no]
--profile-heap=no|yes profile Valgrind's own space use
+ --core-redzone= set minimum size of redzones added before/after
+ heap blocks allocated for Valgrind internal use (in bytes) [4]
--wait-for-gdb=yes|no pause on startup to wait for gdb attach
--sym-offsets=yes|no show syms in form 'name+offset' ? [no]
--command-line-only=no|yes only use command line options [no]
Index: none/tests/cmdline1.stdout.exp
===================================================================
--- none/tests/cmdline1.stdout.exp (revision 12798)
+++ none/tests/cmdline1.stdout.exp (working copy)
@@ -51,6 +51,8 @@
user options for Valgrind tools that replace malloc:
--alignment= set minimum alignment of heap allocations [...]
+ --client-redzone= set minimum size of redzones added before/after
+ heap blocks (in bytes). [default depends on the tool]
uncommon user options for all Valgrind tools:
--fullpath-after= (with nothing after the '=')
Index: coregrind/m_mallocfree.c
===================================================================
--- coregrind/m_mallocfree.c (revision 12798)
+++ coregrind/m_mallocfree.c (working copy)
@@ -269,6 +269,10 @@
return bszB & (~SIZE_T_0x1);
}
+// Forward definition.
+static
+void ensure_mm_init ( ArenaId aid );
+
// return either 0 or sizeof(ULong) depending on whether or not
// heap profiling is engaged
#define hp_overhead_szB() set_at_init_hp_overhead_szB
@@ -491,6 +495,19 @@
return & vg_arena[arena];
}
+SizeT VG_(malloc_effective_client_redzone)(void)
+{
+ SizeT result;
+ ensure_mm_init (VG_AR_CLIENT);
+ vg_assert(VG_(needs).malloc_replacement);
+ if (VG_(clo_client_redzone) == -1)
+ result = VG_(tdict).tool_client_redzone_szB;
+ else
+ result = VG_(clo_client_redzone);
+ if (result < sizeof(void*)) result = sizeof(void*);
+ return result;
+}
+
// Initialise an arena. rz_szB is the minimum redzone size; it might be
// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
static
@@ -503,9 +520,9 @@
// Ensure redzones are a reasonable size. They must always be at least
// the size of a pointer, for holding the prev/next pointer (see the layout
// details at the top of this file).
- vg_assert(rz_szB < 128);
+ vg_assert(rz_szB <= MAX_REDZONE_SZB);
if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);
-
+
vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
a->name = name;
a->clientmem = ( VG_AR_CLIENT == aid ? True : False );
@@ -615,8 +632,7 @@
// Check and set the client arena redzone size
if (VG_(needs).malloc_replacement) {
client_rz_szB = VG_(tdict).tool_client_redzone_szB;
- // 128 is no special figure, just something not too big
- if (client_rz_szB > 128) {
+ if (client_rz_szB > MAX_REDZONE_SZB) {
VG_(printf)( "\nTool error:\n"
" specified redzone size is too big (%llu)\n",
(ULong)client_rz_szB);
@@ -629,7 +645,9 @@
ar_client_sbszB = 4194304;
// superblocks with a size > ar_client_sbszB will be unsplittable
// (unless used for providing memalign-ed blocks).
- arena_init ( VG_AR_CLIENT, "client", client_rz_szB,
+ arena_init ( VG_AR_CLIENT, "client",
+ (VG_(clo_client_redzone) == -1 ? // not overridden ?
+ client_rz_szB : VG_(clo_client_redzone)),
ar_client_sbszB, ar_client_sbszB+1);
client_inited = True;
@@ -641,13 +659,20 @@
VG_(clo_profile_heap) ? VG_MIN_MALLOC_SZB : 0;
// Initialise the non-client arenas
// Similarly to client arena, big allocations will be unsplittable.
- arena_init ( VG_AR_CORE, "core", 4, 1048576, 1048576+1 );
- arena_init ( VG_AR_TOOL, "tool", 4, 4194304, 4194304+1 );
- arena_init ( VG_AR_DINFO, "dinfo", 4, 1048576, 1048576+1 );
- arena_init ( VG_AR_DEMANGLE, "demangle", 4, 65536, 65536+1 );
- arena_init ( VG_AR_EXECTXT, "exectxt", 4, 1048576, 1048576+1 );
- arena_init ( VG_AR_ERRORS, "errors", 4, 65536, 65536+1 );
- arena_init ( VG_AR_TTAUX, "ttaux", 4, 65536, 65536+1 );
+ arena_init ( VG_AR_CORE, "core",
+ VG_(clo_core_redzone), 1048576, 1048576+1 );
+ arena_init ( VG_AR_TOOL, "tool",
+ VG_(clo_core_redzone), 4194304, 4194304+1 );
+ arena_init ( VG_AR_DINFO, "dinfo",
+ VG_(clo_core_redzone), 1048576, 1048576+1 );
+ arena_init ( VG_AR_DEMANGLE, "demangle",
+ VG_(clo_core_redzone), 65536, 65536+1 );
+ arena_init ( VG_AR_EXECTXT, "exectxt",
+ VG_(clo_core_redzone), 1048576, 1048576+1 );
+ arena_init ( VG_AR_ERRORS, "errors",
+ VG_(clo_core_redzone), 65536, 65536+1 );
+ arena_init ( VG_AR_TTAUX, "ttaux",
+ VG_(clo_core_redzone), 65536, 65536+1 );
nonclient_inited = True;
}
Index: coregrind/m_main.c
===================================================================
--- coregrind/m_main.c (revision 12798)
+++ coregrind/m_main.c (working copy)
@@ -162,6 +162,8 @@
"\n"
" user options for Valgrind tools that replace malloc:\n"
" --alignment= set minimum alignment of heap allocations [%ld]\n"
+" --client-redzone= set minimum size of redzones added before/after\n"
+" heap blocks (in bytes). [default depends on the tool]\n"
"\n"
" uncommon user options for all Valgrind tools:\n"
" --fullpath-after= (with nothing after the '=')\n"
@@ -217,6 +219,8 @@
" --trace-redir=no|yes show redirection details? [no]\n"
" --trace-sched=no|yes show thread scheduler details? [no]\n"
" --profile-heap=no|yes profile Valgrind's own space use\n"
+" --core-redzone= set minimum size of redzones added before/after\n"
+" heap blocks allocated for Valgrind internal use (in bytes) [4]\n"
" --wait-for-gdb=yes|no pause on startup to wait for gdb attach\n"
" --sym-offsets=yes|no show syms in form 'name+offset' ? [no]\n"
" --command-line-only=no|yes only use command line options [no]\n"
@@ -467,6 +471,8 @@
else if VG_STREQN(16, arg, "--main-stacksize") {}
else if VG_STREQN(11, arg, "--sim-hints") {}
else if VG_STREQN(14, arg, "--profile-heap") {}
+ else if VG_STREQN(14, arg, "--core-redzone") {}
+ else if VG_STREQN(16, arg, "--client-redzone") {}
// These options are new.
else if (VG_STREQ(arg, "-v") ||
@@ -1523,14 +1529,18 @@
//--------------------------------------------------------------
/* Start the debugging-log system ASAP. First find out how many
"-d"s were specified. This is a pre-scan of the command line. Also
- get --profile-heap=yes which is needed by the time we start up dynamic
- memory management. */
+ get --profile-heap=yes, --core-redzone, --client-redzone which are
+ needed by the time we start up dynamic memory management. */
loglevel = 0;
for (i = 1; i < argc; i++) {
if (argv[i][0] != '-') break;
if VG_STREQ(argv[i], "--") break;
if VG_STREQ(argv[i], "-d") loglevel++;
if VG_BOOL_CLO(argv[i], "--profile-heap", VG_(clo_profile_heap)) {}
+ if VG_BINT_CLO(argv[i], "--core-redzone", VG_(clo_core_redzone),
+ 0, MAX_REDZONE_SZB) {}
+ if VG_BINT_CLO(argv[i], "--client-redzone", VG_(clo_client_redzone),
+ 0, MAX_REDZONE_SZB) {}
}
/* ... and start the debug logger. Now we can safely emit logging
@@ -1590,7 +1600,7 @@
//--------------------------------------------------------------
// Start up the dynamic memory manager
// p: address space management
- // p: getting --profile-heap
+ // p: getting --profile-heap,--core-redzone,--client-redzone
// In fact m_mallocfree is self-initialising, so there's no
// initialisation call to do. Instead, try a simple malloc/
// free pair right now to check that nothing is broken.
Index: coregrind/m_options.c
===================================================================
--- coregrind/m_options.c (revision 12798)
+++ coregrind/m_options.c (working copy)
@@ -96,6 +96,10 @@
VG_(clo_fair_sched) = disable_fair_sched;
Bool VG_(clo_trace_sched) = False;
Bool VG_(clo_profile_heap) = False;
+Int VG_(clo_core_redzone) = 4;
+// VG_(clo_client_redzone) != -1 means to override the value
+// provided by tool in VG_(needs_malloc_replacement).tool_client_redzone_szB
+Int VG_(clo_client_redzone) = -1;
Int VG_(clo_dump_error) = 0;
Int VG_(clo_backtrace_size) = 12;
Char* VG_(clo_sim_hints) = NULL;
Index: coregrind/pub_core_options.h
===================================================================
--- coregrind/pub_core_options.h (revision 12798)
+++ coregrind/pub_core_options.h (working copy)
@@ -159,6 +159,12 @@
extern Bool VG_(clo_trace_sched);
/* DEBUG: do heap profiling? default: NO */
extern Bool VG_(clo_profile_heap);
+#define MAX_REDZONE_SZB 128
+// 128 is no special figure, just something not too big
+extern Int VG_(clo_core_redzone);
+// VG_(clo_client_redzone) has default value -1, indicating to keep
+// the tool provided value.
+extern Int VG_(clo_client_redzone);
/* DEBUG: display gory details for the k'th most popular error.
default: Infinity. */
extern Int VG_(clo_dump_error);