|
From: <sv...@va...> - 2005-05-10 02:47:29
|
Author: njn
Date: 2005-05-10 03:47:21 +0100 (Tue, 10 May 2005)
New Revision: 3648
Added:
trunk/coregrind/m_mallocfree.c
trunk/coregrind/m_replacemalloc/
trunk/coregrind/m_replacemalloc/Makefile.am
trunk/coregrind/m_replacemalloc/README_REPLACEMALLOC.txt
trunk/coregrind/m_replacemalloc/replacemalloc_core.c
trunk/coregrind/m_replacemalloc/vg_replace_malloc.c
trunk/coregrind/pub_core_mallocfree.h
trunk/coregrind/pub_core_replacemalloc.h
trunk/include/pub_tool_mallocfree.h
trunk/include/pub_tool_replacemalloc.h
Removed:
trunk/coregrind/vg_malloc2.c
trunk/coregrind/vg_replace_malloc.c
Modified:
trunk/Makefile.tool.am
trunk/addrcheck/Makefile.am
trunk/cachegrind/cg_main.c
trunk/configure.in
trunk/coregrind/Makefile.am
trunk/coregrind/README_MODULES.txt
trunk/coregrind/core.h
trunk/coregrind/vg_pthreadmodel.c
trunk/coregrind/vg_scheduler.c
trunk/helgrind/Makefile.am
trunk/helgrind/hg_main.c
trunk/include/Makefile.am
trunk/include/tool.h
trunk/massif/Makefile.am
trunk/massif/ms_main.c
trunk/memcheck/Makefile.am
trunk/memcheck/mac_shared.c
trunk/memcheck/mac_shared.h
trunk/memcheck/mc_main.c
Log:
Modularised the malloc/free stuff into two modules: m_mallocfree for the
malloc/free implementation, and m_replacemalloc with the stuff for the to=
ols
that replace malloc with their own version. Previously these two areas o=
f
functionality were mixed up somewhat.
Modified: trunk/Makefile.tool.am
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/Makefile.tool.am 2005-05-09 22:01:37 UTC (rev 3647)
+++ trunk/Makefile.tool.am 2005-05-10 02:47:21 UTC (rev 3648)
@@ -4,3 +4,6 @@
include $(top_srcdir)/Makefile.all.am
include $(top_srcdir)/Makefile.tool-flags.am
include $(top_srcdir)/Makefile.tool-inplace.am
+
+LIBREPLACEMALLOC =3D $(top_builddir)/coregrind/m_replacemalloc/libreplac=
emalloc_toolpreload.a
+
Modified: trunk/addrcheck/Makefile.am
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/addrcheck/Makefile.am 2005-05-09 22:01:37 UTC (rev 3647)
+++ trunk/addrcheck/Makefile.am 2005-05-10 02:47:21 UTC (rev 3648)
@@ -16,10 +16,10 @@
vgpreload_addrcheck_so_LDADD =3D \
../memcheck/mac_replace_strmem.o
vgpreload_addrcheck_so_DEPENDENCIES =3D \
- $(top_builddir)/coregrind/lib_replace_malloc.a \
+ $(LIBREPLACEMALLOC) \
../memcheck/mac_replace_strmem.o
vgpreload_addrcheck_so_LDFLAGS =3D -shared -Wl,-z,interpose,-z,initfirst=
\
-Wl,--whole-archive \
- $(top_builddir)/coregrind/lib_replace_malloc.a \
+ $(LIBREPLACEMALLOC) \
-Wl,--no-whole-archive
=20
Modified: trunk/cachegrind/cg_main.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/cachegrind/cg_main.c 2005-05-09 22:01:37 UTC (rev 3647)
+++ trunk/cachegrind/cg_main.c 2005-05-10 02:47:21 UTC (rev 3648)
@@ -30,7 +30,7 @@
*/
=20
#include "tool.h"
-//#include "vg_profile.c"
+#include "pub_tool_mallocfree.h"
=20
#include "cg_arch.h"
#include "cg_sim.c"
Modified: trunk/configure.in
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/configure.in 2005-05-09 22:01:37 UTC (rev 3647)
+++ trunk/configure.in 2005-05-10 02:47:21 UTC (rev 3648)
@@ -395,6 +395,7 @@
coregrind/Makefile=20
coregrind/demangle/Makefile=20
coregrind/m_aspacemgr/Makefile=20
+ coregrind/m_replacemalloc/Makefile=20
coregrind/m_sigframe/Makefile=20
coregrind/m_syscalls/Makefile=20
coregrind/amd64/Makefile
Modified: trunk/coregrind/Makefile.am
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/coregrind/Makefile.am 2005-05-09 22:01:37 UTC (rev 3647)
+++ trunk/coregrind/Makefile.am 2005-05-10 02:47:21 UTC (rev 3648)
@@ -4,6 +4,7 @@
MODULES =3D \
demangle \
m_aspacemgr \
+ m_replacemalloc \
m_sigframe \
m_syscalls
=20
@@ -33,8 +34,6 @@
stage2 \
vg_inject.so
=20
-noinst_LIBRARIES =3D lib_replace_malloc.a
-
noinst_HEADERS =3D \
core.h \
core_asm.h \
@@ -42,6 +41,8 @@
pub_core_debuglog.h \
pub_core_errormgr.h \
pub_core_execontext.h \
+ pub_core_mallocfree.h \
+ pub_core_replacemalloc.h\
pub_core_sigframe.h \
pub_core_stacktrace.h \
pub_core_syscalls.h \
@@ -66,6 +67,7 @@
stage2_SOURCES =3D \
m_errormgr.c \
m_execontext.c \
+ m_mallocfree.c \
m_stacktrace.c \
m_debuglog.c \
ume.c \
@@ -73,9 +75,7 @@
vg_scheduler.c \
vg_demangle.c \
vg_hashtable.c \
- vg_replace_malloc.c \
vg_main.c \
- vg_malloc2.c \
vg_messages.c \
vg_mylibc.c \
vg_needs.c \
@@ -103,29 +103,42 @@
${VG_OS}/libos.a \
@VEX_DIR@/libvex.a
=20
-## Test repeated in both arms of the if-then-else because older versions=
of
-## automake don't seem to like having +=3D within an if-then-else.
+## These ones must be linked in with the --whole-archive flag, because t=
hey
+## wouldn't get pulled into stage otherwise (because they contain symbol=
s
+## only referred to by tool shared objects).
+stage2_extra2 =3D \
+ m_replacemalloc/libreplacemalloc_core.a
+=09
+## Nb: older versions of automake don't seem to like having +=3D within =
an
+## if-then-else, so we have to use these variables for the common parts.
+st2_DEPS_common =3D \
+ $(srcdir)/valgrind.vs \
+ $(stage2_extra) \
+ $(stage2_extra2)
+
+st2_LDFLAGS_common =3D \
+ -Wl,--export-dynamic -g \
+ -Wl,-version-script $(srcdir)/valgrind.vs
+
if USE_PIE
stage2_CFLAGS =3D $(AM_CFLAGS) -fpie
-stage2_DEPENDENCIES =3D \
- $(srcdir)/valgrind.vs \
- $(stage2_extra)
-stage2_LDFLAGS =3D -Wl,--export-dynamic -g \
- -Wl,-version-script $(srcdir)/valgrind.vs \
+stage2_DEPENDENCIES =3D $(st2_DEPS_common)
+stage2_LDFLAGS =3D \
+ $(st2_LDFLAGS_common) \
+ -Wl,--whole-archive $(stage2_extra2) -Wl,--no-whole-archive \
-pie
else
stage2_CFLAGS =3D $(AM_CFLAGS)
-stage2_DEPENDENCIES =3D \
- $(srcdir)/valgrind.vs ${VG_ARCH}/stage2.lds \
- $(stage2_extra)
-stage2_LDFLAGS =3D -Wl,--export-dynamic -g \
- -Wl,-version-script $(srcdir)/valgrind.vs \
+stage2_DEPENDENCIES =3D $(st2_DEPS_common) ${VG_ARCH}/stage2.lds
+stage2_LDFLAGS =3D \
+ $(st2_LDFLAGS_common) \
+ -Wl,--whole-archive $(stage2_extra2) -Wl,--no-whole-archive \
-Wl,-defsym,kickstart_base=3D@KICKSTART_BASE@ -Wl,-T,${VG_ARCH}/stage2.=
lds
endif
=20
-stage2_LDADD=3D $(stage2_extra) -ldl
+stage2_LDADD=3D $(stage2_extra) \
+ -ldl
=20
-
vg_inject_so_SOURCES =3D vg_intercept.c
vg_inject_so_CFLAGS =3D $(AM_CFLAGS) -fpic
vg_inject_so_LDADD =3D -ldl
@@ -134,9 +147,6 @@
-Wl,--soname,vg_inject.so \
-Wl,-z,initfirst
=20
-lib_replace_malloc_a_SOURCES =3D vg_replace_malloc.c
-lib_replace_malloc_a_CFLAGS =3D $(AM_CFLAGS) -fpic -fno-omit-frame-poin=
ter
-
MANUAL_DEPS =3D $(noinst_HEADERS) $(include_HEADERS)
=20
all-local:
Modified: trunk/coregrind/README_MODULES.txt
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/coregrind/README_MODULES.txt 2005-05-09 22:01:37 UTC (rev 3647)
+++ trunk/coregrind/README_MODULES.txt 2005-05-10 02:47:21 UTC (rev 3648)
@@ -43,6 +43,8 @@
pub_tool_foo.h, if it exists. pub_tool_foo.h *must not* #include
pub_core_foo.h, nor any other pub_core_ header for that matter.
=20
+Module-private headers are named "priv_foo.h".
+
No module may include the private headers of any other module. If a
type/enum/function/struct/whatever is stated in neither
include/pub_tool_foo.h nor coregrind/pub_core_foo.h then module 'foo'
Modified: trunk/coregrind/core.h
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/coregrind/core.h 2005-05-09 22:01:37 UTC (rev 3647)
+++ trunk/coregrind/core.h 2005-05-10 02:47:21 UTC (rev 3648)
@@ -1,7 +1,6 @@
=20
/*--------------------------------------------------------------------*/
-/*--- A header file for all private parts of Valgrind's core. ---*/
-/*--- Include no other! (more or less...) ---*/
+/*--- A header file for various private parts of Valgrind's core. ---*/
/*--- core.h ---*/
/*--------------------------------------------------------------------*/
=20
@@ -97,6 +96,7 @@
// eg. x86-linux/core_platform.h
#include "core_os.h" // OS-specific stuff, eg. linux/core_os.h
=20
+#include "pub_core_mallocfree.h" // for type 'ArenaId'
#include "pub_core_stacktrace.h" // for type 'StackTrace'
=20
#include "valgrind.h"
@@ -458,57 +458,6 @@
=20
=20
/* ---------------------------------------------------------------------
- Exports of vg_malloc2.c
- ------------------------------------------------------------------ */
-
-/* Allocation arenas. =20
-
- CORE for the core's general use.
- TOOL for the tool to use (and the only one it uses).
- SYMTAB for Valgrind's symbol table storage.
- CLIENT for the client's mallocs/frees, if the tool replaces gli=
bc's
- malloc() et al -- redzone size is chosen by the tool=
.
- DEMANGLE for the C++ demangler.
- EXECTXT for storing ExeContexts.
- ERRORS for storing CoreErrors.
-
- When adding a new arena, remember also to add it to ensure_mm_init().=
=20
-*/
-typedef Int ArenaId;
-
-#define VG_N_ARENAS 7
-
-#define VG_AR_CORE 0
-#define VG_AR_TOOL 1
-#define VG_AR_SYMTAB 2
-#define VG_AR_CLIENT 3
-#define VG_AR_DEMANGLE 4
-#define VG_AR_EXECTXT 5
-#define VG_AR_ERRORS 6
-
-// This is both the minimum payload size of a malloc'd block, and its
-// minimum alignment. Must be a power of 2 greater than 4, and should b=
e
-// greater than 8.
-#define VG_MIN_MALLOC_SZB 8
-
-extern void* VG_(arena_malloc) ( ArenaId arena, SizeT nbytes );
-extern void VG_(arena_free) ( ArenaId arena, void* ptr );
-extern void* VG_(arena_calloc) ( ArenaId arena,=20
- SizeT nmemb, SizeT bytes_per_memb );
-extern void* VG_(arena_realloc) ( ArenaId arena, void* ptr, SizeT size )=
;
-
-/* Sets the size of the redzones at the start and end of heap blocks. T=
his
- must be called before any of VG_(malloc) and friends are called. */
-extern void VG_(set_client_malloc_redzone_szB) ( SizeT rz_szB );
-
-extern SizeT VG_(arena_payload_szB) ( ArenaId aid, void* payload );
-
-extern void VG_(sanity_check_malloc_all) ( void );
-
-extern void VG_(print_all_arena_stats) ( void );
-
-
-/* ---------------------------------------------------------------------
Exports of vg_intercept.c
------------------------------------------------------------------ */
=20
@@ -575,10 +524,10 @@
A synonym for exit. */
#define VG_USERREQ__LIBC_FREERES_DONE 0x3029
=20
-/* Intercept prefix stuff. See coregrind/vg_replace_malloc.c for
- details. Unfortunately the "_vgi_" literal is also hardcoded in
- that file, so if you change this one you must also change the other
- one. */
+/* Intercept prefix stuff. See
+ coregrind/m_replace_malloc/vg_replace_malloc.c for details.
+ Unfortunately the "_vgi_" literal is also hardcoded in that file, so =
if
+ you change this one you must also change the other one. */
#define VG_INTERCEPT_PREFIX "_vgi_"
#define VG_INTERCEPT_PREFIX_LEN 5
=20
Copied: trunk/coregrind/m_mallocfree.c (from rev 3646, trunk/coregrind/vg=
_malloc2.c)
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/coregrind/vg_malloc2.c 2005-05-09 01:23:49 UTC (rev 3646)
+++ trunk/coregrind/m_mallocfree.c 2005-05-10 02:47:21 UTC (rev 3648)
@@ -0,0 +1,1295 @@
+
+/*--------------------------------------------------------------------*/
+/*--- An implementation of malloc/free which doesn't use sbrk. ---*/
+/*--- m_mallocfree.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2005 Julian Seward=20
+ js...@ac...
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+
+#include "core.h"
+#include "pub_core_aspacemgr.h"
+
+//zz#include "memcheck/memcheck.h"
+
+//#define DEBUG_MALLOC // turn on heavyweight debugging machinery
+//#define VERBOSE_MALLOC // make verbose, esp. in debugging machinery
+
+/*------------------------------------------------------------*/
+/*--- Main types ---*/
+/*------------------------------------------------------------*/
+
+#define N_MALLOC_LISTS 16 // do not change this
+
+// The amount you can ask for is limited only by sizeof(SizeT)...
+#define MAX_PSZB (~((SizeT)0x0))
+
+typedef UChar UByte;
+
+/* Block layout:
+
+ this block total szB (sizeof(SizeT) bytes)
+ freelist previous ptr (sizeof(void*) bytes)
+ red zone bytes (depends on .rz_szB field of Arena)
+ (payload bytes)
+ red zone bytes (depends on .rz_szB field of Arena)
+ freelist next ptr (sizeof(void*) bytes)
+ this block total szB (sizeof(SizeT) bytes)
+
+ Total size in bytes (bszB) and payload size in bytes (pszB)
+ are related by:
+
+ bszB =3D=3D pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_s=
zB
+
+ Furthermore, both size fields in the block have their least-sifnifi=
cant
+ bit set if the block is not in use, and unset if it is in use.
+ (The bottom 3 or so bits are always free for this because of alignm=
ent.)
+ A block size of zero is not possible, because a block always has at
+ least two SizeTs and two pointers of overhead. =20
+
+ Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned. This is
+ achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
+ (see newSuperblock() for how), and that the lengths of the followin=
g
+ things are a multiple of VG_MIN_MALLOC_SZB:
+ - Superblock admin section lengths (due to elastic padding)
+ - Block admin section (low and high) lengths (due to elastic redzon=
es)
+ - Block payload lengths (due to req_pszB rounding up)
+*/
+typedef
+ struct {
+ // No fields are actually used in this struct, because a Block has
+ // loads of variable sized fields and so can't be accessed
+ // meaningfully with normal fields. So we use access functions al=
l
+ // the time. This struct gives us a type to use, though. Also, w=
e
+ // make sizeof(Block) 1 byte so that we can do arithmetic with the
+ // Block* type in increments of 1!
+ UByte dummy;
+ }=20
+ Block;
+
+// A superblock. 'padding' is never used, it just ensures that if the
+// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes=
[]
+// will be too. It can add small amounts of padding unnecessarily -- eg=
.
+// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- becaus=
e
+// it's too hard to make a constant expression that works perfectly in a=
ll
+// cases.
+// payload_bytes[] is made a single big Block when the Superblock is
+// created, and then can be split and the splittings remerged, but Block=
s
+// always cover its entire length -- there's never any unused bytes at t=
he
+// end, for example.
+typedef=20
+ struct _Superblock {
+ struct _Superblock* next;
+ SizeT n_payload_bytes;
+ UByte padding[ VG_MIN_MALLOC_SZB -=20
+ ((sizeof(struct _Superblock*) + sizeof(SizeT)) %=
=20
+ VG_MIN_MALLOC_SZB) ];
+ UByte payload_bytes[0];
+ }
+ Superblock;
+
+// An arena. 'freelist' is a circular, doubly-linked list. 'rz_szB' is
+// elastic, in that it can be bigger than asked-for to ensure alignment.
+typedef=20
+ struct {
+ Char* name;
+ Bool clientmem; // Allocates in the client address s=
pace?
+ SizeT rz_szB; // Red zone size in bytes
+ SizeT min_sblock_szB; // Minimum superblock size in bytes
+ Block* freelist[N_MALLOC_LISTS];
+ Superblock* sblocks;
+ // Stats only.
+ SizeT bytes_on_loan;
+ SizeT bytes_mmaped;
+ SizeT bytes_on_loan_max;
+ }=20
+ Arena;
+
+
+/*------------------------------------------------------------*/
+/*--- Low-level functions for working with Blocks. ---*/
+/*------------------------------------------------------------*/
+
+#define SIZE_T_0x1 ((SizeT)0x1)
+
+// Mark a bszB as in-use, and not in-use.
+static __inline__
+SizeT mk_inuse_bszB ( SizeT bszB )
+{
+ vg_assert(bszB !=3D 0);
+ return bszB & (~SIZE_T_0x1);
+}
+static __inline__
+SizeT mk_free_bszB ( SizeT bszB )
+{
+ vg_assert(bszB !=3D 0);
+ return bszB | SIZE_T_0x1;
+}
+
+// Remove the in-use/not-in-use attribute from a bszB, leaving just
+// the size.
+static __inline__
+SizeT mk_plain_bszB ( SizeT bszB )
+{
+ vg_assert(bszB !=3D 0);
+ return bszB & (~SIZE_T_0x1);
+}
+
+// Does this bszB have the in-use attribute?
+static __inline__
+Bool is_inuse_bszB ( SizeT bszB )
+{
+ vg_assert(bszB !=3D 0);
+ return (0 !=3D (bszB & SIZE_T_0x1)) ? False : True;
+}
+
+
+// Set and get the lower size field of a block.
+static __inline__
+void set_bszB_lo ( Block* b, SizeT bszB )
+{=20
+ *(SizeT*)&b[0] =3D bszB;
+}
+static __inline__
+SizeT get_bszB_lo ( Block* b )
+{
+ return *(SizeT*)&b[0];
+}
+
+// Get the address of the last byte in a block
+static __inline__
+UByte* last_byte ( Block* b )
+{
+ UByte* b2 =3D (UByte*)b;
+ return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
+}
+
+// Set and get the upper size field of a block.
+static __inline__
+void set_bszB_hi ( Block* b, SizeT bszB )
+{
+ UByte* b2 =3D (UByte*)b;
+ UByte* lb =3D last_byte(b);
+ vg_assert(lb =3D=3D &b2[mk_plain_bszB(bszB) - 1]);
+ *(SizeT*)&lb[-sizeof(SizeT) + 1] =3D bszB;
+}
+static __inline__
+SizeT get_bszB_hi ( Block* b )
+{
+ UByte* lb =3D last_byte(b);
+ return *(SizeT*)&lb[-sizeof(SizeT) + 1];
+}
+
+
+// Return the lower, upper and total overhead in bytes for a block.
+// These are determined purely by which arena the block lives in.
+static __inline__
+SizeT overhead_szB_lo ( Arena* a )
+{
+ return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
+}
+static __inline__
+SizeT overhead_szB_hi ( Arena* a )
+{
+ return a->rz_szB + sizeof(void*) + sizeof(SizeT);
+}
+static __inline__
+SizeT overhead_szB ( Arena* a )
+{
+ return overhead_szB_lo(a) + overhead_szB_hi(a);
+}
+
+// Given the addr of a block, return the addr of its payload.
+static __inline__
+UByte* get_block_payload ( Arena* a, Block* b )
+{
+ UByte* b2 =3D (UByte*)b;
+ return & b2[ overhead_szB_lo(a) ];
+}
+// Given the addr of a block's payload, return the addr of the block its=
elf.
+static __inline__
+Block* get_payload_block ( Arena* a, UByte* payload )
+{
+ return (Block*)&payload[ -overhead_szB_lo(a) ];
+}
+
+
+// Set and get the next and previous link fields of a block.
+static __inline__
+void set_prev_b ( Block* b, Block* prev_p )
+{=20
+ UByte* b2 =3D (UByte*)b;
+ *(Block**)&b2[sizeof(SizeT)] =3D prev_p;
+}
+static __inline__
+void set_next_b ( Block* b, Block* next_p )
+{
+ UByte* lb =3D last_byte(b);
+ *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] =3D next_p;
+}
+static __inline__
+Block* get_prev_b ( Block* b )
+{=20
+ UByte* b2 =3D (UByte*)b;
+ return *(Block**)&b2[sizeof(SizeT)];
+}
+static __inline__
+Block* get_next_b ( Block* b )
+{=20
+ UByte* lb =3D last_byte(b);
+ return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
+}
+
+
+// Get the block immediately preceding this one in the Superblock.
+static __inline__
+Block* get_predecessor_block ( Block* b )
+{
+ UByte* b2 =3D (UByte*)b;
+ SizeT bszB =3D mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
+ return (Block*)&b2[-bszB];
+}
+
+// Read and write the lower and upper red-zone bytes of a block.
+static __inline__
+void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
+{
+ UByte* b2 =3D (UByte*)b;
+ b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] =3D v;
+}
+static __inline__
+void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
+{
+ UByte* lb =3D last_byte(b);
+ lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] =3D v;
+}
+static __inline__
+UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
+{
+ UByte* b2 =3D (UByte*)b;
+ return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
+}
+static __inline__
+UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
+{
+ UByte* lb =3D last_byte(b);
+ return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
+}
+
+
+// Return the minimum bszB for a block in this arena. Can have zero-len=
gth
+// payloads, so it's the size of the admin bytes.
+static __inline__
+SizeT min_useful_bszB ( Arena* a )
+{
+ return overhead_szB(a);
+}
+
+// Convert payload size <--> block size (both in bytes).
+static __inline__
+SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
+{
+ return pszB + overhead_szB(a);
+}
+static __inline__
+SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
+{
+ vg_assert(bszB >=3D overhead_szB(a));
+ return bszB - overhead_szB(a);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Arena management ---*/
+/*------------------------------------------------------------*/
+
+#define CORE_ARENA_MIN_SZB 1048576
+
+// The arena structures themselves.
+static Arena vg_arena[VG_N_ARENAS];
+
+// Functions external to this module identify arenas using ArenaIds,
+// not Arena*s. This fn converts the former to the latter.
+static Arena* arenaId_to_ArenaP ( ArenaId arena )
+{
+ vg_assert(arena >=3D 0 && arena < VG_N_ARENAS);
+ return & vg_arena[arena];
+}
+
+// Initialise an arena. rz_szB is the minimum redzone size; it might b=
e
+// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
+static
+void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sbloc=
k_szB )
+{
+ SizeT i;
+ Arena* a =3D arenaId_to_ArenaP(aid);
+ =20
+ vg_assert(rz_szB < 128); // ensure reasonable size
+ vg_assert((min_sblock_szB % VKI_PAGE_SIZE) =3D=3D 0);
+ a->name =3D name;
+ a->clientmem =3D ( VG_AR_CLIENT =3D=3D aid ? True : False );
+
+ // The size of the low and high admin sections in a block must be a
+ // multiple of VG_MIN_MALLOC_SZB. So we round up the asked-for
+ // redzone size if necessary to achieve this.
+ a->rz_szB =3D rz_szB;
+ while (0 !=3D overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
+ vg_assert(overhead_szB_lo(a) =3D=3D overhead_szB_hi(a));
+
+ a->min_sblock_szB =3D min_sblock_szB;
+ for (i =3D 0; i < N_MALLOC_LISTS; i++) a->freelist[i] =3D NULL;
+ a->sblocks =3D NULL;
+ a->bytes_on_loan =3D 0;
+ a->bytes_mmaped =3D 0;
+ a->bytes_on_loan_max =3D 0;
+}
+
+/* Print vital stats for an arena. */
+void VG_(print_all_arena_stats) ( void )
+{
+ UInt i;
+ for (i =3D 0; i < VG_N_ARENAS; i++) {
+ Arena* a =3D arenaId_to_ArenaP(i);
+ VG_(message)(Vg_DebugMsg,
+ "%8s: %8d mmap'd, %8d/%8d max/curr",
+ a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loa=
n=20
+ );
+ }
+}
+
+static Bool init_done =3D False;
+static SizeT client_malloc_redzone_szB =3D 8; // default: be paranoid
+
+// Nb: this must be called before the client arena is initialised, ie.
+// before any memory is allocated.
+void VG_(set_client_malloc_redzone_szB)(SizeT rz_szB)
+{
+ if (init_done) {
+ VG_(printf)(
+ "\nTool error:\n"
+ "%s cannot be called after the first allocation.\n",
+ __PRETTY_FUNCTION__);
+ VG_(exit)(1);
+ }
+ // This limit is no special figure, just something not too big
+ if (rz_szB > 128) {
+ VG_(printf)(
+ "\nTool error:\n"
+ " %s passed a too-big value (%llu)",=20
+ __PRETTY_FUNCTION__, (ULong)rz_szB);
+ VG_(exit)(1);
+ }
+ client_malloc_redzone_szB =3D rz_szB;
+}
+
+/* This library is self-initialising, as it makes this more self-contain=
ed,
+ less coupled with the outside world. Hence VG_(arena_malloc)() and
+ VG_(arena_free)() below always call ensure_mm_init() to ensure things=
are
+ correctly initialised. */
+static
+void ensure_mm_init ( void )
+{
+ if (init_done) {
+ return;
+ }
+
+ /* Use checked red zones (of various sizes) for our internal stuff,
+ and an unchecked zone of arbitrary size for the client. Of
+ course the client's red zone can be checked by the tool, eg.=20
+ by using addressibility maps, but not by the mechanism implemented
+ here, which merely checks at the time of freeing that the red=20
+ zone bytes are unchanged.
+
+ Nb: redzone sizes are *minimums*; they could be made bigger to en=
sure
+ alignment. Eg. on 32-bit machines, 4 becomes 8, and 12 becomes 16=
;
+ but on 64-bit machines 4 stays as 4, and 12 stays as 12 --- the ex=
tra
+ 4 bytes in both are accounted for by the larger prev/next ptr.
+ */
+ arena_init ( VG_AR_CORE, "core", 4, CORE_ARENA_MIN_SZB );
+ arena_init ( VG_AR_TOOL, "tool", 4, 1048576 );
+ arena_init ( VG_AR_SYMTAB, "symtab", 4, 1048576 );
+ arena_init ( VG_AR_CLIENT, "client", client_malloc_redzone_szB, 10=
48576 );
+ arena_init ( VG_AR_DEMANGLE, "demangle", 12/*paranoid*/, 65536 );
+ arena_init ( VG_AR_EXECTXT, "exectxt", 4, 65536 );
+ arena_init ( VG_AR_ERRORS, "errors", 4, 65536 );
+
+ init_done =3D True;
+# ifdef DEBUG_MALLOC
+ VG_(sanity_check_malloc_all)();
+# endif
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Superblock management ---*/
+/*------------------------------------------------------------*/
+
+// Align ptr p upwards to an align-sized boundary.
+static
+void* align_upwards ( void* p, SizeT align )
+{
+ Addr a =3D (Addr)p;
+ if ((a % align) =3D=3D 0) return (void*)a;
+ return (void*)(a - (a % align) + align);
+}
+
+// If not enough memory available, either aborts (for non-client memory)
+// or returns 0 (for client memory).
+static
+Superblock* newSuperblock ( Arena* a, SizeT cszB )
+{
+ // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
+ static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZ=
B];
+ static Bool called_before =3D True; //False;
+ Superblock* sb;
+
+ // Take into account admin bytes in the Superblock.
+ cszB +=3D sizeof(Superblock);
+
+ if (cszB < a->min_sblock_szB) cszB =3D a->min_sblock_szB;
+ while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;
+
+ if (!called_before) {
+ // First time we're called -- use the special static bootstrap
+ // superblock (see comment at top of main() for details).
+ called_before =3D True;
+ vg_assert(a =3D=3D arenaId_to_ArenaP(VG_AR_CORE));
+ vg_assert(CORE_ARENA_MIN_SZB >=3D cszB);
+ // Ensure sb is suitably aligned.
+ sb =3D (Superblock*)align_upwards( bootstrap_superblock,=20
+ VG_MIN_MALLOC_SZB );
+ } else if (a->clientmem) {
+ // client allocation -- return 0 to client if it fails
+ sb =3D (Superblock *)
+ VG_(get_memory_from_mmap_for_client)
+ (0, cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC, 0);
+ if (NULL =3D=3D sb)
+ return 0;
+ } else {
+ // non-client allocation -- aborts if it fails
+ sb =3D VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
+ }
+ vg_assert(NULL !=3D sb);
+ //zzVALGRIND_MAKE_WRITABLE(sb, cszB);
+ vg_assert(0 =3D=3D (Addr)sb % VG_MIN_MALLOC_SZB);
+ sb->n_payload_bytes =3D cszB - sizeof(Superblock);
+ a->bytes_mmaped +=3D cszB;
+ if (0)
+ VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",=20
+ sb->n_payload_bytes);
+ return sb;
+}
+
+// Find the superblock containing the given chunk.
+static
+Superblock* findSb ( Arena* a, Block* b )
+{
+ Superblock* sb;
+ for (sb =3D a->sblocks; sb; sb =3D sb->next)
+ if ((Block*)&sb->payload_bytes[0] <=3D b
+ && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
+ return sb;
+ VG_(printf)("findSb: can't find pointer %p in arena `%s'\n", b, a->na=
me );
+ VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
+ return NULL; /*NOTREACHED*/
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Functions for working with freelists. ---*/
+/*------------------------------------------------------------*/
+
+// Nb: Determination of which freelist a block lives on is based on the
+// payload size, not block size.
+
+// Convert a payload size in bytes to a freelist number.
+static
+UInt pszB_to_listNo ( SizeT pszB )
+{
+ vg_assert(0 =3D=3D pszB % VG_MIN_MALLOC_SZB);
+ pszB /=3D VG_MIN_MALLOC_SZB;
+ if (pszB <=3D 2) return 0;
+ if (pszB <=3D 3) return 1;
+ if (pszB <=3D 4) return 2;
+ if (pszB <=3D 5) return 3;
+ if (pszB <=3D 6) return 4;
+ if (pszB <=3D 7) return 5;
+ if (pszB <=3D 8) return 6;
+ if (pszB <=3D 9) return 7;
+ if (pszB <=3D 10) return 8;
+ if (pszB <=3D 11) return 9;
+ if (pszB <=3D 12) return 10;
+ if (pszB <=3D 16) return 11;
+ if (pszB <=3D 32) return 12;
+ if (pszB <=3D 64) return 13;
+ if (pszB <=3D 128) return 14;
+ return 15;
+}
+
+// What is the minimum payload size for a given list?
+static
+SizeT listNo_to_pszB_min ( UInt listNo )
+{
+ SizeT pszB =3D 0;
+ vg_assert(listNo <=3D N_MALLOC_LISTS);
+ while (pszB_to_listNo(pszB) < listNo) pszB +=3D VG_MIN_MALLOC_SZB;
+ return pszB;
+}
+
+// What is the maximum payload size for a given list?
+static
+SizeT listNo_to_pszB_max ( UInt listNo )
+{
+ vg_assert(listNo <=3D N_MALLOC_LISTS);
+ if (listNo =3D=3D N_MALLOC_LISTS-1) {
+ return MAX_PSZB;
+ } else {
+ return listNo_to_pszB_min(listNo+1) - 1;
+ }
+}
+
+
+/* A nasty hack to try and reduce fragmentation. Try and replace
+ a->freelist[lno] with another block on the same list but with a
+ lower address, with the idea of attempting to recycle the same
+ blocks rather than cruise through the address space. */
+static=20
+void swizzle ( Arena* a, UInt lno )
+{
+ Block* p_best;
+ Block* pp;
+ Block* pn;
+ UInt i;
+
+ p_best =3D a->freelist[lno];
+ if (p_best =3D=3D NULL) return;
+
+ pn =3D pp =3D p_best;
+ for (i =3D 0; i < 20; i++) {
+ pn =3D get_next_b(pn);
+ pp =3D get_prev_b(pp);
+ if (pn < p_best) p_best =3D pn;
+ if (pp < p_best) p_best =3D pp;
+ }
+ if (p_best < a->freelist[lno]) {
+# ifdef VERBOSE_MALLOC
+ VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
+# endif
+ a->freelist[lno] =3D p_best;
+ }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Sanity-check/debugging machinery. ---*/
+/*------------------------------------------------------------*/
+
+#define REDZONE_LO_MASK 0x31
+#define REDZONE_HI_MASK 0x7c
+
+// Do some crude sanity checks on a Block.
+static=20
+Bool blockSane ( Arena* a, Block* b )
+{
+# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
+ UInt i;
+ if (get_bszB_lo(b) !=3D get_bszB_hi(b))
+ {BLEAT("sizes");return False;}
+ if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {
+ for (i =3D 0; i < a->rz_szB; i++) {
+ if (get_rz_lo_byte(a, b, i) !=3D=20
+ (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
+ {BLEAT("redzone-lo");return False;}
+ if (get_rz_hi_byte(a, b, i) !=3D=20
+ (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
+ {BLEAT("redzone-hi");return False;}
+ } =20
+ }
+ return True;
+# undef BLEAT
+}
+
+// Print superblocks (only for debugging).
+static=20
+void ppSuperblocks ( Arena* a )
+{
+ UInt i, blockno;
+ SizeT b_bszB;
+ Block* b;
+ Superblock* sb =3D a->sblocks;
+ blockno =3D 1;
+
+ while (sb) {
+ VG_(printf)( "\n" );
+ VG_(printf)( "superblock %d at %p, sb->n_pl_bs =3D %d, next =3D %p=
\n",=20
+ blockno++, sb, sb->n_payload_bytes, sb->next );
+ for (i =3D 0; i < sb->n_payload_bytes; i +=3D mk_plain_bszB(b_bszB=
)) {
+ b =3D (Block*)&sb->payload_bytes[i];
+ b_bszB =3D get_bszB_lo(b);
+ VG_(printf)( " block at %d, bszB %d: ", i, mk_plain_bszB(b_bs=
zB) );
+ VG_(printf)( "%s, ", is_inuse_bszB(b_bszB) ? "inuse" : "free");
+ VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
+ }
+ vg_assert(i =3D=3D sb->n_payload_bytes); // no overshoot at end =
of Sb
+ sb =3D sb->next;
+ }
+ VG_(printf)( "end of superblocks\n\n" );
+}
+
// Sanity check both the superblocks and the chains.
// Pass 1 walks every superblock block-by-block, verifying each block with
// blockSane(), checking that no two adjacent blocks are both free (they
// should have been coalesced), and totting up the bytes on loan.
// Pass 2 walks every freelist, verifying prev/next linkage and that each
// block's payload size falls inside its list's size band.
// Any failure panics via BOMB; mismatched aggregate counters also panic.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt        i, superblockctr, blockctr_sb, blockctr_li;
   UInt        blockctr_sb_free, listno;
   SizeT       b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Superblock* sb;
   Bool        thisFree, lastWasFree;
   Block*      b;
   Block*      b_prev;
   SizeT       arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   sb = a->sblocks;
   while (sb) {
      lastWasFree = False;
      superblockctr++;
      // Advance by each block's full (plain) size; b_bszB is set inside
      // the loop body before the increment is evaluated.
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        " BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_bszB(b_bszB);
         // Two free blocks in a row means coalescing failed somewhere.
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        "UNMERGED FREES\n",
                        sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree) 
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      // The loop must land exactly on the superblock's end.
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
      sb = sb->next;
   }

   // Recomputed loan total must agree with the arena's running counter.
   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      // Lists are circular: walk until we return to the head.
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n", 
                         listno, b );
            BOMB;
         }
         b_pszB = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)( 
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %dB (%dB, %dB)\n", 
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   // Free blocks seen in superblocks must equal free blocks seen in lists.
   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2) 
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan", 
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li, 
                   a->bytes_mmaped, a->bytes_on_loan);   
#  undef BOMB
}
+
+
+void VG_(sanity_check_malloc_all) ( void )
+{
+ UInt i;
+ for (i =3D 0; i < VG_N_ARENAS; i++)
+ sanity_check_malloc_arena ( i );
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Creating and deleting blocks. ---*/
+/*------------------------------------------------------------*/
+
+// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
+// relevant free list.
+
+static
+void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
+{
+ SizeT pszB =3D bszB_to_pszB(a, bszB);
+ vg_assert(b_lno =3D=3D pszB_to_listNo(pszB));
+ //zzVALGRIND_MAKE_WRITABLE(b, bszB);
+ // Set the size fields and indicate not-in-use.
+ set_bszB_lo(b, mk_free_bszB(bszB));
+ set_bszB_hi(b, mk_free_bszB(bszB));
+
+ // Add to the relevant list.
+ if (a->freelist[b_lno] =3D=3D NULL) {
+ set_prev_b(b, b);
+ set_next_b(b, b);
+ a->freelist[b_lno] =3D b;
+ } else {
+ Block* b_prev =3D get_prev_b(a->freelist[b_lno]);
+ Block* b_next =3D a->freelist[b_lno];
+ set_next_b(b_prev, b);
+ set_prev_b(b_next, b);
+ set_next_b(b, b_next);
+ set_prev_b(b, b_prev);
+ }
+# ifdef DEBUG_MALLOC
+ (void)blockSane(a,b);
+# endif
+}
+
+// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
+// appropriately.
+static
+void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
+{
+ UInt i;
+ vg_assert(bszB >=3D min_useful_bszB(a));
+ //zzVALGRIND_MAKE_WRITABLE(b, bszB);
+ set_bszB_lo(b, mk_inuse_bszB(bszB));
+ set_bszB_hi(b, mk_inuse_bszB(bszB));
+ set_prev_b(b, NULL); // Take off freelist
+ set_next_b(b, NULL); // ditto
+ if (!a->clientmem) {
+ for (i =3D 0; i < a->rz_szB; i++) {
+ set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MAS=
K));
+ set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MAS=
K));
+ }
+ }
+# ifdef DEBUG_MALLOC
+ (void)blockSane(a,b);
+# endif
+}
+
+// Remove a block from a given list. Does no sanity checking.
+static
+void unlinkBlock ( Arena* a, Block* b, UInt listno )
+{
+ vg_assert(listno < N_MALLOC_LISTS);
+ if (get_prev_b(b) =3D=3D b) {
+ // Only one element in the list; treat it specially.
+ vg_assert(get_next_b(b) =3D=3D b);
+ a->freelist[listno] =3D NULL;
+ } else {
+ Block* b_prev =3D get_prev_b(b);
+ Block* b_next =3D get_next_b(b);
+ a->freelist[listno] =3D b_prev;
+ set_next_b(b_prev, b_next);
+ set_prev_b(b_next, b_prev);
+ swizzle ( a, listno );
+ }
+ set_prev_b(b, NULL);
+ set_next_b(b, NULL);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Core-visible functions. ---*/
+/*------------------------------------------------------------*/
+
+// Align the request size.
+static __inline__
+SizeT align_req_pszB ( SizeT req_pszB )
+{
+ SizeT n =3D VG_MIN_MALLOC_SZB-1;
+ return ((req_pszB + n) & (~n));
+}
+
+void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
+{
+ SizeT req_bszB, frag_bszB, b_bszB;
+ UInt lno;
+ Superblock* new_sb;
+ Block* b =3D NULL;
+ Arena* a;
+ void* v;
+
+ VGP_PUSHCC(VgpMalloc);
+
+ ensure_mm_init();
+ a =3D arenaId_to_ArenaP(aid);
+
+ vg_assert(req_pszB < MAX_PSZB);
+ req_pszB =3D align_req_pszB(req_pszB);
+ req_bszB =3D pszB_to_bszB(a, req_pszB);
+
+ // Scan through all the big-enough freelists for a block.
+ for (lno =3D pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
+ b =3D a->freelist[lno];
+ if (NULL =3D=3D b) continue; // If this list is empty, try the n=
ext one.
+ while (True) {
+ b_bszB =3D mk_plain_bszB(get_bszB_lo(b));
+ if (b_bszB >=3D req_bszB) goto obtained_block; // success!
+ b =3D get_next_b(b);
+ if (b =3D=3D a->freelist[lno]) break; // traversed entire fre=
elist
+ }
+ }
+
+ // If we reach here, no suitable block found, allocate a new superblo=
ck
+ vg_assert(lno =3D=3D N_MALLOC_LISTS);
+ new_sb =3D newSuperblock(a, req_bszB);
+ if (NULL =3D=3D new_sb) {
+ // Should only fail if for client, otherwise, should have aborted
+ // already.
+ vg_assert(VG_AR_CLIENT =3D=3D aid);
+ return NULL;
+ }
+ new_sb->next =3D a->sblocks;
+ a->sblocks =3D new_sb;
+ b =3D (Block*)&new_sb->payload_bytes[0];
+ lno =3D pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
+ mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
+ // fall through
+
+ obtained_block:
+ // Ok, we can allocate from b, which lives in list lno.
+ vg_assert(b !=3D NULL);
+ vg_assert(lno < N_MALLOC_LISTS);
+ vg_assert(a->freelist[lno] !=3D NULL);
+ b_bszB =3D mk_plain_bszB(get_bszB_lo(b));
+ // req_bszB is the size of the block we are after. b_bszB is the
+ // size of what we've actually got. */
+ vg_assert(b_bszB >=3D req_bszB);
+
+ // Could we split this block and still get a useful fragment?
+ frag_bszB =3D b_bszB - req_bszB;
+ if (frag_bszB >=3D min_useful_bszB(a)) {
+ // Yes, split block in two, put the fragment on the appropriate fr=
ee
+ // list, and update b_bszB accordingly.
+ // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_=
bszB );
+ unlinkBlock(a, b, lno);
+ mkInuseBlock(a, b, req_bszB);
+ mkFreeBlock(a, &b[req_bszB], frag_bszB,=20
+ pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
+ b_bszB =3D mk_plain_bszB(get_bszB_lo(b));
+ } else {
+ // No, mark as in use and use as-is.
+ unlinkBlock(a, b, lno);
+ mkInuseBlock(a, b, b_bszB);
+ }
+
+ // Update stats
+ a->bytes_on_loan +=3D bszB_to_pszB(a, b_bszB);
+ if (a->bytes_on_loan > a->bytes_on_loan_max)
+ a->bytes_on_loan_max =3D a->bytes_on_loan;
+
+# ifdef DEBUG_MALLOC
+ sanity_check_malloc_arena(aid);
+# endif
+
+ VGP_POPCC(VgpMalloc);
+ v =3D get_block_payload(a, b);
+ vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) =3D=3D 0 );
+
+ VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False);
+ return v;
+}
+
+=20
// Free a payload pointer previously returned by VG_(arena_malloc) on the
// same arena.  Puts the block back on its freelist, then eagerly coalesces
// with the physically adjacent successor and predecessor blocks if they
// are free.  free(NULL) is a no-op.
// NOTE(review): arithmetic like 'b + b_bszB' treats Block* as byte-granular,
// which implies Block is a byte-sized type — confirm against its typedef.
void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte* sb_start;
   UByte* sb_end;
   Block* other;
   Block* b;
   SizeT b_bszB, b_pszB, other_bszB;
   UInt b_listno;
   Arena* a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   b = get_payload_block(a, ptr);

#  ifdef DEBUG_MALLOC
   vg_assert(blockSane(a, b));
#  endif

   // Adjust the loan counter before the block's size fields get rewritten.
   a->bytes_on_loan -= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));

   // Locate the containing superblock so we know the merge boundaries.
   sb = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];

   // Put this chunk back on a list somewhere.
   b_bszB = get_bszB_lo(b);
   b_pszB = bszB_to_pszB(a, b_bszB);
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   other = b + b_bszB;
   if (other+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-successor\n");
         other_bszB = mk_plain_bszB(other_bszB);
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other));
#        endif
         // Unlink both halves, then re-file the combined block.
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other = get_predecessor_block( b );
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-predecessor\n");
         other_bszB = mk_plain_bszB(other_bszB);
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         // The merged block now starts at the predecessor's address.
         b = other;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VALGRIND_FREELIKE_BLOCK(ptr, 0);

   VGP_POPCC(VgpMalloc);
}
+
+
+/*
   The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned to the
   free pool, and align, which is the bit we're really after.  Here's
+ a picture. L and H denote the block lower and upper overheads, in
+ bytes. The details are gruesome. Note it is slightly complicated
+ because the initial request to generate base may return a bigger
+ block than we asked for, so it is important to distinguish the base
+ request size and the base actual size.
+
+ frag_b align_b
+ | |
+ | frag_p | align_p
+ | | | |
+ v v v v
+
+ +---+ +---+---+ +---+
+ | L |----------------| H | L |---------------| H |
+ +---+ +---+---+ +---+
+
+ ^ ^ ^
+ | | :
+ | base_p this addr must be aligned
+ |
+ base_b
+
+ . . . . . . .
+ <------ frag_bszB -------> . . .
+ . <------------- base_pszB_act -----------> .
+ . . . . . . .
+
+*/
+void* VG_(arena_memalign) ( ArenaId aid, SizeT req_alignB, SizeT req_psz=
B )
+{
+ SizeT base_pszB_req, base_pszB_act, frag_bszB;
+ Block *base_b, *align_b;
+ UByte *base_p, *align_p;
+ SizeT saved_bytes_on_loan;
+ Arena* a;
+
+ VGP_PUSHCC(VgpMalloc);
+
+ ensure_mm_init();
+ a =3D arenaId_to_ArenaP(aid);
+
+ vg_assert(req_pszB < MAX_PSZB);
+
+ // Check that the requested alignment seems reasonable; that is, is
+ // a power of 2.
+ if (req_alignB < VG_MIN_MALLOC_SZB
+ || req_alignB > 1048576
+ || VG_(log2)( req_alignB ) =3D=3D -1 /* not a power of 2 */) {
+ VG_(printf)("VG_(arena_memalign)(%p, %d, %d)\nbad alignment",=20
+ a, req_alignB, req_pszB );
+ VG_(core_panic)("VG_(arena_memalign)");
+ /*NOTREACHED*/
+ }
+ // Paranoid
+ vg_assert(req_alignB % VG_MIN_MALLOC_SZB =3D=3D 0);
+
+ /* Required payload size for the aligned chunk. */
+ req_pszB =3D align_req_pszB(req_pszB);
+ =20
+ /* Payload size to request for the big block that we will split up. *=
/
+ base_pszB_req =3D req_pszB + min_useful_bszB(a) + req_alignB;
+
+ /* Payload ptr for the block we are going to split. Note this
+ changes a->bytes_on_loan; we save and restore it ourselves. */
+ saved_bytes_on_loan =3D a->bytes_on_loan;
+ base_p =3D VG_(arena_malloc) ( aid, base_pszB_req );
+ a->bytes_on_loan =3D saved_bytes_on_loan;
+
+ /* Block ptr for the block we are going to split. */
+ base_b =3D get_payload_block ( a, base_p );
+
+ /* Pointer to the payload of the aligned block we are going to
+ return. This has to be suitably aligned. */
+ align_p =3D align_upwards ( base_b + 2 * overhead_szB_lo(a)
+ + overhead_szB_hi(a),
+ req_alignB );
+ align_b =3D get_payload_block(a, align_p);
+
+ /* The block size of the fragment we will create. This must be big
+ enough to actually create a fragment. */
+ frag_bszB =3D align_b - base_b;
+
+ vg_assert(frag_bszB >=3D min_useful_bszB(a));
+
+ /* The actual payload size of the block we are going to split. */
+ base_pszB_act =3D bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(base_b)))=
;
+
+ /* Create the fragment block, and put it back on the relevant free li=
st. */
+ mkFreeBlock ( a, base_b, frag_bszB,
+ pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
+
+ /* Create the aligned block. */
+ mkInuseBlock ( a, align_b,
+ base_p + base_pszB_act=20
+ + overhead_szB_hi(a) - (UByte*)align_b );
+
+ /* Final sanity checks. */
+ vg_assert( is_inuse_bszB(get_bszB_lo(get_payload_block(a, align_p))) =
);
+
+ vg_assert(req_pszB
+ <=3D=20
+ bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
+ get_payload_block(a, align_=
p))))
+ );
+
+ a->bytes_on_loan=20
+ +=3D bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
+ get_payload_block(a, align_p))=
));
+ if (a->bytes_on_loan > a->bytes_on_loan_max)
+ a->bytes_on_loan_max =3D a->bytes_on_loan;
+
+# ifdef DEBUG_MALLOC
+ sanity_check_malloc_arena(aid);
+# endif
+
+ VGP_POPCC(VgpMalloc);
+
+ vg_assert( (((Addr)align_p) % req_alignB) =3D=3D 0 );
+
+ VALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
+
+ return align_p;
+}
+
+
+SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
+{
+ Arena* a =3D arenaId_to_ArenaP(aid);
+ Block* b =3D get_payload_block(a, ptr);
+ return bszB_to_pszB(a, get_bszB_lo(b));
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Services layered on top of malloc/free. ---*/
+/*------------------------------------------------------------*/
+
+void* VG_(arena_calloc) ( ArenaId aid, SizeT nmemb, SizeT bytes_per_memb=
)
+{
+ SizeT size;
+ UChar* p;
+
+ VGP_PUSHCC(VgpMalloc);
+
+ size =3D nmemb * bytes_per_memb;
+ vg_assert(size >=3D nmemb && size >=3D bytes_per_memb);// check again=
st overflow
+
+ p =3D VG_(arena_malloc) ( aid, size );
+
+ VG_(memset)(p, 0, size);
+
+ VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
+
+ VGP_POPCC(VgpMalloc);
+ =20
+ return p;
+}
+
+
+void* VG_(arena_realloc) ( ArenaId aid, void* ptr, SizeT req_pszB )
+{
+ Arena* a;
+ SizeT old_bszB, old_pszB;
+ UChar *p_new;
+ Block* b;
+
+ VGP_PUSHCC(VgpMalloc);
+
+ ensure_mm_init();
+ a =3D arenaId_to_ArenaP(aid);
+
+ vg_assert(req_pszB < MAX_PSZB);
+
+ b =3D get_payload_block(a, ptr);
+ vg_assert(blockSane(a, b));
+
+ old_bszB =3D get_bszB_lo(b);
+ vg_assert(is_inuse_bszB(old_bszB));
+ old_bszB =3D mk_plain_bszB(old_bszB);
+ old_pszB =3D bszB_to_pszB(a, old_bszB);
+
+ if (req_pszB <=3D old_pszB) {
+ VGP_POPCC(VgpMalloc);
+ return ptr;
+ }
+
+ p_new =3D VG_(arena_malloc) ( aid, req_pszB );
+ =20
+ VG_(memcpy)(p_new, ptr, old_pszB);
+
+ VG_(arena_free)(aid, ptr);
+
+ VGP_POPCC(VgpMalloc);
+ return p_new;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Tool-visible functions. ---*/
+/*------------------------------------------------------------*/
+
+// All just wrappers to avoid exposing arenas to tools.
+
// Tool-visible malloc: allocates from the tool arena, hiding ArenaIds.
void* VG_(malloc) ( SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}
+
// Tool-visible free: releases a block back to the tool arena.
void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}
+
// Tool-visible calloc: zeroed allocation from the tool arena.
void* VG_(calloc) ( SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, nmemb, bytes_per_memb );
}
+
// Tool-visible realloc: resizes a tool-arena allocation.
void* VG_(realloc) ( void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, ptr, size );
}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
Added: trunk/coregrind/m_replacemalloc/Makefile.am
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/coregrind/m_replacemalloc/Makefile.am 2005-05-09 22:01:37 UTC (=
rev 3647)
+++ trunk/coregrind/m_replacemalloc/Makefile.am 2005-05-10 02:47:21 UTC (=
rev 3648)
@@ -0,0 +1,27 @@
+include $(top_srcdir)/Makefile.all.am
+include $(top_srcdir)/Makefile.core-AM_CPPFLAGS.am
+
+AM_CFLAGS =3D $(WERROR) -Wmissing-prototypes -Winline -Wall -Wshadow -O =
-g
+
+EXTRA_DIST =3D \
+ README_REPLACEMALLOC.txt
+
+noinst_LIBRARIES =3D \
+ libreplacemalloc_core.a \
+ libreplacemalloc_toolpreload.a
+
+libreplacemalloc_core_a_SOURCES =3D \
+ replacemalloc_core.c
+
+if USE_PIE
+libreplacemalloc_core_a_CFLAGS =3D $(AM_CFLAGS) -fpie
+else
+libreplacemalloc_core_a_CFLAGS =3D $(AM_CFLAGS)
+endif
+
+libreplacemalloc_toolpreload_a_SOURCES =3D \
+ vg_replace_malloc.c
+libreplacemalloc_toolpreload_a_CFLAGS =3D \
+ $(AM_CFLAGS) -fpic -fno-omit-frame-pointer
+
+
Added: trunk/coregrind/m_replacemalloc/README_REPLACEMALLOC.txt
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/coregrind/m_replacemalloc/README_REPLACEMALLOC.txt 2005-05-09 2=
2:01:37 UTC (rev 3647)
+++ trunk/coregrind/m_replacemalloc/README_REPLACEMALLOC.txt 2005-05-10 0=
2:47:21 UTC (rev 3648)
@@ -0,0 +1,29 @@
+The structure of this module is worth noting.
+
+The main part is in vg_replace_malloc.c. It gets compiled into the tool=
's
+'preload' shared object, which goes into the client's area of memory, an=
d
+runs on the simulated CPU just like client code. As a result, it cannot
use any functions in the core directly; it can only communicate with the
core using client requests, just like any other client code.
+
+And yet it must call the tool's malloc wrappers. How does it know where
+they are? The init function uses a client request which asks for the li=
st
+of all the core functions (and variables) that it needs to access. It t=
hen
+uses a client request each time it needs to call one of these.
+
+This means that the following sequence occurs each time a tool that uses
+this module starts up:
+
+ - Tool does initialisation, including calling VG_(malloc_funcs)() to te=
ll
+ the core the names of its malloc wrappers. These are stored in
+ VG_(tdict).
+
+ - On the first allocation, vg_replace_malloc.c:init() calls the
+ GET_MALLOCFUNCS client request to get the names of the malloc wrapper=
s
+ out of VG_(tdict), storing them in 'info'.
+
+ - All calls to these functions are done using 'info'.
+
+This is a bit complex, but it's hard to see how it can be done more simp=
ly.=20
+
+
Added: trunk/coregrind/m_replacemalloc/replacemalloc_core.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/coregrind/m_replacemalloc/replacemalloc_core.c 2005-05-09 22:01=
:37 UTC (rev 3647)
+++ trunk/coregrind/m_replacemalloc/replacemalloc_core.c 2005-05-10 02:47=
:21 UTC (rev 3648)
@@ -0,0 +1,114 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Malloc replacement. replacemalloc_core.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2005 Julian Seward=20
+ js...@ac...
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "core.h"
+#include "pub_core_replacemalloc.h"
+
+/*------------------------------------------------------------*/
+/*--- Command line options ---*/
+/*------------------------------------------------------------*/
+
+/* Nb: the allocator always rounds blocks up to a multiple of
+ VG_MIN_MALLOC_SZB.
+*/
+
+/* DEBUG: print malloc details? default: NO */
+Bool VG_(clo_trace_malloc) =3D False;
+
+/* Minimum alignment in functions that don't specify alignment explicitl=
y.
+ default: 0, i.e. use VG_MIN_MALLOC_SZB. */
+UInt VG_(clo_alignment) =3D VG_MIN_MALLOC_SZB;
+
+
+Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
+{
+ if (VG_CLO_STREQN(12, arg, "--alignment=3D")) {
+ VG_(clo_alignment) =3D (UInt)VG_(atoll)(&arg[12]);
+
+ if (VG_(clo_alignment) < VG_MIN_MALLOC_SZB
+ || VG_(clo_alignment) > 4096
+ || VG_(log2)( VG_(clo_alignment) ) =3D=3D -1 /* not a power of=
2 *...
[truncated message content] |