|
From: <sv...@va...> - 2005-09-26 00:38:42
|
Author: njn
Date: 2005-09-26 01:38:36 +0100 (Mon, 26 Sep 2005)
New Revision: 4776
Log:
Backport some changes from the trunk, to make things easier later.
Modified:
branches/ASPACEM/coregrind/m_mallocfree.c
branches/ASPACEM/coregrind/pub_core_mallocfree.h
Modified: branches/ASPACEM/coregrind/m_mallocfree.c
===================================================================
--- branches/ASPACEM/coregrind/m_mallocfree.c 2005-09-26 00:00:29 UTC (rev 4775)
+++ branches/ASPACEM/coregrind/m_mallocfree.c 2005-09-26 00:38:36 UTC (rev 4776)
@@ -60,9 +60,9 @@
/* Layout of an in-use block:
 
this block total szB (sizeof(SizeT) bytes)
- red zone bytes (depends on Arena.rz_szB, but > sizeof(void*))
+ red zone bytes (depends on Arena.rz_szB, but >= sizeof(void*))
(payload bytes)
- red zone bytes (depends on Arena.rz_szB, but > sizeof(void*))
+ red zone bytes (depends on Arena.rz_szB, but >= sizeof(void*))
this block total szB (sizeof(SizeT) bytes)
 
Layout of a block on the free list:
@@ -171,48 +171,50 @@
return bszB & (~SIZE_T_0x1);
}
 
-// Set get the lower size field of a block.
+//---------------------------------------------------------------------------
+
+// Get a block's size as stored, ie with the in-use/free attribute.
static __inline__
-SizeT get_bszB_lo ( Block* b )
+SizeT get_bszB_as_is ( Block* b )
{
- return *(SizeT*)&b[0];
+ UByte* b2 = (UByte*)b;
+ SizeT bszB_lo = *(SizeT*)&b2[0];
+ SizeT bszB_hi = *(SizeT*)&b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)];
+ vg_assert2(bszB_lo == bszB_hi,
+ "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n"
+ "Probably caused by overrunning/underrunning a heap block's bounds\n");
+ return bszB_lo;
}
 
-// Does this block have the in-use attribute?
+// Get a block's plain size, ie. remove the in-use/free attribute.
static __inline__
-Bool is_inuse_block ( Block* b )
+SizeT get_bszB ( Block* b )
{
- SizeT bszB = get_bszB_lo(b);
- vg_assert(bszB != 0);
- return (0 != (bszB & SIZE_T_0x1)) ? False : True;
+ return mk_plain_bszB(get_bszB_as_is(b));
}
 
-// Get the address of the last byte in a block
+// Set the size fields of a block. bszB may have the in-use/free attribute.
static __inline__
-UByte* last_byte ( Block* b )
+void set_bszB ( Block* b, SizeT bszB )
{
UByte* b2 = (UByte*)b;
- return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
+ *(SizeT*)&b2[0] = bszB;
+ *(SizeT*)&b2[mk_plain_bszB(bszB) - sizeof(SizeT)] = bszB;
}
 
-// Get the upper size field of a block.
-static __inline__
-SizeT get_bszB_hi ( Block* b )
-{
- UByte* lb = last_byte(b);
- return *(SizeT*)&lb[-sizeof(SizeT) + 1];
-}
+//---------------------------------------------------------------------------
 
-// Set the size fields of a block.
+// Does this block have the in-use attribute?
static __inline__
-void set_bszB ( Block* b, SizeT bszB )
+Bool is_inuse_block ( Block* b )
{
- UByte* lb;
- *(SizeT*)&b[0] = bszB; // Set lo bszB; must precede last_byte() call
- lb = last_byte(b);
- *(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB; // Set hi bszB
+ SizeT bszB = get_bszB_as_is(b);
+ vg_assert(bszB != 0);
+ return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}
 
+//---------------------------------------------------------------------------
+
// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
@@ -231,6 +233,8 @@
return overhead_szB_lo(a) + overhead_szB_hi(a);
}
 
+//---------------------------------------------------------------------------
+
// Return the minimum bszB for a block in this arena. Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
@@ -239,6 +243,8 @@
return overhead_szB(a);
}
 
+//---------------------------------------------------------------------------
+
// Convert payload size <--> block size (both in bytes).
static __inline__
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
@@ -252,23 +258,8 @@
return bszB - overhead_szB(a);
}
 
-// Get a block's size as stored, ie with the in-use/free attribute.
-static __inline__
-SizeT get_bszB_as_is ( Block* b )
-{
- SizeT bszB_lo = get_bszB_lo(b);
- SizeT bszB_hi = get_bszB_hi(b);
- vg_assert(bszB_lo == bszB_hi);
- return bszB_lo;
-}
+//---------------------------------------------------------------------------
 
-// Get a block's plain size, ie. remove the in-use/free attribute.
-static __inline__
-SizeT get_bszB ( Block* b )
-{
- return mk_plain_bszB(get_bszB_as_is(b));
-}
-
// Get a block's payload size.
static __inline__
SizeT get_pszB ( Arena* a, Block* b )
@@ -276,7 +267,9 @@
return bszB_to_pszB(a, get_bszB(b));
}
 
-// Given the addr of a block, return the addr of its payload.
+//---------------------------------------------------------------------------
+
+// Given the addr of a block, return the addr of its payload, and vice versa.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
@@ -290,6 +283,7 @@
return (Block*)&payload[ -overhead_szB_lo(a) ];
}
 
+//---------------------------------------------------------------------------
 
// Set and get the next and previous link fields of a block.
static __inline__
@@ -301,8 +295,8 @@
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
- UByte* lb = last_byte(b);
- *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
+ UByte* b2 = (UByte*)b;
+ *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
@@ -313,10 +307,11 @@
static __inline__
Block* get_next_b ( Block* b )
{
- UByte* lb = last_byte(b);
- return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
+ UByte* b2 = (UByte*)b;
+ return *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)];
}
 
+//---------------------------------------------------------------------------
 
// Get the block immediately preceding this one in the Superblock.
static __inline__
@@ -327,6 +322,8 @@
return (Block*)&b2[-bszB];
}
 
+//---------------------------------------------------------------------------
+
// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
@@ -337,8 +334,8 @@
static __inline__
void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
- UByte* lb = last_byte(b);
- lb[-sizeof(SizeT) - rz_byteno] = v;
+ UByte* b2 = (UByte*)b;
+ b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
}
static __inline__
UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
@@ -349,8 +346,8 @@
static __inline__
UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
{
- UByte* lb = last_byte(b);
- return lb[-sizeof(SizeT) - rz_byteno];
+ UByte* b2 = (UByte*)b;
+ return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
}
 
 
@@ -379,7 +376,12 @@
SizeT i;
Arena* a = arenaId_to_ArenaP(aid);
 
- vg_assert(rz_szB < 128); // ensure reasonable size
+ // Ensure redzones are a reasonable size. They must always be at least
+ // the size of a pointer, for holding the prev/next pointer (see the layout
+ // details at the top of this file).
+ vg_assert(rz_szB < 128);
+ if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);
+
vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
a->name = name;
a->clientmem = ( VG_AR_CLIENT == aid ? True : False );
@@ -550,7 +552,7 @@
return 0;
sb = (Superblock*)sres.val;
} else {
- // non-client allocation -- aborts if it fails
+ // non-client allocation -- abort if it fails
sres = VG_(am_mmap_anon_float_valgrind)( cszB );
if (sres.isError) {
VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
@@ -678,8 +680,8 @@
{
# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
UInt i;
- if (get_bszB_lo(b) != get_bszB_hi(b))
- {BLEAT("sizes");return False;}
+ // The lo and hi size fields will be checked (indirectly) by the call
+ // to get_rz_hi_byte().
if (!a->clientmem && is_inuse_block(b)) {
for (i = 0; i < a->rz_szB; i++) {
if (get_rz_lo_byte(a, b, i) !=
@@ -1242,14 +1244,13 @@
}
 
 
-SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
+SizeT VG_(arena_payload_szB) ( ThreadId tid, ArenaId aid, void* ptr )
{
Arena* a = arenaId_to_ArenaP(aid);
Block* b = get_payload_block(a, ptr);
return get_pszB(a, b);
}
 
-
/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free. ---*/
/*------------------------------------------------------------*/
Modified: branches/ASPACEM/coregrind/pub_core_mallocfree.h
===================================================================
--- branches/ASPACEM/coregrind/pub_core_mallocfree.h 2005-09-26 00:00:29 UTC (rev 4775)
+++ branches/ASPACEM/coregrind/pub_core_mallocfree.h 2005-09-26 00:38:36 UTC (rev 4776)
@@ -77,7 +77,7 @@
SizeT req_pszB );
extern Char* VG_(arena_strdup) ( ArenaId aid, const Char* s);
 
-extern SizeT VG_(arena_payload_szB) ( ArenaId aid, void* payload );
+extern SizeT VG_(arena_payload_szB) ( ThreadId tid, ArenaId aid, void* payload );
 
extern void VG_(sanity_check_malloc_all) ( void );
 
|