|
From: <sv...@va...> - 2005-10-06 09:00:21
|
Author: tom
Date: 2005-10-06 10:00:17 +0100 (Thu, 06 Oct 2005)
New Revision: 4875
Log:
Fix realloc wrappers to handle the out of memory case properly - if
the call to VG_(cli_malloc) returns NULL then don't try and copy the
data or register a new block and just leave the old block in place
instead, but still return NULL to the caller.
Fixes bug 109487 and its duplicates.
Modified:
trunk/helgrind/hg_main.c
trunk/massif/ms_main.c
trunk/memcheck/mac_malloc_wrappers.c
Modified: trunk/helgrind/hg_main.c
===================================================================
--- trunk/helgrind/hg_main.c 2005-10-06 03:32:42 UTC (rev 4874)
+++ trunk/helgrind/hg_main.c 2005-10-06 09:00:17 UTC (rev 4875)
@@ -1977,7 +1977,6 @@
{
HG_Chunk *hc;
HG_Chunk **prev_chunks_next_ptr;
- Int i;
 
/* First try and find the block. */
hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UWord)p,
@@ -2005,22 +2004,23 @@
/* Get new memory */
p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
 
- /* First half kept and copied, second half new */
- copy_address_range_state( (Addr)p, p_new, hc->size );
- hg_new_mem_heap ( p_new+hc->size, new_size-hc->size,
- /*inited*/False );
+ if (p_new) {
+ /* First half kept and copied, second half new */
+ copy_address_range_state( (Addr)p, p_new, hc->size );
+ hg_new_mem_heap ( p_new+hc->size, new_size-hc->size,
+ /*inited*/False );
 
- /* Copy from old to new */
- for (i = 0; i < hc->size; i++)
- ((UChar*)p_new)[i] = ((UChar*)p)[i];
+ /* Copy from old to new */
+ VG_(memcpy)((void *)p_new, p, hc->size);
 
- /* Free old memory */
- die_and_free_mem ( tid, hc, prev_chunks_next_ptr );
+ /* Free old memory */
+ die_and_free_mem ( tid, hc, prev_chunks_next_ptr );
 
- /* this has to be after die_and_free_mem, otherwise the
- former succeeds in shorting out the new block, not the
- old, in the case when both are on the same list. */
- add_HG_Chunk ( tid, p_new, new_size );
+ /* this has to be after die_and_free_mem, otherwise the
+ former succeeds in shorting out the new block, not the
+ old, in the case when both are on the same list. */
+ add_HG_Chunk ( tid, p_new, new_size );
+ }
 
return (void*)p_new;
}
Modified: trunk/massif/ms_main.c
===================================================================
--- trunk/massif/ms_main.c 2005-10-06 03:32:42 UTC (rev 4874)
+++ trunk/massif/ms_main.c 2005-10-06 09:00:17 UTC (rev 4875)
@@ -802,22 +802,26 @@
} else {
// new size is bigger; make new block, copy shared contents, free old
p_new = VG_(cli_malloc)(VG_(clo_alignment), new_size);
- VG_(memcpy)(p_new, p_old, old_size);
- VG_(cli_free)(p_old);
+ if (p_new) {
+ VG_(memcpy)(p_new, p_old, old_size);
+ VG_(cli_free)(p_old);
+ }
}
- 
- old_where = hc->where;
- new_where = get_XCon( tid, /*custom_malloc*/False);
 
- // Update HP_Chunk
- hc->data = (Addr)p_new;
- hc->size = new_size;
- hc->where = new_where;
+ if (p_new) {
+ old_where = hc->where;
+ new_where = get_XCon( tid, /*custom_malloc*/False);
 
- // Update XPt curr_space fields
- if (clo_heap) {
- if (0 != old_size) update_XCon(old_where, -old_size);
- if (0 != new_size) update_XCon(new_where, new_size);
+ // Update HP_Chunk
+ hc->data = (Addr)p_new;
+ hc->size = new_size;
+ hc->where = new_where;
+
+ // Update XPt curr_space fields
+ if (clo_heap) {
+ if (0 != old_size) update_XCon(old_where, -old_size);
+ if (0 != new_size) update_XCon(new_where, new_size);
+ }
}
 
// Now insert the new hc (with a possibly new 'data' field) into
Modified: trunk/memcheck/mac_malloc_wrappers.c
===================================================================
--- trunk/memcheck/mac_malloc_wrappers.c 2005-10-06 03:32:42 UTC (rev 4874)
+++ trunk/memcheck/mac_malloc_wrappers.c 2005-10-06 09:00:17 UTC (rev 4875)
@@ -390,23 +390,26 @@
/* Get new memory */
Addr a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
 
- /* First half kept and copied, second half new, red zones as normal */
- MAC_(ban_mem_heap) ( a_new-MAC_MALLOC_REDZONE_SZB, MAC_MALLOC_REDZONE_SZB );
- MAC_(copy_mem_heap)( (Addr)p_old, a_new, mc->size );
- MAC_(new_mem_heap) ( a_new+mc->size, new_size-mc->size, /*init'd*/False );
- MAC_(ban_mem_heap) ( a_new+new_size, MAC_MALLOC_REDZONE_SZB );
+ if (a_new) {
+ /* First half kept and copied, second half new, red zones as normal */
+ MAC_(ban_mem_heap) ( a_new-MAC_MALLOC_REDZONE_SZB, MAC_MALLOC_REDZONE_SZB );
+ MAC_(copy_mem_heap)( (Addr)p_old, a_new, mc->size );
+ MAC_(new_mem_heap) ( a_new+mc->size, new_size-mc->size, /*init'd*/False );
+ MAC_(ban_mem_heap) ( a_new+new_size, MAC_MALLOC_REDZONE_SZB );
 
- /* Copy from old to new */
- VG_(memcpy)((void*)a_new, p_old, mc->size);
+ /* Copy from old to new */
+ VG_(memcpy)((void*)a_new, p_old, mc->size);
 
- /* Free old memory */
- /* Nb: we have to allocate a new MAC_Chunk for the new memory rather
- than recycling the old one, so that any erroneous accesses to the
- old memory are reported. */
- die_and_free_mem ( tid, mc, MAC_MALLOC_REDZONE_SZB );
+ /* Free old memory */
+ /* Nb: we have to allocate a new MAC_Chunk for the new memory rather
+ than recycling the old one, so that any erroneous accesses to the
+ old memory are reported. */
+ die_and_free_mem ( tid, mc, MAC_MALLOC_REDZONE_SZB );
 
- // Allocate a new chunk.
- mc = create_MAC_Chunk( tid, a_new, new_size, MAC_AllocMalloc );
+ // Allocate a new chunk.
+ mc = create_MAC_Chunk( tid, a_new, new_size, MAC_AllocMalloc );
+ }
+
p_new = (void*)a_new;
}
=20
|