|
From: <sv...@va...> - 2005-09-10 16:02:07
|
Author: sewardj
Date: 2005-09-10 17:02:03 +0100 (Sat, 10 Sep 2005)
New Revision: 4615
Log:
Many changes:
- add infrastructure to deal with client/V mmap requests
- fix bugs in the VG_(aspacem_getAdvisory)
- minor jiggling of m_mallocfree to connect to new aspacem
- at startup, first start m_aspacemgr, then m_mallocfree
Now runs as far as getting dynamic memory management running.
Modified:
branches/ASPACEM/coregrind/m_aspacemgr/aspacemgr.c
branches/ASPACEM/coregrind/m_main.c
branches/ASPACEM/coregrind/m_mallocfree.c
branches/ASPACEM/coregrind/pub_core_aspacemgr.h
branches/ASPACEM/coregrind/pub_core_mallocfree.h
Modified: branches/ASPACEM/coregrind/m_aspacemgr/aspacemgr.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/ASPACEM/coregrind/m_aspacemgr/aspacemgr.c 2005-09-09 12:03:2=
8 UTC (rev 4614)
+++ branches/ASPACEM/coregrind/m_aspacemgr/aspacemgr.c 2005-09-10 16:02:0=
3 UTC (rev 4615)
@@ -43,7 +43,9 @@
#include "pub_core_transtab.h" // For VG_(discard_translations)
#include "vki_unistd.h"
=20
+static void aspacem_barf ( HChar* what );
=20
+
/* Define to debug the memory-leak-detector. */
/* #define VG_DEBUG_LEAKCHECK */
=20
@@ -81,6 +83,7 @@
UInt fd, OffT offset)
{
SysRes res;
+aspacem_barf("mmap_native");
#if defined(VGP_x86_linux)
{=20
UWord args[6];
@@ -106,11 +109,13 @@
=20
SysRes VG_(munmap_native)(void *start, SizeT length)
{
+aspacem_barf("munmap_native");
return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
}
=20
SysRes VG_(mprotect_native)( void *start, SizeT length, UInt prot )
{
+aspacem_barf("mprotect_native");
return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
}
=20
@@ -569,6 +574,7 @@
{
Addr se =3D s->addr+s->len;
Addr pe =3D p+len;
+aspacem_barf("seg_contains");
vg_assert(pe >=3D p);
=20
return (p >=3D s->addr && pe <=3D se);
@@ -578,6 +584,7 @@
{
Addr se =3D s->addr+s->len;
Addr pe =3D p+len;
+aspacem_barf("seg_overlaps");
vg_assert(pe >=3D p);
=20
return (p < se && pe > s->addr);
@@ -616,6 +623,7 @@
Addr end, s_end;
Int i;
Bool deleted;
+aspacem_barf("unmap_range");
=20
if (len =3D=3D 0)
return;
@@ -734,6 +742,7 @@
HChar* stage2_suffix1 =3D "lib/valgrind/stage2";
HChar* stage2_suffix2 =3D "coregrind/stage2";
Bool is_stage2 =3D False;
+aspacem_barf("map_file_segment");
=20
is_stage2 =3D is_stage2 || ( VG_(strstr)(filename, stage2_suffix1) !=3D=
NULL );
is_stage2 =3D is_stage2 || ( VG_(strstr)(filename, stage2_suffix2) !=3D=
NULL );
@@ -845,6 +854,7 @@
{
Char buf[VKI_PATH_MAX];
struct vki_stat st;
+aspacem_barf("map_fd_segment");
=20
st.st_dev =3D 0;
st.st_ino =3D 0;
@@ -866,6 +876,7 @@
=20
void VG_(map_segment)(Addr addr, SizeT len, UInt prot, UInt flags)
{
+aspacem_barf("map_segment");
flags &=3D ~SF_FILE;
=20
VG_(map_file_segment)(addr, len, prot, flags, 0, 0, 0, 0);
@@ -876,6 +887,7 @@
{
Int r;
const Bool debug =3D False || mem_debug;
+aspacem_barf("mprotect_range");
=20
if (debug)
VG_(printf)("\nmprotect_range(%p, %lu, %x)\n", a, len, prot);
@@ -907,6 +919,8 @@
Addr VG_(find_map_space)(Addr addr, SizeT len, Bool for_client)
{
const Bool debug =3D False || mem_debug;
+aspacem_barf("find_map_space");
+
Addr ret;
Addr addrOrig =3D addr;
Addr limit =3D (for_client ? VG_(client_end)-1 : VG_(valgrind_last)=
);
@@ -1047,6 +1061,7 @@
{
Addr addr =3D (start =3D=3D 0) ? VG_(client_base) : start;
SysRes ret;
+aspacem_barf("pad_address_space");
=20
Int i =3D 0;
Segment* s =3D i >=3D segments_used ? NULL : &segments[i];
@@ -1080,6 +1095,7 @@
=20
Int i =3D 0;
Segment* s =3D i >=3D segments_used ? NULL : &segments[i];
+aspacem_barf("unpad_address_space");
=20
while (s && addr <=3D VG_(valgrind_last)) {
if (addr < s->addr) {
@@ -1101,6 +1117,8 @@
Segment *VG_(find_segment)(Addr a)
{
Int r =3D find_segment(a);
+aspacem_barf("find_segment");
+
if (0) show_segments("find_segment");
if (r =3D=3D -1) return NULL;
return &segments[r];
@@ -1113,6 +1131,7 @@
Segment *VG_(find_segment_above_unmapped)(Addr a)
{
Int r =3D find_segment_above_unmapped(a);
+aspacem_barf("find_segment_above_unmapped");
if (0) show_segments("find_segment_above_unmapped");
if (r =3D=3D -1) return NULL;
return &segments[r];
@@ -1125,6 +1144,7 @@
Segment *VG_(find_segment_above_mapped)(Addr a)
{
Int r =3D find_segment_above_mapped(a);
+aspacem_barf("find_segment_above_mapped");
if (0) show_segments("find_segment_above_mapped");
if (r =3D=3D -1) return NULL;
return &segments[r];
@@ -1142,6 +1162,7 @@
Bool VG_(is_addressable)(Addr p, SizeT size, UInt prot)
{
Segment *seg;
+aspacem_barf("is_addressable");
=20
if ((p + size) < p)
return False; /* reject wraparounds */
@@ -1178,6 +1199,7 @@
Int i;
UInt flags;
Segment *s;
+aspacem_barf("find_root_memory");
=20
for (i =3D 0; i < segments_used; i++) {
s =3D &segments[i];
@@ -1202,11 +1224,13 @@
=20
Bool VG_(is_client_addr)(Addr a)
{
+aspacem_barf("is_client_addr");
return a >=3D VG_(client_base) && a < VG_(client_end);
}
=20
Bool VG_(is_shadow_addr)(Addr a)
{
+aspacem_barf("is_shadow_addr");
return a >=3D VG_(shadow_base) && a < VG_(shadow_end);
}
=20
@@ -1220,6 +1244,7 @@
static Addr shadow_alloc =3D 0;
Addr try_here;
SysRes r;
+aspacem_barf("shadow_alloc");
=20
if (0) show_segments("shadow_alloc(before)");
=20
@@ -1277,6 +1302,7 @@
=20
Bool VG_(setup_pointercheck)(Addr client_base, Addr client_end)
{
+aspacem_barf("setup_pointercheck");
vg_assert(0 !=3D client_end);
#if defined(VGP_x86_linux)
/* Client address space segment limit descriptor entry */
@@ -1317,6 +1343,8 @@
=20
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
=20
static void add_to_aspacem_sprintf_buf ( HChar c, void *p )
{
@@ -1354,6 +1382,64 @@
=20
/////////////////////////////////////////////////////////////////
=20
+static
+SysRes do_mmap_NATIVE( Addr start, SizeT length, UInt prot, UInt flags,
+ UInt fd, OffT offset)
+{
+ SysRes res;
+# if defined(VGP_x86_linux)
+ {=20
+ UWord args[6];
+ args[0] =3D (UWord)start;
+ args[1] =3D length;
+ args[2] =3D prot;
+ args[3] =3D flags;
+ args[4] =3D fd;
+ args[5] =3D offset;
+ res =3D VG_(do_syscall1)(__NR_mmap, (UWord)args );
+ }
+# elif defined(VGP_amd64_linux)
+ res =3D VG_(do_syscall6)(__NR_mmap, (UWord)start, length,=20
+ prot, flags, fd, offset);
+# elif defined(VGP_ppc32_linux)
+ res =3D VG_(do_syscall6)(__NR_mmap, (UWord)(start), (length),
+ prot, flags, fd, offset);
+# else
+# error Unknown platform
+# endif
+ return res;
+}
+
+static
+SysRes do_munmap_NATIVE(Addr start, SizeT length)
+{
+ return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
+}
+
+
+
+static=20
+Bool get_inode_for_fd ( Int fd, /*OUT*/UInt* dev, /*OUT*/UInt* ino )
+{
+ return False;
+}
+
+static
+Bool get_name_for_fd ( Int fd, /*OUT*/HChar* buf, Int nbuf )
+{
+ return False;
+}
+
+
+/////////////////////////////////////////////////////////////////
+
+static void aspacem_barf ( HChar* what )
+{
+ VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
+ VG_(debugLog)(0, "aspacem", "Exiting now.\n");
+ VG_(exit)(1);
+}
+
static void aspacem_barf_toolow ( HChar* what )
{
VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s is too low.\n", what)=
;
@@ -1477,8 +1563,18 @@
static Int nsegments_used =3D 0;
=20
=20
+ULong VG_(aspacem_get_anonsize_total)( void )
+{
+ Int i;
+ ULong total =3D 0;
+ for (i =3D 0; i < nsegments_used; i++) {
+ if (nsegments[i].kind !=3D SkAnon)
+ continue;
+ total +=3D (ULong)nsegments[i].end - (ULong)nsegments[i].start + 1UL=
L;
+ }
+ return total;
+}
=20
-
/* check the interval array */
static void check_nsegments ( void )
{
@@ -1491,7 +1587,31 @@
}
=20
=20
+/* Binary search the interval array for a given address. Since the
+ array covers the entire address space the search cannot fail. */
+static Int find_nsegment_idx ( Addr a )
+{
+ Addr a_mid_lo, a_mid_hi;
+ Int mid,
+ lo =3D 0,
+ hi =3D nsegments_used-1;
+ while (True) {
+ /* current unsearched space is from lo to hi, inclusive. */
+ if (lo > hi) {
+ /* Not found. This can't happen. */
+ aspacem_barf("find_nsegment_idx: not found");
+ }
+ mid =3D (lo + hi) / 2;
+ a_mid_lo =3D nsegments[mid].start;
+ a_mid_hi =3D nsegments[mid].end;
=20
+ if (a < a_mid_lo) { hi =3D mid-1; continue; }
+ if (a > a_mid_hi) { lo =3D mid+1; continue; }
+ aspacem_assert(a >=3D a_mid_lo && a <=3D a_mid_hi);
+ aspacem_assert(0 <=3D mid && mid < nsegments_used);
+ return mid;
+ }
+}
=20
=20
=20
@@ -1513,81 +1633,85 @@
Bool VG_(aspacem_getAdvisory)
( MapRequest* req, Bool forClient, /*OUT*/Addr* result )
{
- return False;
-#if 0
+
/* Iterate over all holes in the address space, twice. In the first
-pass, find the first hole which is not below the search start
-point. */
+ pass, find the first hole which is not below the search start
+ point. */
Addr holeStart, holeEnd, holeLen;
Int i, j;
+ Bool fixed_not_required;
=20
- Addr minAddr =3D Addr_MIN;
- Addr maxAddr =3D Addr_MAX;
Addr startPoint =3D forClient ? aspacem_cStart : aspacem_vStart;
=20
Addr reqStart =3D req->rkind=3D=3DMAny ? 0 : req->start;
Addr reqEnd =3D reqStart + req->len - 1;
Addr reqLen =3D req->len;
=20
- Addr floatStart =3D 0;
- Bool floatFound =3D False;
+ /* These hold indices for segments found during search, or -1 if not
+ found. */
+ Int floatIdx =3D -1;
+ Int fixedIdx =3D -1;
=20
/* Don't waste time looking for a fixed match if not requested to. */
- Bool fixedFound =3D req->rkind=3D=3DMAny ? True : False;
+ aspacem_assert(nsegments_used > 0);
+ fixed_not_required =3D req->rkind =3D=3D MAny;
=20
- for (i =3D 0; i <=3D/*yes,really*/ nsegments_used; i++) {
- holeEnd =3D i=3D=3Dnsegments_used
- ? maxAddr
- : nsegments[i].start - 1;
- if (holeEnd >=3D startPoint)
- break;
- }
+ i =3D find_nsegment_idx(startPoint);
=20
+ if (0) VG_(debugLog)(0,"","startPoint %p, idx %d, reqlen %d\n",=20
+ startPoint,i,reqLen);
+
/* Now examine holes from index i back round to i-1. Record the
- size and length of the first fixed hole and the first floating
- hole which would satisfy the request. */
+ index first fixed hole and the first floating hole which would
+ satisfy the request. */
for (j =3D 0; j < nsegments_used; j++) {
=20
- holeStart =3D i=3D=3D0=20
- ? minAddr=20
- : nsegments[i-1].start + nsegments[i-1].len - 1;
- holeEnd =3D i=3D=3Dnsegments_used
- ? maxAddr
- : nsegments[i].start - 1;
+ if (nsegments[i].kind !=3D SkFree) {
+ i++;
+ if (i >=3D nsegments_used) i =3D 0;
+ continue;
+ }
=20
- /* Clamp the hole to something plausible */
- if (holeStart < aspacem_minAddr) holeStart =3D aspacem_minAddr;
- if (holeEnd > aspacem_maxAddr) holeEnd =3D aspacem_maxAddr;
+ holeStart =3D nsegments[i].start;
+ holeEnd =3D nsegments[i].end;
=20
- /* If it still looks viable, see if it's any use to us. */
- if (holeStart < holeEnd) {
+ /* Stay sane .. */
+ aspacem_assert(holeStart <=3D holeEnd);
+ aspacem_assert(aspacem_minAddr <=3D holeStart);
+ aspacem_assert(holeEnd <=3D aspacem_maxAddr);
=20
- holeLen =3D holeEnd - holeStart + 1;
+ /* See if it's any use to us. */
+ holeLen =3D holeEnd - holeStart + 1;
=20
- if (!fixedFound=20
+ if (fixedIdx =3D=3D -1=20
&& holeStart <=3D reqStart && reqEnd <=3D holeEnd) {
- fixedFound =3D True;
+ fixedIdx =3D i;
}
=20
- if (!floatFound
+ if (floatIdx =3D=3D -1
&& holeLen >=3D reqLen) {
- floatFound =3D True;
- floatStart =3D holeStart;
+ floatIdx =3D i;
}
- }
-
+ =20
/* Don't waste time searching once we've found what we wanted. */
- if (fixedFound && floatFound)
+ if ((fixed_not_required || fixedIdx >=3D 0) && floatIdx >=3D 0)
break;
=20
i++;
if (i >=3D nsegments_used) i =3D 0;
}
=20
+aspacem_assert(fixedIdx >=3D -1 && fixedIdx < nsegments_used);
+aspacem_assert(floatIdx >=3D -1 && floatIdx < nsegments_used);
+if (fixedIdx >=3D 0)=20
+aspacem_assert(nsegments[fixedIdx].kind =3D=3D SkFree);
+if (floatIdx >=3D 0)=20
+aspacem_assert(nsegments[floatIdx].kind =3D=3D SkFree);
+
/* Now see if we found anything which can satisfy the request. */
switch (req->rkind) {
case MFixed:
- if (fixedFound) {
+ if (fixedIdx >=3D 0) {
*result =3D req->start;
return True;
} else {
@@ -1595,18 +1719,18 @@
}
break;
case MHint:
- if (fixedFound) {
+ if (fixedIdx >=3D 0) {
*result =3D req->start;
return True;
}
- if (floatFound) {
- *result =3D floatStart;
+ if (floatIdx >=3D 0) {
+ *result =3D nsegments[floatIdx].start;
return True;
}
return False;
case MAny:
- if (floatFound) {
- *result =3D floatStart;
+ if (floatIdx >=3D 0) {
+ *result =3D nsegments[floatIdx].start;
return True;
}
return False;
@@ -1614,9 +1738,8 @@
default: break;
}
/*NOTREACHED*/
- aspacemgr_barf("getAdvisory: unknown request kind");
+ aspacem_barf("getAdvisory: unknown request kind");
return False;
-#endif
}
=20
=20
@@ -1642,6 +1765,8 @@
static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
{
aspacem_assert(start < end);
+ aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
init_nsegment(seg);
seg->kind =3D SkResvn;
seg->start =3D start;
@@ -1652,9 +1777,9 @@
{
switch (seg->kind) {
case SkFree: return "FREE";
- case SkAnon: return seg->isClient ? "ANON" : "anon";
- case SkFile: return seg->isClient ? "FILE" : "file";
- case SkResvn: return seg->isClient ? "RSVN" : "rsvn";
+ case SkAnon: return seg->isClient ? "anon" : "ANON";
+ case SkFile: return seg->isClient ? "file" : "FILE";
+ case SkResvn: return "RSVN";
default: return "????";
}
}
@@ -1787,6 +1912,8 @@
Addr dEnd =3D seg->end;
=20
aspacem_assert(dStart <=3D dEnd);
+ aspacem_assert(VG_IS_PAGE_ALIGNED(dStart));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(dEnd+1));
=20
nDeld =3D 0;
=20
@@ -1911,6 +2038,11 @@
{
NSegment seg;
=20
+ aspacem_assert(sizeof(Word) =3D=3D sizeof(void*));
+ aspacem_assert(sizeof(Addr) =3D=3D sizeof(void*));
+ aspacem_assert(sizeof(SizeT) =3D=3D sizeof(void*));
+ aspacem_assert(sizeof(SSizeT) =3D=3D sizeof(void*));
+
/* Add a single interval covering the entire address space. */
init_nsegment(&seg);
seg.kind =3D SkFree;
@@ -1933,6 +2065,11 @@
aspacem_cStart =3D (Addr)0x04000000; // 64M
aspacem_vStart =3D (aspacem_minAddr + aspacem_maxAddr + 1) / 2;
=20
+ aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
+
VG_(debugLog)(2, "aspacem", "minAddr =3D 0x%llx\n", (ULong)aspacem_mi=
nAddr);
VG_(debugLog)(2, "aspacem", "maxAddr =3D 0x%llx\n", (ULong)aspacem_ma=
xAddr);
VG_(debugLog)(2, "aspacem", " cStart =3D 0x%llx\n", (ULong)aspacem_cS=
tart);
@@ -1947,6 +2084,14 @@
add_segment(&seg);
}
=20
+ /* Create a 1-page reservation at the notional initial
+ client/valgrind boundary. This isn't strictly necessary, but
+ because the advisor does first-fit and starts searches for
+ valgrind allocations at the boundary, this is kind of necessary
+ in order to get it to start allocating in the right place. */
+ init_resvn(&seg, aspacem_vStart, aspacem_vStart + VKI_PAGE_SIZE - 1)=
;
+ add_segment(&seg);
+
show_nsegments(2, "Initial layout");
=20
VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
@@ -1956,6 +2101,180 @@
}
=20
=20
+SysRes VG_(mmap_file_fixed_client)
+ ( void* startV, SizeT length, Int prot, Int fd, SizeT offset )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+ UInt dev, ino;
+ HChar buf[VKI_PATH_MAX];
+=20
+ Addr start =3D (Addr)startV;=20
+=20
+ /* Not allowable. */
+ /* Not allowable. */
+ if (length =3D=3D 0 || !VG_IS_PAGE_ALIGNED(start))
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately. */
+ req.rkind =3D MFixed;
+ req.start =3D start;
+ req.len =3D length;
+ ok =3D VG_(aspacem_getAdvisory)( &req, True/*client*/, &advised );
+ if (!ok || advised !=3D start)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres =3D do_mmap_NATIVE( start, length, prot,=20
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE, fd, offset );
+ if (sres.isError)
+ return sres;
+
+ if (sres.val !=3D start) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)do_munmap_NATIVE( sres.val, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind =3D SkFile;
+ seg.isClient =3D True;
+ seg.start =3D start;
+ seg.end =3D seg.start + VG_PGROUNDUP(length) - 1;
+ seg.offset =3D offset;
+ seg.hasR =3D toBool(prot & VKI_PROT_READ);
+ seg.hasW =3D toBool(prot & VKI_PROT_WRITE);
+ seg.hasX =3D toBool(prot & VKI_PROT_EXEC);
+ if (get_inode_for_fd(fd, &dev, &ino)) {
+ seg.dev =3D dev;
+ seg.ino =3D ino;
+ }
+ if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
+ seg.fnIdx =3D allocate_segname( buf );
+ }
+ add_segment( &seg );
+
+ return sres;
+}
+
+
+SysRes VG_(mmap_anon_fixed_client)
+ ( void* startV, SizeT length, Int prot )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+=20
+ Addr start =3D (Addr)startV;=20
+=20
+ /* Not allowable. */
+ if (length =3D=3D 0 || !VG_IS_PAGE_ALIGNED(start))
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately. */
+ req.rkind =3D MFixed;
+ req.start =3D start;
+ req.len =3D length;
+ ok =3D VG_(aspacem_getAdvisory)( &req, True/*client*/, &advised );
+ if (!ok || advised !=3D start)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres =3D do_mmap_NATIVE( start, length, prot,=20
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE
+ |VKI_MAP_ANONYMOUS, 0, 0 );
+ if (sres.isError)
+ return sres;
+
+ if (sres.val !=3D start) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)do_munmap_NATIVE( sres.val, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind =3D SkAnon;
+ seg.isClient =3D True;
+ seg.start =3D start;
+ seg.end =3D seg.start + VG_PGROUNDUP(length) - 1;
+ seg.hasR =3D toBool(prot & VKI_PROT_READ);
+ seg.hasW =3D toBool(prot & VKI_PROT_WRITE);
+ seg.hasX =3D toBool(prot & VKI_PROT_EXEC);
+ add_segment( &seg );
+
+ return sres;
+}
+
+
+SysRes VG_(map_anon_float_valgrind)( SizeT length )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+=20
+ /* Not allowable. */
+ if (length =3D=3D 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately. */
+ req.rkind =3D MAny;
+ req.start =3D 0;
+ req.len =3D length;
+ ok =3D VG_(aspacem_getAdvisory)( &req, False/*valgrind*/, &advised );
+ if (!ok)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres =3D do_mmap_NATIVE( advised, length,=20
+ VKI_PROT_READ|VKI_PROT_WRITE
+ |VKI_PROT_EXEC,=20
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE
+ |VKI_MAP_ANONYMOUS, 0, 0 );
+ if (sres.isError)
+ return sres;
+
+ if (sres.val !=3D advised) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)do_munmap_NATIVE( sres.val, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind =3D SkAnon;
+ seg.isClient =3D False;
+ seg.start =3D advised;
+ seg.end =3D seg.start + VG_PGROUNDUP(length) - 1;
+ seg.hasR =3D True;
+ seg.hasW =3D True;
+ seg.hasX =3D True;
+ add_segment( &seg );
+
+ return sres;
+}
+
+
/*--------------------------------------------------------------------*/
/*--- end ---*/
/*--------------------------------------------------------------------*/
Modified: branches/ASPACEM/coregrind/m_main.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/ASPACEM/coregrind/m_main.c 2005-09-09 12:03:28 UTC (rev 4614=
)
+++ branches/ASPACEM/coregrind/m_main.c 2005-09-10 16:02:03 UTC (rev 4615=
)
@@ -2080,14 +2080,14 @@
/* ... and start the debug logger. Now we can safely emit logging
messages all through startup. */
VG_(debugLog_startup)(loglevel, "Stage 2 (main)");
+ VG_(debugLog)(1, "main", "Welcome to Valgrind version "=20
+ VERSION " debug logging.\n");
=20
//--------------------------------------------------------------
- // Start up the address space manager
+ // Ensure we're on a plausible stack.
// p: logging
//--------------------------------------------------------------
- VG_(debugLog)(1, "main", "Starting the address space manager\n");
-
- /* Ensure we're on a plausible stack. */
+ VG_(debugLog)(1, "main", "Checking we're on a plausible stack\n");
{ HChar* limLo =3D (HChar*)(&VG_(the_root_stack)[0]);
HChar* limHi =3D limLo + sizeof(VG_(the_root_stack));
HChar* aLocal =3D (HChar*)&zero; /* any auto local will do */
@@ -2101,8 +2101,27 @@
}
}
=20
+ //--------------------------------------------------------------
+ // Start up the address space manager
+ // p: logging, plausible-stack
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Starting the address space manager\n");
VG_(new_aspacem_start)();
+ VG_(debugLog)(1, "main", "Address space manager is running\n");
=20
+ //--------------------------------------------------------------
+ // Start up the dynamic memory manager
+ // p: address space management
+ // In fact m_mallocfree is self-initialising, so there's no
+ // initialisation call to do. Instead, try a simple malloc/
+ // free pair right now to check that nothing is broken.
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Starting the dynamic memory manager\n");
+ { void* p =3D VG_(malloc)( 12345 );
+ if (p) VG_(free)( p );
+ }
+ VG_(debugLog)(1, "main", "Dynamic memory manager is running\n");
+
//=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
// Command line argument handling order:
// * If --help/--help-debug are present, show usage message=20
@@ -2126,6 +2145,10 @@
//--------------------------------------------------------------
// Check we were launched by stage1
// p: none
+ // TODO: this is pretty pointless now. Plus, we shouldn't be
+ // screwing with our own auxv: instead, when our own auxv is
+ // used as the basis for the client's one, make modifications
+ // at that point.
//--------------------------------------------------------------
VG_(debugLog)(1, "main", "Doing scan_auxv()\n");
{
@@ -2177,8 +2200,8 @@
// Finalise address space layout
// p: load_tool() [probably?]
//--------------------------------------------------------------
- VG_(debugLog)(1, "main", "Laying out remaining space\n");
- layout_remaining_space( (Addr) & argc, VG_(tool_info).shadow_ratio );
+ //VG_(debugLog)(1, "main", "Laying out remaining space\n");
+ //layout_remaining_space( (Addr) & argc, VG_(tool_info).shadow_ratio =
);
=20
//--------------------------------------------------------------
// Load client executable, finding in $PATH if necessary
Modified: branches/ASPACEM/coregrind/m_mallocfree.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/ASPACEM/coregrind/m_mallocfree.c 2005-09-09 12:03:28 UTC (re=
v 4614)
+++ branches/ASPACEM/coregrind/m_mallocfree.c 2005-09-10 16:02:03 UTC (re=
v 4615)
@@ -30,7 +30,12 @@
*/
=20
#include "pub_core_basics.h"
+#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
+
+#include "pub_core_debuginfo.h" // Needed for pub_core_aspacemgr :(
+#include "pub_core_aspacemgr.h"
+
#include "pub_core_libcassert.h"
#include "pub_core_libcmman.h"
#include "pub_core_libcprint.h"
@@ -473,6 +478,36 @@
/*--- Superblock management ---*/
/*------------------------------------------------------------*/
=20
+static
+void VG_(out_of_memory_NORETURN) ( HChar* who, SizeT szB )
+{
+ static Bool alreadyCrashing =3D False;
+ ULong tot_alloc =3D VG_(aspacem_get_anonsize_total)();
+ if (!alreadyCrashing) {
+ alreadyCrashing =3D True;
+ VG_(printf)("\n"
+ "Valgrind's memory management: out of memory:\n");
+ VG_(printf)(" %s's request for %llu bytes failed.\n",=20
+ who, (ULong)szB );
+ VG_(printf)(" %llu bytes have already been allocated.\n",=20
+ tot_alloc);
+ VG_(printf)("Valgrind cannot continue. Sorry.\n\n");
+ } else {
+ VG_(debugLog)(0,"mallocfree","\n");
+ VG_(debugLog)(0,"mallocfree",
+ "Valgrind's memory management: out of memory:\n");
+ VG_(debugLog)(0,"mallocfree",
+ " %s's request for %llu bytes failed.\n",=20
+ who, (ULong)szB );
+ VG_(debugLog)(0,"mallocfree",
+ " %llu bytes have already been allocated.\n",=20
+ tot_alloc);
+ VG_(debugLog)(0,"mallocfree","Valgrind cannot continue. Sorry.\n\=
n");
+ }
+ VG_(exit)(1);
+}
+
+
// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
@@ -487,10 +522,8 @@
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
- // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
- static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZ=
B];
- static Bool called_before =3D True; //False;
Superblock* sb;
+ SysRes sres;
=20
// Take into account admin bytes in the Superblock.
cszB +=3D sizeof(Superblock);
@@ -498,32 +531,30 @@
if (cszB < a->min_sblock_szB) cszB =3D a->min_sblock_szB;
while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;
=20
- if (!called_before) {
- // First time we're called -- use the special static bootstrap
- // superblock (see comment at top of main() for details).
- called_before =3D True;
- vg_assert(a =3D=3D arenaId_to_ArenaP(VG_AR_CORE));
- vg_assert(CORE_ARENA_MIN_SZB >=3D cszB);
- // Ensure sb is suitably aligned.
- sb =3D (Superblock*)align_upwards( bootstrap_superblock,=20
- VG_MIN_MALLOC_SZB );
- } else if (a->clientmem) {
+ if (a->clientmem) {
// client allocation -- return 0 to client if it fails
sb =3D (Superblock*)VG_(get_memory_from_mmap_for_client)(cszB);
if (NULL =3D=3D sb)
return 0;
} else {
// non-client allocation -- aborts if it fails
- sb =3D VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
+ sres =3D VG_(map_anon_float_valgrind)( cszB );
+ if (sres.isError) {
+ VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
+ /* NOTREACHED */
+ sb =3D NULL; /* keep gcc happy */
+ } else {
+ sb =3D (Superblock*)sres.val;
+ }
}
vg_assert(NULL !=3D sb);
//zzVALGRIND_MAKE_WRITABLE(sb, cszB);
vg_assert(0 =3D=3D (Addr)sb % VG_MIN_MALLOC_SZB);
sb->n_payload_bytes =3D cszB - sizeof(Superblock);
a->bytes_mmaped +=3D cszB;
- if (0)
- VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",=20
- sb->n_payload_bytes);
+ if (1)
+ VG_(message)(Vg_DebugMsg, "newSuperblock at %p, %d payload bytes",=
=20
+ sb, sb->n_payload_bytes);
return sb;
}
=20
Modified: branches/ASPACEM/coregrind/pub_core_aspacemgr.h
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/ASPACEM/coregrind/pub_core_aspacemgr.h 2005-09-09 12:03:28 U=
TC (rev 4614)
+++ branches/ASPACEM/coregrind/pub_core_aspacemgr.h 2005-09-10 16:02:03 U=
TC (rev 4615)
@@ -176,6 +176,20 @@
Bool VG_(aspacem_getAdvisory)
( MapRequest* req, Bool forClient, /*OUT*/Addr* result );
=20
+extern
+SysRes VG_(mmap_file_fixed_client)
+ ( void* startV, SizeT length, Int prot, Int fd, SizeT offset );
+
+extern
+SysRes VG_(mmap_anon_fixed_client)
+ ( void* startV, SizeT length, Int prot );
+
+extern
+SysRes VG_(map_anon_float_valgrind)( SizeT cszB );
+
+extern ULong VG_(aspacem_get_anonsize_total)( void );
+
+
#endif // __PUB_CORE_ASPACEMGR_H
=20
/*--------------------------------------------------------------------*/
Modified: branches/ASPACEM/coregrind/pub_core_mallocfree.h
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/ASPACEM/coregrind/pub_core_mallocfree.h 2005-09-09 12:03:28 =
UTC (rev 4614)
+++ branches/ASPACEM/coregrind/pub_core_mallocfree.h 2005-09-10 16:02:03 =
UTC (rev 4615)
@@ -77,10 +77,6 @@
SizeT req_pszB );
extern Char* VG_(arena_strdup) ( ArenaId aid, const Char* s);
=20
-/* Sets the size of the redzones at the start and end of heap blocks. T=
his
- must be called before any of VG_(malloc) and friends are called. */
-extern void VG_(set_client_malloc_redzone_szB) ( SizeT rz_szB );
-
extern SizeT VG_(arena_payload_szB) ( ArenaId aid, void* payload );
=20
extern void VG_(sanity_check_malloc_all) ( void );
|