|
From: <sv...@va...> - 2009-05-10 22:14:49
|
Author: njn
Date: 2009-05-10 23:14:40 +0100 (Sun, 10 May 2009)
New Revision: 9828
Log:
Fix another layering violation in aspacemgr; it's now (more or less)
self-sufficient again.
Modified:
branches/DARWIN/coregrind/m_aspacehl.c
branches/DARWIN/coregrind/m_aspacemgr/aspacemgr-linux.c
branches/DARWIN/coregrind/m_syswrap/priv_syswrap-generic.h
branches/DARWIN/coregrind/m_syswrap/syswrap-amd64-darwin.c
branches/DARWIN/coregrind/m_syswrap/syswrap-darwin.c
branches/DARWIN/coregrind/m_syswrap/syswrap-generic.c
branches/DARWIN/coregrind/m_syswrap/syswrap-linux.c
branches/DARWIN/coregrind/m_syswrap/syswrap-x86-darwin.c
branches/DARWIN/coregrind/pub_core_aspacemgr.h
Modified: branches/DARWIN/coregrind/m_aspacehl.c
===================================================================
--- branches/DARWIN/coregrind/m_aspacehl.c 2009-05-10 20:17:27 UTC (rev 9827)
+++ branches/DARWIN/coregrind/m_aspacehl.c 2009-05-10 22:14:40 UTC (rev 9828)
@@ -64,8 +64,6 @@
return starts;
}
-
-
/*--------------------------------------------------------------------*/
/*--- end ---*/
/*--------------------------------------------------------------------*/
Modified: branches/DARWIN/coregrind/m_aspacemgr/aspacemgr-linux.c
===================================================================
--- branches/DARWIN/coregrind/m_aspacemgr/aspacemgr-linux.c 2009-05-10 20:17:27 UTC (rev 9827)
+++ branches/DARWIN/coregrind/m_aspacemgr/aspacemgr-linux.c 2009-05-10 22:14:40 UTC (rev 9828)
@@ -3,7 +3,7 @@
/*--- The address space manager: segment initialisation and ---*/
/*--- tracking, stack operations ---*/
/*--- ---*/
-/*--- Implementation for Linux m_aspacemgr-linux.c ---*/
+/*--- Implementation for Linux (and Darwin!) m_aspacemgr-linux.c ---*/
/*--------------------------------------------------------------------*/
/*
@@ -3273,13 +3273,12 @@
static UInt stats_machcalls = 0;
-static void
-parse_procselfmaps (
- void (*record_mapping)( Addr addr, SizeT len, UInt prot,
- ULong dev, ULong ino, Off64T foff,
- const UChar* filename ),
- void (*record_gap)( Addr addr, SizeT len )
- )
+static void parse_procselfmaps (
+ void (*record_mapping)( Addr addr, SizeT len, UInt prot,
+ ULong dev, ULong ino, Off64T offset,
+ const UChar* filename ),
+ void (*record_gap)( Addr addr, SizeT len )
+ )
{
vm_address_t iter;
unsigned int depth;
@@ -3324,31 +3323,10 @@
(*record_gap)(last, (Addr)-1 - last);
}
+ChangedSeg* css_local;
+Int css_size_local;
+Int css_used_local;
-// GrP hack
-extern void ML_(notify_aspacem_and_tool_of_mmap)
- ( Addr a, SizeT len, UInt prot, UInt flags, Int fd, Off64T offset );
-extern void ML_(notify_aspacem_and_tool_of_munmap) ( Addr a, SizeT len );
-
-typedef
- struct {
- Bool is_added; // Added or removed seg?
- Addr start;
- SizeT end;
- UInt prot; // Not used for removed segs.
- Off64T offset; // Not used for removed segs.
- }
- ChangedSeg;
-
-// I haven't seen more than 1 segment be added or removed in a single calls to
-// VG_(sync_mappings). So 20 seems generous. However, if it needs to be made
-// larger, we know that it'll never need to be larger than 'segnames_used', so
-// an array of that size could be dynamically allocated in VG_(sync_mappings).
-// --njn
-#define CHANGED_SEGS_SIZE 20
-static ChangedSeg changed_segs[CHANGED_SEGS_SIZE];
-static Int changed_segs_used;
-
static void add_mapping_callback(Addr addr, SizeT len, UInt prot,
ULong dev, ULong ino, Off64T offset,
const UChar *filename)
@@ -3378,14 +3356,14 @@
}
else if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn) {
/* Add mapping for SkResvn regions */
- ChangedSeg* cs = &changed_segs[changed_segs_used];
- aspacem_assert(changed_segs_used < CHANGED_SEGS_SIZE);
+ ChangedSeg* cs = &css_local[css_used_local];
+ aspacem_assert(css_used_local < css_size_local);
cs->is_added = True;
cs->start = addr;
cs->end = addr + len - 1;
cs->prot = prot;
cs->offset = offset;
- changed_segs_used++;
+ css_used_local++;
return;
} else if (nsegments[i].kind == SkAnonC ||
@@ -3437,58 +3415,42 @@
for (i = iLo; i <= iHi; i++) {
if (nsegments[i].kind != SkFree && nsegments[i].kind != SkResvn) {
// V has a mapping, kernel doesn't
- ChangedSeg* cs = &changed_segs[changed_segs_used];
- aspacem_assert(changed_segs_used < CHANGED_SEGS_SIZE);
+ ChangedSeg* cs = &css_local[css_used_local];
+ aspacem_assert(css_used_local < css_size_local);
 cs->is_added = False;
cs->start = nsegments[i].start;
cs->end = nsegments[i].end;
cs->prot = 0;
cs->offset = 0;
- changed_segs_used++;
+ css_used_local++;
return;
}
}
}
-void VG_(sync_mappings)(const HChar *when, const HChar *where, Int num)
+void VG_(get_changed_segments)(
+ const HChar* when, const HChar* where, /*OUT*/ChangedSeg* css,
+ Int css_size, /*OUT*/Int* css_used)
{
- Int i;
static UInt stats_synccalls = 1;
aspacem_assert(when && where);
if (0)
VG_(debugLog)(0,"aspacem",
- "[%u,%u] VG_(sync_mappings)(%s, %s, %d)\n",
- stats_synccalls++, stats_machcalls, when, where, num
+ "[%u,%u] VG_(get_changed_segments)(%s, %s)\n",
+ stats_synccalls++, stats_machcalls, when, where
);
- changed_segs_used = 0;
+ css_local = css;
+ css_size_local = css_size;
+ css_used_local = 0;
// Get the list of segs that need to be added/removed.
parse_procselfmaps(&add_mapping_callback, &remove_mapping_callback);
- // Now add/remove them.
- for (i = 0; i < changed_segs_used; i++) {
- ChangedSeg* cs = &changed_segs[i];
- Char* action;
- if (cs->is_added) {
- ML_(notify_aspacem_and_tool_of_mmap)(
- cs->start, cs->end - cs->start + 1,
- cs->prot, VKI_MAP_PRIVATE, 0, cs->offset);
- action = "added";
-
- } else {
- ML_(notify_aspacem_and_tool_of_munmap)(
- cs->start, cs->end - cs->start + 1);
- action = "removed";
- }
- if (VG_(clo_trace_syscalls)) {
- VG_(debugLog)(0, "aspacem",
- "\n%s region 0x%010lx..0x%010lx at %s (%s)",
- action, cs->start, cs->end + 1, where, when);
- }
- }
+ *css_used = css_used_local;
}
#endif
Modified: branches/DARWIN/coregrind/m_syswrap/priv_syswrap-generic.h
===================================================================
--- branches/DARWIN/coregrind/m_syswrap/priv_syswrap-generic.h 2009-05-10 20:17:27 UTC (rev 9827)
+++ branches/DARWIN/coregrind/m_syswrap/priv_syswrap-generic.h 2009-05-10 22:14:40 UTC (rev 9828)
@@ -62,16 +62,15 @@
extern
Bool ML_(do_sigkill)(Int pid, Int tgid);
-/* So that it can be seen from syswrap-$VG_PLATFORM.c. */
-/* When a client mmap or munmap has been successfully done, both aspacem
+/* When a client mmap or munmap has been successfully done, both the core
and the tool need to be notified of the new mapping. Hence this fn. */
extern void
-ML_(notify_aspacem_and_tool_of_mmap) ( Addr a, SizeT len, UInt prot,
- UInt mm_flags, Int fd, Off64T offset );
+ML_(notify_core_and_tool_of_mmap) ( Addr a, SizeT len, UInt prot,
+ UInt mm_flags, Int fd, Off64T offset );
extern void
-ML_(notify_aspacem_and_tool_of_munmap) ( Addr a, SizeT len );
+ML_(notify_core_and_tool_of_munmap) ( Addr a, SizeT len );
extern void
-ML_(notify_aspacem_and_tool_of_mprotect) ( Addr a, SizeT len, Int prot );
+ML_(notify_core_and_tool_of_mprotect) ( Addr a, SizeT len, Int prot );
extern void
ML_(buf_and_len_pre_check) ( ThreadId tid, Addr buf_p, Addr buflen_p,
Modified: branches/DARWIN/coregrind/m_syswrap/syswrap-amd64-darwin.c
===================================================================
--- branches/DARWIN/coregrind/m_syswrap/syswrap-amd64-darwin.c 2009-05-10 20:17:27 UTC (rev 9827)
+++ branches/DARWIN/coregrind/m_syswrap/syswrap-amd64-darwin.c 2009-05-10 22:14:40 UTC (rev 9828)
@@ -320,17 +320,16 @@
tst->client_stack_szB = stacksize;
// pthread structure
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack+stacksize, pthread_structsize,
- VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack+stacksize, pthread_structsize,
+ VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
// stack contents
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack, stacksize,
- VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack, stacksize,
+ VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
// guard page
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
- 0, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE, 0, VKI_MAP_PRIVATE, -1, 0);
} else {
// client allocated stack
find_stack_segment(tst->tid, sp);
@@ -433,19 +432,18 @@
// GrP fixme scheduler lock?!
// pthread structure
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack+stacksize, pthread_structsize,
- VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack+stacksize, pthread_structsize,
+ VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
// stack contents
// GrP fixme uninitialized!
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack, stacksize,
- VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack, stacksize,
+ VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
// guard page
// GrP fixme ban_mem_stack!
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
- 0, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE, 0, VKI_MAP_PRIVATE, -1, 0);
VG_(am_do_sync_check)("after", "wqthread_hijack", 0);
Modified: branches/DARWIN/coregrind/m_syswrap/syswrap-darwin.c
===================================================================
--- branches/DARWIN/coregrind/m_syswrap/syswrap-darwin.c 2009-05-10 20:17:27 UTC (rev 9827)
+++ branches/DARWIN/coregrind/m_syswrap/syswrap-darwin.c 2009-05-10 22:14:40 UTC (rev 9828)
@@ -586,6 +586,46 @@
/* ---------------------------------------------------------------------
+ sync_mappings
+ ------------------------------------------------------------------ */
+
+static void sync_mappings(const HChar *when, const HChar *where, Int num)
+{
+ // I haven't seen more than 1 segment be added or removed in a single call
+ // to sync_mappings(). So 20 seems generous. The upper bound is the
+ // number of segments currently in use. --njn
+ #define CSS_SIZE 20
+ ChangedSeg css[CSS_SIZE];
+ Int css_used;
+ Int i;
+
+ VG_(get_changed_segments)(when, where, css, CSS_SIZE, &css_used);
+
+ // Now add/remove them.
+ for (i = 0; i < css_used; i++) {
+ ChangedSeg* cs = &css[i];
+ Char* action;
+ if (cs->is_added) {
+ ML_(notify_core_and_tool_of_mmap)(
+ cs->start, cs->end - cs->start + 1,
+ cs->prot, VKI_MAP_PRIVATE, 0, cs->offset);
+ // should this call VG_(di_notify_mmap) also?
+ action = "added";
+
+ } else {
+ ML_(notify_core_and_tool_of_munmap)(
+ cs->start, cs->end - cs->start + 1);
+ action = "removed";
+ }
+ if (VG_(clo_trace_syscalls)) {
+ VG_(debugLog)(0, "aspacem",
+ "\n%s region 0x%010lx..0x%010lx at %s (%s)\n",
+ action, cs->start, cs->end + 1, where, when);
+ }
+ }
+}
+
+/* ---------------------------------------------------------------------
wrappers
------------------------------------------------------------------ */
@@ -3108,8 +3148,7 @@
POST(sys_mmap)
{
if (RES != -1) {
- ML_(notify_aspacem_and_tool_of_mmap)
- (RES, ARG2, ARG3, ARG4, ARG5, ARG6);
+ ML_(notify_core_and_tool_of_mmap)(RES, ARG2, ARG3, ARG4, ARG5, ARG6);
// Try to load symbols from the region
VG_(di_notify_mmap)( (Addr)RES, False/*allow_SkFileV*/ );
}
@@ -3349,9 +3388,9 @@
PRINT("got ool mem %p..%#lx; ", desc->out_of_line.address,
(Addr)desc->out_of_line.address+desc->out_of_line.size);
- ML_(notify_aspacem_and_tool_of_mmap)
- (start, end - start, VKI_PROT_READ|VKI_PROT_WRITE,
- VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ start, end - start, VKI_PROT_READ|VKI_PROT_WRITE,
+ VKI_MAP_PRIVATE, -1, 0);
}
// GrP fixme mark only un-rounded part as initialized
break;
@@ -3366,9 +3405,9 @@
Addr end = VG_PGROUNDUP((Addr)desc->ool_ports.address + desc->ool_ports.count * sizeof(mach_port_t));
mach_port_t *ports = (mach_port_t *)desc->ool_ports.address;
- ML_(notify_aspacem_and_tool_of_mmap)
- (start, end - start, VKI_PROT_READ|VKI_PROT_WRITE,
- VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ start, end - start, VKI_PROT_READ|VKI_PROT_WRITE,
+ VKI_MAP_PRIVATE, -1, 0);
PRINT(":");
for (i = 0; i < desc->ool_ports.count; i++) {
@@ -3500,7 +3539,7 @@
Addr end = VG_PGROUNDUP((Addr)desc->out_of_line.address + size);
PRINT("kill ool mem %p..%#lx; ", desc->out_of_line.address,
(Addr)desc->out_of_line.address + size);
- ML_(notify_aspacem_and_tool_of_munmap)(start, end - start);
+ ML_(notify_core_and_tool_of_munmap)(start, end - start);
}
break;
@@ -3516,7 +3555,7 @@
Addr end = VG_PGROUNDUP((Addr)desc->ool_ports.address + size);
PRINT("kill ool port array %p..%#lx; ", desc->ool_ports.address,
(Addr)desc->ool_ports.address + size);
- ML_(notify_aspacem_and_tool_of_munmap)(start, end - start);
+ ML_(notify_core_and_tool_of_munmap)(start, end - start);
}
break;
default:
@@ -4529,9 +4568,9 @@
PRINT("allocated at %#x", reply->address);
// requesting 0 bytes returns address 0 with no error
if (MACH_ARG(vm_allocate.size)) {
- ML_(notify_aspacem_and_tool_of_mmap)
- (reply->address, MACH_ARG(vm_allocate.size),
- VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_ANON, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ reply->address, MACH_ARG(vm_allocate.size),
+ VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_ANON, -1, 0);
}
} else {
PRINT("allocated at %#x in remote task %s", reply->address,
@@ -4590,7 +4629,7 @@
Addr end = VG_PGROUNDUP(MACH_ARG(vm_deallocate.address) +
MACH_ARG(vm_deallocate.size));
// Must have cleared SfMayBlock in PRE to prevent race
- ML_(notify_aspacem_and_tool_of_munmap)(start, end - start);
+ ML_(notify_core_and_tool_of_munmap)(start, end - start);
}
}
} else {
@@ -4650,7 +4689,7 @@
VG_(printf)("UNKNOWN vm_protect set maximum");
//VG_(mprotect_max_range)(start, end-start, prot);
} else {
- ML_(notify_aspacem_and_tool_of_mprotect)(start, end-start, prot);
+ ML_(notify_core_and_tool_of_mprotect)(start, end-start, prot);
}
}
} else {
@@ -4954,9 +4993,9 @@
// GrP fixme check src and dest tasks
PRINT("mapped at %#x", reply->address);
// GrP fixme max prot
- ML_(notify_aspacem_and_tool_of_mmap)
- (reply->address, VG_PGROUNDUP(MACH_ARG(vm_map.size)),
- MACH_ARG(vm_map.protection), VKI_MAP_SHARED, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ reply->address, VG_PGROUNDUP(MACH_ARG(vm_map.size)),
+ MACH_ARG(vm_map.protection), VKI_MAP_SHARED, -1, 0);
// GrP fixme VKI_MAP_PRIVATE if !copy?
} else {
PRINT("mig return %d", reply->RetCode);
@@ -5031,9 +5070,9 @@
UInt prot = reply->cur_protection & reply->max_protection;
// GrP fixme max prot
PRINT("mapped at %#x", reply->target_address);
- ML_(notify_aspacem_and_tool_of_mmap)
- (reply->target_address, VG_PGROUNDUP(MACH_ARG(vm_remap.size)),
- prot, VKI_MAP_SHARED, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ reply->target_address, VG_PGROUNDUP(MACH_ARG(vm_remap.size)),
+ prot, VKI_MAP_SHARED, -1, 0);
// GrP fixme VKI_MAP_FIXED if !copy?
// GrP fixme copy initialized bits from source to dest if source_task is also mach_task_self
} else {
@@ -5217,9 +5256,9 @@
PRINT("allocated at 0x%llx", reply->address);
// requesting 0 bytes returns address 0 with no error
if (MACH_ARG(mach_vm_allocate.size)) {
- ML_(notify_aspacem_and_tool_of_mmap)
- (reply->address, MACH_ARG(mach_vm_allocate.size),
- VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_ANON, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ reply->address, MACH_ARG(mach_vm_allocate.size),
+ VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_ANON, -1, 0);
}
} else {
PRINT("allocated at 0x%llx in remote task %s", reply->address,
@@ -5278,7 +5317,7 @@
Addr end = VG_PGROUNDUP(MACH_ARG(mach_vm_deallocate.address) +
MACH_ARG(mach_vm_deallocate.size));
// Must have cleared SfMayBlock in PRE to prevent race
- ML_(notify_aspacem_and_tool_of_munmap)(start, end - start);
+ ML_(notify_core_and_tool_of_munmap)(start, end - start);
}
}
} else {
@@ -5337,7 +5376,7 @@
// DDD: #warning GrP fixme mprotect max
//VG_(mprotect_max_range)(start, end-start, prot);
} else {
- ML_(notify_aspacem_and_tool_of_mprotect)(start, end-start, prot);
+ ML_(notify_core_and_tool_of_mprotect)(start, end-start, prot);
}
}
} else {
@@ -5495,9 +5534,9 @@
// GrP fixme check src and dest tasks
PRINT("mapped at 0x%llx", reply->address);
// GrP fixme max prot
- ML_(notify_aspacem_and_tool_of_mmap)
- (reply->address, VG_PGROUNDUP(MACH_ARG(mach_vm_map.size)),
- MACH_ARG(mach_vm_map.protection), VKI_MAP_SHARED, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ reply->address, VG_PGROUNDUP(MACH_ARG(mach_vm_map.size)),
+ MACH_ARG(mach_vm_map.protection), VKI_MAP_SHARED, -1, 0);
// GrP fixme VKI_MAP_PRIVATE if !copy?
} else {
PRINT("mig return %d", reply->RetCode);
@@ -5740,7 +5779,7 @@
// GrP fixme errors?
if (ARG4) semaphore_signal((semaphore_t)ARG4);
if (ARG1 && ARG2) {
- ML_(notify_aspacem_and_tool_of_munmap)(ARG1, ARG2);
+ ML_(notify_core_and_tool_of_munmap)(ARG1, ARG2);
vm_deallocate(mach_task_self(), (vm_address_t)ARG1, (vm_size_t)ARG2);
}
@@ -5980,7 +6019,7 @@
// PRINT("UNHANDLED reply %d", mh->msgh_id);
// Assume the call may have mapped or unmapped memory
- VG_(sync_mappings)("after", "mach_msg_receive", 0);
+ sync_mappings("after", "mach_msg_receive", 0);
}
PRE(mach_msg_receive)
@@ -6391,7 +6430,7 @@
POST(mach_msg_unhandled)
{
- VG_(sync_mappings)("after", "mach_msg_unhandled", 0);
+ sync_mappings("after", "mach_msg_unhandled", 0);
}
@@ -6690,7 +6729,7 @@
POST(iokit_user_client_trap)
{
- VG_(sync_mappings)("after", "iokit_user_client_trap", ARG2);
+ sync_mappings("after", "iokit_user_client_trap", ARG2);
}
Modified: branches/DARWIN/coregrind/m_syswrap/syswrap-generic.c
===================================================================
--- branches/DARWIN/coregrind/m_syswrap/syswrap-generic.c 2009-05-10 20:17:27 UTC (rev 9827)
+++ branches/DARWIN/coregrind/m_syswrap/syswrap-generic.c 2009-05-10 22:14:40 UTC (rev 9828)
@@ -59,15 +59,6 @@
#include "priv_syswrap-generic.h"
-/* Local function declarations. */
-
-static
-void notify_aspacem_of_mmap(Addr a, SizeT len, UInt prot,
- UInt flags, Int fd, Off64T offset);
-static
-void notify_tool_of_mmap(Addr a, SizeT len, UInt prot, ULong di_handle);
-
-
/* Returns True iff address range is something the client can
plausibly mess with: all of it is either already belongs to the
client or is free or a reservation. */
@@ -125,11 +116,6 @@
Doing mmap, mremap
------------------------------------------------------------------ */
-// Nb: this isn't done as precisely as possible, but it seems that programs
-// are usually sufficiently well-behaved that the more obscure corner cases
-// aren't important. Various comments in the few functions below give more
-// details... njn 2002-Sep-17
-
/* AFAICT from kernel sources (mm/mprotect.c) and general experimentation,
munmap, mprotect (and mremap??) work at the page level. So addresses
and lengths must be adjusted for this. */
@@ -148,35 +134,9 @@
*a = ra;
}
-/* When a client mmap has been successfully done, this function must
- be called. It notifies both aspacem and the tool of the new
- mapping.
-
- JRS 2008-Aug-14: But notice this is *very* obscure. The only place
- it is called from is POST(sys_io_setup). In particular,
- ML_(generic_PRE_sys_mmap), further down in this file, is the
- "normal case" handler for client mmap. But it doesn't call this
- function; instead it does the relevant notifications itself. Here,
- we just pass di_handle=0 to notify_tool_of_mmap as we have no
- better information. But really this function should be done away
- with; problem is I don't understand what POST(sys_io_setup) does or
- how it works.
-
- [This function is also used lots for Darwin, because
- ML_(generic_PRE_sys_mmap) cannot be used for Darwin.]
- */
-void
-ML_(notify_aspacem_and_tool_of_mmap) ( Addr a, SizeT len, UInt prot,
- UInt flags, Int fd, Off64T offset )
+static void notify_core_of_mmap(Addr a, SizeT len, UInt prot,
+ UInt flags, Int fd, Off64T offset)
{
- notify_aspacem_of_mmap(a, len, prot, flags, fd, offset);
- notify_tool_of_mmap(a, len, prot, 0/*di_handle*/);
-}
-
-static
-void notify_aspacem_of_mmap(Addr a, SizeT len, UInt prot,
- UInt flags, Int fd, Off64T offset)
-{
Bool d;
/* 'a' is the return value from a real kernel mmap, hence: */
@@ -188,11 +148,10 @@
if (d)
VG_(discard_translations)( (Addr64)a, (ULong)len,
- "ML_(notify_aspacem_of_mmap)" );
+ "notify_core_of_mmap" );
}
-static
-void notify_tool_of_mmap(Addr a, SizeT len, UInt prot, ULong di_handle)
+static void notify_tool_of_mmap(Addr a, SizeT len, UInt prot, ULong di_handle)
{
Bool rr, ww, xx;
@@ -208,9 +167,34 @@
VG_TRACK( new_mem_mmap, a, len, rr, ww, xx, di_handle );
}
+
+/* When a client mmap has been successfully done, this function must
+ be called. It notifies both aspacem and the tool of the new
+ mapping.
+
+ JRS 2008-Aug-14: But notice this is *very* obscure. The only place
+ it is called from is POST(sys_io_setup). In particular,
+ ML_(generic_PRE_sys_mmap), in m_syswrap, is the "normal case" handler for
+ client mmap. But it doesn't call this function; instead it does the
+ relevant notifications itself. Here, we just pass di_handle=0 to
+ notify_tool_of_mmap as we have no better information. But really this
+ function should be done away with; problem is I don't understand what
+ POST(sys_io_setup) does or how it works.
+
+ [However, this function is used lots for Darwin, because
+ ML_(generic_PRE_sys_mmap) cannot be used for Darwin.]
+ */
void
-ML_(notify_aspacem_and_tool_of_munmap) ( Addr a, SizeT len )
+ML_(notify_core_and_tool_of_mmap) ( Addr a, SizeT len, UInt prot,
+ UInt flags, Int fd, Off64T offset )
{
+ notify_core_of_mmap(a, len, prot, flags, fd, offset);
+ notify_tool_of_mmap(a, len, prot, 0/*di_handle*/);
+}
+
+void
+ML_(notify_core_and_tool_of_munmap) ( Addr a, SizeT len )
+{
Bool d;
page_align_addr_and_len(&a, &len);
@@ -219,11 +203,11 @@
VG_(di_notify_munmap)( a, len );
if (d)
VG_(discard_translations)( (Addr64)a, (ULong)len,
- "ML_(notify_aspacem_and_tool_of_munmap)" );
+ "ML_(notify_core_and_tool_of_munmap)" );
}
void
-ML_(notify_aspacem_and_tool_of_mprotect) ( Addr a, SizeT len, Int prot )
+ML_(notify_core_and_tool_of_mprotect) ( Addr a, SizeT len, Int prot )
{
Bool rr = toBool(prot & VKI_PROT_READ);
Bool ww = toBool(prot & VKI_PROT_WRITE);
@@ -236,10 +220,11 @@
VG_(di_notify_mprotect)( a, len, prot );
if (d)
VG_(discard_translations)( (Addr64)a, (ULong)len,
- "ML_(notify_aspacem_and_tool_of_mprotect)" );
+ "ML_(notify_core_and_tool_of_mprotect)" );
}
+
#if HAVE_MREMAP
/* Expand (or shrink) an existing mapping, potentially moving it at
the same time (controlled by the MREMAP_MAYMOVE flag). Nightmare.
@@ -2027,7 +2012,7 @@
if (!sr_isError(sres)) {
ULong di_handle;
/* Notify aspacem. */
- notify_aspacem_of_mmap(
+ notify_core_of_mmap(
(Addr)sr_Res(sres), /* addr kernel actually assigned */
arg2, /* length */
arg3, /* prot */
@@ -3475,7 +3460,7 @@
SizeT len = ARG2;
Int prot = ARG3;
- ML_(notify_aspacem_and_tool_of_mprotect)(a, len, prot);
+ ML_(notify_core_and_tool_of_mprotect)(a, len, prot);
}
PRE(sys_munmap)
@@ -3493,7 +3478,7 @@
Addr a = ARG1;
SizeT len = ARG2;
- ML_(notify_aspacem_and_tool_of_munmap)( (Addr64)a, (ULong)len );
+ ML_(notify_core_and_tool_of_munmap)( (Addr64)a, (ULong)len );
}
PRE(sys_mincore)
Modified: branches/DARWIN/coregrind/m_syswrap/syswrap-linux.c
===================================================================
--- branches/DARWIN/coregrind/m_syswrap/syswrap-linux.c 2009-05-10 20:17:27 UTC (rev 9827)
+++ branches/DARWIN/coregrind/m_syswrap/syswrap-linux.c 2009-05-10 22:14:40 UTC (rev 9828)
@@ -1273,9 +1273,9 @@
r = *(struct vki_aio_ring **)ARG2;
vg_assert(ML_(valid_client_addr)((Addr)r, size, tid, "io_setup"));
- ML_(notify_aspacem_and_tool_of_mmap)( (Addr)r, size,
- VKI_PROT_READ | VKI_PROT_WRITE,
- VKI_MAP_ANONYMOUS, -1, 0 );
+ ML_(notify_core_and_tool_of_mmap)( (Addr)r, size,
+ VKI_PROT_READ | VKI_PROT_WRITE,
+ VKI_MAP_ANONYMOUS, -1, 0 );
POST_MEM_WRITE( ARG2, sizeof(vki_aio_context_t) );
}
Modified: branches/DARWIN/coregrind/m_syswrap/syswrap-x86-darwin.c
===================================================================
--- branches/DARWIN/coregrind/m_syswrap/syswrap-x86-darwin.c 2009-05-10 20:17:27 UTC (rev 9827)
+++ branches/DARWIN/coregrind/m_syswrap/syswrap-x86-darwin.c 2009-05-10 22:14:40 UTC (rev 9828)
@@ -315,17 +315,17 @@
tst->client_stack_szB = stacksize;
// pthread structure
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack+stacksize, pthread_structsize,
- VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack+stacksize, pthread_structsize,
+ VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
// stack contents
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack, stacksize,
- VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack, stacksize,
+ VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
// guard page
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
- 0, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
+ 0, VKI_MAP_PRIVATE, -1, 0);
} else {
// client allocated stack
find_stack_segment(tst->tid, sp);
@@ -462,19 +462,19 @@
// GrP fixme scheduler lock?!
// pthread structure
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack+stacksize, pthread_structsize,
- VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack+stacksize, pthread_structsize,
+ VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
// stack contents
// GrP fixme uninitialized!
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack, stacksize,
- VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack, stacksize,
+ VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
// guard page
// GrP fixme ban_mem_stack!
- ML_(notify_aspacem_and_tool_of_mmap)
- (stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
- 0, VKI_MAP_PRIVATE, -1, 0);
+ ML_(notify_core_and_tool_of_mmap)(
+ stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
+ 0, VKI_MAP_PRIVATE, -1, 0);
VG_(am_do_sync_check)("after", "wqthread_hijack", 0);
Modified: branches/DARWIN/coregrind/pub_core_aspacemgr.h
===================================================================
--- branches/DARWIN/coregrind/pub_core_aspacemgr.h 2009-05-10 20:17:27 UTC (rev 9827)
+++ branches/DARWIN/coregrind/pub_core_aspacemgr.h 2009-05-10 22:14:40 UTC (rev 9828)
@@ -406,7 +406,23 @@
extern Int VG_(am_get_VgStack_unused_szB)( VgStack* stack );
+// DDD: this is ugly
+#if defined(VGO_darwin)
+typedef
+ struct {
+ Bool is_added; // Added or removed seg?
+ Addr start;
+ SizeT end;
+ UInt prot; // Not used for removed segs.
+ Off64T offset; // Not used for removed segs.
+ }
+ ChangedSeg;
+extern void VG_(get_changed_segments)(
+ const HChar* when, const HChar* where, /*OUT*/ChangedSeg* css,
+ Int css_size, /*OUT*/Int* css_used);
+#endif
+
#endif // __PUB_CORE_ASPACEMGR_H
/*--------------------------------------------------------------------*/
|