You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
|
|
1
(12) |
2
(11) |
3
(8) |
|
4
(9) |
5
(10) |
6
(18) |
7
(8) |
8
(12) |
9
(23) |
10
(14) |
|
11
(15) |
12
(31) |
13
(45) |
14
(28) |
15
(20) |
16
(16) |
17
(9) |
|
18
(18) |
19
(26) |
20
(49) |
21
(14) |
22
(18) |
23
(24) |
24
(28) |
|
25
(39) |
26
(17) |
27
(27) |
28
(27) |
29
(14) |
30
(44) |
|
|
From: <sv...@va...> - 2005-09-10 16:02:07
|
Author: sewardj
Date: 2005-09-10 17:02:03 +0100 (Sat, 10 Sep 2005)
New Revision: 4615
Log:
Many changes:
- add infrastructure to deal with client/V mmap requests
- fix bugs in the VG_(aspacem_getAdvisory)
- minor jiggling of m_mallocfree to connect to new aspacem
- at startup, first start m_aspacemgr, then m_mallocfree
Now runs as far as getting dynamic memory management running.
Modified:
branches/ASPACEM/coregrind/m_aspacemgr/aspacemgr.c
branches/ASPACEM/coregrind/m_main.c
branches/ASPACEM/coregrind/m_mallocfree.c
branches/ASPACEM/coregrind/pub_core_aspacemgr.h
branches/ASPACEM/coregrind/pub_core_mallocfree.h
Modified: branches/ASPACEM/coregrind/m_aspacemgr/aspacemgr.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/ASPACEM/coregrind/m_aspacemgr/aspacemgr.c 2005-09-09 12:03:2=
8 UTC (rev 4614)
+++ branches/ASPACEM/coregrind/m_aspacemgr/aspacemgr.c 2005-09-10 16:02:0=
3 UTC (rev 4615)
@@ -43,7 +43,9 @@
#include "pub_core_transtab.h" // For VG_(discard_translations)
#include "vki_unistd.h"
=20
+static void aspacem_barf ( HChar* what );
=20
+
/* Define to debug the memory-leak-detector. */
/* #define VG_DEBUG_LEAKCHECK */
=20
@@ -81,6 +83,7 @@
UInt fd, OffT offset)
{
SysRes res;
+aspacem_barf("mmap_native");
#if defined(VGP_x86_linux)
{=20
UWord args[6];
@@ -106,11 +109,13 @@
=20
SysRes VG_(munmap_native)(void *start, SizeT length)
{
+aspacem_barf("munmap_native");
return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
}
=20
SysRes VG_(mprotect_native)( void *start, SizeT length, UInt prot )
{
+aspacem_barf("mprotect_native");
return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
}
=20
@@ -569,6 +574,7 @@
{
Addr se =3D s->addr+s->len;
Addr pe =3D p+len;
+aspacem_barf("seg_contains");
vg_assert(pe >=3D p);
=20
return (p >=3D s->addr && pe <=3D se);
@@ -578,6 +584,7 @@
{
Addr se =3D s->addr+s->len;
Addr pe =3D p+len;
+aspacem_barf("seg_overlaps");
vg_assert(pe >=3D p);
=20
return (p < se && pe > s->addr);
@@ -616,6 +623,7 @@
Addr end, s_end;
Int i;
Bool deleted;
+aspacem_barf("unmap_range");
=20
if (len =3D=3D 0)
return;
@@ -734,6 +742,7 @@
HChar* stage2_suffix1 =3D "lib/valgrind/stage2";
HChar* stage2_suffix2 =3D "coregrind/stage2";
Bool is_stage2 =3D False;
+aspacem_barf("map_file_segment");
=20
is_stage2 =3D is_stage2 || ( VG_(strstr)(filename, stage2_suffix1) !=3D=
NULL );
is_stage2 =3D is_stage2 || ( VG_(strstr)(filename, stage2_suffix2) !=3D=
NULL );
@@ -845,6 +854,7 @@
{
Char buf[VKI_PATH_MAX];
struct vki_stat st;
+aspacem_barf("map_fd_segment");
=20
st.st_dev =3D 0;
st.st_ino =3D 0;
@@ -866,6 +876,7 @@
=20
void VG_(map_segment)(Addr addr, SizeT len, UInt prot, UInt flags)
{
+aspacem_barf("map_segment");
flags &=3D ~SF_FILE;
=20
VG_(map_file_segment)(addr, len, prot, flags, 0, 0, 0, 0);
@@ -876,6 +887,7 @@
{
Int r;
const Bool debug =3D False || mem_debug;
+aspacem_barf("mprotect_range");
=20
if (debug)
VG_(printf)("\nmprotect_range(%p, %lu, %x)\n", a, len, prot);
@@ -907,6 +919,8 @@
Addr VG_(find_map_space)(Addr addr, SizeT len, Bool for_client)
{
const Bool debug =3D False || mem_debug;
+aspacem_barf("find_map_space");
+
Addr ret;
Addr addrOrig =3D addr;
Addr limit =3D (for_client ? VG_(client_end)-1 : VG_(valgrind_last)=
);
@@ -1047,6 +1061,7 @@
{
Addr addr =3D (start =3D=3D 0) ? VG_(client_base) : start;
SysRes ret;
+aspacem_barf("pad_address_space");
=20
Int i =3D 0;
Segment* s =3D i >=3D segments_used ? NULL : &segments[i];
@@ -1080,6 +1095,7 @@
=20
Int i =3D 0;
Segment* s =3D i >=3D segments_used ? NULL : &segments[i];
+aspacem_barf("unpad_address_space");
=20
while (s && addr <=3D VG_(valgrind_last)) {
if (addr < s->addr) {
@@ -1101,6 +1117,8 @@
Segment *VG_(find_segment)(Addr a)
{
Int r =3D find_segment(a);
+aspacem_barf("find_segment");
+
if (0) show_segments("find_segment");
if (r =3D=3D -1) return NULL;
return &segments[r];
@@ -1113,6 +1131,7 @@
Segment *VG_(find_segment_above_unmapped)(Addr a)
{
Int r =3D find_segment_above_unmapped(a);
+aspacem_barf("find_segment_above_unmapped");
if (0) show_segments("find_segment_above_unmapped");
if (r =3D=3D -1) return NULL;
return &segments[r];
@@ -1125,6 +1144,7 @@
Segment *VG_(find_segment_above_mapped)(Addr a)
{
Int r =3D find_segment_above_mapped(a);
+aspacem_barf("find_segment_above_mapped");
if (0) show_segments("find_segment_above_mapped");
if (r =3D=3D -1) return NULL;
return &segments[r];
@@ -1142,6 +1162,7 @@
Bool VG_(is_addressable)(Addr p, SizeT size, UInt prot)
{
Segment *seg;
+aspacem_barf("is_addressable");
=20
if ((p + size) < p)
return False; /* reject wraparounds */
@@ -1178,6 +1199,7 @@
Int i;
UInt flags;
Segment *s;
+aspacem_barf("find_root_memory");
=20
for (i =3D 0; i < segments_used; i++) {
s =3D &segments[i];
@@ -1202,11 +1224,13 @@
=20
Bool VG_(is_client_addr)(Addr a)
{
+aspacem_barf("is_client_addr");
return a >=3D VG_(client_base) && a < VG_(client_end);
}
=20
Bool VG_(is_shadow_addr)(Addr a)
{
+aspacem_barf("is_shadow_addr");
return a >=3D VG_(shadow_base) && a < VG_(shadow_end);
}
=20
@@ -1220,6 +1244,7 @@
static Addr shadow_alloc =3D 0;
Addr try_here;
SysRes r;
+aspacem_barf("shadow_alloc");
=20
if (0) show_segments("shadow_alloc(before)");
=20
@@ -1277,6 +1302,7 @@
=20
Bool VG_(setup_pointercheck)(Addr client_base, Addr client_end)
{
+aspacem_barf("setup_pointercheck");
vg_assert(0 !=3D client_end);
#if defined(VGP_x86_linux)
/* Client address space segment limit descriptor entry */
@@ -1317,6 +1343,8 @@
=20
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
=20
static void add_to_aspacem_sprintf_buf ( HChar c, void *p )
{
@@ -1354,6 +1382,64 @@
=20
/////////////////////////////////////////////////////////////////
=20
+static
+SysRes do_mmap_NATIVE( Addr start, SizeT length, UInt prot, UInt flags,
+ UInt fd, OffT offset)
+{
+ SysRes res;
+# if defined(VGP_x86_linux)
+ {=20
+ UWord args[6];
+ args[0] =3D (UWord)start;
+ args[1] =3D length;
+ args[2] =3D prot;
+ args[3] =3D flags;
+ args[4] =3D fd;
+ args[5] =3D offset;
+ res =3D VG_(do_syscall1)(__NR_mmap, (UWord)args );
+ }
+# elif defined(VGP_amd64_linux)
+ res =3D VG_(do_syscall6)(__NR_mmap, (UWord)start, length,=20
+ prot, flags, fd, offset);
+# elif defined(VGP_ppc32_linux)
+ res =3D VG_(do_syscall6)(__NR_mmap, (UWord)(start), (length),
+ prot, flags, fd, offset);
+# else
+# error Unknown platform
+# endif
+ return res;
+}
+
+static
+SysRes do_munmap_NATIVE(Addr start, SizeT length)
+{
+ return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
+}
+
+
+
+static=20
+Bool get_inode_for_fd ( Int fd, /*OUT*/UInt* dev, /*OUT*/UInt* ino )
+{
+ return False;
+}
+
+static
+Bool get_name_for_fd ( Int fd, /*OUT*/HChar* buf, Int nbuf )
+{
+ return False;
+}
+
+
+/////////////////////////////////////////////////////////////////
+
+static void aspacem_barf ( HChar* what )
+{
+ VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
+ VG_(debugLog)(0, "aspacem", "Exiting now.\n");
+ VG_(exit)(1);
+}
+
static void aspacem_barf_toolow ( HChar* what )
{
VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s is too low.\n", what)=
;
@@ -1477,8 +1563,18 @@
static Int nsegments_used =3D 0;
=20
=20
+ULong VG_(aspacem_get_anonsize_total)( void )
+{
+ Int i;
+ ULong total =3D 0;
+ for (i =3D 0; i < nsegments_used; i++) {
+ if (nsegments[i].kind !=3D SkAnon)
+ continue;
+ total +=3D (ULong)nsegments[i].end - (ULong)nsegments[i].start + 1UL=
L;
+ }
+ return total;
+}
=20
-
/* check the interval array */
static void check_nsegments ( void )
{
@@ -1491,7 +1587,31 @@
}
=20
=20
+/* Binary search the interval array for a given address. Since the
+ array covers the entire address space the search cannot fail. */
+static Int find_nsegment_idx ( Addr a )
+{
+ Addr a_mid_lo, a_mid_hi;
+ Int mid,
+ lo =3D 0,
+ hi =3D nsegments_used-1;
+ while (True) {
+ /* current unsearched space is from lo to hi, inclusive. */
+ if (lo > hi) {
+ /* Not found. This can't happen. */
+ aspacem_barf("find_nsegment_idx: not found");
+ }
+ mid =3D (lo + hi) / 2;
+ a_mid_lo =3D nsegments[mid].start;
+ a_mid_hi =3D nsegments[mid].end;
=20
+ if (a < a_mid_lo) { hi =3D mid-1; continue; }
+ if (a > a_mid_hi) { lo =3D mid+1; continue; }
+ aspacem_assert(a >=3D a_mid_lo && a <=3D a_mid_hi);
+ aspacem_assert(0 <=3D mid && mid < nsegments_used);
+ return mid;
+ }
+}
=20
=20
=20
@@ -1513,81 +1633,85 @@
Bool VG_(aspacem_getAdvisory)
( MapRequest* req, Bool forClient, /*OUT*/Addr* result )
{
- return False;
-#if 0
+
/* Iterate over all holes in the address space, twice. In the first
-pass, find the first hole which is not below the search start
-point. */
+ pass, find the first hole which is not below the search start
+ point. */
Addr holeStart, holeEnd, holeLen;
Int i, j;
+ Bool fixed_not_required;
=20
- Addr minAddr =3D Addr_MIN;
- Addr maxAddr =3D Addr_MAX;
Addr startPoint =3D forClient ? aspacem_cStart : aspacem_vStart;
=20
Addr reqStart =3D req->rkind=3D=3DMAny ? 0 : req->start;
Addr reqEnd =3D reqStart + req->len - 1;
Addr reqLen =3D req->len;
=20
- Addr floatStart =3D 0;
- Bool floatFound =3D False;
+ /* These hold indices for segments found during search, or -1 if not
+ found. */
+ Int floatIdx =3D -1;
+ Int fixedIdx =3D -1;
=20
/* Don't waste time looking for a fixed match if not requested to. */
- Bool fixedFound =3D req->rkind=3D=3DMAny ? True : False;
+ aspacem_assert(nsegments_used > 0);
+ fixed_not_required =3D req->rkind =3D=3D MAny;
=20
- for (i =3D 0; i <=3D/*yes,really*/ nsegments_used; i++) {
- holeEnd =3D i=3D=3Dnsegments_used
- ? maxAddr
- : nsegments[i].start - 1;
- if (holeEnd >=3D startPoint)
- break;
- }
+ i =3D find_nsegment_idx(startPoint);
=20
+ if (0) VG_(debugLog)(0,"","startPoint %p, idx %d, reqlen %d\n",=20
+ startPoint,i,reqLen);
+
/* Now examine holes from index i back round to i-1. Record the
- size and length of the first fixed hole and the first floating
- hole which would satisfy the request. */
+ index first fixed hole and the first floating hole which would
+ satisfy the request. */
for (j =3D 0; j < nsegments_used; j++) {
=20
- holeStart =3D i=3D=3D0=20
- ? minAddr=20
- : nsegments[i-1].start + nsegments[i-1].len - 1;
- holeEnd =3D i=3D=3Dnsegments_used
- ? maxAddr
- : nsegments[i].start - 1;
+ if (nsegments[i].kind !=3D SkFree) {
+ i++;
+ if (i >=3D nsegments_used) i =3D 0;
+ continue;
+ }
=20
- /* Clamp the hole to something plausible */
- if (holeStart < aspacem_minAddr) holeStart =3D aspacem_minAddr;
- if (holeEnd > aspacem_maxAddr) holeEnd =3D aspacem_maxAddr;
+ holeStart =3D nsegments[i].start;
+ holeEnd =3D nsegments[i].end;
=20
- /* If it still looks viable, see if it's any use to us. */
- if (holeStart < holeEnd) {
+ /* Stay sane .. */
+ aspacem_assert(holeStart <=3D holeEnd);
+ aspacem_assert(aspacem_minAddr <=3D holeStart);
+ aspacem_assert(holeEnd <=3D aspacem_maxAddr);
=20
- holeLen =3D holeEnd - holeStart + 1;
+ /* See if it's any use to us. */
+ holeLen =3D holeEnd - holeStart + 1;
=20
- if (!fixedFound=20
+ if (fixedIdx =3D=3D -1=20
&& holeStart <=3D reqStart && reqEnd <=3D holeEnd) {
- fixedFound =3D True;
+ fixedIdx =3D i;
}
=20
- if (!floatFound
+ if (floatIdx =3D=3D -1
&& holeLen >=3D reqLen) {
- floatFound =3D True;
- floatStart =3D holeStart;
+ floatIdx =3D i;
}
- }
-
+ =20
/* Don't waste time searching once we've found what we wanted. */
- if (fixedFound && floatFound)
+ if ((fixed_not_required || fixedIdx >=3D 0) && floatIdx >=3D 0)
break;
=20
i++;
if (i >=3D nsegments_used) i =3D 0;
}
=20
+aspacem_assert(fixedIdx >=3D -1 && fixedIdx < nsegments_used);
+aspacem_assert(floatIdx >=3D -1 && floatIdx < nsegments_used);
+if (fixedIdx >=3D 0)=20
+aspacem_assert(nsegments[fixedIdx].kind =3D=3D SkFree);
+if (floatIdx >=3D 0)=20
+aspacem_assert(nsegments[floatIdx].kind =3D=3D SkFree);
+
/* Now see if we found anything which can satisfy the request. */
switch (req->rkind) {
case MFixed:
- if (fixedFound) {
+ if (fixedIdx >=3D 0) {
*result =3D req->start;
return True;
} else {
@@ -1595,18 +1719,18 @@
}
break;
case MHint:
- if (fixedFound) {
+ if (fixedIdx >=3D 0) {
*result =3D req->start;
return True;
}
- if (floatFound) {
- *result =3D floatStart;
+ if (floatIdx >=3D 0) {
+ *result =3D nsegments[floatIdx].start;
return True;
}
return False;
case MAny:
- if (floatFound) {
- *result =3D floatStart;
+ if (floatIdx >=3D 0) {
+ *result =3D nsegments[floatIdx].start;
return True;
}
return False;
@@ -1614,9 +1738,8 @@
default: break;
}
/*NOTREACHED*/
- aspacemgr_barf("getAdvisory: unknown request kind");
+ aspacem_barf("getAdvisory: unknown request kind");
return False;
-#endif
}
=20
=20
@@ -1642,6 +1765,8 @@
static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
{
aspacem_assert(start < end);
+ aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
init_nsegment(seg);
seg->kind =3D SkResvn;
seg->start =3D start;
@@ -1652,9 +1777,9 @@
{
switch (seg->kind) {
case SkFree: return "FREE";
- case SkAnon: return seg->isClient ? "ANON" : "anon";
- case SkFile: return seg->isClient ? "FILE" : "file";
- case SkResvn: return seg->isClient ? "RSVN" : "rsvn";
+ case SkAnon: return seg->isClient ? "anon" : "ANON";
+ case SkFile: return seg->isClient ? "file" : "FILE";
+ case SkResvn: return "RSVN";
default: return "????";
}
}
@@ -1787,6 +1912,8 @@
Addr dEnd =3D seg->end;
=20
aspacem_assert(dStart <=3D dEnd);
+ aspacem_assert(VG_IS_PAGE_ALIGNED(dStart));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(dEnd+1));
=20
nDeld =3D 0;
=20
@@ -1911,6 +2038,11 @@
{
NSegment seg;
=20
+ aspacem_assert(sizeof(Word) =3D=3D sizeof(void*));
+ aspacem_assert(sizeof(Addr) =3D=3D sizeof(void*));
+ aspacem_assert(sizeof(SizeT) =3D=3D sizeof(void*));
+ aspacem_assert(sizeof(SSizeT) =3D=3D sizeof(void*));
+
/* Add a single interval covering the entire address space. */
init_nsegment(&seg);
seg.kind =3D SkFree;
@@ -1933,6 +2065,11 @@
aspacem_cStart =3D (Addr)0x04000000; // 64M
aspacem_vStart =3D (aspacem_minAddr + aspacem_maxAddr + 1) / 2;
=20
+ aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
+
VG_(debugLog)(2, "aspacem", "minAddr =3D 0x%llx\n", (ULong)aspacem_mi=
nAddr);
VG_(debugLog)(2, "aspacem", "maxAddr =3D 0x%llx\n", (ULong)aspacem_ma=
xAddr);
VG_(debugLog)(2, "aspacem", " cStart =3D 0x%llx\n", (ULong)aspacem_cS=
tart);
@@ -1947,6 +2084,14 @@
add_segment(&seg);
}
=20
+ /* Create a 1-page reservation at the notional initial
+ client/valgrind boundary. This isn't strictly necessary, but
+ because the advisor does first-fit and starts searches for
+ valgrind allocations at the boundary, this is kind of necessary
+ in order to get it to start allocating in the right place. */
+ init_resvn(&seg, aspacem_vStart, aspacem_vStart + VKI_PAGE_SIZE - 1)=
;
+ add_segment(&seg);
+
show_nsegments(2, "Initial layout");
=20
VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
@@ -1956,6 +2101,180 @@
}
=20
=20
+SysRes VG_(mmap_file_fixed_client)
+ ( void* startV, SizeT length, Int prot, Int fd, SizeT offset )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+ UInt dev, ino;
+ HChar buf[VKI_PATH_MAX];
+=20
+ Addr start =3D (Addr)startV;=20
+=20
+ /* Not allowable. */
+ /* Not allowable. */
+ if (length =3D=3D 0 || !VG_IS_PAGE_ALIGNED(start))
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately. */
+ req.rkind =3D MFixed;
+ req.start =3D start;
+ req.len =3D length;
+ ok =3D VG_(aspacem_getAdvisory)( &req, True/*client*/, &advised );
+ if (!ok || advised !=3D start)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres =3D do_mmap_NATIVE( start, length, prot,=20
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE, fd, offset );
+ if (sres.isError)
+ return sres;
+
+ if (sres.val !=3D start) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)do_munmap_NATIVE( sres.val, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind =3D SkFile;
+ seg.isClient =3D True;
+ seg.start =3D start;
+ seg.end =3D seg.start + VG_PGROUNDUP(length) - 1;
+ seg.offset =3D offset;
+ seg.hasR =3D toBool(prot & VKI_PROT_READ);
+ seg.hasW =3D toBool(prot & VKI_PROT_WRITE);
+ seg.hasX =3D toBool(prot & VKI_PROT_EXEC);
+ if (get_inode_for_fd(fd, &dev, &ino)) {
+ seg.dev =3D dev;
+ seg.ino =3D ino;
+ }
+ if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
+ seg.fnIdx =3D allocate_segname( buf );
+ }
+ add_segment( &seg );
+
+ return sres;
+}
+
+
+SysRes VG_(mmap_anon_fixed_client)
+ ( void* startV, SizeT length, Int prot )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+=20
+ Addr start =3D (Addr)startV;=20
+=20
+ /* Not allowable. */
+ if (length =3D=3D 0 || !VG_IS_PAGE_ALIGNED(start))
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately. */
+ req.rkind =3D MFixed;
+ req.start =3D start;
+ req.len =3D length;
+ ok =3D VG_(aspacem_getAdvisory)( &req, True/*client*/, &advised );
+ if (!ok || advised !=3D start)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres =3D do_mmap_NATIVE( start, length, prot,=20
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE
+ |VKI_MAP_ANONYMOUS, 0, 0 );
+ if (sres.isError)
+ return sres;
+
+ if (sres.val !=3D start) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)do_munmap_NATIVE( sres.val, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind =3D SkAnon;
+ seg.isClient =3D True;
+ seg.start =3D start;
+ seg.end =3D seg.start + VG_PGROUNDUP(length) - 1;
+ seg.hasR =3D toBool(prot & VKI_PROT_READ);
+ seg.hasW =3D toBool(prot & VKI_PROT_WRITE);
+ seg.hasX =3D toBool(prot & VKI_PROT_EXEC);
+ add_segment( &seg );
+
+ return sres;
+}
+
+
+SysRes VG_(map_anon_float_valgrind)( SizeT length )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+=20
+ /* Not allowable. */
+ if (length =3D=3D 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately. */
+ req.rkind =3D MAny;
+ req.start =3D 0;
+ req.len =3D length;
+ ok =3D VG_(aspacem_getAdvisory)( &req, False/*valgrind*/, &advised );
+ if (!ok)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres =3D do_mmap_NATIVE( advised, length,=20
+ VKI_PROT_READ|VKI_PROT_WRITE
+ |VKI_PROT_EXEC,=20
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE
+ |VKI_MAP_ANONYMOUS, 0, 0 );
+ if (sres.isError)
+ return sres;
+
+ if (sres.val !=3D advised) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)do_munmap_NATIVE( sres.val, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind =3D SkAnon;
+ seg.isClient =3D False;
+ seg.start =3D advised;
+ seg.end =3D seg.start + VG_PGROUNDUP(length) - 1;
+ seg.hasR =3D True;
+ seg.hasW =3D True;
+ seg.hasX =3D True;
+ add_segment( &seg );
+
+ return sres;
+}
+
+
/*--------------------------------------------------------------------*/
/*--- end ---*/
/*--------------------------------------------------------------------*/
Modified: branches/ASPACEM/coregrind/m_main.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/ASPACEM/coregrind/m_main.c 2005-09-09 12:03:28 UTC (rev 4614=
)
+++ branches/ASPACEM/coregrind/m_main.c 2005-09-10 16:02:03 UTC (rev 4615=
)
@@ -2080,14 +2080,14 @@
/* ... and start the debug logger. Now we can safely emit logging
messages all through startup. */
VG_(debugLog_startup)(loglevel, "Stage 2 (main)");
+ VG_(debugLog)(1, "main", "Welcome to Valgrind version "=20
+ VERSION " debug logging.\n");
=20
//--------------------------------------------------------------
- // Start up the address space manager
+ // Ensure we're on a plausible stack.
// p: logging
//--------------------------------------------------------------
- VG_(debugLog)(1, "main", "Starting the address space manager\n");
-
- /* Ensure we're on a plausible stack. */
+ VG_(debugLog)(1, "main", "Checking we're on a plausible stack\n");
{ HChar* limLo =3D (HChar*)(&VG_(the_root_stack)[0]);
HChar* limHi =3D limLo + sizeof(VG_(the_root_stack));
HChar* aLocal =3D (HChar*)&zero; /* any auto local will do */
@@ -2101,8 +2101,27 @@
}
}
=20
+ //--------------------------------------------------------------
+ // Start up the address space manager
+ // p: logging, plausible-stack
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Starting the address space manager\n");
VG_(new_aspacem_start)();
+ VG_(debugLog)(1, "main", "Address space manager is running\n");
=20
+ //--------------------------------------------------------------
+ // Start up the dynamic memory manager
+ // p: address space management
+ // In fact m_mallocfree is self-initialising, so there's no
+ // initialisation call to do. Instead, try a simple malloc/
+ // free pair right now to check that nothing is broken.
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Starting the dynamic memory manager\n");
+ { void* p =3D VG_(malloc)( 12345 );
+ if (p) VG_(free)( p );
+ }
+ VG_(debugLog)(1, "main", "Dynamic memory manager is running\n");
+
//=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
// Command line argument handling order:
// * If --help/--help-debug are present, show usage message=20
@@ -2126,6 +2145,10 @@
//--------------------------------------------------------------
// Check we were launched by stage1
// p: none
+ // TODO: this is pretty pointless now. Plus, we shouldn't be
+ // screwing with our own auxv: instead, when our own auxv is
+ // used as the basis for the client's one, make modifications
+ // at that point.
//--------------------------------------------------------------
VG_(debugLog)(1, "main", "Doing scan_auxv()\n");
{
@@ -2177,8 +2200,8 @@
// Finalise address space layout
// p: load_tool() [probably?]
//--------------------------------------------------------------
- VG_(debugLog)(1, "main", "Laying out remaining space\n");
- layout_remaining_space( (Addr) & argc, VG_(tool_info).shadow_ratio );
+ //VG_(debugLog)(1, "main", "Laying out remaining space\n");
+ //layout_remaining_space( (Addr) & argc, VG_(tool_info).shadow_ratio =
);
=20
//--------------------------------------------------------------
// Load client executable, finding in $PATH if necessary
Modified: branches/ASPACEM/coregrind/m_mallocfree.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/ASPACEM/coregrind/m_mallocfree.c 2005-09-09 12:03:28 UTC (re=
v 4614)
+++ branches/ASPACEM/coregrind/m_mallocfree.c 2005-09-10 16:02:03 UTC (re=
v 4615)
@@ -30,7 +30,12 @@
*/
=20
#include "pub_core_basics.h"
+#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
+
+#include "pub_core_debuginfo.h" // Needed for pub_core_aspacemgr :(
+#include "pub_core_aspacemgr.h"
+
#include "pub_core_libcassert.h"
#include "pub_core_libcmman.h"
#include "pub_core_libcprint.h"
@@ -473,6 +478,36 @@
/*--- Superblock management ---*/
/*------------------------------------------------------------*/
=20
+static
+void VG_(out_of_memory_NORETURN) ( HChar* who, SizeT szB )
+{
+ static Bool alreadyCrashing =3D False;
+ ULong tot_alloc =3D VG_(aspacem_get_anonsize_total)();
+ if (!alreadyCrashing) {
+ alreadyCrashing =3D True;
+ VG_(printf)("\n"
+ "Valgrind's memory management: out of memory:\n");
+ VG_(printf)(" %s's request for %llu bytes failed.\n",=20
+ who, (ULong)szB );
+ VG_(printf)(" %llu bytes have already been allocated.\n",=20
+ tot_alloc);
+ VG_(printf)("Valgrind cannot continue. Sorry.\n\n");
+ } else {
+ VG_(debugLog)(0,"mallocfree","\n");
+ VG_(debugLog)(0,"mallocfree",
+ "Valgrind's memory management: out of memory:\n");
+ VG_(debugLog)(0,"mallocfree",
+ " %s's request for %llu bytes failed.\n",=20
+ who, (ULong)szB );
+ VG_(debugLog)(0,"mallocfree",
+ " %llu bytes have already been allocated.\n",=20
+ tot_alloc);
+ VG_(debugLog)(0,"mallocfree","Valgrind cannot continue. Sorry.\n\=
n");
+ }
+ VG_(exit)(1);
+}
+
+
// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
@@ -487,10 +522,8 @@
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
- // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
- static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZ=
B];
- static Bool called_before =3D True; //False;
Superblock* sb;
+ SysRes sres;
=20
// Take into account admin bytes in the Superblock.
cszB +=3D sizeof(Superblock);
@@ -498,32 +531,30 @@
if (cszB < a->min_sblock_szB) cszB =3D a->min_sblock_szB;
while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;
=20
- if (!called_before) {
- // First time we're called -- use the special static bootstrap
- // superblock (see comment at top of main() for details).
- called_before =3D True;
- vg_assert(a =3D=3D arenaId_to_ArenaP(VG_AR_CORE));
- vg_assert(CORE_ARENA_MIN_SZB >=3D cszB);
- // Ensure sb is suitably aligned.
- sb =3D (Superblock*)align_upwards( bootstrap_superblock,=20
- VG_MIN_MALLOC_SZB );
- } else if (a->clientmem) {
+ if (a->clientmem) {
// client allocation -- return 0 to client if it fails
sb =3D (Superblock*)VG_(get_memory_from_mmap_for_client)(cszB);
if (NULL =3D=3D sb)
return 0;
} else {
// non-client allocation -- aborts if it fails
- sb =3D VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
+ sres =3D VG_(map_anon_float_valgrind)( cszB );
+ if (sres.isError) {
+ VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
+ /* NOTREACHED */
+ sb =3D NULL; /* keep gcc happy */
+ } else {
+ sb =3D (Superblock*)sres.val;
+ }
}
vg_assert(NULL !=3D sb);
//zzVALGRIND_MAKE_WRITABLE(sb, cszB);
vg_assert(0 =3D=3D (Addr)sb % VG_MIN_MALLOC_SZB);
sb->n_payload_bytes =3D cszB - sizeof(Superblock);
a->bytes_mmaped +=3D cszB;
- if (0)
- VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",=20
- sb->n_payload_bytes);
+ if (1)
+ VG_(message)(Vg_DebugMsg, "newSuperblock at %p, %d payload bytes",=
=20
+ sb, sb->n_payload_bytes);
return sb;
}
=20
Modified: branches/ASPACEM/coregrind/pub_core_aspacemgr.h
===================================================================
--- branches/ASPACEM/coregrind/pub_core_aspacemgr.h 2005-09-09 12:03:28 UTC (rev 4614)
+++ branches/ASPACEM/coregrind/pub_core_aspacemgr.h 2005-09-10 16:02:03 UTC (rev 4615)
@@ -176,6 +176,20 @@
Bool VG_(aspacem_getAdvisory)
( MapRequest* req, Bool forClient, /*OUT*/Addr* result );

+extern
+SysRes VG_(mmap_file_fixed_client)
+ ( void* startV, SizeT length, Int prot, Int fd, SizeT offset );
+
+extern
+SysRes VG_(mmap_anon_fixed_client)
+ ( void* startV, SizeT length, Int prot );
+
+extern
+SysRes VG_(map_anon_float_valgrind)( SizeT cszB );
+
+extern ULong VG_(aspacem_get_anonsize_total)( void );
+
+
#endif // __PUB_CORE_ASPACEMGR_H

/*--------------------------------------------------------------------*/
Modified: branches/ASPACEM/coregrind/pub_core_mallocfree.h
===================================================================
--- branches/ASPACEM/coregrind/pub_core_mallocfree.h 2005-09-09 12:03:28 UTC (rev 4614)
+++ branches/ASPACEM/coregrind/pub_core_mallocfree.h 2005-09-10 16:02:03 UTC (rev 4615)
@@ -77,10 +77,6 @@
SizeT req_pszB );
extern Char* VG_(arena_strdup) ( ArenaId aid, const Char* s);

-/* Sets the size of the redzones at the start and end of heap blocks. This
- must be called before any of VG_(malloc) and friends are called. */
-extern void VG_(set_client_malloc_redzone_szB) ( SizeT rz_szB );
-
extern SizeT VG_(arena_payload_szB) ( ArenaId aid, void* payload );

extern void VG_(sanity_check_malloc_all) ( void );
|
|
From: <sv...@va...> - 2005-09-10 12:02:31
|
Author: cerion Date: 2005-09-10 13:02:24 +0100 (Sat, 10 Sep 2005) New Revision: 1385 Log: reinstated altivec insn disassembly framework - no more insns implemented, just easier to see what insn is needed when we hit an unhandled insn. Modified: trunk/priv/guest-ppc32/toIR.c [... diff too large to include ...] |
|
From: Julian S. <js...@ac...> - 2005-09-10 11:51:45
|
Ah. Thanks for finding that. I observed it was erroneously running
ppc32 tests on x86 but wasn't sure how to fix it.
J
On Saturday 10 September 2005 12:28, Jeroen N. Witmond wrote:
> Greetings,
>
> The introduction of test none/tests/ppc32/lsw in revision 4614 activated a
> small bug in tests/cputest.c: Directory ppc32 is not excluded when running
> on x86. The patch below fixes this problem.
>
> Jeroen.
>
> Index: tests/cputest.c
> ===================================================================
> --- tests/cputest.c (revision 4614)
> +++ tests/cputest.c (working copy)
> @@ -16,7 +16,7 @@
>
> char* all_archs[] = {
> "amd64",
> - "ppc",
> + "ppc32",
> "x86",
> NULL
> };
> @@ -34,7 +34,7 @@
> #ifdef __powerpc__
> static Bool go(char* cpu)
> {
> - if ( strcmp( cpu, "ppc" ) == 0 )
> + if ( strcmp( cpu, "ppc32" ) == 0 )
> return True;
> else
> return False;
>
>
>
>
> -------------------------------------------------------
> SF.Net email is Sponsored by the Better Software Conference & EXPO
> September 19-22, 2005 * San Francisco, CA * Development Lifecycle Practices
> Agile & Plan-Driven Development * Managing Projects & Teams * Testing & QA
> Security * Process Improvement & Measurement * http://www.sqe.com/bsce5sf
> _______________________________________________
> Valgrind-developers mailing list
> Val...@li...
> https://lists.sourceforge.net/lists/listinfo/valgrind-developers
|
|
From: Jeroen N. W. <jn...@xs...> - 2005-09-10 11:28:52
|
Greetings,
The introduction of test none/tests/ppc32/lsw in revision 4614 activated a
small bug in tests/cputest.c: Directory ppc32 is not excluded when running
on x86. The patch below fixes this problem.
Jeroen.
Index: tests/cputest.c
===================================================================
--- tests/cputest.c (revision 4614)
+++ tests/cputest.c (working copy)
@@ -16,7 +16,7 @@
char* all_archs[] = {
"amd64",
- "ppc",
+ "ppc32",
"x86",
NULL
};
@@ -34,7 +34,7 @@
#ifdef __powerpc__
static Bool go(char* cpu)
{
- if ( strcmp( cpu, "ppc" ) == 0 )
+ if ( strcmp( cpu, "ppc32" ) == 0 )
return True;
else
return False;
|
|
From: Julian S. <js...@ac...> - 2005-09-10 09:34:54
|
> > In the current Valgrind memory model, there's the client space and
> > Valgrind's space, and never the twain shall meet. To prevent rogue
> > mappings, it would be possible for Valgrind to keep all of Valgrind's
> > space mapped, and leave the unused parts with no permissions. This
> > requires a bit of code change, but should be straightforward.
>
> That's basically how we handle it at the moment - there is code to pad
> the address space which is invoked before the problem system calls
> to ensure that the mapping goes where we want it.
>
> Keeping large chunks of address space mapped is problematic however
> which is one reason for the rewrite of the address space manager.
Where I'm going (I think) is: the address space manager behaves as an
observer of what the kernel does, and tries to influence layout where
it can. In the end however it has to go along with what the kernel
does. It seems to be impossible to write a manager which is the sole
dictator of layout, since it can always be defeated by a sufficiently
uncooperative kernel. The best you can hope for is to veto (fail)
client fixed mmap requests in inconvenient places.
The new manager, like the current one, is based around maintaining a
list of segments describing what the current layout is. It uses this
list to generate advisory placements ("please put this new mapping at
address X if you can"). Unlike the current manager, the list explicitly
represents free areas as that makes it easier to iterate over them,
and so it should account for *every* address in (Addr)0 through
(Addr)(-1) inclusive.
The new manager also has the concept of a reservation segment. Such a
segment is not mapped, and so is similar to a free-space segment, but
with the difference that it will not attempt to allocate anything in
that space.
So I think this helps in 2 ways:
- It allows the kernel to do what the hell it likes, and will record
the outcome, provided there is a way to find out what happened.
- It allows you to create reservation sections, which I believe give
you "mapped to prevent client use but not used by Valgrind"
semantics. In fact what it gives you is "I will never hand this
out to anyone of my own accord, but I am prepared to let the kernel
do so" semantics. I think this is what you need?
Reservation segments also seem like a general mechanism for implementing
grow-down stacks. You create an initial stack mapping, and immediately
below that put a reservation section marked as having a shrinkable upper
end. This prevents aspacem from allocating new stuff in that area
(again, the kernel can do what it likes, but there's nothing we can
do about that). Same idea for implementing brk sections (the resvn
goes after the brk section in this case).
I think the real issue re spontaneous mappings is to have a
reliable way to know they have happened, and where the new mapping
is. On Linux that comes down to rescanning /proc/self/maps
after any event which might create such a mapping.
Anyway, that's the theory. Let me know ASAP if it is not what
Darwin needs. It's taken me most of this week to understand in detail
how the aspacemgr needs to be connected to the rest of system.
I hope to have something starting to work in the next couple of days.
J
------------
Here's an example of the segment list on x86-linux at startup, just
after reading /proc/self/maps to get the initial layout:
<<< SHOW_SEGMENTS: With contents of /proc/self/maps (0 segments, 1 segnames)
( 0) /home/sewardj/VgASPACEM/aspacem/Inst/lib/valgrind/memcheck
0: rsvn 0x00000000-0x03FFFFFF 64m ---- (Fixed,Fixed,0)
1: FREE 0x04000000-0xAFFFFFFF 2752m
2: file 0xB0000000-0xB00ECFFF 970752 r-x- d=0x802 i=363997 o=0
3: file 0xB00ED000-0xB00EDFFF 4096 rw-- d=0x802 i=363997 o=966656 (0)
4: anon 0xB00EE000-0xB08A9FFF 8110080 rwx- d=0x000 i=0 o=0 (-1)
5: FREE 0xB08AA000-0xBFFFEFFF 247m
6: anon 0xBFFFF000-0xBFFFFFFF 4096 rw-- d=0x000 i=0 o=0 (-1)
7: rsvn 0xC0000000-0xFFFFDFFF 1023m ---- (Fixed,Fixed,0)
8: anon 0xFFFFE000-0xFFFFEFFF 4096 ---- d=0x000 i=0 o=0 (-1)
9: rsvn 0xFFFFF000-0xFFFFFFFF 4096 ---- (Fixed,Fixed,0)
>>>
You can see for example that there's a reservation for 0xC0000000-0xFFFFFFFF
since that's unavailable to us. The reservation (7,9) is interrupted
by the sysinfo page (8). Similarly I (somewhat arbitrarily) placed a
reservation in the lowest 64M since I didn't think allocating down there
was a good idea.
|
|
From: Tom H. <to...@co...> - 2005-09-10 06:39:17
|
In message <171...@ka...>
Greg Parker <gp...@us...> wrote:
> There's a capability in Mac OS X's Mach VM that basically looks like
> new memory mappings appearing as a result of a syscall (other than mmap).
> There isn't a way to specify where any such mapping should be inserted
> (nothing like MAP_FIXED); indeed, in the worst case, the mapping just
> appears without any explicit request by the process.
There are some system calls in linux that do that as well.
> In the current Valgrind memory model, there's the client space and
> Valgrind's space, and never the twain shall meet. To prevent rogue
> mappings, it would be possible for Valgrind to keep all of Valgrind's
> space mapped, and leave the unused parts with no permissions. This
> requires a bit of code change, but should be straightforward.
That's basically how we handle it at the moment - there is code to pad
the address space which is invoked before the problem system calls
to ensure that the mapping goes where we want it.
Keeping large chunks of address space mapped is problematic however
which is one reason for the rewrite of the address space manager.
Tom
--
Tom Hughes (to...@co...)
http://www.compton.nu/
|
|
From: Greg P. <gp...@us...> - 2005-09-10 04:53:15
|
There's a capability in Mac OS X's Mach VM that basically looks like new memory mappings appearing as a result of a syscall (other than mmap). There isn't a way to specify where any such mapping should be inserted (nothing like MAP_FIXED); indeed, in the worst case, the mapping just appears without any explicit request by the process. In the current Valgrind memory model, there's the client space and Valgrind's space, and never the twain shall meet. To prevent rogue mappings, it would be possible for Valgrind to keep all of Valgrind's space mapped, and leave the unused parts with no permissions. This requires a bit of code change, but should be straightforward. Does aspacemgr still have address ranges that are not mapped, but must not be used for client allocations? If so, it would also need some notion of "mapped to prevent client use but not used by Valgrind". If not, then the entire problem goes away and everybody's happy. (I'm hoping that the truly spontaneous mapping case doesn't actually occur in real programs. The window server is a risk, but so far I've only seen it insert memory regions during window server requests. If spontaneous mappings do occur in real life, then memcheck might need to verify Valgrind's memory map against the kernel's before it actually flags an access error. Determining the initialized-ness of such a lazily-discovered mapping is left as an exercise for the reader.) -- Greg Parker gp...@us... |
|
From: Tom H. <th...@cy...> - 2005-09-10 02:59:49
|
Nightly build on gill ( x86_64, Fedora Core 2 ) started at 2005-09-10 03:00:02 BST Results differ from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 165 tests, 8 stderr failures, 2 stdout failures ================= memcheck/tests/sigprocmask (stderr) memcheck/tests/strchr (stderr) memcheck/tests/vgtest_ume (stderr) memcheck/tests/weirdioctl (stderr) memcheck/tests/xml1 (stderr) none/tests/faultstatus (stderr) none/tests/fdleak_fcntl (stderr) none/tests/ppc32/lsw (stdout) none/tests/ppc32/lsw (stderr) none/tests/tls (stdout) ================================================= == Results from 24 hours ago == ================================================= Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 164 tests, 7 stderr failures, 1 stdout failure ================= memcheck/tests/sigprocmask (stderr) memcheck/tests/strchr (stderr) memcheck/tests/vgtest_ume (stderr) memcheck/tests/weirdioctl (stderr) memcheck/tests/xml1 (stderr) none/tests/faultstatus (stderr) none/tests/fdleak_fcntl (stderr) none/tests/tls (stdout) ================================================= == Difference between 24 hours ago and now == ================================================= *** old.short Sat Sep 10 03:29:45 2005 --- new.short Sat Sep 10 03:59:42 2005 *************** *** 8,10 **** ! == 164 tests, 7 stderr failures, 1 stdout failure ================= memcheck/tests/sigprocmask (stderr) --- 8,10 ---- ! == 165 tests, 8 stderr failures, 2 stdout failures ================= memcheck/tests/sigprocmask (stderr) *************** *** 16,17 **** --- 16,19 ---- none/tests/fdleak_fcntl (stderr) + none/tests/ppc32/lsw (stdout) + none/tests/ppc32/lsw (stderr) none/tests/tls (stdout) |
|
From: <js...@ac...> - 2005-09-10 02:58:35
|
Nightly build on phoenix ( SuSE 9.1 ) started at 2005-09-10 03:30:00 BST Checking out vex source tree ... done Building vex ... done Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 186 tests, 3 stderr failures, 1 stdout failure ================= none/tests/faultstatus (stderr) none/tests/ppc32/lsw (stdout) none/tests/ppc32/lsw (stderr) none/tests/x86/int (stderr) ================================================= == Results from 24 hours ago == ================================================= Checking out vex source tree ... done Building vex ... done Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 185 tests, 2 stderr failures, 0 stdout failures ================= none/tests/faultstatus (stderr) none/tests/x86/int (stderr) ================================================= == Difference between 24 hours ago and now == ================================================= *** old.short Sat Sep 10 03:44:29 2005 --- new.short Sat Sep 10 03:58:31 2005 *************** *** 10,13 **** ! == 185 tests, 2 stderr failures, 0 stdout failures ================= none/tests/faultstatus (stderr) none/tests/x86/int (stderr) --- 10,15 ---- ! == 186 tests, 3 stderr failures, 1 stdout failure ================= none/tests/faultstatus (stderr) + none/tests/ppc32/lsw (stdout) + none/tests/ppc32/lsw (stderr) none/tests/x86/int (stderr) |
|
From: Tom H. <to...@co...> - 2005-09-10 02:40:59
|
Nightly build on dunsmere ( athlon, Fedora Core 4 ) started at 2005-09-10 03:30:03 BST Results differ from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 188 tests, 6 stderr failures, 1 stdout failure ================= memcheck/tests/leak-tree (stderr) memcheck/tests/weirdioctl (stderr) memcheck/tests/xml1 (stderr) none/tests/faultstatus (stderr) none/tests/ppc32/lsw (stdout) none/tests/ppc32/lsw (stderr) none/tests/x86/int (stderr) ================================================= == Results from 24 hours ago == ================================================= Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 187 tests, 5 stderr failures, 0 stdout failures ================= memcheck/tests/leak-tree (stderr) memcheck/tests/weirdioctl (stderr) memcheck/tests/xml1 (stderr) none/tests/faultstatus (stderr) none/tests/x86/int (stderr) ================================================= == Difference between 24 hours ago and now == ================================================= *** old.short Sat Sep 10 03:35:32 2005 --- new.short Sat Sep 10 03:40:54 2005 *************** *** 8,10 **** ! == 187 tests, 5 stderr failures, 0 stdout failures ================= memcheck/tests/leak-tree (stderr) --- 8,10 ---- ! == 188 tests, 6 stderr failures, 1 stdout failure ================= memcheck/tests/leak-tree (stderr) *************** *** 13,14 **** --- 13,16 ---- none/tests/faultstatus (stderr) + none/tests/ppc32/lsw (stdout) + none/tests/ppc32/lsw (stderr) none/tests/x86/int (stderr) |
|
From: Tom H. <th...@cy...> - 2005-09-10 02:27:57
|
Nightly build on alvis ( i686, Red Hat 7.3 ) started at 2005-09-10 03:15:04 BST Results differ from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 187 tests, 15 stderr failures, 2 stdout failures ================= memcheck/tests/addressable (stderr) memcheck/tests/describe-block (stderr) memcheck/tests/erringfds (stderr) memcheck/tests/leak-0 (stderr) memcheck/tests/leak-cycle (stderr) memcheck/tests/leak-regroot (stderr) memcheck/tests/leak-tree (stderr) memcheck/tests/match-overrun (stderr) memcheck/tests/partiallydefinedeq (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/sigkill (stderr) memcheck/tests/stack_changes (stderr) none/tests/faultstatus (stderr) none/tests/ppc32/lsw (stdout) none/tests/ppc32/lsw (stderr) none/tests/x86/int (stderr) none/tests/x86/yield (stdout) ================================================= == Results from 24 hours ago == ================================================= Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... 
failed Regression test results follow == 186 tests, 14 stderr failures, 0 stdout failures ================= memcheck/tests/addressable (stderr) memcheck/tests/describe-block (stderr) memcheck/tests/erringfds (stderr) memcheck/tests/leak-0 (stderr) memcheck/tests/leak-cycle (stderr) memcheck/tests/leak-regroot (stderr) memcheck/tests/leak-tree (stderr) memcheck/tests/match-overrun (stderr) memcheck/tests/partiallydefinedeq (stderr) memcheck/tests/pointer-trace (stderr) memcheck/tests/sigkill (stderr) memcheck/tests/stack_changes (stderr) none/tests/faultstatus (stderr) none/tests/x86/int (stderr) ================================================= == Difference between 24 hours ago and now == ================================================= *** old.short Sat Sep 10 03:21:48 2005 --- new.short Sat Sep 10 03:27:45 2005 *************** *** 8,10 **** ! == 186 tests, 14 stderr failures, 0 stdout failures ================= memcheck/tests/addressable (stderr) --- 8,10 ---- ! == 187 tests, 15 stderr failures, 2 stdout failures ================= memcheck/tests/addressable (stderr) *************** *** 22,24 **** --- 22,27 ---- none/tests/faultstatus (stderr) + none/tests/ppc32/lsw (stdout) + none/tests/ppc32/lsw (stderr) none/tests/x86/int (stderr) + none/tests/x86/yield (stdout) |
|
From: Tom H. <th...@cy...> - 2005-09-10 02:25:08
|
Nightly build on ginetta ( i686, Red Hat 8.0 ) started at 2005-09-10 03:10:10 BST Results differ from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 187 tests, 3 stderr failures, 1 stdout failure ================= none/tests/faultstatus (stderr) none/tests/ppc32/lsw (stdout) none/tests/ppc32/lsw (stderr) none/tests/x86/int (stderr) ================================================= == Results from 24 hours ago == ================================================= Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 186 tests, 2 stderr failures, 1 stdout failure ================= none/tests/faultstatus (stderr) none/tests/x86/int (stderr) none/tests/x86/yield (stdout) ================================================= == Difference between 24 hours ago and now == ================================================= *** old.short Sat Sep 10 03:19:07 2005 --- new.short Sat Sep 10 03:25:00 2005 *************** *** 8,13 **** ! == 186 tests, 2 stderr failures, 1 stdout failure ================= none/tests/faultstatus (stderr) none/tests/x86/int (stderr) - none/tests/x86/yield (stdout) --- 8,14 ---- ! == 187 tests, 3 stderr failures, 1 stdout failure ================= none/tests/faultstatus (stderr) + none/tests/ppc32/lsw (stdout) + none/tests/ppc32/lsw (stderr) none/tests/x86/int (stderr) |
|
From: Tom H. <th...@cy...> - 2005-09-10 02:20:38
|
Nightly build on dellow ( x86_64, Fedora Core 4 ) started at 2005-09-10 03:10:10 BST Results differ from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 165 tests, 7 stderr failures, 1 stdout failure ================= memcheck/tests/sigprocmask (stderr) memcheck/tests/strchr (stderr) memcheck/tests/vgtest_ume (stderr) memcheck/tests/weirdioctl (stderr) memcheck/tests/xml1 (stderr) none/tests/faultstatus (stderr) none/tests/ppc32/lsw (stdout) none/tests/ppc32/lsw (stderr) ================================================= == Results from 24 hours ago == ================================================= Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 164 tests, 6 stderr failures, 0 stdout failures ================= memcheck/tests/sigprocmask (stderr) memcheck/tests/strchr (stderr) memcheck/tests/vgtest_ume (stderr) memcheck/tests/weirdioctl (stderr) memcheck/tests/xml1 (stderr) none/tests/faultstatus (stderr) ================================================= == Difference between 24 hours ago and now == ================================================= *** old.short Sat Sep 10 03:16:48 2005 --- new.short Sat Sep 10 03:20:33 2005 *************** *** 8,10 **** ! == 164 tests, 6 stderr failures, 0 stdout failures ================= memcheck/tests/sigprocmask (stderr) --- 8,10 ---- ! == 165 tests, 7 stderr failures, 1 stdout failure ================= memcheck/tests/sigprocmask (stderr) *************** *** 15,16 **** --- 15,18 ---- none/tests/faultstatus (stderr) + none/tests/ppc32/lsw (stdout) + none/tests/ppc32/lsw (stderr) |
|
From: Tom H. <th...@cy...> - 2005-09-10 02:18:58
|
Nightly build on aston ( x86_64, Fedora Core 3 ) started at 2005-09-10 03:05:10 BST Results differ from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 165 tests, 7 stderr failures, 1 stdout failure ================= memcheck/tests/sigprocmask (stderr) memcheck/tests/strchr (stderr) memcheck/tests/vgtest_ume (stderr) memcheck/tests/weirdioctl (stderr) memcheck/tests/xml1 (stderr) none/tests/faultstatus (stderr) none/tests/ppc32/lsw (stdout) none/tests/ppc32/lsw (stderr) ================================================= == Results from 24 hours ago == ================================================= Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 164 tests, 6 stderr failures, 0 stdout failures ================= memcheck/tests/sigprocmask (stderr) memcheck/tests/strchr (stderr) memcheck/tests/vgtest_ume (stderr) memcheck/tests/weirdioctl (stderr) memcheck/tests/xml1 (stderr) none/tests/faultstatus (stderr) ================================================= == Difference between 24 hours ago and now == ================================================= *** old.short Sat Sep 10 03:13:17 2005 --- new.short Sat Sep 10 03:18:49 2005 *************** *** 8,10 **** ! == 164 tests, 6 stderr failures, 0 stdout failures ================= memcheck/tests/sigprocmask (stderr) --- 8,10 ---- ! == 165 tests, 7 stderr failures, 1 stdout failure ================= memcheck/tests/sigprocmask (stderr) *************** *** 15,16 **** --- 15,18 ---- none/tests/faultstatus (stderr) + none/tests/ppc32/lsw (stdout) + none/tests/ppc32/lsw (stderr) |