You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
|
|
|
1
(5) |
2
(3) |
|
3
(2) |
4
(3) |
5
(16) |
6
(8) |
7
(6) |
8
(2) |
9
(4) |
|
10
(10) |
11
(22) |
12
(7) |
13
(10) |
14
(11) |
15
(8) |
16
(6) |
|
17
(11) |
18
|
19
(6) |
20
(8) |
21
(5) |
22
(11) |
23
(6) |
|
24
(1) |
25
(6) |
26
(4) |
27
(2) |
28
(1) |
29
|
30
(2) |
|
31
(5) |
|
|
|
|
|
|
|
From: <sv...@va...> - 2015-05-10 22:40:46
|
Author: philippe
Date: Sun May 10 23:40:38 2015
New Revision: 15208
Log:
Improves the way arena statistics are shown
The mmap'd max/curr and max/curr nr of bytes will be shown e.g. as
11,440,408/ 4,508,968
instead of
11440656/ 4509200
So, using more space, but more readable (in particular when the
nr exceeds the width, and so are not aligned anymore)
Modified:
trunk/coregrind/m_gdbserver/server.c
trunk/coregrind/m_mallocfree.c
Modified: trunk/coregrind/m_gdbserver/server.c
==============================================================================
--- trunk/coregrind/m_gdbserver/server.c (original)
+++ trunk/coregrind/m_gdbserver/server.c Sun May 10 23:40:38 2015
@@ -157,9 +157,10 @@
VG_(message)(Vg_DebugMsg,
"------ Valgrind's internal memory use stats follow ------\n" );
VG_(sanity_check_malloc_all)();
- VG_(message)(Vg_DebugMsg,
- "------ %llu bytes have already been mmap-ed ANONYMOUS.\n",
- VG_(am_get_anonsize_total)());
+ VG_(message)
+ (Vg_DebugMsg,
+ "------ %'13llu bytes have already been mmap-ed ANONYMOUS.\n",
+ VG_(am_get_anonsize_total)());
VG_(print_all_arena_stats)();
if (VG_(clo_profile_heap))
VG_(print_arena_cc_analysis) ();
@@ -386,7 +387,7 @@
VG_(gdbserver_status_output)();
break;
case 4: /* memory */
- VG_(printf) ("%llu bytes have already been mmap-ed ANONYMOUS.\n",
+ VG_(printf) ("%'13llu bytes have already been mmap-ed ANONYMOUS.\n",
VG_(am_get_anonsize_total)());
VG_(print_all_arena_stats) ();
if (VG_(clo_profile_heap))
Modified: trunk/coregrind/m_mallocfree.c
==============================================================================
--- trunk/coregrind/m_mallocfree.c (original)
+++ trunk/coregrind/m_mallocfree.c Sun May 10 23:40:38 2015
@@ -625,9 +625,9 @@
for (i = 0; i < VG_N_ARENAS; i++) {
Arena* a = arenaId_to_ArenaP(i);
VG_(message)(Vg_DebugMsg,
- "%-8s: %8lu/%8lu max/curr mmap'd, "
+ "%-8s: %'13lu/%'13lu max/curr mmap'd, "
"%llu/%llu unsplit/split sb unmmap'd, "
- "%8lu/%8lu max/curr, "
+ "%'13lu/%'13lu max/curr, "
"%10llu/%10llu totalloc-blocks/bytes,"
" %10llu searches %lu rzB\n",
a->name,
@@ -758,7 +758,7 @@
"\n"
" Valgrind's memory management: out of memory:\n"
" %s's request for %llu bytes failed.\n"
- " %llu bytes have already been mmap-ed ANONYMOUS.\n"
+ " %'13llu bytes have already been mmap-ed ANONYMOUS.\n"
" Valgrind cannot continue. Sorry.\n\n"
" There are several possible reasons for this.\n"
" - You have some kind of memory limit in place. Look at the\n"
|
Author: philippe
Date: Sun May 10 23:19:31 2015
New Revision: 15207
Log:
This patch decreases the memory used by the helgrind SecMap,
by implementing a Garbage Collection for the SecMap.
The basic change is that freed memory is marked as noaccess
(while before, it kept the previous marking, on the basis that
non buggy applications are not accessing freed memory in any case).
Keeping the previous marking avoids the CPU/memory changes needed
to mark noaccess.
However, marking freed memory noaccess and GC the secmap reduces
the memory on big apps.
For example, a firefox test needs 220Mb less (on about 2.06 Gb).
Similar reduction for libreoffice batch (260 MB less on 1.09 Gb).
On such applications, the performance with the patch is similar to the trunk.
There is a performance decrease for applications that are doing
a lot of malloc/free repetitively: e.g. on some perf tests, an increase
in cpu of up to 15% has been observed.
Several performance optimisations can be done afterwards to not lose
too much performance. The decrease of memory is expected to produce
in any case significant benefit in memory constrained environments
(e.g. android phones).
So, after discussion with Julian, it was decided to commit as-is
and (re-)gain (part of) performance in follow-up commits.
Added:
trunk/helgrind/tests/shmem_abits.c
trunk/helgrind/tests/shmem_abits.stderr.exp
trunk/helgrind/tests/shmem_abits.stdout.exp
trunk/helgrind/tests/shmem_abits.vgtest
Modified:
trunk/helgrind/helgrind.h
trunk/helgrind/hg_main.c
trunk/helgrind/libhb.h
trunk/helgrind/libhb_core.c
trunk/helgrind/tests/ (props changed)
trunk/helgrind/tests/Makefile.am
Modified: trunk/helgrind/helgrind.h
==============================================================================
--- trunk/helgrind/helgrind.h (original)
+++ trunk/helgrind/helgrind.h Sun May 10 23:19:31 2015
@@ -118,8 +118,8 @@
_VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, /* Addr start_of_block */
_VG_USERREQ__HG_PTHREAD_COND_INIT_POST, /* pth_cond_t*, pth_cond_attr_t*/
_VG_USERREQ__HG_GNAT_MASTER_HOOK, /* void*d,void*m,Word ml */
- _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK /* void*s,Word ml */
-
+ _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK,/* void*s,Word ml */
+ _VG_USERREQ__HG_GET_ABITS /* Addr a,Addr abits, ulong len */
} Vg_TCheckClientRequest;
@@ -157,7 +157,7 @@
#define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
do { \
- long int arg1; \
+ long int _arg1; \
/* assert(sizeof(_ty1F) == sizeof(long int)); */ \
_arg1 = (long int)(_arg1F); \
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR( \
@@ -194,6 +194,23 @@
_arg1,_arg2,_arg3,0,0); \
} while (0)
+#define DO_CREQ_W_WWW(_resF, _dfltF, _creqF, _ty1F,_arg1F, \
+ _ty2F,_arg2F, _ty3F, _arg3F) \
+ do { \
+ long int _qzz_res; \
+ long int _arg1, _arg2, _arg3; \
+ /* assert(sizeof(_ty1F) == sizeof(long int)); */ \
+ _arg1 = (long int)(_arg1F); \
+ _arg2 = (long int)(_arg2F); \
+ _arg3 = (long int)(_arg3F); \
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ (_dfltF), \
+ (_creqF), \
+ _arg1,_arg2,_arg3,0,0); \
+ _resF = _qzz_res; \
+ } while (0)
+
+
#define _HG_CLIENTREQ_UNIMP(_qzz_str) \
DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP, \
@@ -367,6 +384,38 @@
unsigned long,(_qzz_len))
+#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len) \
+ DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED, \
+ void*,(_qzz_start), \
+ unsigned long,(_qzz_len))
+
+
+/* Checks the accessibility bits for addresses [zza..zza+zznbytes-1].
+ If zzabits array is provided, copy the accessibility bits in zzabits.
+ Return values:
+ -2 if not running on helgrind
+ -1 if any parts of zzabits is not addressable
+ >= 0 : success.
+ When success, it returns the nr of addressable bytes found.
+ So, to check that a whole range is addressable, check
+ VALGRIND_HG_GET_ABITS(addr,NULL,len) == len
+ In addition, if you want to examine the addressability of each
+ byte of the range, you need to provide a non NULL ptr as
+ second argument, pointing to an array of unsigned char
+ of length len.
+ Addressable bytes are indicated with 0xff.
+ Non-addressable bytes are indicated with 0x00.
+*/
+#define VALGRIND_HG_GET_ABITS(zza,zzabits,zznbytes) \
+ (__extension__ \
+ ({long int _res; \
+ DO_CREQ_W_WWW(_res, (-2)/*default*/, \
+ _VG_USERREQ__HG_GET_ABITS, \
+ void*,(zza), void*,(zzabits), \
+ unsigned long,(zznbytes)); \
+ _res; \
+ }))
+
/*----------------------------------------------------------------*/
/*--- ---*/
/*--- ThreadSanitizer-compatible requests ---*/
Modified: trunk/helgrind/hg_main.c
==============================================================================
--- trunk/helgrind/hg_main.c (original)
+++ trunk/helgrind/hg_main.c Sun May 10 23:19:31 2015
@@ -1813,7 +1813,28 @@
guarantee that the reference happens before the free. */
shadow_mem_cwrite_range(thr, a, len);
}
- shadow_mem_make_NoAccess_NoFX( thr, a, len );
+ shadow_mem_make_NoAccess_AHAE( thr, a, len );
+ /* We used to call instead
+ shadow_mem_make_NoAccess_NoFX( thr, a, len );
+ A non-buggy application will not access anymore
+ the freed memory, and so marking no access is in theory useless.
+ Not marking freed memory would avoid the overhead for applications
+ doing mostly malloc/free, as the freed memory should then be recycled
+ very quickly after marking.
+ We rather mark it noaccess for the following reasons:
+ * accessibility bits then always correctly represents the memory
+ status (e.g. for the client request VALGRIND_HG_GET_ABITS).
+ * the overhead is reasonable (about 5 seconds per Gb in 1000 bytes
+ blocks, on a ppc64le, for a unrealistic workload of an application
+ doing only malloc/free).
+ * marking no access allows to GC the SecMap, which might improve
+ performance and/or memory usage.
+ * we might detect more applications bugs when memory is marked
+ noaccess.
+ If needed, we could support here an option --free-is-noaccess=yes|no
+ to avoid marking freed memory as no access if some applications
+ would need to avoid the marking noaccess overhead. */
+
if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
all__sanity_check("evh__pre_mem_read-post");
}
@@ -4885,6 +4906,20 @@
}
break;
+ case _VG_USERREQ__HG_GET_ABITS:
+ if (0) VG_(printf)("HG_GET_ABITS(%#lx,%#lx,%ld)\n",
+ args[1], args[2], args[3]);
+ UChar *zzabit = (UChar *) args[2];
+ if (zzabit == NULL
+ || VG_(am_is_valid_for_client)((Addr)zzabit, (SizeT)args[3],
+ VKI_PROT_READ|VKI_PROT_WRITE))
+ *ret = (UWord) libhb_srange_get_abits ((Addr) args[1],
+ (UChar*) args[2],
+ (SizeT) args[3]);
+ else
+ *ret = -1;
+ break;
+
/* --- --- Client requests for Helgrind's use only --- --- */
/* Some thread is telling us its pthread_t value. Record the
Modified: trunk/helgrind/libhb.h
==============================================================================
--- trunk/helgrind/libhb.h (original)
+++ trunk/helgrind/libhb.h Sun May 10 23:19:31 2015
@@ -131,6 +131,13 @@
void libhb_srange_noaccess_NoFX ( Thr*, Addr, SizeT ); /* IS IGNORED */
void libhb_srange_noaccess_AHAE ( Thr*, Addr, SizeT ); /* IS NOT IGNORED */
+/* Counts the nr of bytes addressable in the range [a, a+len[
+ (so a+len excluded) and returns the nr of addressable bytes found.
+ If abits /= NULL, abits must point to a block of memory of length len.
+ In this array, each addressable byte will be indicated with 0xff.
+ Non-addressable bytes are indicated with 0x00. */
+UWord libhb_srange_get_abits (Addr a, /*OUT*/UChar *abits, SizeT len);
+
/* Get and set the hgthread (pointer to corresponding Thread
structure). */
Thread* libhb_get_Thr_hgthread ( Thr* );
Modified: trunk/helgrind/libhb_core.c
==============================================================================
--- trunk/helgrind/libhb_core.c (original)
+++ trunk/helgrind/libhb_core.c Sun May 10 23:19:31 2015
@@ -401,6 +401,7 @@
static void zsm_init ( void(*rcinc)(SVal), void(*rcdec)(SVal) );
static void zsm_sset_range ( Addr, SizeT, SVal );
+static void zsm_sset_range_SMALL ( Addr a, SizeT len, SVal svNew );
static void zsm_scopy_range ( Addr, Addr, SizeT );
static void zsm_flush_cache ( void );
@@ -412,7 +413,19 @@
/* Round a down to the next multiple of N. N must be a power of 2 */
#define ROUNDDN(a, N) ((a) & ~(N-1))
-
+/* True if a belongs in range [start, start + szB[
+ (i.e. start + szB is excluded). */
+static inline Bool address_in_range (Addr a, Addr start, SizeT szB)
+{
+ /* Checking start <= a && a < start + szB.
+ As start and a are unsigned addresses, the condition can
+ be simplified. */
+ if (CHECK_ZSM)
+ tl_assert ((a - start < szB)
+ == (start <= a
+ && a < start + szB));
+ return a - start < szB;
+}
/* ------ User-supplied RC functions ------ */
static void(*rcinc)(SVal) = NULL;
@@ -517,6 +530,9 @@
#define SecMap_MAGIC 0x571e58cbU
+// (UInt) `echo "Free SecMap" | md5sum`
+#define SecMap_free_MAGIC 0x5a977f30U
+
__attribute__((unused))
static inline Bool is_sane_SecMap ( SecMap* sm ) {
return sm != NULL && sm->magic == SecMap_MAGIC;
@@ -558,18 +574,20 @@
static UWord stats__secmaps_search = 0; // # SM finds
static UWord stats__secmaps_search_slow = 0; // # SM lookupFMs
static UWord stats__secmaps_allocd = 0; // # SecMaps issued
+static UWord stats__secmaps_in_map_shmem = 0; // # SecMaps 'live'
+static UWord stats__secmaps_scanGC = 0; // # nr of scan GC done.
+static UWord stats__secmaps_scanGCed = 0; // # SecMaps GC-ed via scan
+static UWord stats__secmaps_ssetGCed = 0; // # SecMaps GC-ed via setnoaccess
static UWord stats__secmap_ga_space_covered = 0; // # ga bytes covered
static UWord stats__secmap_linesZ_allocd = 0; // # LineZ's issued
static UWord stats__secmap_linesZ_bytes = 0; // .. using this much storage
static UWord stats__secmap_linesF_allocd = 0; // # LineF's issued
static UWord stats__secmap_linesF_bytes = 0; // .. using this much storage
-static UWord stats__secmap_iterator_steppings = 0; // # calls to stepSMIter
static UWord stats__cache_Z_fetches = 0; // # Z lines fetched
static UWord stats__cache_Z_wbacks = 0; // # Z lines written back
static UWord stats__cache_F_fetches = 0; // # F lines fetched
static UWord stats__cache_F_wbacks = 0; // # F lines written back
-static UWord stats__cache_invals = 0; // # cache invals
-static UWord stats__cache_flushes = 0; // # cache flushes
+static UWord stats__cache_flushes_invals = 0; // # cache flushes and invals
static UWord stats__cache_totrefs = 0; // # total accesses
static UWord stats__cache_totmisses = 0; // # misses
static ULong stats__cache_make_New_arange = 0; // total arange made New
@@ -599,6 +617,7 @@
static UWord stats__vts__join = 0; // # calls to VTS__join
static UWord stats__vts__cmpLEQ = 0; // # calls to VTS__cmpLEQ
static UWord stats__vts__cmp_structural = 0; // # calls to VTS__cmp_structural
+static UWord stats__vts_tab_GC = 0; // # nr of vts_tab GC
// # calls to VTS__cmp_structural w/ slow case
static UWord stats__vts__cmp_structural_slow = 0;
@@ -657,10 +676,59 @@
return shmem__bigchunk_next - n;
}
-static SecMap* shmem__alloc_SecMap ( void )
+/* SecMap changed to be fully SVal_NOACCESS are inserted in a list of
+ recycled SecMap. When a new SecMap is needed, a recycled SecMap
+ will be used in preference to allocating a new SecMap. */
+/* We make a linked list of SecMap. LinesF pointer is re-used to
+ implement the link list. */
+static SecMap *SecMap_freelist = NULL;
+static UWord SecMap_freelist_length(void)
+{
+ SecMap *sm;
+ UWord n = 0;
+
+ sm = SecMap_freelist;
+ while (sm) {
+ n++;
+ sm = (SecMap*)sm->linesF;
+ }
+ return n;
+}
+
+static void push_SecMap_on_freelist(SecMap* sm)
+{
+ if (0) VG_(message)(Vg_DebugMsg, "%p push\n", sm);
+ sm->magic = SecMap_free_MAGIC;
+ sm->linesF = (LineF*)SecMap_freelist;
+ SecMap_freelist = sm;
+}
+/* Returns a free SecMap if there is one.
+ Otherwise, returns NULL. */
+static SecMap *pop_SecMap_from_freelist(void)
+{
+ SecMap *sm;
+
+ sm = SecMap_freelist;
+ if (sm) {
+ tl_assert (sm->magic == SecMap_free_MAGIC);
+ SecMap_freelist = (SecMap*)sm->linesF;
+ if (0) VG_(message)(Vg_DebugMsg, "%p pop\n", sm);
+ }
+ return sm;
+}
+
+static SecMap* shmem__alloc_or_recycle_SecMap ( void )
{
Word i, j;
- SecMap* sm = shmem__bigchunk_alloc( sizeof(SecMap) );
+ SecMap* sm = pop_SecMap_from_freelist();
+
+ if (!sm) {
+ sm = shmem__bigchunk_alloc( sizeof(SecMap) );
+ stats__secmaps_allocd++;
+ stats__secmap_ga_space_covered += N_SECMAP_ARANGE;
+ stats__secmap_linesZ_allocd += N_SECMAP_ZLINES;
+ stats__secmap_linesZ_bytes += N_SECMAP_ZLINES * sizeof(LineZ);
+ }
if (0) VG_(printf)("alloc_SecMap %p\n",sm);
tl_assert(sm);
sm->magic = SecMap_MAGIC;
@@ -674,10 +742,6 @@
}
sm->linesF = NULL;
sm->linesF_size = 0;
- stats__secmaps_allocd++;
- stats__secmap_ga_space_covered += N_SECMAP_ARANGE;
- stats__secmap_linesZ_allocd += N_SECMAP_ZLINES;
- stats__secmap_linesZ_bytes += N_SECMAP_ZLINES * sizeof(LineZ);
return sm;
}
@@ -719,17 +783,120 @@
return sm;
}
+/* Scan the SecMap and count the SecMap that can be GC-ed.
+ If really, really does the GC of the SecMap. */
+/* NOT TO BE CALLED FROM WITHIN libzsm. */
+static UWord next_SecMap_GC_at = 1000;
+__attribute__((noinline))
+static UWord shmem__SecMap_do_GC(Bool really)
+{
+ UWord secmapW = 0;
+ Addr gaKey;
+ UWord examined = 0;
+ UWord ok_GCed = 0;
+
+ /* First invalidate the smCache */
+ smCache[0].gaKey = 1;
+ smCache[1].gaKey = 1;
+ smCache[2].gaKey = 1;
+ STATIC_ASSERT (3 == sizeof(smCache)/sizeof(smCache[0]));
+
+ VG_(initIterFM)( map_shmem );
+ while (VG_(nextIterFM)( map_shmem, &gaKey, &secmapW )) {
+ UWord i;
+ UWord j;
+ SecMap* sm = (SecMap*)secmapW;
+ tl_assert(sm->magic == SecMap_MAGIC);
+ Bool ok_to_GC = True;
+
+ examined++;
+
+ /* Deal with the LineZs */
+ for (i = 0; i < N_SECMAP_ZLINES && ok_to_GC; i++) {
+ LineZ* lineZ = &sm->linesZ[i];
+ ok_to_GC = lineZ->dict[0] == SVal_INVALID
+ || (lineZ->dict[0] == SVal_NOACCESS
+ && !SVal__isC (lineZ->dict[1])
+ && !SVal__isC (lineZ->dict[2])
+ && !SVal__isC (lineZ->dict[3]));
+ }
+ /* Deal with the LineFs */
+ for (i = 0; i < sm->linesF_size && ok_to_GC; i++) {
+ LineF* lineF = &sm->linesF[i];
+ if (!lineF->inUse)
+ continue;
+ for (j = 0; j < N_LINE_ARANGE && ok_to_GC; j++)
+ ok_to_GC = lineF->w64s[j] == SVal_NOACCESS;
+ }
+ if (ok_to_GC)
+ ok_GCed++;
+ if (ok_to_GC && really) {
+ SecMap *fm_sm;
+ Addr fm_gaKey;
+ /* We cannot remove a SecMap from map_shmem while iterating.
+ So, stop iteration, remove from map_shmem, recreate the iteration
+ on the next SecMap. */
+ VG_(doneIterFM) ( map_shmem );
+ /* No need to rcdec linesZ or linesF, these are all SVal_NOACCESS or
+ not in use. We just need to free the linesF. */
+ if (sm->linesF_size > 0) {
+ HG_(free)(sm->linesF);
+ stats__secmap_linesF_allocd -= sm->linesF_size;
+ stats__secmap_linesF_bytes -= sm->linesF_size * sizeof(LineF);
+ }
+ if (!VG_(delFromFM)(map_shmem, &fm_gaKey, (UWord*)&fm_sm, gaKey))
+ tl_assert (0);
+ stats__secmaps_in_map_shmem--;
+ tl_assert (gaKey == fm_gaKey);
+ tl_assert (sm == fm_sm);
+ stats__secmaps_scanGCed++;
+ push_SecMap_on_freelist (sm);
+ VG_(initIterAtFM) (map_shmem, gaKey + N_SECMAP_ARANGE);
+ }
+ }
+ VG_(doneIterFM)( map_shmem );
+
+ if (really) {
+ stats__secmaps_scanGC++;
+ /* Next GC when we approach the max allocated */
+ next_SecMap_GC_at = stats__secmaps_allocd - 1000;
+ /* Unless we GCed less than 10%. We then allow to alloc 10%
+ more before GCing. This avoids doing a lot of costly GC
+ for the worst case : the 'growing phase' of an application
+ that allocates a lot of memory.
+ Worst can can be reproduced e.g. by
+ perf/memrw -t 30000000 -b 1000 -r 1 -l 1
+ that allocates around 30Gb of memory. */
+ if (ok_GCed < stats__secmaps_allocd/10)
+ next_SecMap_GC_at = stats__secmaps_allocd + stats__secmaps_allocd/10;
+
+ }
+
+ if (VG_(clo_stats) && really) {
+ VG_(message)(Vg_DebugMsg,
+ "libhb: SecMap GC: #%lu scanned %lu, GCed %lu,"
+ " next GC at %lu\n",
+ stats__secmaps_scanGC, examined, ok_GCed,
+ next_SecMap_GC_at);
+ }
+
+ return ok_GCed;
+}
+
static SecMap* shmem__find_or_alloc_SecMap ( Addr ga )
{
SecMap* sm = shmem__find_SecMap ( ga );
if (LIKELY(sm)) {
+ if (CHECK_ZSM) tl_assert(is_sane_SecMap(sm));
return sm;
} else {
/* create a new one */
Addr gaKey = shmem__round_to_SecMap_base(ga);
- sm = shmem__alloc_SecMap();
+ sm = shmem__alloc_or_recycle_SecMap();
tl_assert(sm);
VG_(addToFM)( map_shmem, (UWord)gaKey, (UWord)sm );
+ stats__secmaps_in_map_shmem++;
+ if (CHECK_ZSM) tl_assert(is_sane_SecMap(sm));
return sm;
}
}
@@ -1461,16 +1628,46 @@
normalise_CacheLine( cl );
}
-static void shmem__invalidate_scache ( void ) {
+/* Invalid the cachelines corresponding to the given range. */
+static void shmem__invalidate_scache_range (Addr ga, SizeT szB)
+{
+ Addr before_start = ga;
+ Addr aligned_start = ROUNDUP(ga, N_LINE_ARANGE);
+ Addr after_start = ROUNDDN(ga + szB, N_LINE_ARANGE);
+ UWord before_len = aligned_start - before_start;
+ UWord after_len = ga + szB - after_start;
+
+ /* Write-back cachelines partially set to NOACCESS */
+ if (before_len > 0) {
+ zsm_sset_range_SMALL (before_start, before_len, SVal_NOACCESS);
+ szB += N_LINE_ARANGE - before_len;
+ }
+ if (after_len > 0) {
+ zsm_sset_range_SMALL (after_start, after_len, SVal_NOACCESS);
+ szB += N_LINE_ARANGE - after_len;
+ }
+
+ /* szB must now be a multiple of cacheline size. */
+ tl_assert (0 == (szB & (N_LINE_ARANGE - 1)));
+
Word wix;
- if (0) VG_(printf)("%s","scache inval\n");
- tl_assert(!is_valid_scache_tag(1));
- for (wix = 0; wix < N_WAY_NENT; wix++) {
- cache_shmem.tags0[wix] = 1/*INVALID*/;
+
+ Word ga_ix = (ga >> N_LINE_BITS) & (N_WAY_NENT - 1);
+ Word nwix = szB / N_LINE_ARANGE;
+
+ if (nwix > N_WAY_NENT)
+ nwix = N_WAY_NENT; // no need to check several times the same entry.
+
+ for (wix = 0; wix < nwix; wix++) {
+ if (address_in_range(cache_shmem.tags0[ga_ix], ga, szB))
+ cache_shmem.tags0[ga_ix] = 1/*INVALID*/;
+ ga_ix++;
+ if (ga_ix == N_WAY_NENT)
+ ga_ix = 0;
}
- stats__cache_invals++;
}
+
static void shmem__flush_and_invalidate_scache ( void ) {
Word wix;
Addr tag;
@@ -1486,8 +1683,7 @@
}
cache_shmem.tags0[wix] = 1/*INVALID*/;
}
- stats__cache_flushes++;
- stats__cache_invals++;
+ stats__cache_flushes_invals++;
}
@@ -1758,7 +1954,11 @@
map_shmem = VG_(newFM)( HG_(zalloc), "libhb.zsm_init.1 (map_shmem)",
HG_(free),
NULL/*unboxed UWord cmp*/);
- shmem__invalidate_scache();
+ /* Invalidate all cache entries. */
+ tl_assert(!is_valid_scache_tag(1));
+ for (UWord wix = 0; wix < N_WAY_NENT; wix++) {
+ cache_shmem.tags0[wix] = 1/*INVALID*/;
+ }
/* a SecMap must contain an integral number of CacheLines */
tl_assert(0 == (N_SECMAP_ARANGE % N_LINE_ARANGE));
@@ -2818,11 +3018,11 @@
}
if (VG_(clo_stats)) {
- static UInt ctr = 1;
tl_assert(nTab > 0);
VG_(message)(Vg_DebugMsg,
- "libhb: VTS GC: #%u old size %lu live %lu (%2llu%%)\n",
- ctr++, nTab, nLive, (100ULL * (ULong)nLive) / (ULong)nTab);
+ "libhb: VTS GC: #%lu old size %lu live %lu (%2llu%%)\n",
+ stats__vts_tab_GC,
+ nTab, nLive, (100ULL * (ULong)nLive) / (ULong)nTab);
}
/* ---------- END VTS GC ---------- */
@@ -6006,8 +6206,17 @@
VG_(printf)(" linesF: %'10lu allocd (%'12lu bytes occupied)\n",
stats__secmap_linesF_allocd,
stats__secmap_linesF_bytes);
- VG_(printf)(" secmaps: %'10lu iterator steppings\n",
- stats__secmap_iterator_steppings);
+ VG_(printf)(" secmaps: %'10lu in map (can be scanGCed %'5lu)"
+ " #%lu scanGC \n",
+ stats__secmaps_in_map_shmem,
+ shmem__SecMap_do_GC(False /* really do GC */),
+ stats__secmaps_scanGC);
+ tl_assert (VG_(sizeFM) (map_shmem) == stats__secmaps_in_map_shmem);
+ VG_(printf)(" secmaps: %'10lu in freelist,"
+ " total (scanGCed %'lu, ssetGCed %'lu)\n",
+ SecMap_freelist_length(),
+ stats__secmaps_scanGCed,
+ stats__secmaps_ssetGCed);
VG_(printf)(" secmaps: %'10lu searches (%'12lu slow)\n",
stats__secmaps_search, stats__secmaps_search_slow);
@@ -6018,8 +6227,8 @@
stats__cache_Z_fetches, stats__cache_F_fetches );
VG_(printf)(" cache: %'14lu Z-wback, %'14lu F-wback\n",
stats__cache_Z_wbacks, stats__cache_F_wbacks );
- VG_(printf)(" cache: %'14lu invals, %'14lu flushes\n",
- stats__cache_invals, stats__cache_flushes );
+ VG_(printf)(" cache: %'14lu flushes_invals\n",
+ stats__cache_flushes_invals );
VG_(printf)(" cache: %'14llu arange_New %'14llu direct-to-Zreps\n",
stats__cache_make_New_arange,
stats__cache_make_New_inZrep);
@@ -6044,17 +6253,19 @@
stats__cline_swrite08s );
VG_(printf)(" cline: s rd1s %'lu, s copy1s %'lu\n",
stats__cline_sread08s, stats__cline_scopy08s );
- VG_(printf)(" cline: splits: 8to4 %'12lu 4to2 %'12lu 2to1 %'12lu\n",
- stats__cline_64to32splits,
- stats__cline_32to16splits,
- stats__cline_16to8splits );
- VG_(printf)(" cline: pulldowns: 8to4 %'12lu 4to2 %'12lu 2to1 %'12lu\n",
- stats__cline_64to32pulldown,
- stats__cline_32to16pulldown,
- stats__cline_16to8pulldown );
+ VG_(printf)(" cline: splits: 8to4 %'12lu 4to2 %'12lu"
+ " 2to1 %'12lu\n",
+ stats__cline_64to32splits, stats__cline_32to16splits,
+ stats__cline_16to8splits );
+ VG_(printf)(" cline: pulldowns: 8to4 %'12lu 4to2 %'12lu"
+ " 2to1 %'12lu\n",
+ stats__cline_64to32pulldown, stats__cline_32to16pulldown,
+ stats__cline_16to8pulldown );
if (0)
- VG_(printf)(" cline: sizeof(CacheLineZ) %ld, covers %ld bytes of arange\n",
- (Word)sizeof(LineZ), (Word)N_LINE_ARANGE);
+ VG_(printf)(" cline: sizeof(CacheLineZ) %ld,"
+ " covers %ld bytes of arange\n",
+ (Word)sizeof(LineZ),
+ (Word)N_LINE_ARANGE);
VG_(printf)("%s","\n");
@@ -6068,21 +6279,22 @@
stats__join2_queries, stats__join2_misses);
VG_(printf)("%s","\n");
- VG_(printf)( " libhb: VTSops: tick %'lu, join %'lu, cmpLEQ %'lu\n",
- stats__vts__tick, stats__vts__join, stats__vts__cmpLEQ );
- VG_(printf)( " libhb: VTSops: cmp_structural %'lu (%'lu slow)\n",
- stats__vts__cmp_structural, stats__vts__cmp_structural_slow );
- VG_(printf)( " libhb: VTSset: find__or__clone_and_add %'lu (%'lu allocd)\n",
+ VG_(printf)(" libhb: VTSops: tick %'lu, join %'lu, cmpLEQ %'lu\n",
+ stats__vts__tick, stats__vts__join, stats__vts__cmpLEQ );
+ VG_(printf)(" libhb: VTSops: cmp_structural %'lu (%'lu slow)\n",
+ stats__vts__cmp_structural, stats__vts__cmp_structural_slow);
+ VG_(printf)(" libhb: VTSset: find__or__clone_and_add %'lu"
+ " (%'lu allocd)\n",
stats__vts_set__focaa, stats__vts_set__focaa_a );
VG_(printf)( " libhb: VTSops: indexAt_SLOW %'lu\n",
stats__vts__indexat_slow );
- show_vts_stats ("libhb stats");
VG_(printf)("%s","\n");
VG_(printf)(
" libhb: %ld entries in vts_table (approximately %lu bytes)\n",
VG_(sizeXA)( vts_tab ), VG_(sizeXA)( vts_tab ) * sizeof(VtsTE)
);
+ VG_(printf)(" libhb: #%lu vts_tab GC\n", stats__vts_tab_GC);
VG_(printf)( " libhb: %lu entries in vts_set\n",
VG_(sizeFM)( vts_set ) );
@@ -6440,22 +6652,289 @@
/* do nothing */
}
+
+/* Set the lines zix_start till zix_end to NOACCESS. */
+static void zsm_secmap_line_range_noaccess (SecMap *sm,
+ UInt zix_start, UInt zix_end)
+{
+ for (UInt lz = zix_start; lz <= zix_end; lz++) {
+ LineZ* lineZ;
+ LineF* lineF;
+ lineZ = &sm->linesZ[lz];
+ if (lineZ->dict[0] != SVal_INVALID) {
+ rcdec_LineZ(lineZ);
+ } else {
+ UInt fix = (UInt)lineZ->dict[1];
+ tl_assert(sm->linesF);
+ tl_assert(sm->linesF_size > 0);
+ tl_assert(fix >= 0 && fix < sm->linesF_size);
+ lineF = &sm->linesF[fix];
+ rcdec_LineF(lineF);
+ lineF->inUse = False;
+ }
+ lineZ->dict[0] = SVal_NOACCESS;
+ lineZ->dict[1] = lineZ->dict[2] = lineZ->dict[3] = SVal_INVALID;
+ for (UInt i = 0; i < N_LINE_ARANGE/4; i++)
+ lineZ->ix2s[i] = 0; /* all refer to dict[0] */
+ }
+}
+
+/* Set the given range to SVal_NOACCESS in-place in the secmap.
+ a must be cacheline aligned. len must be a multiple of a cacheline
+ and must be < N_SECMAP_ARANGE. */
+static void zsm_sset_range_noaccess_in_secmap(Addr a, SizeT len)
+{
+ tl_assert (is_valid_scache_tag (a));
+ tl_assert (0 == (len & (N_LINE_ARANGE - 1)));
+ tl_assert (len < N_SECMAP_ARANGE);
+
+ SecMap *sm1 = shmem__find_SecMap (a);
+ SecMap *sm2 = shmem__find_SecMap (a + len - 1);
+ UWord zix_start = shmem__get_SecMap_offset(a ) >> N_LINE_BITS;
+ UWord zix_end = shmem__get_SecMap_offset(a + len - 1) >> N_LINE_BITS;
+
+ if (sm1) {
+ if (CHECK_ZSM) tl_assert(is_sane_SecMap(sm1));
+ zsm_secmap_line_range_noaccess (sm1, zix_start,
+ sm1 == sm2 ? zix_end : N_SECMAP_ZLINES-1);
+ }
+ if (sm2 && sm1 != sm2) {
+ if (CHECK_ZSM) tl_assert(is_sane_SecMap(sm2));
+ zsm_secmap_line_range_noaccess (sm2, 0, zix_end);
+ }
+}
+
+/* Set the given address range to SVal_NOACCESS.
+ The SecMaps fully set to SVal_NOACCESS will be pushed in SecMap_freelist. */
+static void zsm_sset_range_noaccess (Addr addr, SizeT len)
+{
+ /*
+ BPC = Before, Partial Cacheline, = addr
+ (i.e. starting inside a cacheline/inside a SecMap)
+ BFC = Before, Full Cacheline(s), but not full SecMap
+ (i.e. starting inside a SecMap)
+ FSM = Full SecMap(s)
+ (i.e. starting a SecMap)
+ AFC = After, Full Cacheline(s), but not full SecMap
+ (i.e. first address after the full SecMap(s))
+ APC = After, Partial Cacheline, i.e. first address after the
+ full CacheLines).
+ ARE = After Range End = addr+len = first address not part of the range.
+
+ If addr starts a Cacheline, then BPC == BFC.
+ If addr starts a SecMap, then BPC == BFC == FSM.
+ If addr+len starts a SecMap, then APC == ARE == AFC
+ If addr+len starts a Cacheline, then APC == ARE
+ */
+ Addr ARE = addr + len;
+ Addr BPC = addr;
+ Addr BFC = ROUNDUP(BPC, N_LINE_ARANGE);
+ Addr FSM = ROUNDUP(BPC, N_SECMAP_ARANGE);
+ Addr AFC = ROUNDDN(ARE, N_SECMAP_ARANGE);
+ Addr APC = ROUNDDN(ARE, N_LINE_ARANGE);
+ SizeT Plen = len; // Plen will be split between the following:
+ SizeT BPClen;
+ SizeT BFClen;
+ SizeT FSMlen;
+ SizeT AFClen;
+ SizeT APClen;
+
+ /* Consumes from Plen the nr of bytes between from and to.
+ from and to must be aligned on a multiple of round.
+ The length consumed will be a multiple of round, with
+ a maximum of Plen. */
+# define PlenCONSUME(from, to, round, consumed) \
+ do { \
+ if (from < to) { \
+ if (to - from < Plen) \
+ consumed = to - from; \
+ else \
+ consumed = ROUNDDN(Plen, round); \
+ } else { \
+ consumed = 0; \
+ } \
+ Plen -= consumed; } while (0)
+
+ PlenCONSUME(BPC, BFC, 1, BPClen);
+ PlenCONSUME(BFC, FSM, N_LINE_ARANGE, BFClen);
+ PlenCONSUME(FSM, AFC, N_SECMAP_ARANGE, FSMlen);
+ PlenCONSUME(AFC, APC, N_LINE_ARANGE, AFClen);
+ PlenCONSUME(APC, ARE, 1, APClen);
+
+ if (0)
+ VG_(printf) ("addr %p[%ld] ARE %p"
+ " BPC %p[%ld] BFC %p[%ld] FSM %p[%ld]"
+ " AFC %p[%ld] APC %p[%ld]\n",
+ (void*)addr, len, (void*)ARE,
+ (void*)BPC, BPClen, (void*)BFC, BFClen, (void*)FSM, FSMlen,
+ (void*)AFC, AFClen, (void*)APC, APClen);
+
+ tl_assert (Plen == 0);
+
+ /* Set to NOACCESS pieces before and after not covered by entire SecMaps. */
+
+ /* First we set the partial cachelines. This is done through the cache. */
+ if (BPClen > 0)
+ zsm_sset_range_SMALL (BPC, BPClen, SVal_NOACCESS);
+ if (APClen > 0)
+ zsm_sset_range_SMALL (APC, APClen, SVal_NOACCESS);
+
+ /* After this, we will not use the cache anymore. We will directly work
+ in-place on the z shadow memory in SecMap(s).
+ So, we invalidate the cachelines for the whole range we are setting
+ to NOACCESS below. */
+ shmem__invalidate_scache_range (BFC, APC - BFC);
+
+ if (BFClen > 0)
+ zsm_sset_range_noaccess_in_secmap (BFC, BFClen);
+ if (AFClen > 0)
+ zsm_sset_range_noaccess_in_secmap (AFC, AFClen);
+
+ if (FSMlen > 0) {
+ /* Set to NOACCESS all the SecMaps, pushing the SecMaps to the
+ free list. */
+ Addr sm_start = FSM;
+ while (sm_start < AFC) {
+ SecMap *sm = shmem__find_SecMap (sm_start);
+ if (sm) {
+ Addr gaKey;
+ SecMap *fm_sm;
+
+ if (CHECK_ZSM) tl_assert(is_sane_SecMap(sm));
+ for (UInt lz = 0; lz < N_SECMAP_ZLINES; lz++) {
+ if (sm->linesZ[lz].dict[0] != SVal_INVALID)
+ rcdec_LineZ(&sm->linesZ[lz]);
+ }
+ for (UInt lf = 0; lf < sm->linesF_size; lf++) {
+ if (sm->linesF[lf].inUse)
+ rcdec_LineF (&sm->linesF[lf]);
+ }
+ if (sm->linesF_size > 0) {
+ HG_(free)(sm->linesF);
+ stats__secmap_linesF_allocd -= sm->linesF_size;
+ stats__secmap_linesF_bytes -= sm->linesF_size * sizeof(LineF);
+ }
+ if (!VG_(delFromFM)(map_shmem, &gaKey, (UWord*)&fm_sm, sm_start))
+ tl_assert (0);
+ stats__secmaps_in_map_shmem--;
+ tl_assert (gaKey == sm_start);
+ tl_assert (sm == fm_sm);
+ stats__secmaps_ssetGCed++;
+ push_SecMap_on_freelist (sm);
+ }
+ sm_start += N_SECMAP_ARANGE;
+ }
+ tl_assert (sm_start == AFC);
+
+ /* The above loop might have kept copies of freed SecMap in the smCache.
+ => clear them. */
+ if (address_in_range(smCache[0].gaKey, FSM, FSMlen)) {
+ smCache[0].gaKey = 1;
+ smCache[0].sm = NULL;
+ }
+ if (address_in_range(smCache[1].gaKey, FSM, FSMlen)) {
+ smCache[1].gaKey = 1;
+ smCache[1].sm = NULL;
+ }
+ if (address_in_range(smCache[2].gaKey, FSM, FSMlen)) {
+ smCache[2].gaKey = 1;
+ smCache[2].sm = NULL;
+ }
+ STATIC_ASSERT (3 == sizeof(smCache)/sizeof(SMCacheEnt));
+ }
+}
+
void libhb_srange_noaccess_AHAE ( Thr* thr, Addr a, SizeT szB )
{
/* This really does put the requested range in NoAccess. It's
expensive though. */
SVal sv = SVal_NOACCESS;
tl_assert(is_sane_SVal_C(sv));
- zsm_sset_range( a, szB, sv );
+ if (LIKELY(szB < 2 * N_LINE_ARANGE))
+ zsm_sset_range_SMALL (a, szB, SVal_NOACCESS);
+ else
+ zsm_sset_range_noaccess (a, szB);
Filter__clear_range( thr->filter, a, szB );
}
+/* Works byte at a time. Can be optimised if needed. */
+UWord libhb_srange_get_abits (Addr a, UChar *abits, SizeT len)
+{
+ UWord anr = 0; // nr of bytes addressable.
+
+ /* Get the accessibility of each byte. Pay attention to not
+ create SecMap or LineZ when checking if a byte is addressable.
+
+ Note: this is used for client request. Performance deemed not critical.
+ So for simplicity, we work byte per byte.
+ Performance could be improved by working with full cachelines
+ or with full SecMap, when reaching a cacheline or secmap boundary. */
+ for (SizeT i = 0; i < len; i++) {
+ SVal sv = SVal_INVALID;
+ Addr b = a + i;
+ Addr tag = b & ~(N_LINE_ARANGE - 1);
+ UWord wix = (b >> N_LINE_BITS) & (N_WAY_NENT - 1);
+ UWord cloff = get_cacheline_offset(b);
+
+ /* Note: we do not use get_cacheline(b) to avoid creating cachelines
+ and/or SecMap for non addressable bytes. */
+ if (tag == cache_shmem.tags0[wix]) {
+ CacheLine copy = cache_shmem.lyns0[wix];
+ /* We work on a copy of the cacheline, as we do not want to
+ record the client request as a real read.
+ The below is somewhat similar to zsm_sapply08__msmcread but
+ avoids side effects on the cache. */
+ UWord toff = get_tree_offset(b); /* == 0 .. 7 */
+ UWord tno = get_treeno(b);
+ UShort descr = copy.descrs[tno];
+ if (UNLIKELY( !(descr & (TREE_DESCR_8_0 << toff)) )) {
+ SVal* tree = &copy.svals[tno << 3];
+ copy.descrs[tno] = pulldown_to_8(tree, toff, descr);
+ }
+ sv = copy.svals[cloff];
+ } else {
+ /* Byte not found in the cacheline. Search for a SecMap. */
+ SecMap *sm = shmem__find_SecMap(b);
+ LineZ *lineZ;
+ if (sm == NULL)
+ sv = SVal_NOACCESS;
+ else {
+ UWord zix = shmem__get_SecMap_offset(b) >> N_LINE_BITS;
+ lineZ = &sm->linesZ[zix];
+ if (lineZ->dict[0] == SVal_INVALID) {
+ UInt fix = (UInt)lineZ->dict[1];
+ sv = sm->linesF[fix].w64s[cloff];
+ } else {
+ UWord ix = read_twobit_array( lineZ->ix2s, cloff );
+ sv = lineZ->dict[ix];
+ }
+ }
+ }
+
+ tl_assert (sv != SVal_INVALID);
+ if (sv == SVal_NOACCESS) {
+ if (abits)
+ abits[i] = 0x00;
+ } else {
+ if (abits)
+ abits[i] = 0xff;
+ anr++;
+ }
+ }
+
+ return anr;
+}
+
+
void libhb_srange_untrack ( Thr* thr, Addr a, SizeT szB )
{
SVal sv = SVal_NOACCESS;
tl_assert(is_sane_SVal_C(sv));
if (0 && TRACEME(a,szB)) trace(thr,a,szB,"untrack-before");
- zsm_sset_range( a, szB, sv );
+ if (LIKELY(szB < 2 * N_LINE_ARANGE))
+ zsm_sset_range_SMALL (a, szB, SVal_NOACCESS);
+ else
+ zsm_sset_range_noaccess (a, szB);
Filter__clear_range( thr->filter, a, szB );
if (0 && TRACEME(a,szB)) trace(thr,a,szB,"untrack-after ");
}
@@ -6492,18 +6971,26 @@
*/
if (UNLIKELY(stats__ctxt_tab_curr > N_RCEC_TAB/2
&& stats__ctxt_tab_curr + 1000 >= stats__ctxt_tab_max
- && stats__ctxt_tab_curr * 0.75 > RCEC_referenced))
+ && (stats__ctxt_tab_curr * 3)/4 > RCEC_referenced))
do_RCEC_GC();
- /* If there are still freelist entries available, no need for a
- GC. */
- if (vts_tab_freelist != VtsID_INVALID)
- return;
- /* So all the table entries are full, and we're having to expand
- the table. But did we hit the threshhold point yet? */
- if (VG_(sizeXA)( vts_tab ) < vts_next_GC_at)
- return;
- vts_tab__do_GC( False/*don't show stats*/ );
+ /* If there are still no entries available (all the table entries are full),
+ and we hit the threshold point, then do a GC */
+ Bool vts_tab_GC = vts_tab_freelist == VtsID_INVALID
+ && VG_(sizeXA)( vts_tab ) >= vts_next_GC_at;
+ if (UNLIKELY (vts_tab_GC))
+ vts_tab__do_GC( False/*don't show stats*/ );
+
+ /* scan GC the SecMaps when
+ (1) no SecMap in the freelist
+ and (2) the current nr of live secmaps exceeds the threshold. */
+ if (UNLIKELY(SecMap_freelist == NULL
+ && stats__secmaps_in_map_shmem >= next_SecMap_GC_at)) {
+ // If we did a vts tab GC, then no need to flush the cache again.
+ if (!vts_tab_GC)
+ zsm_flush_cache();
+ shmem__SecMap_do_GC(True);
+ }
/* Check the reference counts (expensive) */
if (CHECK_CEM)
Modified: trunk/helgrind/tests/Makefile.am
==============================================================================
--- trunk/helgrind/tests/Makefile.am (original)
+++ trunk/helgrind/tests/Makefile.am Sun May 10 23:19:31 2015
@@ -49,6 +49,7 @@
pth_spinlock.vgtest pth_spinlock.stdout.exp pth_spinlock.stderr.exp \
rwlock_race.vgtest rwlock_race.stdout.exp rwlock_race.stderr.exp \
rwlock_test.vgtest rwlock_test.stdout.exp rwlock_test.stderr.exp \
+ shmem_abits.vgtest shmem_abits.stdout.exp shmem_abits.stderr.exp \
stackteardown.vgtest stackteardown.stdout.exp stackteardown.stderr.exp \
t2t_laog.vgtest t2t_laog.stdout.exp t2t_laog.stderr.exp \
tc01_simple_race.vgtest tc01_simple_race.stdout.exp \
@@ -125,6 +126,7 @@
locked_vs_unlocked2 \
locked_vs_unlocked3 \
pth_destroy_cond \
+ shmem_abits \
stackteardown \
t2t \
tc01_simple_race \
Added: trunk/helgrind/tests/shmem_abits.c
==============================================================================
--- trunk/helgrind/tests/shmem_abits.c (added)
+++ trunk/helgrind/tests/shmem_abits.c Sun May 10 23:19:31 2015
@@ -0,0 +1,136 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include "helgrind/helgrind.h"
+
+#define MAX 1000000
+static unsigned char shadow[MAX];
+
+
+#define V(cond, testline) \
+ do { if (!(cond)) \
+ fprintf (stderr, "Test at line %d Failed verif at line %d: " #cond "\n", \
+ testline, __LINE__); } \
+ while (0)
+
+#define CHK(a1,a2,a3,a4) check(__LINE__,a1,a2,a3,a4)
+/* Check that [p, p+len[ has accessibility `access`.
+ If heap, check that one byte before and after is inaccessible */
+static void check (int testline, void *p, int len, unsigned char access, int heap)
+{
+ int i;
+ long int r;
+
+ assert (len < 1000000); // Do not exceed the shadow array
+
+ if (len == 0 && p == NULL)
+ return;
+ // malloc(0) can return a ptr or NULL.
+ // Let's not check NULL
+
+ r = VALGRIND_HG_GET_ABITS (p, shadow, len);
+ V (r == VALGRIND_HG_GET_ABITS (p, NULL, len), testline);
+ V (access == 0xff ? r == len : r == 0, testline);
+ for (i = 0; i < len; i++)
+ V(shadow[i] == access, testline);
+ if (heap) {
+ /* Check the range starting 1 byte before. */
+ r = VALGRIND_HG_GET_ABITS (p-1, shadow, len+1);
+ V (r == VALGRIND_HG_GET_ABITS (p-1, NULL, len+1), testline);
+ V (access == 0xff ? r == len : r == 0, testline);
+ V (shadow[0] == 0x00, testline);
+ for (i = 1; i < len+1; i++)
+ V (shadow[i] == access, testline);
+ /* Same but one byte after. We need special cases for
+ a len of 0. */
+ r = VALGRIND_HG_GET_ABITS (p+1, shadow, len);
+ V (r == VALGRIND_HG_GET_ABITS (p+1, NULL, len), testline);
+ if (len == 0)
+ V (r == 0, testline);
+ else
+ V (access == 0xff ? r == len-1 : r == 0, testline);
+ for (i = 0; i < len-1; i++)
+ V(shadow[i] == access, testline);
+ if (len != 0)
+ V(shadow[len-1] == 0x00, testline);
+ }
+}
+
+/* return an address on the stack, with big var on the stack,
+ to ensure it is really unaddressable when calling check. */
+static void* popped_stack_address(void)
+{
+ char s[MAX];
+ memcpy(s, shadow, MAX);
+ char *p;
+
+ p = &s[MAX/2-1-s[0]];
+ CHK(p, 1, 0xFF, 0);
+ return p;
+}
+
+int main ( void )
+{
+ char *p;
+
+ /* Basic test for an heap object */
+ fprintf(stderr, "basic heap test\n");
+ p = malloc (100);
+ CHK (p, 100, 0xff, 1);
+ free (p);
+ CHK (p, 100, 0x00, 1);
+
+ /* Basic test for some code : verify 50 bytes of check function code
+ is accessible. */
+ fprintf(stderr, "code test\n");
+ CHK (check, 50, 0xff, 0);
+
+ /* Check something on the stack */
+ fprintf(stderr, "stack test\n");
+ CHK (&p, sizeof(p), 0xff, 0);
+
+
+ /* Now shake the heap, to verify various sizes */
+ fprintf(stderr, "doing many heap blocks\n");
+ int i;
+ int j;
+# define X 200
+# define Y 4
+ void *ptr[X][Y];
+ int sz[X][Y];
+ int f[X][Y]; // already freed or not ?
+ for (i = 0; i < X; i++) {
+ for (j = 0; j < Y; j++) {
+ f[i][j] = 1;
+ // A SecMap represents 8Kb. We test the boundaries
+ // around such secmap (X/2 bytes before and after)
+ // We test with blocks covering from 0 till Y-1 secmaps
+ sz[i][j] = j * 8192 - (j == 0 ? 0 : X/2) + i;
+ ptr[i][j] = malloc(sz[i][j]);
+ CHK(ptr[i][j],sz[i][j], 0xff, 1);
+ }
+ }
+ /* Shake and check when doing random free */
+ fprintf(stderr, "random heap free and checks\n");
+ for (i = 0; i < X*Y/10; i++) {
+ int x = rand() % X;
+ int y = rand() % Y;
+ if (f[x][y]) {
+ CHK(ptr[x][y],sz[x][y], 0xff, 1);
+ free(ptr[x][y]);
+ f[x][y] = 0;
+ }
+ CHK(ptr[x][y],sz[x][y], 0x00, 1);
+ }
+
+#if 0
+ /* Check that a use after return gives unaddressable. */
+ CHK (popped_stack_address(), 1, 0x00, 0);
+ /* Well well, it seems helgrind keeps the stack accessible */
+#endif
+ (void) popped_stack_address();
+
+ return 0;
+}
+
Added: trunk/helgrind/tests/shmem_abits.stderr.exp
==============================================================================
--- trunk/helgrind/tests/shmem_abits.stderr.exp (added)
+++ trunk/helgrind/tests/shmem_abits.stderr.exp Sun May 10 23:19:31 2015
@@ -0,0 +1,5 @@
+basic heap test
+code test
+stack test
+doing many heap blocks
+random heap free and checks
Added: trunk/helgrind/tests/shmem_abits.stdout.exp
==============================================================================
(empty)
Added: trunk/helgrind/tests/shmem_abits.vgtest
==============================================================================
--- trunk/helgrind/tests/shmem_abits.vgtest (added)
+++ trunk/helgrind/tests/shmem_abits.vgtest Sun May 10 23:19:31 2015
@@ -0,0 +1,2 @@
+prog: shmem_abits
+vgopts: -q
|
|
From: <sv...@va...> - 2015-05-10 18:17:45
|
Author: philippe
Date: Sun May 10 19:17:38 2015
New Revision: 15206
Log:
small refinement in the outer/inner doc
Modified:
trunk/README_DEVELOPERS
Modified: trunk/README_DEVELOPERS
==============================================================================
--- trunk/README_DEVELOPERS (original)
+++ trunk/README_DEVELOPERS Sun May 10 19:17:38 2015
@@ -250,6 +250,7 @@
To compare the performance of multiple Valgrind versions, do :
perl perf/vg_perf --outer-valgrind=../outer/.../bin/valgrind \
+ --outer-tool=callgrind \
--vg=../inner_xxxx --vg=../inner_yyyy perf
(where inner_xxxx and inner_yyyy are the toplevel directories of
the versions to compare).
|
|
From: <sv...@va...> - 2015-05-10 13:34:30
|
Author: rhyskidd
Date: Sun May 10 14:34:22 2015
New Revision: 15205
Log:
Add (presently) failing test case for bz#254164.
Added:
trunk/none/tests/darwin/bug254164.c
trunk/none/tests/darwin/bug254164.stderr.exp
trunk/none/tests/darwin/bug254164.vgtest
Modified:
trunk/none/tests/darwin/ (props changed)
trunk/none/tests/darwin/Makefile.am
Modified: trunk/none/tests/darwin/Makefile.am
==============================================================================
--- trunk/none/tests/darwin/Makefile.am (original)
+++ trunk/none/tests/darwin/Makefile.am Sun May 10 14:34:22 2015
@@ -6,11 +6,13 @@
EXTRA_DIST = \
access_extended.stderr.exp access_extended.vgtest \
apple-main-arg.stderr.exp apple-main-arg.vgtest \
+ bug254164.stderr.exp bug254164.vgtest \
rlimit.stderr.exp rlimit.vgtest
check_PROGRAMS = \
access_extended \
apple-main-arg \
+ bug254164 \
rlimit
Added: trunk/none/tests/darwin/bug254164.c
==============================================================================
--- trunk/none/tests/darwin/bug254164.c (added)
+++ trunk/none/tests/darwin/bug254164.c Sun May 10 14:34:22 2015
@@ -0,0 +1,61 @@
+// Small test program to demonstrate Valgrind bug.
+// https://bugs.kde.org/show_bug.cgi?id=254164
+
+
+#include <stdio.h>
+#include <sys/resource.h>
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <mach/task.h>
+#include <mach/mach_init.h>
+
+void getres(task_t task, unsigned int *rss, unsigned int *vs)
+{
+ struct task_basic_info t_info;
+ mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT;
+
+ task_info(task, TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count);
+ *rss = t_info.resident_size;
+ *vs = t_info.virtual_size;
+}
+
+/** It appears task_set_info() is a deprecated interface on modern Darwin
+ * Per comments in osfmk/kern/task.c:
+ *
+ * This routine was added, pretty much exclusively, for registering the
+ * RPC glue vector for in-kernel short circuited tasks. Rather than
+ * removing it completely, I have only disabled that feature (which was
+ * the only feature at the time).
+ */
+/**
+void setres(task_t task)
+{
+ struct task_trace_memory_info t_info;
+ mach_msg_type_number_t t_info_count = TASK_TRACE_MEMORY_INFO_COUNT;
+
+ t_info.user_memory_address = NULL;
+ t_info.buffer_size = 0;
+ t_info.mailbox_array_size = 0;
+
+ task_set_info(task, TASK_TRACE_MEMORY_INFO, (task_info_t)&t_info, &t_info_count);
+}
+ */
+
+int main(void)
+{
+ unsigned int rss, vs;
+ task_t task = MACH_PORT_NULL;
+
+ if (task_for_pid(current_task(), getpid(), &task) != KERN_SUCCESS)
+ abort();
+
+ getres(task, &rss, &vs);
+ //setres(task);
+
+ return 0;
+}
+
Added: trunk/none/tests/darwin/bug254164.stderr.exp
==============================================================================
(empty)
Added: trunk/none/tests/darwin/bug254164.vgtest
==============================================================================
--- trunk/none/tests/darwin/bug254164.vgtest (added)
+++ trunk/none/tests/darwin/bug254164.vgtest Sun May 10 14:34:22 2015
@@ -0,0 +1,2 @@
+prog: bug254164
+vgopts: -q
|
|
From: <sv...@va...> - 2015-05-10 11:38:44
|
Author: florian
Date: Sun May 10 12:38:38 2015
New Revision: 15204
Log:
Rename VG_(am_is_valid_for_client_or_free_or_resvn) to
VG_(am_is_allowed_for_client).
Also a few spelling and white space fixes.
Modified:
branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-generic.c
branches/ASPACEM_TWEAKS/coregrind/pub_core_aspacemgr.h
Modified: branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c Sun May 10 12:38:38 2015
@@ -1232,8 +1232,7 @@
be consider part of the client's addressable space. It also
considers reservations to be allowable, since from the client's
point of view they don't exist. */
-Bool VG_(am_is_valid_for_client_or_free_or_resvn)
- ( Addr start, SizeT len, UInt prot )
+Bool VG_(am_is_allowed_for_client)( Addr start, SizeT len, UInt prot )
{
const UInt kinds = SkFileC | SkAnonC | SkShmC | SkFree | SkResvn;
@@ -2616,8 +2615,7 @@
aspacem_assert(VG_IS_PAGE_ALIGNED(len));
if (forClient) {
- if (!VG_(am_is_valid_for_client_or_free_or_resvn)
- ( start, len, VKI_PROT_NONE ))
+ if (!VG_(am_is_allowed_for_client)( start, len, VKI_PROT_NONE ))
goto eINVAL;
} else {
if (!VG_(am_is_valid_for_valgrind)
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-generic.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-generic.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-generic.c Sun May 10 12:38:38 2015
@@ -99,8 +99,7 @@
}
/* Returns True iff address range is something the client can
- plausibly mess with: all of it is either already belongs to the
- client or is free or a reservation. */
+ plausibly mess with. */
Bool ML_(valid_client_addr)(Addr start, SizeT size, ThreadId tid,
const HChar *syscallname)
@@ -110,8 +109,7 @@
if (size == 0)
return True;
- ret = VG_(am_is_valid_for_client_or_free_or_resvn)
- (start,size,VKI_PROT_NONE);
+ ret = VG_(am_is_allowed_for_client)(start, size, VKI_PROT_NONE);
if (0)
VG_(printf)("%s: test=%#lx-%#lx ret=%d\n",
Modified: branches/ASPACEM_TWEAKS/coregrind/pub_core_aspacemgr.h
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/pub_core_aspacemgr.h (original)
+++ branches/ASPACEM_TWEAKS/coregrind/pub_core_aspacemgr.h Sun May 10 12:38:38 2015
@@ -70,8 +70,7 @@
// Querying current status
-/* Find the next segment along from 'here', if it is a file/anon/resvn
- segment. */
+/* Find the next non-free segment along from 'here'. */
extern NSegment const* VG_(am_next_nsegment) ( const NSegment* here,
Bool fwds );
@@ -81,15 +80,13 @@
VKI_PROT_NONE as 'prot'. Will return False if any part of the
area does not belong to valgrind or does not have at least
the stated permissions. */
-extern Bool VG_(am_is_valid_for_valgrind)
- ( Addr start, SizeT len, UInt prot );
+extern Bool VG_(am_is_valid_for_valgrind)( Addr start, SizeT len, UInt prot );
-/* Variant of VG_(am_is_valid_for_client) which allows free areas to
- be consider part of the client's addressable space. It also
+/* Variant of VG_(am_is_valid_for_valgrind) which allows free areas to
+ be considered part of the client's addressable space. It also
considers reservations to be allowable, since from the client's
point of view they don't exist. */
-extern Bool VG_(am_is_valid_for_client_or_free_or_resvn)
- ( Addr start, SizeT len, UInt prot );
+extern Bool VG_(am_is_allowed_for_client)( Addr start, SizeT len, UInt prot );
/* Check whether ADDR looks like an address or address-to-be located in an
extensible client stack segment. */
|
|
From: <sv...@va...> - 2015-05-10 11:25:06
|
Author: florian
Date: Sun May 10 12:24:59 2015
New Revision: 15203
Log:
White space only change.
Modified:
branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c
Modified: branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c Sun May 10 12:24:59 2015
@@ -1221,8 +1221,7 @@
/* Test if a piece of memory is addressable by the client with at
least the "prot" protection permissions by examining the underlying
segments. */
-Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
- UInt prot )
+Bool VG_(am_is_valid_for_client)( Addr start, SizeT len, UInt prot )
{
const UInt kinds = SkFileC | SkAnonC | SkShmC;
|
Author: florian
Date: Sun May 10 12:23:01 2015
New Revision: 15202
Log:
Merge from trunk
Added:
branches/ASPACEM_TWEAKS/tests/check_ppc64le_cap
- copied unchanged from r15201, trunk/tests/check_ppc64le_cap
Modified:
branches/ASPACEM_TWEAKS/ (props changed)
branches/ASPACEM_TWEAKS/NEWS
branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_stacktrace.c
branches/ASPACEM_TWEAKS/coregrind/m_syscall.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/priv_syswrap-linux.h
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-amd64-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-arm-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-arm64-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-mips32-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-mips64-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-ppc32-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-ppc64-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-s390x-linux.c
branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-x86-linux.c
branches/ASPACEM_TWEAKS/darwin12.supp
branches/ASPACEM_TWEAKS/docs/internals/3_10_BUGSTATUS.txt
branches/ASPACEM_TWEAKS/drd/tests/pth_create_chain.vgtest
branches/ASPACEM_TWEAKS/memcheck/tests/amd64/fxsave-amd64.c
branches/ASPACEM_TWEAKS/memcheck/tests/badjump.vgtest
branches/ASPACEM_TWEAKS/memcheck/tests/badjump2.c
branches/ASPACEM_TWEAKS/memcheck/tests/supp_unknown.vgtest
branches/ASPACEM_TWEAKS/none/tests/amd64/ (props changed)
branches/ASPACEM_TWEAKS/none/tests/ppc32/jm_vec_isa_2_07.vgtest
branches/ASPACEM_TWEAKS/none/tests/x86/int.disabled
branches/ASPACEM_TWEAKS/perf/memrw.c
Modified: branches/ASPACEM_TWEAKS/NEWS
==============================================================================
--- branches/ASPACEM_TWEAKS/NEWS (original)
+++ branches/ASPACEM_TWEAKS/NEWS Sun May 10 12:23:01 2015
@@ -87,6 +87,7 @@
339288 support Cavium Octeon MIPS specific BBIT*32 instructions
339442 Fix testsuite build failure on OS X 10.9
339542 Enable compilation with Intel's ICC compiler
+339636 Use fxsave64 and fxrstor64 mnemonics instead of old-school rex64 prefix
339688 Mac-specific ASM does not support .version directive (cpuid,
tronical and pushfpopf tests)
339745 Valgrind crash when check Marmalade app (partial fix)
@@ -170,6 +171,9 @@
346487 Compiler generates "note" about a future ABI change for PPC64
346801 Fix link error on OS X: _vgModuleLocal_sf_maybe_extend_stack
347151 Fix suppression for pthread_rwlock_init on OS X 10.8
+347379 valgrind --leak-check=full memleak errors from system libraries on OS X 10.8
+ == 217236
+347389 unhandled syscall: 373 (Linux ARM syncfs)
n-i-bz Provide implementations of certain compiler builtins to support
compilers who may not provide those
n-i-bz Old STABS code is still being compiled, but never used. Remove it.
Modified: branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c Sun May 10 12:23:01 2015
@@ -1172,11 +1172,11 @@
/* Test if a piece of memory is addressable by client or by valgrind with at
least the "prot" protection permissions by examining the underlying
- segments. If client && freeOk is True then SkFree and SkResvn areas are
- also allowed.
+ segments. The KINDS argument specifies the allowed segments ADDR may
+ belong to in order to be considered "valid".
*/
static
-Bool is_valid_for( Bool client, Addr start, SizeT len, UInt prot, Bool freeOk )
+Bool is_valid_for( UInt kinds, Addr start, SizeT len, UInt prot )
{
Int i, iLo, iHi;
Bool needR, needW, needX;
@@ -1204,34 +1204,17 @@
iHi = find_nsegment_idx(start + len - 1);
}
- if (client) {
- for (i = iLo; i <= iHi; i++) {
- if ( (nsegments[i].kind == SkFileC
- || nsegments[i].kind == SkAnonC
- || nsegments[i].kind == SkShmC
- || (nsegments[i].kind == SkFree && freeOk)
- || (nsegments[i].kind == SkResvn && freeOk))
- && (needR ? nsegments[i].hasR : True)
- && (needW ? nsegments[i].hasW : True)
- && (needX ? nsegments[i].hasX : True) ) {
- /* ok */
- } else {
- return False;
- }
- }
- } else {
- for (i = iLo; i <= iHi; i++) {
- if ( (nsegments[i].kind == SkFileV
- || nsegments[i].kind == SkAnonV)
- && (needR ? nsegments[i].hasR : True)
- && (needW ? nsegments[i].hasW : True)
- && (needX ? nsegments[i].hasX : True) ) {
- /* ok */
- } else {
- return False;
- }
+ for (i = iLo; i <= iHi; i++) {
+ if ( (nsegments[i].kind & kinds) != 0
+ && (needR ? nsegments[i].hasR : True)
+ && (needW ? nsegments[i].hasW : True)
+ && (needX ? nsegments[i].hasX : True) ) {
+ /* ok */
+ } else {
+ return False;
}
}
+
return True;
}
@@ -1241,8 +1224,9 @@
Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
UInt prot )
{
- return is_valid_for(/* client */ True,
- start, len, prot, False/*free not OK*/ );
+ const UInt kinds = SkFileC | SkAnonC | SkShmC;
+
+ return is_valid_for(kinds, start, len, prot);
}
/* Variant of VG_(am_is_valid_for_client) which allows free areas to
@@ -1252,15 +1236,17 @@
Bool VG_(am_is_valid_for_client_or_free_or_resvn)
( Addr start, SizeT len, UInt prot )
{
- return is_valid_for(/* client */ True,
- start, len, prot, True/*free is OK*/ );
+ const UInt kinds = SkFileC | SkAnonC | SkShmC | SkFree | SkResvn;
+
+ return is_valid_for(kinds, start, len, prot);
}
Bool VG_(am_is_valid_for_valgrind) ( Addr start, SizeT len, UInt prot )
{
- return is_valid_for(/* client */ False,
- start, len, prot, False/*irrelevant*/ );
+ const UInt kinds = SkFileV | SkAnonV;
+
+ return is_valid_for(kinds, start, len, prot);
}
Modified: branches/ASPACEM_TWEAKS/coregrind/m_stacktrace.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_stacktrace.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_stacktrace.c Sun May 10 12:23:01 2015
@@ -708,7 +708,7 @@
redirs_used = 0;
# endif
-# if defined(VG_PLAT_USES_PPCTOC)
+# if defined(VG_PLAT_USES_PPCTOC) || defined (VGP_ppc64le_linux)
/* Deal with bogus LR values caused by function
interception/wrapping on ppc-TOC platforms; see comment on
similar code a few lines further down. */
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syscall.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syscall.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syscall.c Sun May 10 12:23:01 2015
@@ -305,10 +305,15 @@
".text\n"
".globl do_syscall_WRK\n"
"do_syscall_WRK:\n"
+" .cfi_startproc\n"
" push %esi\n"
+" .cfi_adjust_cfa_offset 4\n"
" push %edi\n"
+" .cfi_adjust_cfa_offset 4\n"
" push %ebx\n"
+" .cfi_adjust_cfa_offset 4\n"
" push %ebp\n"
+" .cfi_adjust_cfa_offset 4\n"
" movl 16+ 4(%esp),%eax\n"
" movl 16+ 8(%esp),%ebx\n"
" movl 16+12(%esp),%ecx\n"
@@ -318,10 +323,15 @@
" movl 16+28(%esp),%ebp\n"
" int $0x80\n"
" popl %ebp\n"
+" .cfi_adjust_cfa_offset -4\n"
" popl %ebx\n"
+" .cfi_adjust_cfa_offset -4\n"
" popl %edi\n"
+" .cfi_adjust_cfa_offset -4\n"
" popl %esi\n"
+" .cfi_adjust_cfa_offset -4\n"
" ret\n"
+" .cfi_endproc\n"
".previous\n"
);
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/priv_syswrap-linux.h
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/priv_syswrap-linux.h (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/priv_syswrap-linux.h Sun May 10 12:23:01 2015
@@ -287,6 +287,8 @@
DECL_TEMPLATE(linux, sys_getrandom);
DECL_TEMPLATE(linux, sys_memfd_create);
+DECL_TEMPLATE(linux, sys_syncfs);
+
/* ---------------------------------------------------------------------
Wrappers for sockets and ipc-ery. These are split into standalone
procedures because x86-linux hides them inside multiplexors
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-amd64-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-amd64-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-amd64-linux.c Sun May 10 12:23:01 2015
@@ -1062,7 +1062,7 @@
LINXY(__NR_open_by_handle_at, sys_open_by_handle_at),// 304
LINXY(__NR_clock_adjtime, sys_clock_adjtime), // 305
-// LINX_(__NR_syncfs, sys_ni_syscall), // 306
+ LINX_(__NR_syncfs, sys_syncfs), // 306
LINXY(__NR_sendmmsg, sys_sendmmsg), // 307
// LINX_(__NR_setns, sys_ni_syscall), // 308
LINXY(__NR_getcpu, sys_getcpu), // 309
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-arm-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-arm-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-arm-linux.c Sun May 10 12:23:01 2015
@@ -1212,6 +1212,7 @@
LINXY(__NR_name_to_handle_at, sys_name_to_handle_at),// 370
LINXY(__NR_open_by_handle_at, sys_open_by_handle_at),// 371
LINXY(__NR_clock_adjtime, sys_clock_adjtime), // 372
+ LINX_(__NR_syncfs, sys_syncfs), // 373
LINXY(__NR_sendmmsg, sys_sendmmsg), // 374
LINXY(__NR_getrandom, sys_getrandom), // 384
LINXY(__NR_memfd_create, sys_memfd_create) // 385
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-arm64-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-arm64-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-arm64-linux.c Sun May 10 12:23:01 2015
@@ -1042,6 +1042,8 @@
GENXY(__NR_wait4, sys_wait4), // 260
+ LINX_(__NR_syncfs, sys_syncfs), // 267
+
LINXY(__NR_sendmmsg, sys_sendmmsg), // 269
LINXY(__NR_process_vm_readv, sys_process_vm_readv), // 270
LINX_(__NR_process_vm_writev, sys_process_vm_writev), // 271
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-linux.c Sun May 10 12:23:01 2015
@@ -3064,6 +3064,13 @@
}
}
+PRE(sys_syncfs)
+{
+ *flags |= SfMayBlock;
+ PRINT("sys_syncfs ( %ld )", ARG1);
+ PRE_REG_READ1(long, "syncfs", unsigned int, fd);
+}
+
/* ---------------------------------------------------------------------
utime wrapper
------------------------------------------------------------------ */
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-mips32-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-mips32-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-mips32-linux.c Sun May 10 12:23:01 2015
@@ -1112,6 +1112,7 @@
LINXY (__NR_prlimit64, sys_prlimit64), // 338
//..
LINXY (__NR_clock_adjtime, sys_clock_adjtime), // 341
+ LINX_ (__NR_syncfs, sys_syncfs), // 342
//..
LINXY (__NR_process_vm_readv, sys_process_vm_readv), // 345
LINX_ (__NR_process_vm_writev, sys_process_vm_writev), // 346
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-mips64-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-mips64-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-mips64-linux.c Sun May 10 12:23:01 2015
@@ -909,7 +909,8 @@
LINXY (__NR_process_vm_readv, sys_process_vm_readv),
LINX_ (__NR_process_vm_writev, sys_process_vm_writev),
LINXY(__NR_getrandom, sys_getrandom),
- LINXY(__NR_memfd_create, sys_memfd_create)
+ LINXY(__NR_memfd_create, sys_memfd_create),
+ LINX_(__NR_syncfs, sys_syncfs)
};
SyscallTableEntry * ML_(get_linux_syscall_entry) ( UInt sysno )
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-ppc32-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-ppc32-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-ppc32-linux.c Sun May 10 12:23:01 2015
@@ -1253,7 +1253,7 @@
LINXY(__NR_accept4, sys_accept4), // 344
LINX_(__NR_clock_adjtime, sys_clock_adjtime), // 347
-
+ LINX_(__NR_syncfs, sys_syncfs), // 348
LINXY(__NR_sendmmsg, sys_sendmmsg), // 349
LINXY(__NR_process_vm_readv, sys_process_vm_readv), // 351
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-ppc64-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-ppc64-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-ppc64-linux.c Sun May 10 12:23:01 2015
@@ -1155,7 +1155,7 @@
LINXY(__NR_accept4, sys_accept4), // 344
LINXY(__NR_clock_adjtime, sys_clock_adjtime), // 347
-
+ LINX_(__NR_syncfs, sys_syncfs), // 348
LINXY(__NR_sendmmsg, sys_sendmmsg), // 349
LINXY(__NR_process_vm_readv, sys_process_vm_readv), // 351
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-s390x-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-s390x-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-s390x-linux.c Sun May 10 12:23:01 2015
@@ -1035,7 +1035,7 @@
LINXY(__NR_name_to_handle_at, sys_name_to_handle_at), // 335
LINXY(__NR_open_by_handle_at, sys_open_by_handle_at), // 336
LINXY(__NR_clock_adjtime, sys_clock_adjtime), // 337
-// ?????(__NR_syncfs, ), // 338
+ LINX_(__NR_syncfs, sys_syncfs), // 338
// ?????(__NR_setns, ), // 339
LINXY(__NR_process_vm_readv, sys_process_vm_readv), // 340
Modified: branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-x86-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-x86-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_syswrap/syswrap-x86-linux.c Sun May 10 12:23:01 2015
@@ -1805,7 +1805,7 @@
LINXY(__NR_name_to_handle_at, sys_name_to_handle_at),// 341
LINXY(__NR_open_by_handle_at, sys_open_by_handle_at),// 342
LINXY(__NR_clock_adjtime, sys_clock_adjtime), // 343
-// LINX_(__NR_syncfs, sys_ni_syscall), // 344
+ LINX_(__NR_syncfs, sys_syncfs), // 344
LINXY(__NR_sendmmsg, sys_sendmmsg), // 345
// LINX_(__NR_setns, sys_ni_syscall), // 346
Modified: branches/ASPACEM_TWEAKS/darwin12.supp
==============================================================================
--- branches/ASPACEM_TWEAKS/darwin12.supp (original)
+++ branches/ASPACEM_TWEAKS/darwin12.supp Sun May 10 12:23:01 2015
@@ -350,6 +350,108 @@
fun:_objc_init
}
+{
+ OSX108:leak-14
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc_zone_?alloc
+ ...
+ fun:dyld_register_image_state_change_handler
+ ...
+}
+
+{
+ OSX108:leak-15
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:?alloc
+ ...
+ fun:dyld_register_image_state_change_handler
+ ...
+}
+
+{
+ OSX108:leak-16
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc_zone_?alloc
+ ...
+ fun:map_images_nolock
+ fun:map_images
+ ...
+}
+
+{
+ OSX108:leak-17
+ Memcheck:Leak
+ match-leak-kinds: possible
+ fun:malloc_zone_?alloc
+ ...
+ fun:map_images_nolock
+ fun:map_images
+ ...
+}
+
+{
+ OSX108:leak-18
+ Memcheck:Leak
+ match-leak-kinds: indirect
+ fun:malloc_zone_?alloc
+ ...
+ fun:libSystem_initializer
+ ...
+}
+
+{
+ OSX108:leak-19
+ Memcheck:Leak
+ match-leak-kinds: indirect
+ fun:?alloc
+ ...
+ fun:libSystem_initializer
+ ...
+}
+
+{
+ OSX108:leak-20
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc_zone_?alloc
+ ...
+ fun:libSystem_initializer
+ ...
+}
+
+{
+ OSX108:leak-21
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:?alloc
+ ...
+ fun:libSystem_initializer
+ ...
+}
+
+{
+ OSX108:leak-22
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:?alloc
+ ...
+ fun:_libxpc_initializer
+ ...
+}
+
+{
+ OSX108:leak-23
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:realloc
+ fun:new_sem_from_pool
+ ...
+}
+
##----------------------------------------------------------------------##
# Memcheck
##----------------------------------------------------------------------##
Modified: branches/ASPACEM_TWEAKS/docs/internals/3_10_BUGSTATUS.txt
==============================================================================
--- branches/ASPACEM_TWEAKS/docs/internals/3_10_BUGSTATUS.txt (original)
+++ branches/ASPACEM_TWEAKS/docs/internals/3_10_BUGSTATUS.txt Sun May 10 12:23:01 2015
@@ -422,3 +422,13 @@
armv6 probable failure
Tue 28 Apr 13:48:40 CEST 2015
+
+346912 callgrind bails out on vpmaskmovd instruction
+
+347198 vex x86->IR: unhandled instruction bytes: 0x66 0xF 0x3A 0x63
+
+347233 Fix memcheck/tests/strchr on OS X 10.10 (Haswell)
+
+347322 Power PC regression test cleanup
+
+Fri 8 May 11:52:14 CEST 2015
Modified: branches/ASPACEM_TWEAKS/drd/tests/pth_create_chain.vgtest
==============================================================================
--- branches/ASPACEM_TWEAKS/drd/tests/pth_create_chain.vgtest (original)
+++ branches/ASPACEM_TWEAKS/drd/tests/pth_create_chain.vgtest Sun May 10 12:23:01 2015
@@ -1,2 +1,2 @@
-prereq: ./supported_libpthread && [ `uname -m` != ppc ] && [ `uname -m` != ppc64 ]
+prereq: ./supported_libpthread && [ `uname -m` != ppc ] && [ `uname -m` != ppc64 ] && [ `uname -m` != ppc64le ]
prog: pth_create_chain 100
Modified: branches/ASPACEM_TWEAKS/memcheck/tests/amd64/fxsave-amd64.c
==============================================================================
--- branches/ASPACEM_TWEAKS/memcheck/tests/amd64/fxsave-amd64.c (original)
+++ branches/ASPACEM_TWEAKS/memcheck/tests/amd64/fxsave-amd64.c Sun May 10 12:23:01 2015
@@ -17,7 +17,11 @@
__attribute__((noinline))
void do_fxsave ( void* p, int rexw ) {
if (rexw) {
+#if defined(VGO_linux)
+ asm __volatile__("fxsave64 (%0)" : : "r" (p) : "memory" );
+#else
asm __volatile__("rex64/fxsave (%0)" : : "r" (p) : "memory" );
+#endif
} else {
asm __volatile__("fxsave (%0)" : : "r" (p) : "memory" );
}
@@ -26,7 +30,11 @@
__attribute__((noinline))
void do_fxrstor ( void* p, int rexw ) {
if (rexw) {
+#if defined(VGO_linux)
+ asm __volatile__("fxrstor64 (%0)" : : "r" (p) : "memory" );
+#else
asm __volatile__("rex64/fxrstor (%0)" : : "r" (p) : "memory" );
+#endif
} else {
asm __volatile__("fxrstor (%0)" : : "r" (p) : "memory" );
}
Modified: branches/ASPACEM_TWEAKS/memcheck/tests/badjump.vgtest
==============================================================================
--- branches/ASPACEM_TWEAKS/memcheck/tests/badjump.vgtest (original)
+++ branches/ASPACEM_TWEAKS/memcheck/tests/badjump.vgtest Sun May 10 12:23:01 2015
@@ -1,3 +1,3 @@
prog: badjump
-cleanup: rm -f vgcore.pid*
+cleanup: rm -f vgcore.*
stderr_filter: filter_allocs
Modified: branches/ASPACEM_TWEAKS/memcheck/tests/badjump2.c
==============================================================================
--- branches/ASPACEM_TWEAKS/memcheck/tests/badjump2.c (original)
+++ branches/ASPACEM_TWEAKS/memcheck/tests/badjump2.c Sun May 10 12:23:01 2015
@@ -35,7 +35,7 @@
if (setjmp(myjmpbuf) == 0) {
// Jump to zero; will cause seg fault
-#if defined(__powerpc64__)
+#if defined(__powerpc64__) && (_CALL_ELF != 2)
unsigned long int fn[3];
fn[0] = 0;
fn[1] = 0;
Modified: branches/ASPACEM_TWEAKS/memcheck/tests/supp_unknown.vgtest
==============================================================================
--- branches/ASPACEM_TWEAKS/memcheck/tests/supp_unknown.vgtest (original)
+++ branches/ASPACEM_TWEAKS/memcheck/tests/supp_unknown.vgtest Sun May 10 12:23:01 2015
@@ -1,4 +1,4 @@
vgopts: -q --suppressions=supp_unknown.supp
prog: badjump
-cleanup: rm -f vgcore.pid*
+cleanup: rm -f vgcore.*
stderr_filter_args: badjump.c
Modified: branches/ASPACEM_TWEAKS/none/tests/ppc32/jm_vec_isa_2_07.vgtest
==============================================================================
--- branches/ASPACEM_TWEAKS/none/tests/ppc32/jm_vec_isa_2_07.vgtest (original)
+++ branches/ASPACEM_TWEAKS/none/tests/ppc32/jm_vec_isa_2_07.vgtest Sun May 10 12:23:01 2015
@@ -1,2 +1,2 @@
-prereq: ../../../tests/check_isa-2_07_cap
+prereq: ../../../tests/check_isa-2_07_cap && ! ../../../tests/check_ppc64le_cap
prog: test_isa_2_07_part1 -a
Modified: branches/ASPACEM_TWEAKS/none/tests/x86/int.disabled
==============================================================================
--- branches/ASPACEM_TWEAKS/none/tests/x86/int.disabled (original)
+++ branches/ASPACEM_TWEAKS/none/tests/x86/int.disabled Sun May 10 12:23:01 2015
@@ -1,4 +1,4 @@
# This is currently disabled because vex doesn't deal with x86
# int $n for n != 0x80 and so the test is pointless (at the moment).
prog: int
-cleanup: rm -f vgcore.pid*
+cleanup: rm -f vgcore.*
Modified: branches/ASPACEM_TWEAKS/perf/memrw.c
==============================================================================
--- branches/ASPACEM_TWEAKS/perf/memrw.c (original)
+++ branches/ASPACEM_TWEAKS/perf/memrw.c Sun May 10 12:23:01 2015
@@ -106,10 +106,12 @@
nr_thr = 1;
- printf ("total program memory -t %d MB"
- " working set -w %d MB\n",
- (nr_b * sz_b) / (1024*1024),
- (nr_b_ws * sz_b) / (1024*1024));
+ printf ("total program memory -t %llu MB"
+ " working set -w %llu MB\n",
+ ((unsigned long long)nr_b * sz_b)
+ / (unsigned long long) (1024*1024),
+ ((unsigned long long)nr_b_ws * sz_b)
+ / (unsigned long long)(1024*1024));
printf (" working set R or W -l %d times"
" repeat the whole stuff -r %d times\n",
nr_loops,
|
|
From: <sv...@va...> - 2015-05-10 11:07:14
|
Author: florian
Date: Sun May 10 12:07:06 2015
New Revision: 15201
Log:
Simplify is_valid_for taking advantage of the fact that SegKinds
are one-hot encoded.
Modified:
trunk/coregrind/m_aspacemgr/aspacemgr-linux.c
Modified: trunk/coregrind/m_aspacemgr/aspacemgr-linux.c
==============================================================================
--- trunk/coregrind/m_aspacemgr/aspacemgr-linux.c (original)
+++ trunk/coregrind/m_aspacemgr/aspacemgr-linux.c Sun May 10 12:07:06 2015
@@ -1172,11 +1172,11 @@
/* Test if a piece of memory is addressable by client or by valgrind with at
least the "prot" protection permissions by examining the underlying
- segments. If client && freeOk is True then SkFree and SkResvn areas are
- also allowed.
+ segments. The KINDS argument specifies the allowed segments ADDR may
+ belong to in order to be considered "valid".
*/
static
-Bool is_valid_for( Bool client, Addr start, SizeT len, UInt prot, Bool freeOk )
+Bool is_valid_for( UInt kinds, Addr start, SizeT len, UInt prot )
{
Int i, iLo, iHi;
Bool needR, needW, needX;
@@ -1204,34 +1204,17 @@
iHi = find_nsegment_idx(start + len - 1);
}
- if (client) {
- for (i = iLo; i <= iHi; i++) {
- if ( (nsegments[i].kind == SkFileC
- || nsegments[i].kind == SkAnonC
- || nsegments[i].kind == SkShmC
- || (nsegments[i].kind == SkFree && freeOk)
- || (nsegments[i].kind == SkResvn && freeOk))
- && (needR ? nsegments[i].hasR : True)
- && (needW ? nsegments[i].hasW : True)
- && (needX ? nsegments[i].hasX : True) ) {
- /* ok */
- } else {
- return False;
- }
- }
- } else {
- for (i = iLo; i <= iHi; i++) {
- if ( (nsegments[i].kind == SkFileV
- || nsegments[i].kind == SkAnonV)
- && (needR ? nsegments[i].hasR : True)
- && (needW ? nsegments[i].hasW : True)
- && (needX ? nsegments[i].hasX : True) ) {
- /* ok */
- } else {
- return False;
- }
+ for (i = iLo; i <= iHi; i++) {
+ if ( (nsegments[i].kind & kinds) != 0
+ && (needR ? nsegments[i].hasR : True)
+ && (needW ? nsegments[i].hasW : True)
+ && (needX ? nsegments[i].hasX : True) ) {
+ /* ok */
+ } else {
+ return False;
}
}
+
return True;
}
@@ -1241,8 +1224,9 @@
Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
UInt prot )
{
- return is_valid_for(/* client */ True,
- start, len, prot, False/*free not OK*/ );
+ const UInt kinds = SkFileC | SkAnonC | SkShmC;
+
+ return is_valid_for(kinds, start, len, prot);
}
/* Variant of VG_(am_is_valid_for_client) which allows free areas to
@@ -1252,15 +1236,17 @@
Bool VG_(am_is_valid_for_client_or_free_or_resvn)
( Addr start, SizeT len, UInt prot )
{
- return is_valid_for(/* client */ True,
- start, len, prot, True/*free is OK*/ );
+ const UInt kinds = SkFileC | SkAnonC | SkShmC | SkFree | SkResvn;
+
+ return is_valid_for(kinds, start, len, prot);
}
Bool VG_(am_is_valid_for_valgrind) ( Addr start, SizeT len, UInt prot )
{
- return is_valid_for(/* client */ False,
- start, len, prot, False/*irrelevant*/ );
+ const UInt kinds = SkFileV | SkAnonV;
+
+ return is_valid_for(kinds, start, len, prot);
}
|
|
From: <sv...@va...> - 2015-05-10 10:40:17
|
Author: florian
Date: Sun May 10 11:40:09 2015
New Revision: 15200
Log:
VG_(am_create_reservation) and VG_(am_extend_into_adjacent_reservation_client)
no longer need to be externally visible.
Modified:
branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c
branches/ASPACEM_TWEAKS/coregrind/pub_core_aspacemgr.h
Modified: branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c (original)
+++ branches/ASPACEM_TWEAKS/coregrind/m_aspacemgr/aspacemgr-linux.c Sun May 10 11:40:09 2015
@@ -2745,8 +2745,8 @@
falls entirely within a single free segment. The returned Bool
indicates whether the creation succeeded. */
-Bool VG_(am_create_reservation) ( Addr start, SizeT length,
- ShrinkMode smode, SSizeT extra )
+static Bool create_reservation (Addr start, SizeT length,
+ ShrinkMode smode, SSizeT extra)
{
Int startI, endI;
NSegment seg;
@@ -2808,9 +2808,9 @@
the reservation segment after the operation must be at least one
page long. The function returns a pointer to the resized segment. */
-const NSegment *VG_(am_extend_into_adjacent_reservation_client)( Addr addr,
- SSizeT delta,
- Bool *overflow)
+static const NSegment *
+extend_into_adjacent_reservation_client (Addr addr, SSizeT delta,
+ Bool *overflow)
{
Int segA, segR;
UInt prot;
@@ -2941,8 +2941,7 @@
/* Try to create the data seg and associated reservation where
BASE says. */
- ok = VG_(am_create_reservation)( resvn_start, resvn_size,
- SmLower, anon_size );
+ ok = create_reservation(resvn_start, resvn_size, SmLower, anon_size);
if (!ok) {
/* Hmm, that didn't work. Well, let aspacem suggest an address
@@ -2951,8 +2950,7 @@
( 0/*floating*/, anon_size + resvn_size, &ok );
if (ok) {
resvn_start = anon_start + anon_size;
- ok = VG_(am_create_reservation)( resvn_start, resvn_size,
- SmLower, anon_size );
+ ok = create_reservation(resvn_start, resvn_size, SmLower, anon_size);
}
}
@@ -3018,8 +3016,8 @@
aspacem_assert(VG_IS_PAGE_ALIGNED(delta));
Bool overflow;
- if (! VG_(am_extend_into_adjacent_reservation_client)( aseg->start, delta,
- &overflow)) {
+ if (! extend_into_adjacent_reservation_client(aseg->start, delta,
+ &overflow)) {
if (overflow)
return VG_(mk_SysRes_Error)(1);
else
@@ -3056,8 +3054,7 @@
/* Create a shrinkable reservation followed by an anonymous
segment. Together these constitute a growdown stack. */
- ok = VG_(am_create_reservation)( resvn_start, resvn_size,
- SmUpper, anon_size );
+ ok = create_reservation(resvn_start, resvn_size, SmUpper, anon_size);
if (ok)
return VG_(am_mmap_anon_fixed_client)( anon_start, anon_size, prot );
@@ -3098,8 +3095,8 @@
VG_(debugLog)(1, "signals",
"extending a stack base 0x%lx down by %lu\n",
anon_seg->start, udelta);
- if (! VG_(am_extend_into_adjacent_reservation_client)
- ( anon_seg->start, -(SSizeT)udelta, &overflow )) {
+ if (! extend_into_adjacent_reservation_client(anon_seg->start,
+ -(SSizeT)udelta, &overflow)) {
if (overflow)
sres = VG_(mk_SysRes_Error)(1);
else
Modified: branches/ASPACEM_TWEAKS/coregrind/pub_core_aspacemgr.h
==============================================================================
--- branches/ASPACEM_TWEAKS/coregrind/pub_core_aspacemgr.h (original)
+++ branches/ASPACEM_TWEAKS/coregrind/pub_core_aspacemgr.h Sun May 10 11:40:09 2015
@@ -262,32 +262,6 @@
expected to belong to a client segment. */
extern void VG_(am_set_segment_hasT)( Addr addr );
-/* --- --- --- reservations --- --- --- */
-
-/* Create a reservation from START .. START+LENGTH-1, with the given
- ShrinkMode. When checking whether the reservation can be created,
- also ensure that at least abs(EXTRA) extra free bytes will remain
- above (> 0) or below (< 0) the reservation.
-
- The reservation will only be created if it, plus the extra-zone,
- falls entirely within a single free segment. The returned Bool
- indicates whether the creation succeeded. */
-extern Bool VG_(am_create_reservation)
- ( Addr start, SizeT length, ShrinkMode smode, SSizeT extra );
-
-/* ADDR is the start address of an anonymous client mapping. This fn extends
- the mapping by DELTA bytes, taking the space from a reservation section
- which must be adjacent. If DELTA is positive, the segment is
- extended forwards in the address space, and the reservation must be
- the next one along. If DELTA is negative, the segment is extended
- backwards in the address space and the reservation must be the
- previous one. DELTA must be page aligned. abs(DELTA) must not
- exceed the size of the reservation segment minus one page, that is,
- the reservation segment after the operation must be at least one
- page long. The function returns a pointer to the resized segment. */
-extern const NSegment *VG_(am_extend_into_adjacent_reservation_client)
- ( Addr addr, SSizeT delta, /*OUT*/Bool *overflow );
-
/* --- --- --- resizing/move a mapping --- --- --- */
/* This function grows a client mapping in place into an adjacent free segment.
|
|
From: <sv...@va...> - 2015-05-10 01:33:23
|
Author: rhyskidd
Date: Sun May 10 02:33:16 2015
New Revision: 15199
Log:
testsuite: properly svn:ignore output files in none/tests/amd64. n-i-bz.
Modified:
trunk/none/tests/amd64/ (props changed)
|