You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
1
(2) |
2
(9) |
3
(11) |
4
(12) |
5
(6) |
|
6
|
7
|
8
(3) |
9
(10) |
10
(18) |
11
(10) |
12
(5) |
|
13
(4) |
14
(40) |
15
(12) |
16
(8) |
17
(9) |
18
(6) |
19
|
|
20
|
21
|
22
|
23
(4) |
24
(6) |
25
(6) |
26
(1) |
|
27
(3) |
28
(10) |
|
|
|
|
|
|
From: Andrada G. <and...@gm...> - 2011-02-23 18:01:08
|
Hello Irfan! I and a colleague intended to start working on porting Valgrind to MIPS too, this being our graduation project, before stumbling upon your post. Could you please tell us if your project is open-source? If so, we think there is no point for us to recreate your work. If you find it suitable, we would be glad to contribute to your project; if you haven't ported all the Valgrind tools, we could provide support in this matter. Thanks, Andrada |
|
From: <sv...@va...> - 2011-02-23 13:31:04
|
Author: sewardj
Date: 2011-02-23 13:30:53 +0000 (Wed, 23 Feb 2011)
New Revision: 11569
Log:
A scalability fix for Helgrind for running large workloads. When
creating new vector timestamps (VTSs) via tick and join operations,
preallocate the underlying XArray of ScalarTSs (scalar timestamps) at
the likely final size, using new function VG_(newSizedXA) introduced
in r11558. This reduces overall heap turnover (in VG_AR_TOOL) by a
factor of several. Together with revs 11567 and 11568, it mitigates
the worst-case performance falloff in long runs that involve lots of
threads and lots of synchronisation events (a.k.a. vector timestamps).
Modified:
trunk/helgrind/libhb_core.c
Modified: trunk/helgrind/libhb_core.c
===================================================================
--- trunk/helgrind/libhb_core.c 2011-02-23 13:22:24 UTC (rev 11568)
+++ trunk/helgrind/libhb_core.c 2011-02-23 13:30:53 UTC (rev 11569)
@@ -1544,8 +1544,12 @@
VTS;
-/* Create a new, empty VTS. */
-static VTS* VTS__new ( void );
+/* Create a new, empty VTS. The argument is a hint for the expected
+ number of elements that will eventually be added to the array;
+ doesn't matter (from a correctness perspective) if it's wrong.
+ (Important from a performance perspective though.) Pass zero to
+ mean "no hint". */
+static VTS* VTS__new ( Word nElemsHint );
/* Delete this VTS in its entirety. */
static void VTS__delete ( VTS* vts );
@@ -1622,14 +1626,35 @@
/* Create a new, empty VTS.
*/
-VTS* VTS__new ( void )
+VTS* VTS__new ( Word nElemsHint )
{
VTS* vts;
+ tl_assert(nElemsHint >= 0);
+
+ /* (optional) try to avoid m_mallocfree's tendency to do lengthy
+ searches of freelists which don't contain quite big enough free
+ blocks (due to containing lots of freed VTSs of size
+ nElemsHint-1) by rounding the size up to the next even number,
+ thereby halving the number of different sized blocks in
+ circulation. NOTE: maps 0 to 0, as required to maintain the "no
+ hint" indication.*/
+ if (nElemsHint & 1) nElemsHint++;
+
vts = HG_(zalloc)( "libhb.VTS__new.1", sizeof(VTS) );
tl_assert(vts);
vts->id = VtsID_INVALID;
- vts->ts = VG_(newXA)( HG_(zalloc), "libhb.VTS__new.2",
- HG_(free), sizeof(ScalarTS) );
+ if (nElemsHint == 0) {
+ vts->ts = VG_(newXA)(
+ HG_(zalloc), "libhb.VTS__new.2",
+ HG_(free), sizeof(ScalarTS)
+ );
+ } else {
+ vts->ts = VG_(newSizedXA)(
+ HG_(zalloc), "libhb.VTS__new.3",
+ HG_(free), sizeof(ScalarTS),
+ nElemsHint
+ );
+ }
tl_assert(vts->ts);
return vts;
}
@@ -1653,7 +1678,7 @@
VTS* vts;
tl_assert(thr);
tl_assert(tym >= 1);
- vts = VTS__new();
+ vts = VTS__new(1);
st.thr = thr;
st.tym = tym;
VG_(addToXA)( vts->ts, &st );
@@ -1669,7 +1694,7 @@
ScalarTS* here = NULL;
ScalarTS tmp;
VTS* res;
- Word i, n;
+ Word i, n, hintsize;
stats__vts__tick++;
@@ -1677,8 +1702,9 @@
tl_assert(is_sane_VTS(vts));
//if (0) VG_(printf)("tick vts thrno %ld szin %d\n",
// (Word)me->errmsg_index, (Int)VG_(sizeXA)(vts) );
- res = VTS__new();
n = VG_(sizeXA)( vts->ts );
+ hintsize = n;
+ res = VTS__new(hintsize);
/* main loop doesn't handle zero-entry case correctly, so
special-case it. */
@@ -1738,7 +1764,7 @@
*/
VTS* VTS__join ( VTS* a, VTS* b )
{
- Word ia, ib, useda, usedb;
+ Word ia, ib, useda, usedb, hintsize;
ULong tyma, tymb, tymMax;
Thr* thr;
VTS* res;
@@ -1750,7 +1776,8 @@
useda = VG_(sizeXA)( a->ts );
usedb = VG_(sizeXA)( b->ts );
- res = VTS__new();
+ hintsize = useda > usedb ? useda : usedb;
+ res = VTS__new(hintsize);
ia = ib = 0;
while (1) {
|
|
From: <sv...@va...> - 2011-02-23 13:22:36
|
Author: sewardj
Date: 2011-02-23 13:22:24 +0000 (Wed, 23 Feb 2011)
New Revision: 11568
Log:
Add a new constructor for empty XArrays, VG_(newSizedXA). This is
identical to VG_(newXA) but allows passing in a size hint. In the
case where the likely final size of the XArray is known at creation
time, this allows avoiding the repeated (implicit) resizing and
copying of the array as elements are added, which can save a vast
amount of dynamic memory allocation turnover.
Modified:
trunk/coregrind/m_xarray.c
trunk/include/pub_tool_xarray.h
Modified: trunk/coregrind/m_xarray.c
===================================================================
--- trunk/coregrind/m_xarray.c 2011-02-23 13:18:56 UTC (rev 11567)
+++ trunk/coregrind/m_xarray.c 2011-02-23 13:22:24 UTC (rev 11568)
@@ -44,6 +44,7 @@
Int (*cmpFn) ( void*, void* ); /* cmp fn (may be NULL) */
Word elemSzB; /* element size in bytes */
void* arr; /* pointer to elements */
+ Word initsizeE; /* HINT only: initial size, 0 if no hint */
Word usedsizeE; /* # used elements in arr */
Word totsizeE; /* max size of arr, in elements */
Bool sorted; /* is it sorted? */
@@ -72,6 +73,7 @@
xa->free = free_fn;
xa->cmpFn = NULL;
xa->elemSzB = elemSzB;
+ xa->initsizeE = 0;
xa->usedsizeE = 0;
xa->totsizeE = 0;
xa->sorted = False;
@@ -79,6 +81,19 @@
return xa;
}
+XArray* VG_(newSizedXA) ( void*(*alloc_fn)(HChar*,SizeT),
+ HChar* cc,
+ void(*free_fn)(void*),
+ Word elemSzB,
+ Word nInitialElems )
+{
+ XArray* xa;
+ tl_assert(nInitialElems >= 0);
+ xa = VG_(newXA)( alloc_fn, cc, free_fn, elemSzB );
+ xa->initsizeE = nInitialElems;
+ return xa;
+}
+
XArray* VG_(cloneXA)( HChar* cc, XArray* xao )
{
struct _XArray* xa = (struct _XArray*)xao;
@@ -145,7 +160,7 @@
static inline void ensureSpaceXA ( struct _XArray* xa )
{
- if (xa->usedsizeE == xa->totsizeE) {
+ if (UNLIKELY(xa->usedsizeE == xa->totsizeE)) {
void* tmp;
Word newsz;
if (xa->totsizeE == 0)
@@ -158,7 +173,11 @@
Hence increase the initial array size for tiny elements in
an attempt to avoid reallocations of size 2, 4, 8 if the
array does start to fill up. */
- if (xa->elemSzB == 1) newsz = 8;
+ /* Also, if there's a hinted initial size, use that instead of
+ the logic in the preceding comment. */
+ tl_assert(xa->initsizeE >= 0);
+ if (xa->initsizeE > 0) newsz = xa->initsizeE;
+ else if (xa->elemSzB == 1) newsz = 8;
else if (xa->elemSzB == 2) newsz = 4;
else newsz = 2;
} else {
Modified: trunk/include/pub_tool_xarray.h
===================================================================
--- trunk/include/pub_tool_xarray.h 2011-02-23 13:18:56 UTC (rev 11567)
+++ trunk/include/pub_tool_xarray.h 2011-02-23 13:22:24 UTC (rev 11568)
@@ -54,6 +54,17 @@
void(*free_fn)(void*),
Word elemSzB );
+/* Same as VG_(newXA), except allows specification of an initial
+ number of elements for the array, so as to avoid a potentially
+ large wasted cost of repeatedly resizing the array when the caller
+ knows something about what the expected final size is going to
+ be. */
+extern XArray* VG_(newSizedXA) ( void*(*alloc_fn)(HChar*,SizeT),
+ HChar* cc,
+ void(*free_fn)(void*),
+ Word elemSzB,
+ Word nInitialElems );
+
/* Free all memory associated with an XArray. */
extern void VG_(deleteXA) ( XArray* );
|
|
From: <sv...@va...> - 2011-02-23 13:19:06
|
Author: sewardj
Date: 2011-02-23 13:18:56 +0000 (Wed, 23 Feb 2011)
New Revision: 11567
Log:
Fix a scalability problem observed whilst running Helgrind on a large
workload: when scanning a freelist of a given size for a big-enough
block (to allocate), don't scan all the way around the list. Instead
give up after 100 blocks and try the freelist above. The pathological
case (as observed) is that the freelist contains tens of thousands of
blocks, but all are too small for the current request, hence they are
all visited pointlessly. If the new heuristic is used, the freelist
start point is moved along by one block, so that future searches
eventually inspect the entire freelist, just very slowly.
Also, some improvements to stats gathering, and rename of some
existing stats fields in struct Arena.
Modified:
trunk/coregrind/m_mallocfree.c
Modified: trunk/coregrind/m_mallocfree.c
===================================================================
--- trunk/coregrind/m_mallocfree.c 2011-02-17 12:25:15 UTC (rev 11566)
+++ trunk/coregrind/m_mallocfree.c 2011-02-23 13:18:56 UTC (rev 11567)
@@ -186,9 +186,14 @@
SizeT sblocks_used;
Superblock* sblocks_initial[SBLOCKS_SIZE_INITIAL];
// Stats only.
- SizeT bytes_on_loan;
- SizeT bytes_mmaped;
- SizeT bytes_on_loan_max;
+ SizeT stats__bytes_on_loan;
+ SizeT stats__bytes_mmaped;
+ SizeT stats__bytes_on_loan_max;
+ ULong stats__tot_blocks; /* total # blocks alloc'd */
+ ULong stats__tot_bytes; /* total # bytes alloc'd */
+ ULong stats__nsearches; /* total # freelist checks */
+ // If profiling, when should the next profile happen at
+ // (in terms of stats__bytes_on_loan_max) ?
SizeT next_profile_at;
}
Arena;
@@ -477,13 +482,16 @@
a->min_sblock_szB = min_sblock_szB;
for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
- a->sblocks = & a->sblocks_initial[0];
- a->sblocks_size = SBLOCKS_SIZE_INITIAL;
- a->sblocks_used = 0;
- a->bytes_on_loan = 0;
- a->bytes_mmaped = 0;
- a->bytes_on_loan_max = 0;
- a->next_profile_at = 25 * 1000 * 1000;
+ a->sblocks = & a->sblocks_initial[0];
+ a->sblocks_size = SBLOCKS_SIZE_INITIAL;
+ a->sblocks_used = 0;
+ a->stats__bytes_on_loan = 0;
+ a->stats__bytes_mmaped = 0;
+ a->stats__bytes_on_loan_max = 0;
+ a->stats__tot_blocks = 0;
+ a->stats__tot_bytes = 0;
+ a->stats__nsearches = 0;
+ a->next_profile_at = 25 * 1000 * 1000;
vg_assert(sizeof(a->sblocks_initial)
== SBLOCKS_SIZE_INITIAL * sizeof(Superblock*));
}
@@ -495,8 +503,14 @@
for (i = 0; i < VG_N_ARENAS; i++) {
Arena* a = arenaId_to_ArenaP(i);
VG_(message)(Vg_DebugMsg,
- "%8s: %8ld mmap'd, %8ld/%8ld max/curr\n",
- a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
+ "%8s: %8ld mmap'd, %8ld/%8ld max/curr, "
+ "%10llu/%10llu totalloc-blocks/bytes,"
+ " %10llu searches\n",
+ a->name, a->stats__bytes_mmaped,
+ a->stats__bytes_on_loan_max,
+ a->stats__bytes_on_loan,
+ a->stats__tot_blocks, a->stats__tot_bytes,
+ a->stats__nsearches
);
}
}
@@ -695,7 +709,7 @@
//zzVALGRIND_MAKE_MEM_UNDEFINED(sb, cszB);
vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
sb->n_payload_bytes = cszB - sizeof(Superblock);
- a->bytes_mmaped += cszB;
+ a->stats__bytes_mmaped += cszB;
VG_(debugLog)(1, "mallocfree",
"newSuperblock at %p (pszB %7ld) owner %s/%s\n",
sb, sb->n_payload_bytes,
@@ -993,7 +1007,7 @@
}
}
- if (arena_bytes_on_loan != a->bytes_on_loan) {
+ if (arena_bytes_on_loan != a->stats__bytes_on_loan) {
# ifdef VERBOSE_MALLOC
VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %ld, "
"arena_bytes_on_loan %ld: "
@@ -1051,7 +1065,7 @@
a->name,
superblockctr,
blockctr_sb, blockctr_sb_free, blockctr_li,
- a->bytes_mmaped, a->bytes_on_loan);
+ a->stats__bytes_mmaped, a->stats__bytes_on_loan);
# undef BOMB
}
@@ -1092,7 +1106,8 @@
VG_(printf)(
"-------- Arena \"%s\": %ld mmap'd, %ld/%ld max/curr --------\n",
- a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
+ a->name, a->stats__bytes_mmaped,
+ a->stats__bytes_on_loan_max, a->stats__bytes_on_loan
);
for (j = 0; j < a->sblocks_used; ++j) {
@@ -1269,6 +1284,7 @@
Block* b = NULL;
Arena* a;
void* v;
+ UWord stats__nsearches = 0;
ensure_mm_init(aid);
a = arenaId_to_ArenaP(aid);
@@ -1301,9 +1317,25 @@
// behaviour.
//
for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
+ UWord nsearches_this_level = 0;
b = a->freelist[lno];
if (NULL == b) continue; // If this list is empty, try the next one.
while (True) {
+ stats__nsearches++;
+ nsearches_this_level++;
+ if (UNLIKELY(nsearches_this_level >= 100)
+ && lno < N_MALLOC_LISTS-1) {
+ /* Avoid excessive scanning on this freelist, and instead
+ try the next one up. But first, move this freelist's
+ start pointer one element along, so as to ensure that
+ subsequent searches of this list don't endlessly
+ revisit only these 100 elements, but in fact slowly
+ progress through the entire list. */
+ b = a->freelist[lno];
+ vg_assert(b); // this list must be nonempty!
+ a->freelist[lno] = get_next_b(b); // step one along
+ break;
+ }
b_bszB = get_bszB(b);
if (b_bszB >= req_bszB) goto obtained_block; // success!
b = get_next_b(b);
@@ -1396,18 +1428,22 @@
}
// Update stats
- a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
- if (a->bytes_on_loan > a->bytes_on_loan_max) {
- a->bytes_on_loan_max = a->bytes_on_loan;
- if (a->bytes_on_loan_max >= a->next_profile_at) {
+ SizeT loaned = bszB_to_pszB(a, b_bszB);
+ a->stats__bytes_on_loan += loaned;
+ if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
+ a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
+ if (a->stats__bytes_on_loan_max >= a->next_profile_at) {
/* next profile after 10% more growth */
a->next_profile_at
= (SizeT)(
- (((ULong)a->bytes_on_loan_max) * 110ULL) / 100ULL );
+ (((ULong)a->stats__bytes_on_loan_max) * 110ULL) / 100ULL );
if (VG_(clo_profile_heap))
cc_analyse_alloc_arena(aid);
}
}
+ a->stats__tot_blocks += (ULong)1;
+ a->stats__tot_bytes += (ULong)loaned;
+ a->stats__nsearches += (ULong)stats__nsearches;
# ifdef DEBUG_MALLOC
sanity_check_malloc_arena(aid);
@@ -1462,7 +1498,7 @@
sb_start = &sb->payload_bytes[0];
sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
- a->bytes_on_loan -= b_pszB;
+ a->stats__bytes_on_loan -= b_pszB;
/* If this is one of V's areas, fill it up with junk to enhance the
chances of catching any later reads of it. Note, 0xDD is
@@ -1610,9 +1646,9 @@
/* Payload ptr for the block we are going to split. Note this
changes a->bytes_on_loan; we save and restore it ourselves. */
- saved_bytes_on_loan = a->bytes_on_loan;
+ saved_bytes_on_loan = a->stats__bytes_on_loan;
base_p = VG_(arena_malloc) ( aid, cc, base_pszB_req );
- a->bytes_on_loan = saved_bytes_on_loan;
+ a->stats__bytes_on_loan = saved_bytes_on_loan;
/* Give up if we couldn't allocate enough space */
if (base_p == 0)
@@ -1655,9 +1691,13 @@
vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));
- a->bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
- if (a->bytes_on_loan > a->bytes_on_loan_max)
- a->bytes_on_loan_max = a->bytes_on_loan;
+ a->stats__bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
+ if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
+ a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
+ }
+ /* a->stats__tot_blocks, a->stats__tot_bytes, a->stats__nsearches
+ are updated by the call to VG_(arena_malloc) just a few lines
+ above. So we don't need to update them here. */
# ifdef DEBUG_MALLOC
sanity_check_malloc_arena(aid);
@@ -1727,14 +1767,14 @@
// We don't have fastbins so smblks & fsmblks are always 0. Also we don't
// have a separate mmap allocator so set hblks & hblkhd to 0.
- mi->arena = a->bytes_mmaped;
+ mi->arena = a->stats__bytes_mmaped;
mi->ordblks = free_blocks + VG_(free_queue_length);
mi->smblks = 0;
mi->hblks = 0;
mi->hblkhd = 0;
mi->usmblks = 0;
mi->fsmblks = 0;
- mi->uordblks = a->bytes_on_loan - VG_(free_queue_volume);
+ mi->uordblks = a->stats__bytes_on_loan - VG_(free_queue_volume);
mi->fordblks = free_blocks_size + VG_(free_queue_volume);
mi->keepcost = 0; // may want some value in here
}
|