|
From: <sv...@va...> - 2005-12-22 20:11:33
|
Author: njn
Date: 2005-12-22 20:11:28 +0000 (Thu, 22 Dec 2005)
New Revision: 5409
Log:
Tweak stats gathering.
Modified:
branches/COMPVBITS/memcheck/mc_main.c
Modified: branches/COMPVBITS/memcheck/mc_main.c
===================================================================
--- branches/COMPVBITS/memcheck/mc_main.c 2005-12-22 19:50:45 UTC (rev 5408)
+++ branches/COMPVBITS/memcheck/mc_main.c 2005-12-22 20:11:28 UTC (rev 5409)
@@ -487,17 +487,16 @@
 
static OSet* secVBitTable;
 
-static ULong sec_vbits_bytes_allocd = 0;
-static ULong sec_vbits_bytes_freed = 0;
-static ULong sec_vbits_bytes_curr = 0;
-static ULong sec_vbits_bytes_peak = 0;
+static ULong sec_vbits_new_nodes = 0;
+static ULong sec_vbits_updates = 0;
 
-// sizeof(Addr) is the best value here. We can go from 1 to sizeof(Addr)
-// for free -- it doesn't change the size of the SecVBitNode because of
-// padding. If we make it larger, we have bigger nodes, but can possibly
-// fit more partially defined bytes in each node. In practice it seems that
-// partially defined bytes are rarely clustered close to each other, so
-// going bigger than sizeof(Addr) does not save space.
+// This must be a power of two; this is checked in mc_pre_clo_init().
+// The size chosen here is a trade-off: if the nodes are bigger (ie. cover
+// a larger address range) they take more space but we can get multiple
+// partially-defined bytes in one if they are close to each other, reducing
+// the number of total nodes. In practice sometimes they are clustered (eg.
+// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
+// row), but often not. So we choose something intermediate.
#define BYTES_PER_SEC_VBIT_NODE sizeof(Addr)
typedef 
@@ -531,13 +530,10 @@
tl_assert(V_BITS8_VALID != vbits8 && V_BITS8_INVALID != vbits8);
if (n) {
n->vbits8[amod] = vbits8; // update
+ sec_vbits_updates++;
} else {
// New node: assign the specific byte, make the rest invalid (they
// should never be read as-is, but be cautious).
- sec_vbits_bytes_allocd += sizeof(SecVBitNode);
- sec_vbits_bytes_curr += sizeof(SecVBitNode);
- if (sec_vbits_bytes_curr > sec_vbits_bytes_peak)
- sec_vbits_bytes_peak = sec_vbits_bytes_curr;
n = VG_(OSet_AllocNode)(secVBitTable, sizeof(SecVBitNode));
n->a = aAligned;
for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
@@ -545,6 +541,7 @@
}
n->vbits8[amod] = vbits8;
VG_(OSet_Insert)(secVBitTable, n);
+ sec_vbits_new_nodes++;
}
}
 
@@ -572,8 +569,6 @@
}
n = VG_(OSet_Remove)(secVBitTable, &aAligned);
VG_(OSet_FreeNode)(secVBitTable, n);
- sec_vbits_bytes_freed += sizeof(SecVBitNode);
- sec_vbits_bytes_curr -= sizeof(SecVBitNode);
tl_assert(n);
}
=20
@@ -3986,8 +3981,12 @@
n_accessible_dist * sizeof(SecMap) / (1024 * 1024) );
 
VG_(message)(Vg_DebugMsg,
- " memcheck: sec V bit entries: %d",
+ " memcheck: sec V bit nodes: %d",
VG_(OSet_Size)(secVBitTable) );
+ VG_(message)(Vg_DebugMsg,
+ " memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)",
+ sec_vbits_new_nodes + sec_vbits_updates,
+ sec_vbits_new_nodes, sec_vbits_updates );
}
=20
if (0) {
@@ -4103,6 +4102,9 @@
 
// {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
tl_assert(sizeof(UWord) == sizeof(Addr));
+
+ // BYTES_PER_SEC_VBIT_NODE must be a power of two.
+ tl_assert(-1 != VG_(log2)(BYTES_PER_SEC_VBIT_NODE));
}
=20
VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
|