|
From: Varun G. <var...@li...> - 2017-08-14 20:27:04
|
Dear Valgrind developers,
Currently Valgrind allows us to create objects in a memory pool, destroy
them individually and destroy them all with the pool.
We are working on linking Valgrind into the garbage collector of the
Eclipse OMR framework. The garbage collector's implementation is such that it
keeps track of areas in memory that are available and free. It does not
keep track of allocated objects (target language running over it does
that) and in each run is informed by it about the ones that are still
present. Based on this information, it marks remaining area as free
without caring about dead objects.
We now only have areas that are available, not individual
objects, so it is not possible to call Valgrind to free them
individually. Using a set to store them temporarily solves this but it
goes against the whole architecture that is ignoring them.
Since Valgrind is already keeping track of these objects, it can also
free them in a given range without any extra effort, without sacrificing
existing behavior.
Author: Andrew Young
Index: include/valgrind.h
===================================================================
diff --git a/trunk/include/valgrind.h b/trunk/include/valgrind.h
--- a/trunk/include/valgrind.h (revision 16461)
+++ b/trunk/include/valgrind.h (working copy)
@@ -6157,6 +6157,7 @@
VG_USERREQ__MOVE_MEMPOOL = 0x1308,
VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
+ VG_USERREQ__MEMPOOL_CLEAR = 0x130c,
/* Allow printfs to valgrind log. */
/* The first two pass the va_list argument by value, which
@@ -6546,6 +6547,11 @@
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \
pool, addr, size, 0, 0)
+/* Disassociate any pieces inside a particular range. */
+#define VALGRIND_MEMPOOL_CLEAR(pool, addr, size) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CLEAR, \
+ pool, addr, size, 0, 0)
+
/* Resize and/or move a piece associated with a memory pool. */
#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \
Index: memcheck/mc_include.h
===================================================================
diff --git a/trunk/memcheck/mc_include.h b/trunk/memcheck/mc_include.h
--- a/trunk/memcheck/mc_include.h (revision 16461)
+++ b/trunk/memcheck/mc_include.h (working copy)
@@ -115,6 +115,7 @@
Addr addr, SizeT size );
void MC_(mempool_free) ( Addr pool, Addr addr );
void MC_(mempool_trim) ( Addr pool, Addr addr, SizeT size );
+void MC_(mempool_clear) ( Addr pool, Addr addr, SizeT size );
void MC_(move_mempool) ( Addr poolA, Addr poolB );
void MC_(mempool_change) ( Addr pool, Addr addrA, Addr addrB, SizeT size );
Bool MC_(mempool_exists) ( Addr pool );
Index: memcheck/mc_main.c
===================================================================
diff --git a/trunk/memcheck/mc_main.c b/trunk/memcheck/mc_main.c
--- a/trunk/memcheck/mc_main.c (revision 16461)
+++ b/trunk/memcheck/mc_main.c (working copy)
@@ -6934,6 +6934,7 @@
&& VG_USERREQ__MEMPOOL_ALLOC != arg[0]
&& VG_USERREQ__MEMPOOL_FREE != arg[0]
&& VG_USERREQ__MEMPOOL_TRIM != arg[0]
+ && VG_USERREQ__MEMPOOL_CLEAR != arg[0]
&& VG_USERREQ__MOVE_MEMPOOL != arg[0]
&& VG_USERREQ__MEMPOOL_CHANGE != arg[0]
&& VG_USERREQ__MEMPOOL_EXISTS != arg[0]
@@ -7200,6 +7201,15 @@
return True;
}
+ case VG_USERREQ__MEMPOOL_CLEAR: {
+ Addr pool = (Addr)arg[1];
+ Addr addr = (Addr)arg[2];
+ /* arg[3] is a SizeT; a UInt here would truncate the size on
+ 64-bit targets. Matches the MEMPOOL_TRIM handler above. */
+ SizeT size = arg[3];
+
+ MC_(mempool_clear) ( pool, addr, size );
+ return True;
+ }
+
case VG_USERREQ__MOVE_MEMPOOL: {
Addr poolA = (Addr)arg[1];
Addr poolB = (Addr)arg[2];
Index: memcheck/mc_malloc_wrappers.c
===================================================================
diff --git a/trunk/memcheck/mc_malloc_wrappers.c b/trunk/memcheck/mc_malloc_wrappers.c
--- a/trunk/memcheck/mc_malloc_wrappers.c (revision 16461)
+++ b/trunk/memcheck/mc_malloc_wrappers.c (working copy)
@@ -1075,6 +1075,91 @@
VG_(free)(chunks);
}
+/* Disassociate from POOL every chunk that intersects the address
+ range [addr, addr+szB), and mark its memory noaccess. Chunks
+ entirely outside the range are kept. Unlike mempool_trim, a chunk
+ that only partially overlaps the range is currently freed whole
+ (TODO: free only the intersecting part, as trim does). */
+void MC_(mempool_clear)(Addr pool, Addr addr, SizeT szB)
+{
+ MC_Mempool* mp;
+ MC_Chunk* mc;
+ ThreadId tid = VG_(get_running_tid)();
+ UInt n_shadows, i;
+ VgHashNode** chunks;
+
+ if (VG_(clo_verbosity) > 2) {
+ /* %lu: szB is an unsigned SizeT; and name the right request. */
+ VG_(message)(Vg_UserMsg, "mempool_clear(0x%lx, 0x%lx, %lu)\n",
+ pool, addr, szB);
+ VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
+ }
+
+ mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
+ if (mp == NULL) {
+ MC_(record_illegal_mempool_error)(tid, pool);
+ return;
+ }
+
+ check_mempool_sane(mp);
+ chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
+ if (n_shadows == 0) {
+ tl_assert(chunks == NULL);
+ return;
+ }
+
+ tl_assert(chunks != NULL);
+ for (i = 0; i < n_shadows; ++i) {
+
+ Addr lo, hi;
+
+ mc = (MC_Chunk*) chunks[i];
+
+ lo = mc->data;
+ hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
+
+ /* Keep chunks that lie entirely outside [addr, addr+szB).
+ Testing the two range endpoints (rather than the two chunk
+ endpoints) also catches a chunk that strictly contains the
+ clear range, which a containment test on lo/hi alone would
+ wrongly classify as "entirely outside" and keep. */
+ if (szB == 0 || hi < addr || addr + szB - 1 < lo)
+ continue;
+
+ /* The chunk intersects the clear range: delete it. */
+ if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
+ MC_(record_free_error)(tid, (Addr)mc->data);
+ VG_(free)(chunks);
+ if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
+ return;
+ }
+
+ die_and_free_mem ( tid, mc, mp->rzB );
+ }
+ check_mempool_sane(mp);
+ VG_(free)(chunks);
+}
+
void MC_(move_mempool)(Addr poolA, Addr poolB)
{
MC_Mempool* mp;
Regards,
Varun Garg
|