|
From: <sv...@va...> - 2009-05-28 19:17:59
|
Author: bart
Date: 2009-05-28 20:17:52 +0100 (Thu, 28 May 2009)
New Revision: 10163
Log:
- Segments now contain a struct bitmap instead of a pointer to a struct
bitmap.
- Inlined DRD_(sg_get_refcnt)().
- Optimized DRD_(thread_update_conflict_set)(). Added DRD_(bm_mark)(),
DRD_(bm_unmark)(), DRD_(bm_is_marked)(), DRD_(bm_clear_marked)() and
DRD_(bm_merge2_marked)().
- Cleanup.
Modified:
branches/DRDDEV/drd/drd_bitmap.c
branches/DRDDEV/drd/drd_bitmap.h
branches/DRDDEV/drd/drd_segment.c
branches/DRDDEV/drd/drd_segment.h
branches/DRDDEV/drd/drd_thread.c
branches/DRDDEV/drd/drd_thread_bitmap.h
branches/DRDDEV/drd/pub_drd_bitmap.h
Modified: branches/DRDDEV/drd/drd_bitmap.c
===================================================================
--- branches/DRDDEV/drd/drd_bitmap.c 2009-05-28 17:39:56 UTC (rev 10162)
+++ branches/DRDDEV/drd/drd_bitmap.c 2009-05-28 19:17:52 UTC (rev 10163)
@@ -55,7 +55,6 @@
struct bitmap* DRD_(bm_new)()
{
- unsigned i;
struct bitmap* bm;
/* If this assert fails, fix the definition of BITS_PER_BITS_PER_UWORD */
@@ -63,7 +62,27 @@
tl_assert((1 << BITS_PER_BITS_PER_UWORD) == BITS_PER_UWORD);
bm = VG_(malloc)("drd.bitmap.bn.1", sizeof(*bm));
+ DRD_(bm_init)(bm);
+
+ s_bitmap_creation_count++;
+
+ return bm;
+}
+
+void DRD_(bm_delete)(struct bitmap* const bm)
+{
tl_assert(bm);
+
+ DRD_(bm_cleanup)(bm);
+ VG_(free)(bm);
+}
+
+/** Initialize *bm. */
+void DRD_(bm_init)(struct bitmap* const bm)
+{
+ unsigned i;
+
+ tl_assert(bm);
/* Cache initialization. a1 is initialized with a value that never can
* match any valid address: the upper (ADDR_LSB_BITS + ADDR_IGNORED_BITS)
* bits of a1 are always zero for a valid cache entry.
@@ -75,18 +94,12 @@
}
bm->oset = VG_(OSetGen_Create)(0, 0, DRD_(bm2_alloc_node),
"drd.bitmap.bn.2", DRD_(bm2_free_node));
-
- s_bitmap_creation_count++;
-
- return bm;
}
-void DRD_(bm_delete)(struct bitmap* const bm)
+/** Free the memory allocated by DRD_(bm_init)(). */
+void DRD_(bm_cleanup)(struct bitmap* const bm)
{
- tl_assert(bm);
-
VG_(OSetGen_Destroy)(bm->oset);
- VG_(free)(bm);
}
/**
@@ -465,6 +478,10 @@
for (b0 = address_lsb(b_start); b0 <= address_lsb(b_end - 1); b0++)
{
+ /*
+ * Note: the statement below uses a binary or instead of a logical
+ * or on purpose.
+ */
if (bm0_is_set(p1->bm0_r, b0) | bm0_is_set(p1->bm0_w, b0))
{
return True;
@@ -962,14 +979,15 @@
}
/** Merge bitmaps *lhs and *rhs into *lhs. */
-void DRD_(bm_merge2)(struct bitmap* const lhs,
- struct bitmap* const rhs)
+void DRD_(bm_merge2)(struct bitmap* const lhs, struct bitmap* const rhs)
{
struct bitmap2* bm2l;
struct bitmap2* bm2r;
- /* It's not possible to have two independent iterators over the same OSet, */
- /* so complain if lhs == rhs. */
+ /*
+ * It's not possible to have two independent iterators over the same OSet,
+ * so complain if lhs == rhs.
+ */
tl_assert(lhs != rhs);
s_bitmap_merge_count++;
@@ -991,7 +1009,100 @@
}
}
+/** Clear bitmap2::recalc. */
+void DRD_(bm_unmark)(struct bitmap* bm)
+{
+ struct bitmap2* bm2;
+
+ for (VG_(OSetGen_ResetIter)(bm->oset);
+ (bm2 = VG_(OSetGen_Next)(bm->oset)) != 0;
+ )
+ {
+ bm2->recalc = False;
+ }
+}
+
/**
+ * Report whether bitmap2::recalc has been set for the second level bitmap
+ * corresponding to address a.
+ */
+Bool DRD_(bm_is_marked)(struct bitmap* bm, const Addr a)
+{
+ const struct bitmap2* bm2;
+
+ bm2 = bm2_lookup(bm, a);
+ return bm2 && bm2->recalc;
+}
+
+/**
+ * Set bitmap2::recalc in bml for each second level bitmap in bmr that contains
+ * at least one access.
+ *
+ * @note Any new second-level bitmaps inserted in bml by this function are
+ * uninitialized.
+ */
+void DRD_(bm_mark)(struct bitmap* bml, struct bitmap* bmr)
+{
+ struct bitmap2* bm2l;
+ struct bitmap2* bm2r;
+
+ for (VG_(OSetGen_ResetIter)(bmr->oset);
+ (bm2r = VG_(OSetGen_Next)(bmr->oset)) != 0;
+ )
+ {
+ /*if (DRD_(bm_has_any_access(bmr, make_address(bm2r->addr, 0),
+ make_address(bm2r->addr + 1, 0))))*/
+ {
+ bm2l = bm2_lookup_or_insert(bml, bm2r->addr);
+ bm2l->recalc = True;
+ }
+ }
+}
+
+/** Clear all second-level bitmaps for which bitmap2::recalc == True. */
+void DRD_(bm_clear_marked)(struct bitmap* bm)
+{
+ struct bitmap2* bm2;
+
+ for (VG_(OSetGen_ResetIter)(bm->oset);
+ (bm2 = VG_(OSetGen_Next)(bm->oset)) != 0;
+ )
+ {
+ if (bm2->recalc)
+ bm2_clear(bm2);
+ }
+}
+
+/** Merge the second level bitmaps from *rhs into *lhs for which recalc == True. */
+void DRD_(bm_merge2_marked)(struct bitmap* const lhs, struct bitmap* const rhs)
+{
+ struct bitmap2* bm2l;
+ struct bitmap2* bm2r;
+
+ tl_assert(lhs != rhs);
+
+ /*
+ * It's not possible to have two independent iterators over the same OSet,
+ * so complain if lhs == rhs.
+ */
+ tl_assert(lhs != rhs);
+
+ s_bitmap_merge_count++;
+
+ VG_(OSetGen_ResetIter)(rhs->oset);
+
+ for ( ; (bm2r = VG_(OSetGen_Next)(rhs->oset)) != 0; )
+ {
+ bm2l = VG_(OSetGen_Lookup)(lhs->oset, &bm2r->addr);
+ if (bm2l && bm2l->recalc)
+ {
+ tl_assert(bm2l != bm2r);
+ bm2_merge(bm2l, bm2r);
+ }
+ }
+}
+
+/**
* Report whether there are any RW / WR / WW patterns in lhs and rhs.
* @param lhs First bitmap.
* @param rhs Bitmap to be compared with lhs.
Modified: branches/DRDDEV/drd/drd_bitmap.h
===================================================================
--- branches/DRDDEV/drd/drd_bitmap.h 2009-05-28 17:39:56 UTC (rev 10162)
+++ branches/DRDDEV/drd/drd_bitmap.h 2009-05-28 19:17:52 UTC (rev 10163)
@@ -329,25 +329,11 @@
struct bitmap2
{
Addr addr; ///< address_msb(...)
+ Bool recalc;
struct bitmap1 bm1;
};
-struct bm_cache_elem
-{
- Addr a1;
- struct bitmap2* bm2;
-};
-#define N_CACHE_ELEM 4
-
-/* Complete bitmap. */
-struct bitmap
-{
- struct bm_cache_elem cache[N_CACHE_ELEM];
- OSet* oset;
-};
-
-
static void bm2_clear(struct bitmap2* const bm2);
static __inline__
struct bitmap2* bm2_insert(struct bitmap* const bm, const UWord a1);
@@ -555,6 +541,8 @@
*
* @param bm bitmap pointer.
* @param a1 client address shifted right by ADDR_LSB_BITS.
+ *
+ * @note bitmap2::recalc isn't initialized here on purpose.
*/
static __inline__
struct bitmap2* bm2_insert(struct bitmap* const bm, const UWord a1)
@@ -591,8 +579,8 @@
* Look up the address a1 in bitmap bm, and insert it if not found.
* The returned second level bitmap may not be modified.
*
- * @param a1 client address shifted right by ADDR_LSB_BITS.
* @param bm bitmap pointer.
+ * @param a1 client address shifted right by ADDR_LSB_BITS.
*/
static __inline__
struct bitmap2* bm2_lookup_or_insert(struct bitmap* const bm, const UWord a1)
Modified: branches/DRDDEV/drd/drd_segment.c
===================================================================
--- branches/DRDDEV/drd/drd_segment.c 2009-05-28 17:39:56 UTC (rev 10162)
+++ branches/DRDDEV/drd/drd_segment.c 2009-05-28 19:17:52 UTC (rev 10163)
@@ -85,7 +85,7 @@
else
DRD_(vc_init)(&sg->vc, 0, 0);
DRD_(vc_increment)(&sg->vc, created);
- sg->bm = DRD_(bm_new)();
+ DRD_(bm_init)(&sg->bm);
if (s_trace_segment)
{
@@ -109,8 +109,7 @@
tl_assert(sg->refcnt == 0);
DRD_(vc_cleanup)(&sg->vc);
- DRD_(bm_delete)(sg->bm);
- sg->bm = 0;
+ DRD_(bm_cleanup)(&sg->bm);
}
/** Allocate and initialize a new segment. */
@@ -150,14 +149,6 @@
VG_(free)(sg);
}
-/** Query the reference count of the specified segment. */
-int DRD_(sg_get_refcnt)(const Segment* const sg)
-{
- tl_assert(sg);
-
- return sg->refcnt;
-}
-
/** Increment the reference count of the specified segment. */
Segment* DRD_(sg_get)(Segment* const sg)
{
@@ -196,7 +187,7 @@
}
/** Merge sg1 and sg2 into sg1. */
-void DRD_(sg_merge)(const Segment* const sg1, Segment* const sg2)
+void DRD_(sg_merge)(Segment* const sg1, Segment* const sg2)
{
tl_assert(sg1);
tl_assert(sg1->refcnt == 1);
@@ -222,17 +213,17 @@
// Keep sg1->stacktrace.
// Keep sg1->vc.
// Merge sg2->bm into sg1->bm.
- DRD_(bm_merge2)(sg1->bm, sg2->bm);
+ DRD_(bm_merge2)(&sg1->bm, &sg2->bm);
}
/** Print the vector clock and the bitmap of the specified segment. */
-void DRD_(sg_print)(const Segment* const sg)
+void DRD_(sg_print)(Segment* const sg)
{
tl_assert(sg);
VG_(printf)("vc: ");
DRD_(vc_print)(&sg->vc);
VG_(printf)("\n");
- DRD_(bm_print)(sg->bm);
+ DRD_(bm_print)(&sg->bm);
}
/** Query whether segment tracing has been enabled. */
Modified: branches/DRDDEV/drd/drd_segment.h
===================================================================
--- branches/DRDDEV/drd/drd_segment.h 2009-05-28 17:39:56 UTC (rev 10162)
+++ branches/DRDDEV/drd/drd_segment.h 2009-05-28 19:17:52 UTC (rev 10163)
@@ -56,16 +56,17 @@
* Bitmap representing the memory accesses by the instructions associated
* with the segment.
*/
- struct bitmap* bm;
+ struct bitmap bm;
} Segment;
Segment* DRD_(sg_new)(const DrdThreadId creator, const DrdThreadId created);
-int DRD_(sg_get_refcnt)(const Segment* const sg);
+static int DRD_(sg_get_refcnt)(const Segment* const sg);
Segment* DRD_(sg_get)(Segment* const sg);
void DRD_(sg_put)(Segment* const sg);
-void DRD_(sg_merge)(const Segment* const sg1, Segment* const sg2);
-void DRD_(sg_print)(const Segment* const sg);
+static struct bitmap* DRD_(sg_bm)(Segment* const sg);
+void DRD_(sg_merge)(Segment* const sg1, Segment* const sg2);
+void DRD_(sg_print)(Segment* const sg);
Bool DRD_(sg_get_trace)(void);
void DRD_(sg_set_trace)(const Bool trace_segment);
ULong DRD_(sg_get_segments_created_count)(void);
@@ -74,4 +75,26 @@
ULong DRD_(sg_get_segment_merge_count)(void);
+/** Query the reference count of the specified segment. */
+static __inline__ int DRD_(sg_get_refcnt)(const Segment* const sg)
+{
+#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
+ tl_assert(sg);
+#endif
+
+ return sg->refcnt;
+}
+
+/** Return the pointer to the bitmap of the segment. */
+static __inline__ struct bitmap* DRD_(sg_bm)(Segment* const sg)
+{
+#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
+ tl_assert(sg);
+#endif
+
+ return &sg->bm;
+}
+
+
+
#endif // __SEGMENT_H
Modified: branches/DRDDEV/drd/drd_thread.c
===================================================================
--- branches/DRDDEV/drd/drd_thread.c 2009-05-28 17:39:56 UTC (rev 10162)
+++ branches/DRDDEV/drd/drd_thread.c 2009-05-28 19:17:52 UTC (rev 10163)
@@ -210,12 +210,13 @@
{
tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
&& tid != DRD_INVALID_THREADID);
+
return (DRD_(g_threadinfo)[tid].vg_thread_exists
? DRD_(g_threadinfo)[tid].vg_threadid
: VG_INVALID_THREADID);
}
-#if 0
+#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
/**
* Sanity check of the doubly linked list of segments referenced by a
* ThreadInfo struct.
@@ -224,6 +225,7 @@
static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
{
Segment* p;
+
for (p = ti->first; p; p = p->next) {
if (p->next && p->next->prev != p)
return False;
@@ -595,7 +597,11 @@
{
tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
&& tid != DRD_INVALID_THREADID);
- // tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
+
+#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
+ tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
+#endif
+
sg->prev = DRD_(g_threadinfo)[tid].last;
sg->next = 0;
if (DRD_(g_threadinfo)[tid].last)
@@ -603,7 +609,10 @@
DRD_(g_threadinfo)[tid].last = sg;
if (DRD_(g_threadinfo)[tid].first == 0)
DRD_(g_threadinfo)[tid].first = sg;
- // tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
+
+#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
+ tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
+#endif
}
/**
@@ -615,7 +624,8 @@
{
tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
&& tid != DRD_INVALID_THREADID);
-#if 0
+
+#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
@@ -629,7 +639,7 @@
DRD_(g_threadinfo)[tid].last = sg->prev;
DRD_(sg_put)(sg);
-#if 0
+#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}
@@ -673,8 +683,7 @@
Segment* latest_sg;
first = True;
- for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- i++)
+ for (i = 0; i < DRD_N_THREADS; i++)
{
latest_sg = DRD_(g_threadinfo)[i].last;
if (latest_sg)
@@ -700,8 +709,7 @@
Segment* latest_sg;
first = True;
- for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- i++)
+ for (i = 0; i < DRD_N_THREADS; i++)
{
latest_sg = DRD_(g_threadinfo)[i].last;
if (latest_sg)
@@ -748,8 +756,7 @@
DRD_(vc_cleanup)(&thread_vc_max);
}
- for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- i++)
+ for (i = 0; i < DRD_N_THREADS; i++)
{
Segment* sg;
Segment* sg_next;
@@ -773,8 +780,7 @@
* all segments for which the reference count is strictly greater than one.
* The code below is an optimized version of the following:
*
- * for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- * i++)
+ * for (i = 0; i < DRD_N_THREADS; i++)
* {
* Segment* sg;
*
@@ -804,8 +810,7 @@
tl_assert(sg1->next == sg2);
tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));
- for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- i++)
+ for (i = 0; i < DRD_N_THREADS; i++)
{
Segment* sg;
@@ -858,12 +863,11 @@
s_new_segments_since_last_merge = 0;
- for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- i++)
+ for (i = 0; i < DRD_N_THREADS; i++)
{
Segment* sg;
-#if 0
+#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif
@@ -881,66 +885,13 @@
}
}
-#if 0
+#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif
}
}
/**
- * Every change in the vector clock of a thread may cause segments that
- * were previously ordered to this thread to become unordered. Hence,
- * it may be necessary to recalculate the conflict set if the vector clock
- * of the current thread is updated. This function checks whether such a
- * recalculation is necessary.
- *
- * @param tid Thread ID of the thread to which a new segment has been
- * appended.
- * @param new_sg Pointer to the most recent segment of thread tid.
- */
-static Bool conflict_set_update_needed(const DrdThreadId tid,
- const VectorClock* const old_vc,
- const VectorClock* const new_vc)
-{
- unsigned j;
-
- tl_assert(old_vc);
- tl_assert(new_vc);
-
- /*
- * If a new segment has been added to another thread than the running
- * thread, tell the caller to update the conflict set.
- */
- if (tid != DRD_(g_drd_running_tid))
- return True;
-
- tl_assert(tid == DRD_(g_drd_running_tid));
-
- for (j = 0; j < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- j++)
- {
- Segment* q;
-
- if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
- continue;
-
- for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
- {
- const int included_in_old_conflict_set
- = ! DRD_(vc_lte)(&q->vc, old_vc)
- && ! DRD_(vc_lte)(old_vc, &q->vc);
- const int included_in_new_conflict_set
- = ! DRD_(vc_lte)(&q->vc, new_vc)
- && ! DRD_(vc_lte)(new_vc, &q->vc);
- if (included_in_old_conflict_set != included_in_new_conflict_set)
- return True;
- }
- }
-
- return False;
-}
-
-/**
* Create a new segment for the specified thread, and discard any segments
* that cannot cause races anymore.
*/
@@ -962,11 +913,10 @@
* tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
*/
- thread_discard_ordered_segments();
-
if (s_segment_merging
&& ++s_new_segments_since_last_merge >= s_segment_merge_interval)
{
+ thread_discard_ordered_segments();
thread_merge_segments();
}
}
@@ -1035,8 +985,7 @@
/* For all threads, mark the range [ a1, a2 [ as no longer in use. */
other_user = DRD_INVALID_THREADID;
- for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- i++)
+ for (i = 0; i < DRD_N_THREADS; i++)
{
Segment* p;
for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
@@ -1044,13 +993,13 @@
if (other_user == DRD_INVALID_THREADID
&& i != DRD_(g_drd_running_tid))
{
- if (UNLIKELY(DRD_(bm_test_and_clear)(p->bm, a1, a2)))
+ if (UNLIKELY(DRD_(bm_test_and_clear)(DRD_(sg_bm)(p), a1, a2)))
{
other_user = i;
}
continue;
}
- DRD_(bm_clear)(p->bm, a1, a2);
+ DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
}
}
@@ -1094,8 +1043,7 @@
unsigned i;
Segment* p;
- for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- i++)
+ for (i = 0; i < DRD_N_THREADS; i++)
{
if (DRD_(g_threadinfo)[i].first)
{
@@ -1157,8 +1105,7 @@
&& tid != DRD_INVALID_THREADID);
tl_assert(p);
- for (i = 0; i < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- i++)
+ for (i = 0; i < DRD_N_THREADS; i++)
{
if (i != tid)
{
@@ -1175,7 +1122,7 @@
break;
if (! DRD_(vc_lte)(&p->vc, &q->vc))
{
- if (DRD_(bm_has_conflict_with)(q->bm, addr, addr + size,
+ if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
access_type))
{
tl_assert(q->stacktrace);
@@ -1203,7 +1150,7 @@
for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next)
{
- if (DRD_(bm_has)(p->bm, addr, addr + size, access_type))
+ if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
{
thread_report_conflicting_segments_segment(tid, addr, size,
access_type, p);
@@ -1298,13 +1245,11 @@
VG_(message)(Vg_UserMsg, "%s", msg);
}
- for (j = 0;
- j < sizeof(DRD_(g_threadinfo)) / sizeof(DRD_(g_threadinfo)[0]);
- j++)
+ for (j = 0; j < DRD_N_THREADS; j++)
{
if (j != tid && DRD_(IsValidDrdThreadId)(j))
{
- const Segment* q;
+ Segment* q;
for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
{
if (! DRD_(vc_lte)(&q->vc, &p->vc)
@@ -1320,7 +1265,7 @@
&q->vc);
VG_(message)(Vg_UserMsg, "%s", msg);
}
- DRD_(bm_merge2)(*conflict_set, q->bm);
+ DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
}
else
{
@@ -1362,20 +1307,64 @@
const VectorClock* const old_vc)
{
const VectorClock* new_vc;
+ Segment* p;
+ unsigned j;
tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
&& tid != DRD_INVALID_THREADID);
tl_assert(old_vc);
+ tl_assert(tid == DRD_(g_drd_running_tid));
+ tl_assert(DRD_(g_conflict_set));
-
new_vc = &DRD_(g_threadinfo)[tid].last->vc;
- if (conflict_set_update_needed(tid, old_vc, new_vc))
+ DRD_(bm_unmark)(DRD_(g_conflict_set));
+
+ for (j = 0; j < DRD_N_THREADS; j++)
{
- thread_compute_conflict_set(&DRD_(g_conflict_set), tid);
- s_conflict_set_combine_vc_count++;
+ Segment* q;
+
+ if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
+ continue;
+
+ for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
+ {
+ const int included_in_old_conflict_set
+ = ! DRD_(vc_lte)(&q->vc, old_vc)
+ && ! DRD_(vc_lte)(old_vc, &q->vc);
+ const int included_in_new_conflict_set
+ = ! DRD_(vc_lte)(&q->vc, new_vc)
+ && ! DRD_(vc_lte)(new_vc, &q->vc);
+ if (included_in_old_conflict_set != included_in_new_conflict_set)
+ {
+ DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
+ }
+ }
}
+ DRD_(bm_clear_marked)(DRD_(g_conflict_set));
+
+ p = DRD_(g_threadinfo)[tid].last;
+ {
+ for (j = 0; j < DRD_N_THREADS; j++)
+ {
+ if (j != tid && DRD_(IsValidDrdThreadId)(j))
+ {
+ Segment* q;
+ for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
+ {
+ if (! DRD_(vc_lte)(&q->vc, &p->vc)
+ && ! DRD_(vc_lte)(&p->vc, &q->vc))
+ {
+ DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
+ }
+ }
+ }
+ }
+ }
+
+ s_conflict_set_combine_vc_count++;
+
tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}
Modified: branches/DRDDEV/drd/drd_thread_bitmap.h
===================================================================
--- branches/DRDDEV/drd/drd_thread_bitmap.h 2009-05-28 17:39:56 UTC (rev 10162)
+++ branches/DRDDEV/drd/drd_thread_bitmap.h 2009-05-28 19:17:52 UTC (rev 10163)
@@ -35,7 +35,7 @@
static __inline__
Bool bm_access_load_1_triggers_conflict(const Addr a1)
{
- DRD_(bm_access_load_1)(DRD_(running_thread_get_segment)()->bm, a1);
+ DRD_(bm_access_load_1)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1);
return DRD_(bm_load_1_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1);
}
@@ -45,13 +45,13 @@
{
if ((a1 & 1) == 0)
{
- bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1, 2);
+ bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 2);
return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
a1, 2);
}
else
{
- DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+ DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
a1, a1 + 2, eLoad);
return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1, a1 + 2, eLoad);
@@ -63,13 +63,13 @@
{
if ((a1 & 3) == 0)
{
- bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1, 4);
+ bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 4);
return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
a1, 4);
}
else
{
- DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+ DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
a1, a1 + 4, eLoad);
return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1, a1 + 4, eLoad);
@@ -81,20 +81,20 @@
{
if ((a1 & 7) == 0)
{
- bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1, 8);
+ bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 8);
return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
a1, 8);
}
else if ((a1 & 3) == 0)
{
- bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1 + 0, 4);
- bm_access_aligned_load(DRD_(running_thread_get_segment)()->bm, a1 + 4, 4);
+ bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1 + 0, 4);
+ bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1 + 4, 4);
return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1, a1 + 8, eLoad);
}
else
{
- DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+ DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
a1, a1 + 8, eLoad);
return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1, a1 + 8, eLoad);
@@ -104,7 +104,7 @@
static __inline__
Bool bm_access_load_triggers_conflict(const Addr a1, const Addr a2)
{
- DRD_(bm_access_range_load)(DRD_(running_thread_get_segment)()->bm, a1, a2);
+ DRD_(bm_access_range_load)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, a2);
return DRD_(bm_load_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1, a2);
}
@@ -112,7 +112,7 @@
static __inline__
Bool bm_access_store_1_triggers_conflict(const Addr a1)
{
- DRD_(bm_access_store_1)(DRD_(running_thread_get_segment)()->bm, a1);
+ DRD_(bm_access_store_1)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1);
return DRD_(bm_store_1_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1);
}
@@ -122,13 +122,13 @@
{
if ((a1 & 1) == 0)
{
- bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1, 2);
+ bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 2);
return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
a1, 2);
}
else
{
- DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+ DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
a1, a1 + 2, eStore);
return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1, a1 + 2, eStore);
@@ -140,13 +140,13 @@
{
if ((a1 & 3) == 0)
{
- bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1, 4);
+ bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 4);
return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
a1, 4);
}
else
{
- DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+ DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
a1, a1 + 4, eStore);
return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1, a1 + 4, eStore);
@@ -158,22 +158,22 @@
{
if ((a1 & 7) == 0)
{
- bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm, a1, 8);
+ bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, 8);
return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
a1, 8);
}
else if ((a1 & 3) == 0)
{
- bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm,
+ bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
a1 + 0, 4);
- bm_access_aligned_store(DRD_(running_thread_get_segment)()->bm,
+ bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
a1 + 4, 4);
return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1, a1 + 8, eStore);
}
else
{
- DRD_(bm_access_range)(DRD_(running_thread_get_segment)()->bm,
+ DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
a1, a1 + 8, eStore);
return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1, a1 + 8, eStore);
@@ -183,7 +183,7 @@
static __inline__
Bool bm_access_store_triggers_conflict(const Addr a1, const Addr a2)
{
- DRD_(bm_access_range_store)(DRD_(running_thread_get_segment)()->bm, a1, a2);
+ DRD_(bm_access_range_store)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1, a2);
return DRD_(bm_store_has_conflict_with)(DRD_(thread_get_conflict_set)(),
a1, a2);
}
Modified: branches/DRDDEV/drd/pub_drd_bitmap.h
===================================================================
--- branches/DRDDEV/drd/pub_drd_bitmap.h 2009-05-28 17:39:56 UTC (rev 10162)
+++ branches/DRDDEV/drd/pub_drd_bitmap.h 2009-05-28 19:17:52 UTC (rev 10163)
@@ -57,11 +57,28 @@
typedef enum { eLoad, eStore, eStart, eEnd } BmAccessTypeT;
+struct bm_cache_elem
+{
+ Addr a1;
+ struct bitmap2* bm2;
+};
+#define N_CACHE_ELEM 4
+
+/* Complete bitmap. */
+struct bitmap
+{
+ struct bm_cache_elem cache[N_CACHE_ELEM];
+ struct _OSet* oset;
+};
+
+
/* Function declarations. */
struct bitmap* DRD_(bm_new)(void);
void DRD_(bm_delete)(struct bitmap* const bm);
+void DRD_(bm_init)(struct bitmap* const bm);
+void DRD_(bm_cleanup)(struct bitmap* const bm);
void DRD_(bm_access_range)(struct bitmap* const bm,
const Addr a1, const Addr a2,
const BmAccessTypeT access_type);
@@ -113,8 +130,12 @@
const Addr a1, const Addr a2);
Bool DRD_(bm_equal)(struct bitmap* const lhs, struct bitmap* const rhs);
void DRD_(bm_swap)(struct bitmap* const bm1, struct bitmap* const bm2);
-void DRD_(bm_merge2)(struct bitmap* const lhs,
- struct bitmap* const rhs);
+void DRD_(bm_merge2)(struct bitmap* const lhs, struct bitmap* const rhs);
+void DRD_(bm_unmark)(struct bitmap* bm);
+Bool DRD_(bm_is_marked)(struct bitmap* bm, const Addr a);
+void DRD_(bm_mark)(struct bitmap* bm1, struct bitmap* bm2);
+void DRD_(bm_clear_marked)(struct bitmap* bm);
+void DRD_(bm_merge2_marked)(struct bitmap* const lhs, struct bitmap* const rhs);
int DRD_(bm_has_races)(struct bitmap* const bm1,
struct bitmap* const bm2);
void DRD_(bm_report_races)(ThreadId const tid1, ThreadId const tid2,
|