|
From: <sv...@va...> - 2007-10-16 21:07:41
|
Author: sewardj
Date: 2007-10-16 22:07:43 +0100 (Tue, 16 Oct 2007)
New Revision: 7008
Log:
Various small changes aimed at improving usability:
* make VALGRIND_TC_CLEAN_MEMORY work, for the benefit of recycling
allocators
* handle pthread_mutex_trylock more correctly
* handle pthread_mutex_timedlock at all
* don't crash after reporting an error about destroying a freed lock
* a bunch more suppressions
Modified:
branches/THRCHECK/glibc-2.X-thrcheck.supp
branches/THRCHECK/thrcheck/tc_intercepts.c
branches/THRCHECK/thrcheck/tc_main.c
branches/THRCHECK/thrcheck/thrcheck.h
Modified: branches/THRCHECK/glibc-2.X-thrcheck.supp
===================================================================
--- branches/THRCHECK/glibc-2.X-thrcheck.supp 2007-10-16 20:59:56 UTC (rev 7007)
+++ branches/THRCHECK/glibc-2.X-thrcheck.supp 2007-10-16 21:07:43 UTC (rev 7008)
@@ -54,14 +54,22 @@
fun:pthread_join
fun:pthread_join
}
-#z{
-#z thrcheck-glibc2X-pthjoin-2
-#z Thrcheck:Race
-#z fun:__free_tcb
-#z fun:pthread_join
-#z fun:pthread_join
-#z}
-#z
+{
+ thrcheck-glibc2X-pthjoin-2
+ Thrcheck:Race
+ fun:__deallocate_stack
+ fun:pthread_join
+ fun:pthread_join
+}
+{
+ thrcheck-glibc2X-pthjoin-3
+ Thrcheck:Race
+ fun:free_stacks
+ fun:__deallocate_stack
+ fun:pthread_join
+ fun:pthread_join
+}
+
#z###--- IO_file ---###
#z{
#z thrcheck-glibc2X-IOfile-1
@@ -131,6 +139,15 @@
fun:pthread_mutex_lock
}
+###--- pthread_mutex_unlock ---###
+{
+ thrcheck-glibc2X-pthmxunlock-1
+ Thrcheck:Race
+ fun:__lll_mutex_unlock_wake
+ fun:pthread_mutex_unlock
+ fun:pthread_mutex_unlock
+}
+
###--- pthread_mutex_destroy ---###
{
thrcheck-glibc2X-pthmxdestroy-1
@@ -225,20 +242,19 @@
fun:pthread_mutex_trylock
}
-#z###--- pthread_cond_timedwait ---###
-#z{
-#z thrcheck-glibc2X-pthmxtimedwait-1
-#z Thrcheck:Race
-#z fun:pthread_cond_timedwait@@GLIBC_*
-#z fun:pthread_cond_timedwait*
-#z}
-#z{
-#z thrcheck-glibc2X-pthmxtimedwait-2
-#z Thrcheck:Race
-#z fun:__lll_mutex_lock_wait
-#z fun:pthread_cond_timedwait@@GLIBC_*
-#z fun:pthread_cond_timedwait*
-#z}
+###--- pthread_cond_timedwait ---###
+# ditto
+{
+ thrcheck-glibc2X-pthmxtimedwait-1
+ Thrcheck:Race
+ fun:pthread_cond_timedwait@@GLIBC_2.3.2
+}
+{
+ thrcheck-glibc2X-pthmxtimedwait-2
+ Thrcheck:Race
+ fun:__lll_mutex_unlock_wake
+ fun:pthread_cond_timedwait@@GLIBC_*
+}
###--- libpthread internal stuff ---###
{
@@ -251,8 +267,7 @@
thrcheck-glibc2X-libpthread-2
Thrcheck:Race
fun:__lll_mutex_unlock_wake
- fun:_L_mutex_unlock_*
- fun:__pthread_mutex_unlock_usercnt
+ fun:_L_*unlock_*
}
{
thrcheck-glibc2X-libpthread-3
@@ -261,14 +276,14 @@
fun:_L_mutex_lock_*
fun:start_thread
}
-#z{
-#z thrcheck-glibc2X-libpthread-4
-#z Thrcheck:Race
-#z fun:__lll_mutex_lock_wait
-#z fun:_L_mutex_lock_*
-#z fun:pthread_mutex_lock
-#z}
{
+ thrcheck-glibc2X-libpthread-4
+ Thrcheck:Race
+ fun:__lll_mutex_lock_wait
+ fun:_L_mutex_lock_*
+ fun:pthread_mutex_lock
+}
+{
thrcheck-glibc2X-libpthread-5
Thrcheck:Race
fun:mythread_wrapper
@@ -281,29 +296,29 @@
fun:start_thread
fun:*clone*
}
-#z{
-#z thrcheck-glibc2X-libpthread-7
-#z Thrcheck:Race
-#z fun:__deallocate_stack
-#z fun:__free_tcb
-#z fun:start_thread
-#z}
-#z{
-#z thrcheck-glibc2X-libpthread-8
-#z Thrcheck:Race
-#z fun:__deallocate_stack
-#z fun:pthread_join
-#z fun:pthread_join
-#z}
-#z
-#z###--- fork ---###
-#z{
-#z thrcheck-glibc2X-fork-1
-#z Thrcheck:Race
-#z fun:__reclaim_stacks
-#z fun:fork
-#z}
+{
+ thrcheck-glibc2X-libpthread-7
+ Thrcheck:Race
+ fun:__deallocate_stack
+ fun:__free_tcb
+ fun:start_thread
+}
+{
+ thrcheck-glibc2X-libpthread-8
+ Thrcheck:Race
+ fun:__deallocate_stack
+ fun:__free_tcb
+ fun:pthread_join
+}
+###--- fork ---###
+{
+ thrcheck-glibc2X-fork-1
+ Thrcheck:Race
+ fun:__reclaim_stacks
+ fun:fork
+}
+
###--- glibc-2.5 specific ---###
{
thrcheck-glibc25-ld25-64bit-1
Modified: branches/THRCHECK/thrcheck/tc_intercepts.c
===================================================================
--- branches/THRCHECK/thrcheck/tc_intercepts.c 2007-10-16 20:59:56 UTC (rev 7007)
+++ branches/THRCHECK/thrcheck/tc_intercepts.c 2007-10-16 21:07:43 UTC (rev 7008)
@@ -281,13 +281,14 @@
/*----------------------------------------------------------------*/
/* Handled: pthread_mutex_init pthread_mutex_destroy
- pthread_mutex_lock pthread_mutex_trylock
+ pthread_mutex_lock
+ pthread_mutex_trylock pthread_mutex_timedlock
pthread_mutex_unlock
- Unhandled: pthread_mutex_timedlock -- FIXME
-
- FIXME: pthread_spin_init pthread_spin_destroy
- pthread_spin_lock pthread_spin_unlock pthread_spin_trylock
+ Unhandled: pthread_spin_init pthread_spin_destroy
+ pthread_spin_lock
+ pthread_spin_trylock
+ pthread_spin_unlock
*/
// pthread_mutex_init
@@ -365,8 +366,8 @@
fprintf(stderr, "<< pthread_mxlock %p", mutex); fflush(stderr);
}
- DO_CREQ_v_W(_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE,
- pthread_mutex_t*,mutex);
+ DO_CREQ_v_WW(_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE,
+ pthread_mutex_t*,mutex, long,0/*!isTryLock*/);
CALL_FN_W_W(ret, fn, mutex);
@@ -389,8 +390,12 @@
}
-// pthread_mutex_trylock. AFAICS the handling needed here is identical
-// to that for pthread_mutex_lock.
+// pthread_mutex_trylock. The handling needed here is very similar
+// to that for pthread_mutex_lock, except that we need to tell
+// the pre-lock creq that this is a trylock-style operation, and
+// therefore not to complain if the lock is nonrecursive and
+// already locked by this thread -- because then it'll just fail
+// immediately with EBUSY.
PTH_FUNC(int, pthreadZumutexZutrylock, // pthread_mutex_trylock
pthread_mutex_t *mutex)
{
@@ -401,8 +406,8 @@
fprintf(stderr, "<< pthread_mxtrylock %p", mutex); fflush(stderr);
}
- DO_CREQ_v_W(_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE,
- pthread_mutex_t*,mutex);
+ DO_CREQ_v_WW(_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE,
+ pthread_mutex_t*,mutex, long,1/*isTryLock*/);
CALL_FN_W_W(ret, fn, mutex);
@@ -426,6 +431,44 @@
}
+// pthread_mutex_timedlock. Identical logic to pthread_mutex_trylock.
+PTH_FUNC(int, pthreadZumutexZutimedlock, // pthread_mutex_timedlock
+ pthread_mutex_t *mutex,
+ void* timeout)
+{
+ int ret;
+ OrigFn fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ if (TRACE_PTH_FNS) {
+ fprintf(stderr, "<< pthread_mxtimedlock %p %p", mutex, timeout);
+ fflush(stderr);
+ }
+
+ DO_CREQ_v_WW(_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE,
+ pthread_mutex_t*,mutex, long,1/*isTryLock-ish*/);
+
+ CALL_FN_W_WW(ret, fn, mutex,timeout);
+
+ /* There's a hole here: libpthread now knows the lock is locked,
+ but the tool doesn't, so some other thread could run and detect
+ that the lock has been acquired by someone (this thread). Does
+ this matter? Not sure, but I don't think so. */
+
+ if (ret == 0 /*success*/) {
+ DO_CREQ_v_W(_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_POST,
+ pthread_mutex_t*,mutex);
+ } else {
+ if (ret != ETIMEDOUT)
+ DO_PthAPIerror( "pthread_mutex_timedlock", ret );
+ }
+
+ if (TRACE_PTH_FNS) {
+ fprintf(stderr, " :: mxtimedlock -> %d >>\n", ret);
+ }
+ return ret;
+}
+
+
// pthread_mutex_unlock
PTH_FUNC(int, pthreadZumutexZuunlock, // pthread_mutex_unlock
pthread_mutex_t *mutex)
@@ -547,8 +590,9 @@
DO_CREQ_v_WW(_VG_USERREQ__TC_PTHREAD_COND_WAIT_POST,
pthread_cond_t*,cond, pthread_mutex_t*,mutex);
- } else {
- DO_PthAPIerror( "pthread_cond_timedwait", ret );
+ } else {
+ if (ret != ETIMEDOUT)
+ DO_PthAPIerror( "pthread_cond_timedwait", ret );
}
if (TRACE_PTH_FNS) {
@@ -819,8 +863,8 @@
fprintf(stderr, "<< QMutex::lock %p", self); fflush(stderr);
}
- DO_CREQ_v_W(_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE,
- void*, self);
+ DO_CREQ_v_WW(_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE,
+ void*,self, long,0/*!isTryLock*/);
CALL_FN_v_W(fn, self);
@@ -869,8 +913,8 @@
fprintf(stderr, "<< QMutex::tryLock %p", self); fflush(stderr);
}
- DO_CREQ_v_W(_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE,
- void*, self);
+ DO_CREQ_v_WW(_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE,
+ void*,self, long,1/*isTryLock*/);
CALL_FN_W_W(ret, fn, self);
Modified: branches/THRCHECK/thrcheck/tc_main.c
===================================================================
--- branches/THRCHECK/thrcheck/tc_main.c 2007-10-16 20:59:56 UTC (rev 7007)
+++ branches/THRCHECK/thrcheck/tc_main.c 2007-10-16 21:07:43 UTC (rev 7008)
@@ -2241,7 +2241,6 @@
static void record_error_UnlockUnlocked ( Thread*, Lock* );
static void record_error_UnlockForeign ( Thread*, Thread*, Lock* );
static void record_error_UnlockBogus ( Thread*, Addr );
-static void record_error_DestroyLocked ( Thread*, Lock* );
static void record_error_PthAPIerror ( Thread*, HChar*, Word, HChar* );
static void record_error_LockOrder ( Thread*, Lock*, Lock* );
@@ -4805,7 +4804,7 @@
tl_assert( lk->guestaddr == (Addr)mutex );
if (lk->heldBy) {
/* Basically act like we unlocked the lock */
- record_error_DestroyLocked( thr, lk );
+ record_error_Misc( thr, "pthread_mutex_destroy of a locked mutex" );
/* remove lock from locksets of all owning threads */
remove_Lock_from_locksets_of_all_owning_Threads( lk );
TC_(deleteBag)( lk->heldBy );
@@ -4820,7 +4819,8 @@
all__sanity_check("evh__tc_PTHREAD_MUTEX_DESTROY_PRE");
}
-static void evh__TC_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid, void* mutex )
+static void evh__TC_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
+ void* mutex, Word isTryLock )
{
/* Just check the mutex is sane; nothing else to do. */
// 'mutex' may be invalid - not checked by wrapper
@@ -4830,6 +4830,7 @@
VG_(printf)("evh__tc_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
(Int)tid, (void*)mutex );
+ tl_assert(isTryLock == 0 || isTryLock == 1);
thr = map_threads_maybe_lookup( tid );
tl_assert(thr); /* cannot fail - Thread* must already exist */
@@ -4841,12 +4842,15 @@
}
if ( lk
+ && isTryLock == 0
&& (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
&& lk->heldBy
&& lk->heldW
&& TC_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
- /* uh, it's a non-recursive lock and we already w-hold it. Duh.
- Deadlock coming up; but at least produce an error message. */
+ /* uh, it's a non-recursive lock and we already w-hold it, and
+ this is a real lock operation (not a speculative "tryLock"
+ kind of thing). Duh. Deadlock coming up; but at least
+ produce an error message. */
record_error_Misc( thr, "Attempt to re-lock a "
"non-recursive lock I already hold" );
}
@@ -5067,7 +5071,7 @@
tl_assert( lk->guestaddr == (Addr)rwl );
if (lk->heldBy) {
/* Basically act like we unlocked the lock */
- record_error_DestroyLocked( thr, lk );
+ record_error_Misc( thr, "pthread_rwlock_destroy of a locked mutex" );
/* remove lock from locksets of all owning threads */
remove_Lock_from_locksets_of_all_owning_Threads( lk );
TC_(deleteBag)( lk->heldBy );
@@ -5961,14 +5965,15 @@
/* --- --- User-visible client requests --- --- */
case VG_USERREQ__TC_CLEAN_MEMORY:
- if (1) VG_(printf)("VG_USERREQ__TC_CLEAN_MEMORY(%p,%d)\n",
+ if (0) VG_(printf)("VG_USERREQ__TC_CLEAN_MEMORY(%p,%d)\n",
args[1], args[2]);
/* Call die_mem to (expensively) tidy up properly, if there
are any held locks etc in the area */
- // FIXME: next line causes firefox to stall - no idea why
- // evh__die_mem(args[1], args[2]);
- /* and then set it to New */
- evh__new_mem(args[1], args[2]);
+ if (args[2] > 0) { /* length */
+ evh__die_mem(args[1], args[2]);
+ /* and then set it to New */
+ evh__new_mem(args[1], args[2]);
+ }
break;
/* --- --- Client requests for Thrcheck's use only --- --- */
@@ -6059,8 +6064,8 @@
evh__TC_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
break;
- case _VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*
- evh__TC_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1] );
+ case _VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
+ evh__TC_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
break;
case _VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
@@ -6214,7 +6219,6 @@
XE_UnlockUnlocked, // unlocking a not-locked lock
XE_UnlockForeign, // unlocking a lock held by some other thread
XE_UnlockBogus, // unlocking an address not known to be a lock
- XE_DestroyLocked, // pth_mx_destroy on locked lock
XE_PthAPIerror, // error from the POSIX pthreads API
XE_LockOrder, // lock order error
XE_Misc // misc other error (w/ string to describe it)
@@ -6253,10 +6257,6 @@
Addr lock_ga; /* purported address of the lock */
} UnlockBogus;
struct {
- Thread* thr; /* doing the unlocking */
- Lock* lock; /* lock (that is locked and now destroyed) */
- } DestroyLocked;
- struct {
Thread* thr;
HChar* fnname; /* persistent, in tool-arena */
Word err; /* pth error code */
@@ -6289,7 +6289,6 @@
XS_UnlockUnlocked,
XS_UnlockForeign,
XS_UnlockBogus,
- XS_DestroyLocked,
XS_PthAPIerror,
XS_LockOrder,
XS_Misc
@@ -6382,19 +6381,6 @@
XE_UnlockBogus, 0, NULL, &xe );
}
-static void record_error_DestroyLocked ( Thread* thr, Lock* lk ) {
- XError xe;
- tl_assert( is_sane_Thread(thr) );
- tl_assert( is_sane_LockN(lk) );
- init_XError(&xe);
- xe.tag = XE_DestroyLocked;
- xe.XE.DestroyLocked.thr = thr;
- xe.XE.DestroyLocked.lock = mk_LockP_from_LockN(lk);
- // FIXME: tid vs thr
- VG_(maybe_record_error)( map_threads_reverse_lookup_SLOW(thr),
- XE_DestroyLocked, 0, NULL, &xe );
-}
-
static
void record_error_LockOrder ( Thread* thr, Lock* before, Lock* after ) {
XError xe;
@@ -6470,18 +6456,15 @@
case XE_UnlockBogus:
return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
&& xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
- case XE_DestroyLocked:
- return xe1->XE.DestroyLocked.thr == xe2->XE.DestroyLocked.thr
- && xe1->XE.DestroyLocked.lock == xe2->XE.DestroyLocked.lock;
case XE_PthAPIerror:
return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
&& 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
xe2->XE.PthAPIerror.fnname)
&& xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
case XE_LockOrder:
- return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr
- && xe1->XE.LockOrder.before == xe2->XE.LockOrder.before
- && xe1->XE.LockOrder.after == xe2->XE.LockOrder.after;
+ return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
+ /* && xe1->XE.LockOrder.before == xe2->XE.LockOrder.before
+ && xe1->XE.LockOrder.after == xe2->XE.LockOrder.after; */
case XE_Misc:
return xe1->XE.Misc.thr == xe2->XE.Misc.thr
&& 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
@@ -6847,7 +6830,6 @@
case XE_UnlockUnlocked: return "UnlockUnlocked";
case XE_UnlockForeign: return "UnlockForeign";
case XE_UnlockBogus: return "UnlockBogus";
- case XE_DestroyLocked: return "DestroyLocked";
case XE_PthAPIerror: return "PthAPIerror";
case XE_LockOrder: return "LockOrder";
case XE_Misc: return "Misc";
@@ -6867,7 +6849,6 @@
TRY("UnlockUnlocked", XS_UnlockUnlocked);
TRY("UnlockForeign", XS_UnlockForeign);
TRY("UnlockBogus", XS_UnlockBogus);
- TRY("DestroyLocked", XS_DestroyLocked);
TRY("PthAPIerror", XS_PthAPIerror);
TRY("LockOrder", XS_LockOrder);
TRY("Misc", XS_Misc);
@@ -6891,7 +6872,6 @@
case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
case XS_UnlockForeign: return VG_(get_error_kind)(err) == XE_UnlockForeign;
case XS_UnlockBogus: return VG_(get_error_kind)(err) == XE_UnlockBogus;
- case XS_DestroyLocked: return VG_(get_error_kind)(err) == XE_DestroyLocked;
case XS_PthAPIerror: return VG_(get_error_kind)(err) == XE_PthAPIerror;
case XS_LockOrder: return VG_(get_error_kind)(err) == XE_LockOrder;
case XS_Misc: return VG_(get_error_kind)(err) == XE_Misc;
Modified: branches/THRCHECK/thrcheck/thrcheck.h
===================================================================
--- branches/THRCHECK/thrcheck/thrcheck.h 2007-10-16 20:59:56 UTC (rev 7007)
+++ branches/THRCHECK/thrcheck/thrcheck.h 2007-10-16 21:07:43 UTC (rev 7008)
@@ -76,7 +76,7 @@
_VG_USERREQ__TC_PTHREAD_MUTEX_DESTROY_PRE, // pth_mx_t*
_VG_USERREQ__TC_PTHREAD_MUTEX_UNLOCK_PRE, // pth_mx_t*
_VG_USERREQ__TC_PTHREAD_MUTEX_UNLOCK_POST, // pth_mx_t*
- _VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE, // pth_mx_t*
+ _VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_PRE, // pth_mx_t*, long isTryLock
_VG_USERREQ__TC_PTHREAD_MUTEX_LOCK_POST, // pth_mx_t*
_VG_USERREQ__TC_PTHREAD_COND_SIGNAL_PRE, // pth_cond_t*
_VG_USERREQ__TC_PTHREAD_COND_BROADCAST_PRE, // pth_cond_t*
@@ -94,12 +94,12 @@
about the specified memory range, and resets it to New. This is
particularly useful for memory allocators that wish to recycle
memory. */
-#define VALGRIND_TC_CLEAN_MEMORY(_qzz_start, _qzz_len) \
- do { \
- unsigned long _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, VG_USERREQ__TC_CLEAN_MEMORY, \
- _qzz_start, _qzz_len, 0, 0); \
- (void)0; \
+#define VALGRIND_TC_CLEAN_MEMORY(_qzz_start, _qzz_len) \
+ do { \
+ unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__TC_CLEAN_MEMORY, \
+ _qzz_start, _qzz_len, 0, 0, 0); \
+ (void)0; \
} while(0)
#endif /* __THRCHECK_H */
|