|
From: Nicholas N. <nj...@ca...> - 2004-02-28 15:43:57
|
CVS commit by nethercote:
Some newer systems have a new version of pthreadtypes.h that comes from glibc
2.3.3, which is binary-compatible, but not source-compatible with the old one,
which came from LinuxThreads. We were using the types defined in the old one,
which caused compilation errors on systems using the new one.
This commit introduces our own versions of these types. Our versions are laid
out identically to the LinuxThreads ones, but the field names are different.
We convert all pthread types to our versions before using them, so we don't
rely on the pthreadtypes.h types any more. Hopefully this will fix the
problem; I have three reports that it does. Let's see...
M +37 -1 vg_include.h 1.183
M +221 -77 vg_libpthread.c 1.145
M +80 -108 vg_scheduler.c 1.142
--- valgrind/coregrind/vg_include.h #1.182:1.183
@@ -645,4 +645,39 @@ extern Addr VG_(do_useseg) ( UInt seg_se
/* ---------------------------------------------------------------------
+ Exports of vg_libpthread.c
+ ------------------------------------------------------------------ */
+
+/* Replacements for pthread types, shared between vg_libpthread.c and
+ vg_scheduler.c. See comment in vg_libpthread.c above the other
+ vg_pthread_*_t types for a description of how these are used. */
+
+struct _vg_pthread_fastlock
+{
+ long int __vg_status; /* "Free" or "taken" or head of waiting list */
+ int __vg_spinlock; /* Used by compare_and_swap emulation. Also,
+ adaptive SMP lock stores spin count here. */
+};
+
+typedef struct
+{
+ int __vg_m_reserved; /* Reserved for future use */
+ int __vg_m_count; /* Depth of recursive locking */
+ /*_pthread_descr*/ void* __vg_m_owner; /* Owner thread (if recursive or errcheck) */
+ int __vg_m_kind; /* Mutex kind: fast, recursive or errcheck */
+ struct _vg_pthread_fastlock __vg_m_lock; /* Underlying fast lock */
+} vg_pthread_mutex_t;
+
+typedef struct
+{
+ struct _vg_pthread_fastlock __vg_c_lock; /* Protect against concurrent access */
+ /*_pthread_descr*/ void* __vg_c_waiting; /* Threads waiting on this condition */
+ // Padding ensures the size is 48 bytes
+ char __vg_padding[48 - sizeof(struct _vg_pthread_fastlock)
+ - sizeof(void*) - sizeof(long long)];
+ long long __vg_align;
+} vg_pthread_cond_t;
+
+
+/* ---------------------------------------------------------------------
Exports of vg_scheduler.c
------------------------------------------------------------------ */
@@ -703,5 +738,5 @@ typedef
the condition variable indicated by the .associated_cv field.
In all other cases, should be NULL. */
- void* /*pthread_mutex_t* */ associated_mx;
+ vg_pthread_mutex_t* associated_mx;
/* When .status == WaitCV, points to the condition variable I am
--- valgrind/coregrind/vg_libpthread.c #1.144:1.145
@@ -83,4 +83,86 @@
/* ---------------------------------------------------------------------
+ Our own definition of types that vary between LinuxThreads and NPTL.
+ ------------------------------------------------------------------ */
+
+/* Moving from LinuxThreads to NPTL, several crucial types (eg.
+ pthread_mutex_t, pthread_mutexattr_t, etc) in pthreadtypes.h were changed
+ in binary-compatible, but source-incompatible, ways. We can similarly
+ use any layout we want, so long as it's binary-compatible. However, we
+ can no longer use the LinuxThreads types, because they won't work on NPTL
+ systems. Thus, we have to introduce a layer of indirection, and define
+ our own versions of these types (vg_pthread_mutex_t, etc). NPTL does
+ pretty much the same thing, and it keeps many of its internal types
+ secret.
+
+ We can layout our types however we want, as long as we put the small
+ number of fields in the right place for binary compatibility (eg.
+ mutex->kind). To make life easy, our versions have the exact same layout
+ as the LinuxThreads ones; only the type names and field names are
+ different (they differ only by include "vg" at the start).
+
+ In our implementation of the pthread operations (pthread_mutex_lock(),
+ pthread_mutexattr_settype(), etc) we always cast the standard pthread
+ types to our own types, (eg. pthread_mutex_t --> vg_pthread_mutex_t),
+ before working with them.
+
+ Note that we have various mutexes (and condvars) in this file that have the
+ type pthread_mutex_t (and pthread_cond_t). That is fine, because they
+ are always only handled by calling the standard pthread functions (eg.
+ pthread_mutex_lock()) on them. Phew.
+
+ WARNING: as a result of all this, we should *never* access these standard
+ pthread types as is; they *must* be converted to the vg_pthread_foo_t
+ equivalent. It would be nice if this was enforced... (but compilation
+ on NPTL-only systems should fail if this rule isn't followed...?)
+*/
+
+#include <sched.h> // for 'struct __sched_param'
+
+typedef struct __vg_pthread_attr_s
+{
+ int __vg_detachstate;
+ int __vg_schedpolicy;
+ struct __sched_param __vg_schedparam;
+ int __vg_inheritsched;
+ int __vg_scope;
+ size_t __vg_guardsize;
+ int __vg_stackaddr_set;
+ void *__vg_stackaddr;
+ size_t __vg_stacksize;
+} vg_pthread_attr_t;
+
+typedef struct
+{
+ int __vg_mutexkind;
+} vg_pthread_mutexattr_t;
+
+typedef struct _vg_pthread_rwlock_t
+{
+ struct _vg_pthread_fastlock __vg_rw_lock; /* Lock to guarantee mutual exclusion */
+ int __vg_rw_readers; /* Number of readers */
+ /*_pthread_descr*/ void* __vg_rw_writer; /* Identity of writer, or NULL if none */
+ /*_pthread_descr*/ void* __vg_rw_read_waiting; /* Threads waiting for reading */
+ /*_pthread_descr*/ void* __vg_rw_write_waiting; /* Threads waiting for writing */
+ int __vg_rw_kind; /* Reader/Writer preference selection */
+ int __vg_rw_pshared; /* Shared between processes or not */
+} vg_pthread_rwlock_t;
+
+typedef struct
+{
+ int __vg_lockkind;
+ int __vg_pshared;
+} vg_pthread_rwlockattr_t;
+
+/* Converting pthread types to vg_pthread types. We always check that the
+ passed-in type is as big as ours, for safety. We also zero the pointer
+ to the original struct, to ensure we don't accidentally use it again. */
+
+#define CONVERT(foo, x, vg_x) \
+ my_assert(sizeof(*x) >= sizeof(vg_pthread_##foo##_t)); \
+ vg_x = (vg_pthread_##foo##_t*)x; \
+ x = 0; // ensure we don't accidentally use x again!
+
+/* ---------------------------------------------------------------------
Forwardses.
------------------------------------------------------------------ */
@@ -302,13 +384,20 @@ pthread_t pthread_self(void)
int pthread_attr_init(pthread_attr_t *attr)
{
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+
/* Just initialise the fields which we might look at. */
- attr->__detachstate = PTHREAD_CREATE_JOINABLE;
+ vg_attr->__vg_detachstate = PTHREAD_CREATE_JOINABLE;
/* Linuxthreads sets this field to the value __getpagesize(), so I
guess the following is OK. */
- attr->__guardsize = VKI_BYTES_PER_PAGE; return 0;
+ vg_attr->__vg_guardsize = VKI_BYTES_PER_PAGE;
+ return 0;
}
int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+
if (detachstate != PTHREAD_CREATE_JOINABLE
&& detachstate != PTHREAD_CREATE_DETACHED) {
@@ -317,5 +406,5 @@ int pthread_attr_setdetachstate(pthread_
return EINVAL;
}
- attr->__detachstate = detachstate;
+ vg_attr->__vg_detachstate = detachstate;
return 0;
}
@@ -323,5 +412,8 @@ int pthread_attr_setdetachstate(pthread_
int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
- *detachstate = attr->__detachstate;
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+
+ *detachstate = vg_attr->__vg_detachstate;
return 0;
}
@@ -413,17 +506,20 @@ int pthread_getattr_np (pthread_t thread
int detached;
size_t limit;
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+
ensure_valgrind("pthread_getattr_np");
kludged("pthread_getattr_np", NULL);
limit = VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
- 1000; /* paranoia */
- attr->__detachstate = PTHREAD_CREATE_JOINABLE;
- attr->__schedpolicy = SCHED_OTHER;
- attr->__schedparam.sched_priority = 0;
- attr->__inheritsched = PTHREAD_EXPLICIT_SCHED;
- attr->__scope = PTHREAD_SCOPE_SYSTEM;
- attr->__guardsize = VKI_BYTES_PER_PAGE;
- attr->__stackaddr = NULL;
- attr->__stackaddr_set = 0;
- attr->__stacksize = limit;
+ vg_attr->__vg_detachstate = PTHREAD_CREATE_JOINABLE;
+ vg_attr->__vg_schedpolicy = SCHED_OTHER;
+ vg_attr->__vg_schedparam.sched_priority = 0;
+ vg_attr->__vg_inheritsched = PTHREAD_EXPLICIT_SCHED;
+ vg_attr->__vg_scope = PTHREAD_SCOPE_SYSTEM;
+ vg_attr->__vg_guardsize = VKI_BYTES_PER_PAGE;
+ vg_attr->__vg_stackaddr = NULL;
+ vg_attr->__vg_stackaddr_set = 0;
+ vg_attr->__vg_stacksize = limit;
VALGRIND_MAGIC_SEQUENCE(detached, (-1) /* default */,
VG_USERREQ__SET_OR_GET_DETACH,
@@ -431,5 +527,5 @@ int pthread_getattr_np (pthread_t thread
my_assert(detached == 0 || detached == 1);
if (detached)
- attr->__detachstate = PTHREAD_CREATE_DETACHED;
+ vg_attr->__vg_detachstate = PTHREAD_CREATE_DETACHED;
return 0;
}
@@ -464,7 +560,9 @@ int pthread_attr_getstacksize ( const pt
int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
if (policy != SCHED_OTHER && policy != SCHED_FIFO && policy != SCHED_RR)
return EINVAL;
- attr->__schedpolicy = policy;
+ vg_attr->__vg_schedpolicy = policy;
return 0;
}
@@ -472,5 +570,7 @@ int pthread_attr_setschedpolicy(pthread_
int pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
- *policy = attr->__schedpolicy;
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+ *policy = vg_attr->__vg_schedpolicy;
return 0;
}
@@ -499,5 +599,7 @@ WEAK
int pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
- *guardsize = attr->__guardsize;
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+ *guardsize = vg_attr->__vg_guardsize;
return 0;
}
@@ -786,4 +888,6 @@ pthread_create (pthread_t *__restrict __
NewThreadInfo* info;
int gs;
+ vg_pthread_attr_t* __vg_attr;
+ CONVERT(attr, __attr, __vg_attr);
ensure_valgrind("pthread_create");
@@ -798,6 +902,6 @@ pthread_create (pthread_t *__restrict __
my_assert(info != NULL);
- if (__attr)
- info->attr__detachstate = __attr->__detachstate;
+ if (__vg_attr)
+ info->attr__detachstate = __vg_attr->__vg_detachstate;
else
info->attr__detachstate = PTHREAD_CREATE_JOINABLE;
@@ -984,5 +1088,7 @@ void _pthread_cleanup_pop_restore (struc
int __pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
- attr->__mutexkind = PTHREAD_MUTEX_ERRORCHECK_NP;
+ vg_pthread_mutexattr_t* vg_attr;
+ CONVERT(mutexattr, attr, vg_attr);
+ vg_attr->__vg_mutexkind = PTHREAD_MUTEX_ERRORCHECK_NP;
return 0;
}
@@ -990,4 +1096,7 @@ int __pthread_mutexattr_init(pthread_mut
int __pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
+ vg_pthread_mutexattr_t* vg_attr;
+ CONVERT(mutexattr, attr, vg_attr);
+
switch (type) {
# ifndef GLIBC_2_1
@@ -1000,5 +1109,5 @@ int __pthread_mutexattr_settype(pthread_
case PTHREAD_MUTEX_RECURSIVE_NP:
case PTHREAD_MUTEX_ERRORCHECK_NP:
- attr->__mutexkind = type;
+ vg_attr->__vg_mutexkind = type;
return 0;
default:
@@ -1034,9 +1143,14 @@ int __pthread_mutex_init(pthread_mutex_t
const pthread_mutexattr_t *mutexattr)
{
- mutex->__m_count = 0;
- mutex->__m_owner = (_pthread_descr)VG_INVALID_THREADID;
- mutex->__m_kind = PTHREAD_MUTEX_ERRORCHECK_NP;
- if (mutexattr)
- mutex->__m_kind = mutexattr->__mutexkind;
+ vg_pthread_mutex_t* vg_mutex;
+ vg_pthread_mutexattr_t* vg_mutexattr;
+ CONVERT(mutex, mutex, vg_mutex);
+ CONVERT(mutexattr, mutexattr, vg_mutexattr);
+
+ vg_mutex->__vg_m_count = 0;
+ vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
+ vg_mutex->__vg_m_kind = PTHREAD_MUTEX_ERRORCHECK_NP;
+ if (vg_mutexattr)
+ vg_mutex->__vg_m_kind = vg_mutexattr->__vg_mutexkind;
return 0;
}
@@ -1046,9 +1160,11 @@ int __pthread_mutex_lock(pthread_mutex_t
{
int res;
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
if (RUNNING_ON_VALGRIND) {
VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
VG_USERREQ__PTHREAD_MUTEX_LOCK,
- mutex, 0, 0, 0);
+ vg_mutex, 0, 0, 0);
return res;
} else {
@@ -1056,7 +1172,7 @@ int __pthread_mutex_lock(pthread_mutex_t
if (0)
kludged("prehistoric lock", NULL);
- mutex->__m_owner = (_pthread_descr)1;
- mutex->__m_count = 1;
- mutex->__m_kind |= VG_PTHREAD_PREHISTORY;
+ vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)1;
+ vg_mutex->__vg_m_count = 1;
+ vg_mutex->__vg_m_kind |= VG_PTHREAD_PREHISTORY;
return 0; /* success */
}
@@ -1067,9 +1183,11 @@ int __pthread_mutex_trylock(pthread_mute
{
int res;
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
if (RUNNING_ON_VALGRIND) {
VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
VG_USERREQ__PTHREAD_MUTEX_TRYLOCK,
- mutex, 0, 0, 0);
+ vg_mutex, 0, 0, 0);
return res;
} else {
@@ -1077,7 +1195,7 @@ int __pthread_mutex_trylock(pthread_mute
if (0)
kludged("prehistoric trylock", NULL);
- mutex->__m_owner = (_pthread_descr)1;
- mutex->__m_count = 1;
- mutex->__m_kind |= VG_PTHREAD_PREHISTORY;
+ vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)1;
+ vg_mutex->__vg_m_count = 1;
+ vg_mutex->__vg_m_kind |= VG_PTHREAD_PREHISTORY;
return 0; /* success */
}
@@ -1088,9 +1206,11 @@ int __pthread_mutex_unlock(pthread_mutex
{
int res;
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
if (RUNNING_ON_VALGRIND) {
VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
VG_USERREQ__PTHREAD_MUTEX_UNLOCK,
- mutex, 0, 0, 0);
+ vg_mutex, 0, 0, 0);
return res;
} else {
@@ -1098,7 +1218,7 @@ int __pthread_mutex_unlock(pthread_mutex
if (0)
kludged("prehistoric unlock", NULL);
- mutex->__m_owner = 0;
- mutex->__m_count = 0;
- mutex->__m_kind &= ~VG_PTHREAD_PREHISTORY;
+ vg_mutex->__vg_m_owner = 0;
+ vg_mutex->__vg_m_count = 0;
+ vg_mutex->__vg_m_kind &= ~VG_PTHREAD_PREHISTORY;
return 0; /* success */
}
@@ -1108,18 +1228,20 @@ int __pthread_mutex_unlock(pthread_mutex
int __pthread_mutex_destroy(pthread_mutex_t *mutex)
{
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
+
/* Valgrind doesn't hold any resources on behalf of the mutex, so no
need to involve it. */
- if (mutex->__m_count > 0) {
+ if (vg_mutex->__vg_m_count > 0) {
/* Oh, the horror. glibc's internal use of pthreads "knows"
that destroying a lock does an implicit unlock. Make it
explicit. */
- __pthread_mutex_unlock(mutex);
- pthread_error("pthread_mutex_destroy: "
- "mutex is still in use");
+ __pthread_mutex_unlock( (pthread_mutex_t*)vg_mutex );
+ pthread_error("pthread_mutex_destroy: mutex is still in use");
return EBUSY;
}
- mutex->__m_count = 0;
- mutex->__m_owner = (_pthread_descr)VG_INVALID_THREADID;
- mutex->__m_kind = PTHREAD_MUTEX_ERRORCHECK_NP;
+ vg_mutex->__vg_m_count = 0;
+ vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
+ vg_mutex->__vg_m_kind = PTHREAD_MUTEX_ERRORCHECK_NP;
return 0;
}
@@ -1145,5 +1267,7 @@ int pthread_cond_init( pthread_cond_t *c
const pthread_condattr_t *cond_attr)
{
- cond->__c_waiting = (_pthread_descr)VG_INVALID_THREADID;
+ vg_pthread_cond_t* vg_cond;
+ CONVERT(cond, cond, vg_cond);
+ vg_cond->__vg_c_waiting = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
return 0;
}
@@ -1193,8 +1317,11 @@ int pthread_cond_wait(pthread_cond_t *co
{
int res;
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
+
ensure_valgrind("pthread_cond_wait");
VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
VG_USERREQ__PTHREAD_COND_WAIT,
- cond, mutex, 0, 0);
+ cond, vg_mutex, 0, 0);
return res;
}
@@ -1209,4 +1336,6 @@ int pthread_cond_timedwait ( pthread_con
unsigned long long int ull_ms_now_after_1970;
unsigned long long int ull_ms_end_after_1970;
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
ensure_valgrind("pthread_cond_timedwait");
@@ -1230,5 +1359,5 @@ int pthread_cond_timedwait ( pthread_con
VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
VG_USERREQ__PTHREAD_COND_TIMEDWAIT,
- cond, mutex, ms_end, 0);
+ cond, vg_mutex, ms_end, 0);
return res;
}
@@ -2658,7 +2787,7 @@ void init_vg_rwlock ( vg_rwlock_t* vg_rw
address of our version. Further, if the LinuxThreads version
appears to have been statically initialised, do the same to the one
- we allocate here. The pthread_rwlock_t.__rw_readers field is set
- to zero by PTHREAD_RWLOCK_INITIALIZER, so we take zero as meaning
- uninitialised and non-zero meaning initialised.
+ we allocate here. The vg_pthread_rwlock_t.__vg_rw_readers field is set
+ to zero by PTHREAD_RWLOCK_INITIALIZER (as are several other fields), so
+ we take zero as meaning uninitialised and non-zero meaning initialised.
*/
static vg_rwlock_t* rw_remap ( pthread_rwlock_t* orig )
@@ -2666,4 +2795,6 @@ static vg_rwlock_t* rw_remap ( pthread_r
int res, i;
vg_rwlock_t* vg_rwl;
+ vg_pthread_rwlock_t* vg_orig;
+
res = __pthread_mutex_lock(&rw_remap_mx);
my_assert(res == 0);
@@ -2689,8 +2820,9 @@ static vg_rwlock_t* rw_remap ( pthread_r
/* Initialise the shadow, if required. */
- if (orig->__rw_readers == 0) {
- orig->__rw_readers = 1;
+ CONVERT(rwlock, orig, vg_orig);
+ if (vg_orig->__vg_rw_readers == 0) {
+ vg_orig->__vg_rw_readers = 1;
init_vg_rwlock(vg_rwl);
- if (orig->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
+ if (vg_orig->__vg_rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
vg_rwl->prefer_w = 0;
}
@@ -2704,12 +2836,17 @@ int pthread_rwlock_init ( pthread_rwlock
{
vg_rwlock_t* rwl;
+ vg_pthread_rwlock_t* vg_orig;
+ vg_pthread_rwlockattr_t* vg_attr;
+ CONVERT(rwlock, orig, vg_orig);
+ CONVERT(rwlockattr, attr, vg_attr);
+
if (0) printf ("pthread_rwlock_init\n");
/* Force the remapper to initialise the shadow. */
- orig->__rw_readers = 0;
+ vg_orig->__vg_rw_readers = 0;
/* Install the lock preference; the remapper needs to know it. */
- orig->__rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
- if (attr)
- orig->__rw_kind = attr->__lockkind;
- rwl = rw_remap ( orig );
+ vg_orig->__vg_rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
+ if (vg_attr)
+ vg_orig->__vg_rw_kind = vg_attr->__vg_lockkind;
+ rwl = rw_remap ( (pthread_rwlock_t*)vg_orig );
return 0;
}
@@ -2960,7 +3100,8 @@ int
pthread_rwlockattr_init (pthread_rwlockattr_t *attr)
{
- attr->__lockkind = 0;
- attr->__pshared = PTHREAD_PROCESS_PRIVATE;
-
+ vg_pthread_rwlockattr_t* vg_attr;
+ CONVERT(rwlockattr, attr, vg_attr);
+ vg_attr->__vg_lockkind = 0;
+ vg_attr->__vg_pshared = PTHREAD_PROCESS_PRIVATE;
return 0;
}
@@ -2977,4 +3118,7 @@ int
pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
{
+ vg_pthread_rwlockattr_t* vg_attr;
+ CONVERT(rwlockattr, attr, vg_attr);
+
if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
return EINVAL;
@@ -2984,5 +3128,5 @@ pthread_rwlockattr_setpshared (pthread_r
return ENOSYS;
- attr->__pshared = pshared;
+ vg_attr->__vg_pshared = pshared;
return 0;
--- valgrind/coregrind/vg_scheduler.c #1.141:1.142
@@ -1932,37 +1932,24 @@ void do__apply_in_new_thread ( ThreadId
-------------------------------------------------------- */
-/* pthread_mutex_t is a struct with at 5 words:
- typedef struct
- {
- int __m_reserved; -- Reserved for future use
- int __m_count; -- Depth of recursive locking
- _pthread_descr __m_owner; -- Owner thread (if recursive or errcheck)
- int __m_kind; -- Mutex kind: fast, recursive or errcheck
- struct _pthread_fastlock __m_lock; -- Underlying fast lock
- } pthread_mutex_t;
+/* vg_pthread_mutex_t is defined in vg_include.h.
- #define PTHREAD_MUTEX_INITIALIZER \
- {0, 0, 0, PTHREAD_MUTEX_TIMED_NP, __LOCK_INITIALIZER}
- # define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
- {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER}
- # define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
- {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __LOCK_INITIALIZER}
- # define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
- {0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __LOCK_INITIALIZER}
+ The initializers zero everything, except possibly the fourth word,
+ which in vg_pthread_mutex_t is the __vg_m_kind field. It gets set to one
+ of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
How we use it:
- __m_kind never changes and indicates whether or not it is recursive.
+ __vg_m_kind never changes and indicates whether or not it is recursive.
- __m_count indicates the lock count; if 0, the mutex is not owned by
+ __vg_m_count indicates the lock count; if 0, the mutex is not owned by
anybody.
- __m_owner has a ThreadId value stuffed into it. We carefully arrange
+ __vg_m_owner has a ThreadId value stuffed into it. We carefully arrange
that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
statically initialised mutexes correctly appear
to belong to nobody.
- In summary, a not-in-use mutex is distinguised by having __m_owner
- == 0 (VG_INVALID_THREADID) and __m_count == 0 too. If one of those
+ In summary, a not-in-use mutex is distinguised by having __vg_m_owner
+ == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too. If one of those
conditions holds, the other should too.
@@ -1978,5 +1965,5 @@ void do__apply_in_new_thread ( ThreadId
/* Helper fns ... */
static
-void release_one_thread_waiting_on_mutex ( pthread_mutex_t* mutex,
+void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex,
Char* caller )
{
@@ -1994,11 +1981,11 @@ void release_one_thread_waiting_on_mutex
}
- VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__m_owner, mutex );
+ VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );
vg_assert(i <= VG_N_THREADS);
if (i == VG_N_THREADS) {
/* Nobody else is waiting on it. */
- mutex->__m_count = 0;
- mutex->__m_owner = VG_INVALID_THREADID;
+ mutex->__vg_m_count = 0;
+ mutex->__vg_m_owner = VG_INVALID_THREADID;
} else {
/* Notionally transfer the hold to thread i, whose
@@ -2006,5 +1993,5 @@ void release_one_thread_waiting_on_mutex
/* The .count is already == 1. */
vg_assert(VG_(threads)[i].associated_mx == mutex);
- mutex->__m_owner = (_pthread_descr)i;
+ mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
VG_(threads)[i].status = VgTs_Runnable;
VG_(threads)[i].associated_mx = NULL;
@@ -2025,5 +2012,5 @@ static
void do_pthread_mutex_lock( ThreadId tid,
Bool is_trylock,
- pthread_mutex_t* mutex )
+ vg_pthread_mutex_t* mutex )
{
Char msg_buf[100];
@@ -2050,5 +2037,5 @@ void do_pthread_mutex_lock( ThreadId tid
/* More paranoia ... */
- switch (mutex->__m_kind) {
+ switch (mutex->__vg_m_kind) {
# ifndef GLIBC_2_1
case PTHREAD_MUTEX_TIMED_NP:
@@ -2060,5 +2047,5 @@ void do_pthread_mutex_lock( ThreadId tid
case PTHREAD_MUTEX_RECURSIVE_NP:
case PTHREAD_MUTEX_ERRORCHECK_NP:
- if (mutex->__m_count >= 0) break;
+ if (mutex->__vg_m_count >= 0) break;
/* else fall thru */
default:
@@ -2069,6 +2056,6 @@ void do_pthread_mutex_lock( ThreadId tid
}
- if (mutex->__m_count > 0) {
- if (!VG_(is_valid_tid)((ThreadId)mutex->__m_owner)) {
+ if (mutex->__vg_m_count > 0) {
+ if (!VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner)) {
VG_(record_pthread_error)( tid,
"pthread_mutex_lock/trylock: mutex has invalid owner");
@@ -2078,13 +2065,13 @@ void do_pthread_mutex_lock( ThreadId tid
/* Someone has it already. */
- if ((ThreadId)mutex->__m_owner == tid) {
+ if ((ThreadId)mutex->__vg_m_owner == tid) {
/* It's locked -- by me! */
- if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
+ if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
/* return 0 (success). */
- mutex->__m_count++;
+ mutex->__vg_m_count++;
SET_PTHREQ_RETVAL(tid, 0);
if (0)
VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
- tid, mutex, mutex->__m_count);
+ tid, mutex, mutex->__vg_m_count);
return;
} else {
@@ -2098,5 +2085,5 @@ void do_pthread_mutex_lock( ThreadId tid
/* Someone else has it; we have to wait. Mark ourselves
thusly. */
- /* GUARD: __m_count > 0 && __m_owner is valid */
+ /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
if (is_trylock) {
/* caller is polling; so return immediately. */
@@ -2119,11 +2106,11 @@ void do_pthread_mutex_lock( ThreadId tid
} else {
/* Nobody owns it. Sanity check ... */
- vg_assert(mutex->__m_owner == VG_INVALID_THREADID);
+ vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);
VG_TRACK ( pre_mutex_lock, tid, mutex );
/* We get it! [for the first time]. */
- mutex->__m_count = 1;
- mutex->__m_owner = (_pthread_descr)tid;
+ mutex->__vg_m_count = 1;
+ mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
/* return 0 (success). */
@@ -2137,5 +2124,5 @@ void do_pthread_mutex_lock( ThreadId tid
static
void do_pthread_mutex_unlock ( ThreadId tid,
- pthread_mutex_t* mutex )
+ vg_pthread_mutex_t* mutex )
{
Char msg_buf[100];
@@ -2159,12 +2146,12 @@ void do_pthread_mutex_unlock ( ThreadId
/* If this was locked before the dawn of time, pretend it was
locked now so that it balances with unlocks */
- if (mutex->__m_kind & VG_PTHREAD_PREHISTORY) {
- mutex->__m_kind &= ~VG_PTHREAD_PREHISTORY;
- VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
- VG_TRACK( post_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
+ if (mutex->__vg_m_kind & VG_PTHREAD_PREHISTORY) {
+ mutex->__vg_m_kind &= ~VG_PTHREAD_PREHISTORY;
+ VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__vg_m_owner, mutex );
+ VG_TRACK( post_mutex_lock, (ThreadId)mutex->__vg_m_owner, mutex );
}
/* More paranoia ... */
- switch (mutex->__m_kind) {
+ switch (mutex->__vg_m_kind) {
# ifndef GLIBC_2_1
case PTHREAD_MUTEX_TIMED_NP:
@@ -2176,5 +2163,5 @@ void do_pthread_mutex_unlock ( ThreadId
case PTHREAD_MUTEX_RECURSIVE_NP:
case PTHREAD_MUTEX_ERRORCHECK_NP:
- if (mutex->__m_count >= 0) break;
+ if (mutex->__vg_m_count >= 0) break;
/* else fall thru */
default:
@@ -2186,5 +2173,5 @@ void do_pthread_mutex_unlock ( ThreadId
/* Barf if we don't currently hold the mutex. */
- if (mutex->__m_count == 0) {
+ if (mutex->__vg_m_count == 0) {
/* nobody holds it */
VG_(record_pthread_error)( tid,
@@ -2194,5 +2181,5 @@ void do_pthread_mutex_unlock ( ThreadId
}
- if ((ThreadId)mutex->__m_owner != tid) {
+ if ((ThreadId)mutex->__vg_m_owner != tid) {
/* we don't hold it */
VG_(record_pthread_error)( tid,
@@ -2204,7 +2191,7 @@ void do_pthread_mutex_unlock ( ThreadId
/* If it's a multiply-locked recursive mutex, just decrement the
lock count and return. */
- if (mutex->__m_count > 1) {
- vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
- mutex->__m_count --;
+ if (mutex->__vg_m_count > 1) {
+ vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
+ mutex->__vg_m_count --;
SET_PTHREQ_RETVAL(tid, 0); /* success */
return;
@@ -2213,6 +2200,6 @@ void do_pthread_mutex_unlock ( ThreadId
/* Now we're sure it is locked exactly once, and by the thread who
is now doing an unlock on it. */
- vg_assert(mutex->__m_count == 1);
- vg_assert((ThreadId)mutex->__m_owner == tid);
+ vg_assert(mutex->__vg_m_count == 1);
+ vg_assert((ThreadId)mutex->__vg_m_owner == tid);
/* Release at max one thread waiting on this mutex. */
@@ -2228,24 +2215,9 @@ void do_pthread_mutex_unlock ( ThreadId
-------------------------------------------------------- */
-/* The relevant native types are as follows:
- (copied from /usr/include/bits/pthreadtypes.h)
-
- -- Conditions (not abstract because of PTHREAD_COND_INITIALIZER
- typedef struct
- {
- struct _pthread_fastlock __c_lock; -- Protect against concurrent access
- _pthread_descr __c_waiting; -- Threads waiting on this condition
- } pthread_cond_t;
-
- -- Attribute for conditionally variables.
- typedef struct
- {
- int __dummy;
- } pthread_condattr_t;
-
- #define PTHREAD_COND_INITIALIZER {__LOCK_INITIALIZER, 0}
+/* The relevant type (vg_pthread_cond_t) is in vg_include.h.
- We don't use any fields of pthread_cond_t for anything at all.
- Only the identity of the CVs is important.
+ We don't use any fields of vg_pthread_cond_t for anything at all.
+ Only the identity of the CVs is important. (Actually, we initialise
+ __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
Linux pthreads supports no attributes on condition variables, so we
@@ -2257,6 +2229,6 @@ void do_pthread_cond_timedwait_TIMEOUT (
{
Char msg_buf[100];
- pthread_mutex_t* mx;
- pthread_cond_t* cv;
+ vg_pthread_mutex_t* mx;
+ vg_pthread_cond_t* cv;
vg_assert(VG_(is_valid_tid)(tid)
@@ -2268,13 +2240,13 @@ void do_pthread_cond_timedwait_TIMEOUT (
vg_assert(cv != NULL);
- if (mx->__m_owner == VG_INVALID_THREADID) {
+ if (mx->__vg_m_owner == VG_INVALID_THREADID) {
/* Currently unheld; hand it out to thread tid. */
- vg_assert(mx->__m_count == 0);
+ vg_assert(mx->__vg_m_count == 0);
VG_(threads)[tid].status = VgTs_Runnable;
SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
VG_(threads)[tid].associated_cv = NULL;
VG_(threads)[tid].associated_mx = NULL;
- mx->__m_owner = (_pthread_descr)tid;
- mx->__m_count = 1;
+ mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
+ mx->__vg_m_count = 1;
VG_TRACK( post_mutex_lock, tid, mx );
@@ -2282,5 +2254,5 @@ void do_pthread_cond_timedwait_TIMEOUT (
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf,
- "pthread_cond_timedwai cv %p: TIMEOUT with mx %p",
+ "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
cv, mx );
print_pthread_event(tid, msg_buf);
@@ -2288,5 +2260,5 @@ void do_pthread_cond_timedwait_TIMEOUT (
} else {
/* Currently held. Make thread tid be blocked on it. */
- vg_assert(mx->__m_count > 0);
+ vg_assert(mx->__vg_m_count > 0);
VG_TRACK( pre_mutex_lock, tid, mx );
@@ -2297,5 +2269,5 @@ void do_pthread_cond_timedwait_TIMEOUT (
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf,
- "pthread_cond_timedwai cv %p: TIMEOUT -> BLOCK for mx %p",
+ "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
cv, mx );
print_pthread_event(tid, msg_buf);
@@ -2306,5 +2278,5 @@ void do_pthread_cond_timedwait_TIMEOUT (
static
-void release_N_threads_waiting_on_cond ( pthread_cond_t* cond,
+void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond,
Int n_to_release,
Char* caller )
@@ -2312,5 +2284,5 @@ void release_N_threads_waiting_on_cond (
Int i;
Char msg_buf[100];
- pthread_mutex_t* mx;
+ vg_pthread_mutex_t* mx;
while (True) {
@@ -2338,12 +2310,12 @@ void release_N_threads_waiting_on_cond (
VG_TRACK( pre_mutex_lock, i, mx );
- if (mx->__m_owner == VG_INVALID_THREADID) {
+ if (mx->__vg_m_owner == VG_INVALID_THREADID) {
/* Currently unheld; hand it out to thread i. */
- vg_assert(mx->__m_count == 0);
+ vg_assert(mx->__vg_m_count == 0);
VG_(threads)[i].status = VgTs_Runnable;
VG_(threads)[i].associated_cv = NULL;
VG_(threads)[i].associated_mx = NULL;
- mx->__m_owner = (_pthread_descr)i;
- mx->__m_count = 1;
+ mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
+ mx->__vg_m_count = 1;
/* .m_edx already holds pth_cond_wait success value (0) */
@@ -2358,5 +2330,5 @@ void release_N_threads_waiting_on_cond (
} else {
/* Currently held. Make thread i be blocked on it. */
- vg_assert(mx->__m_count > 0);
+ vg_assert(mx->__vg_m_count > 0);
VG_(threads)[i].status = VgTs_WaitMX;
VG_(threads)[i].associated_cv = NULL;
@@ -2379,6 +2351,6 @@ void release_N_threads_waiting_on_cond (
static
void do_pthread_cond_wait ( ThreadId tid,
- pthread_cond_t *cond,
- pthread_mutex_t *mutex,
+ vg_pthread_cond_t *cond,
+ vg_pthread_mutex_t *mutex,
UInt ms_end )
{
@@ -2407,5 +2379,5 @@ void do_pthread_cond_wait ( ThreadId tid
/* More paranoia ... */
- switch (mutex->__m_kind) {
+ switch (mutex->__vg_m_kind) {
# ifndef GLIBC_2_1
case PTHREAD_MUTEX_TIMED_NP:
@@ -2417,5 +2389,5 @@ void do_pthread_cond_wait ( ThreadId tid
case PTHREAD_MUTEX_RECURSIVE_NP:
case PTHREAD_MUTEX_ERRORCHECK_NP:
- if (mutex->__m_count >= 0) break;
+ if (mutex->__vg_m_count >= 0) break;
/* else fall thru */
default:
@@ -2427,6 +2399,6 @@ void do_pthread_cond_wait ( ThreadId tid
/* Barf if we don't currently hold the mutex. */
- if (mutex->__m_count == 0 /* nobody holds it */
- || (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
+ if (mutex->__vg_m_count == 0 /* nobody holds it */
+ || (ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
VG_(record_pthread_error)( tid,
"pthread_cond_wait/timedwait: mutex is unlocked "
@@ -2459,5 +2431,5 @@ static
void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
Bool broadcast,
- pthread_cond_t *cond )
+ vg_pthread_cond_t *cond )
{
Char msg_buf[100];
@@ -3011,6 +2983,6 @@ void do_client_request ( ThreadId tid )
case VG_USERREQ__PTHREAD_COND_WAIT:
do_pthread_cond_wait( tid,
- (pthread_cond_t *)(arg[1]),
- (pthread_mutex_t *)(arg[2]),
+ (vg_pthread_cond_t *)(arg[1]),
+ (vg_pthread_mutex_t *)(arg[2]),
0xFFFFFFFF /* no timeout */ );
break;
@@ -3018,6 +2990,6 @@ void do_client_request ( ThreadId tid )
case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
do_pthread_cond_wait( tid,
- (pthread_cond_t *)(arg[1]),
- (pthread_mutex_t *)(arg[2]),
+ (vg_pthread_cond_t *)(arg[1]),
+ (vg_pthread_mutex_t *)(arg[2]),
arg[3] /* timeout millisecond point */ );
break;
@@ -3027,5 +2999,5 @@ void do_client_request ( ThreadId tid )
tid,
False, /* signal, not broadcast */
- (pthread_cond_t *)(arg[1]) );
+ (vg_pthread_cond_t *)(arg[1]) );
break;
@@ -3034,5 +3006,5 @@ void do_client_request ( ThreadId tid )
tid,
True, /* broadcast, not signal */
- (pthread_cond_t *)(arg[1]) );
+ (vg_pthread_cond_t *)(arg[1]) );
break;
@@ -3277,6 +3249,6 @@ static
void scheduler_sanity ( void )
{
- pthread_mutex_t* mx;
- pthread_cond_t* cv;
+ vg_pthread_mutex_t* mx;
+ vg_pthread_cond_t* cv;
Int i;
struct timeout* top;
@@ -3317,7 +3289,7 @@ void scheduler_sanity ( void )
vg_assert(cv == NULL);
/* 1 */ vg_assert(mx != NULL);
- /* 2 */ vg_assert(mx->__m_count > 0);
- /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__m_owner));
- /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__m_owner);
+ /* 2 */ vg_assert(mx->__vg_m_count > 0);
+ /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
+ /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner);
} else
if (VG_(threads)[i].status == VgTs_WaitCV) {
|