|
From: Dirk M. <mu...@kd...> - 2004-01-02 22:49:33
|
CVS commit by mueller:
movntpd/movntps support (patch by Tom Hughes)
M +21 -0 vg_to_ucode.c 1.87.2.16
--- valgrind/coregrind/vg_to_ucode.c #1.87.2.15:1.87.2.16
@@ -4669,4 +4669,25 @@ static Addr disInstr ( UCodeBlock* cb, A
}
+ /* MOVNTPS -- 16-byte store with temporal hint (which we
+ ignore). */
+   if (sz == 4
+       && insn[0] == 0x0F
+       && insn[1] == 0x2B) {
+ eip = dis_SSE2_load_store_or_mov
+ (cb, sorb, eip+2, 16, True /* is_store */, "movntps",
+ insn[0], insn[1] );
+ goto decode_success;
+ }
+
+ /* MOVNTPD -- 16-byte store with temporal hint (which we
+ ignore). */
+ if (sz == 2
+ && insn[0] == 0x0F
+ && insn[1] == 0x2B) {
+ eip = dis_SSE3_load_store_or_mov
+ (cb, sorb, eip+2, 16, True /* is_store */, "movntpd",
+ 0x66, insn[0], insn[1] );
+ goto decode_success;
+ }
+
/* MOVD -- 4-byte move between xmmregs and (ireg or memory). */
if (sz == 2
|
|
From: Dirk M. <mu...@kd...> - 2004-01-07 19:19:31
|
CVS commit by mueller:
rsqrtss support (backport)
M +9 -0 vg_to_ucode.c 1.87.2.17
--- valgrind/coregrind/vg_to_ucode.c #1.87.2.16:1.87.2.17
@@ -4791,4 +4791,13 @@ static Addr disInstr ( UCodeBlock* cb, A
}
+ /* RSQRTSS: square root reciprocal of scalar float. */
+ if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x52) {
+ vg_assert(sz == 4);
+ eip = dis_SSE3_reg_or_mem ( cb, sorb, eip+3, 4,
+                                  "rsqrtss",
+ insn[0], insn[1], insn[2] );
+ goto decode_success;
+ }
+
   /* MOVLPS -- 8-byte load/store.  How is this different from MOVLPD
      ? */
|
|
From: Dirk M. <mu...@kd...> - 2004-04-07 01:41:28
|
CVS commit by mueller:
add our own definitions of pthread related structures to avoid compile
clash with stupid NPTL headers.
M +37 -1 vg_include.h 1.142.2.5
M +220 -74 vg_libpthread.c 1.130.2.4
M +80 -108 vg_scheduler.c 1.128.2.1
--- valgrind/coregrind/vg_scheduler.c #1.128:1.128.2.1
@@ -2276,37 +2276,24 @@ void do__apply_in_new_thread ( ThreadId
-------------------------------------------------------- */
-/* pthread_mutex_t is a struct with at 5 words:
- typedef struct
- {
- int __m_reserved; -- Reserved for future use
- int __m_count; -- Depth of recursive locking
- _pthread_descr __m_owner; -- Owner thread (if recursive or errcheck)
- int __m_kind; -- Mutex kind: fast, recursive or errcheck
- struct _pthread_fastlock __m_lock; -- Underlying fast lock
- } pthread_mutex_t;
+/* vg_pthread_mutex_t is defined in vg_include.h.
- #define PTHREAD_MUTEX_INITIALIZER \
- {0, 0, 0, PTHREAD_MUTEX_TIMED_NP, __LOCK_INITIALIZER}
- # define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
- {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER}
- # define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
- {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __LOCK_INITIALIZER}
- # define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
- {0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __LOCK_INITIALIZER}
+ The initializers zero everything, except possibly the fourth word,
+ which in vg_pthread_mutex_t is the __vg_m_kind field. It gets set to one
+ of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
How we use it:
- __m_kind never changes and indicates whether or not it is recursive.
+ __vg_m_kind never changes and indicates whether or not it is recursive.
- __m_count indicates the lock count; if 0, the mutex is not owned by
+ __vg_m_count indicates the lock count; if 0, the mutex is not owned by
anybody.
- __m_owner has a ThreadId value stuffed into it. We carefully arrange
+ __vg_m_owner has a ThreadId value stuffed into it. We carefully arrange
that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
statically initialised mutexes correctly appear
to belong to nobody.
- In summary, a not-in-use mutex is distinguised by having __m_owner
- == 0 (VG_INVALID_THREADID) and __m_count == 0 too. If one of those
+   In summary, a not-in-use mutex is distinguished by having __vg_m_owner
+ == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too. If one of those
conditions holds, the other should too.
@@ -2322,5 +2309,5 @@ void do__apply_in_new_thread ( ThreadId
/* Helper fns ... */
static
-void release_one_thread_waiting_on_mutex ( pthread_mutex_t* mutex,
+void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex,
Char* caller )
{
@@ -2338,11 +2325,11 @@ void release_one_thread_waiting_on_mutex
}
- VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__m_owner, mutex );
+ VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );
vg_assert(i <= VG_N_THREADS);
if (i == VG_N_THREADS) {
/* Nobody else is waiting on it. */
- mutex->__m_count = 0;
- mutex->__m_owner = VG_INVALID_THREADID;
+ mutex->__vg_m_count = 0;
+ mutex->__vg_m_owner = VG_INVALID_THREADID;
} else {
/* Notionally transfer the hold to thread i, whose
@@ -2350,5 +2337,5 @@ void release_one_thread_waiting_on_mutex
/* The .count is already == 1. */
vg_assert(VG_(threads)[i].associated_mx == mutex);
- mutex->__m_owner = (_pthread_descr)i;
+ mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
VG_(threads)[i].status = VgTs_Runnable;
VG_(threads)[i].associated_mx = NULL;
@@ -2369,5 +2356,5 @@ static
void do_pthread_mutex_lock( ThreadId tid,
Bool is_trylock,
- pthread_mutex_t* mutex )
+ vg_pthread_mutex_t* mutex )
{
Char msg_buf[100];
@@ -2394,5 +2381,5 @@ void do_pthread_mutex_lock( ThreadId tid
/* More paranoia ... */
- switch (mutex->__m_kind) {
+ switch (mutex->__vg_m_kind) {
# ifndef GLIBC_2_1
case PTHREAD_MUTEX_TIMED_NP:
@@ -2404,5 +2391,5 @@ void do_pthread_mutex_lock( ThreadId tid
case PTHREAD_MUTEX_RECURSIVE_NP:
case PTHREAD_MUTEX_ERRORCHECK_NP:
- if (mutex->__m_count >= 0) break;
+ if (mutex->__vg_m_count >= 0) break;
/* else fall thru */
default:
@@ -2413,18 +2400,18 @@ void do_pthread_mutex_lock( ThreadId tid
}
- if (mutex->__m_count > 0) {
+ if (mutex->__vg_m_count > 0) {
- vg_assert(VG_(is_valid_tid)((ThreadId)mutex->__m_owner));
+ vg_assert(VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner));
/* Someone has it already. */
- if ((ThreadId)mutex->__m_owner == tid) {
+ if ((ThreadId)mutex->__vg_m_owner == tid) {
/* It's locked -- by me! */
- if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
+ if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
/* return 0 (success). */
- mutex->__m_count++;
+ mutex->__vg_m_count++;
SET_PTHREQ_RETVAL(tid, 0);
if (0)
VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
- tid, mutex, mutex->__m_count);
+ tid, mutex, mutex->__vg_m_count);
return;
} else {
@@ -2438,5 +2425,5 @@ void do_pthread_mutex_lock( ThreadId tid
/* Someone else has it; we have to wait. Mark ourselves
thusly. */
- /* GUARD: __m_count > 0 && __m_owner is valid */
+ /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
if (is_trylock) {
/* caller is polling; so return immediately. */
@@ -2459,11 +2446,11 @@ void do_pthread_mutex_lock( ThreadId tid
} else {
/* Nobody owns it. Sanity check ... */
- vg_assert(mutex->__m_owner == VG_INVALID_THREADID);
+ vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);
VG_TRACK ( pre_mutex_lock, tid, mutex );
/* We get it! [for the first time]. */
- mutex->__m_count = 1;
- mutex->__m_owner = (_pthread_descr)tid;
+ mutex->__vg_m_count = 1;
+ mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
/* return 0 (success). */
@@ -2477,5 +2464,5 @@ void do_pthread_mutex_lock( ThreadId tid
static
void do_pthread_mutex_unlock ( ThreadId tid,
- pthread_mutex_t* mutex )
+ vg_pthread_mutex_t* mutex )
{
Char msg_buf[100];
@@ -2499,12 +2486,12 @@ void do_pthread_mutex_unlock ( ThreadId
/* If this was locked before the dawn of time, pretend it was
locked now so that it balances with unlocks */
- if (mutex->__m_kind & VG_PTHREAD_PREHISTORY) {
- mutex->__m_kind &= ~VG_PTHREAD_PREHISTORY;
- VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
- VG_TRACK( post_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
+ if (mutex->__vg_m_kind & VG_PTHREAD_PREHISTORY) {
+ mutex->__vg_m_kind &= ~VG_PTHREAD_PREHISTORY;
+ VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__vg_m_owner, mutex );
+ VG_TRACK( post_mutex_lock, (ThreadId)mutex->__vg_m_owner, mutex );
}
/* More paranoia ... */
- switch (mutex->__m_kind) {
+ switch (mutex->__vg_m_kind) {
# ifndef GLIBC_2_1
case PTHREAD_MUTEX_TIMED_NP:
@@ -2516,5 +2503,5 @@ void do_pthread_mutex_unlock ( ThreadId
case PTHREAD_MUTEX_RECURSIVE_NP:
case PTHREAD_MUTEX_ERRORCHECK_NP:
- if (mutex->__m_count >= 0) break;
+ if (mutex->__vg_m_count >= 0) break;
/* else fall thru */
default:
@@ -2526,5 +2513,5 @@ void do_pthread_mutex_unlock ( ThreadId
/* Barf if we don't currently hold the mutex. */
- if (mutex->__m_count == 0) {
+ if (mutex->__vg_m_count == 0) {
/* nobody holds it */
VG_(record_pthread_error)( tid,
@@ -2534,5 +2521,5 @@ void do_pthread_mutex_unlock ( ThreadId
}
- if ((ThreadId)mutex->__m_owner != tid) {
+ if ((ThreadId)mutex->__vg_m_owner != tid) {
/* we don't hold it */
VG_(record_pthread_error)( tid,
@@ -2544,7 +2531,7 @@ void do_pthread_mutex_unlock ( ThreadId
/* If it's a multiply-locked recursive mutex, just decrement the
lock count and return. */
- if (mutex->__m_count > 1) {
- vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
- mutex->__m_count --;
+ if (mutex->__vg_m_count > 1) {
+ vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
+ mutex->__vg_m_count --;
SET_PTHREQ_RETVAL(tid, 0); /* success */
return;
@@ -2553,6 +2540,6 @@ void do_pthread_mutex_unlock ( ThreadId
/* Now we're sure it is locked exactly once, and by the thread who
is now doing an unlock on it. */
- vg_assert(mutex->__m_count == 1);
- vg_assert((ThreadId)mutex->__m_owner == tid);
+ vg_assert(mutex->__vg_m_count == 1);
+ vg_assert((ThreadId)mutex->__vg_m_owner == tid);
/* Release at max one thread waiting on this mutex. */
@@ -2568,24 +2555,9 @@ void do_pthread_mutex_unlock ( ThreadId
-------------------------------------------------------- */
-/* The relevant native types are as follows:
- (copied from /usr/include/bits/pthreadtypes.h)
-
- -- Conditions (not abstract because of PTHREAD_COND_INITIALIZER
- typedef struct
- {
- struct _pthread_fastlock __c_lock; -- Protect against concurrent access
- _pthread_descr __c_waiting; -- Threads waiting on this condition
- } pthread_cond_t;
-
- -- Attribute for conditionally variables.
- typedef struct
- {
- int __dummy;
- } pthread_condattr_t;
-
- #define PTHREAD_COND_INITIALIZER {__LOCK_INITIALIZER, 0}
+/* The relevant type (vg_pthread_cond_t) is in vg_include.h.
- We don't use any fields of pthread_cond_t for anything at all.
- Only the identity of the CVs is important.
+ We don't use any fields of vg_pthread_cond_t for anything at all.
+ Only the identity of the CVs is important. (Actually, we initialise
+ __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
Linux pthreads supports no attributes on condition variables, so we
@@ -2597,6 +2569,6 @@ void do_pthread_cond_timedwait_TIMEOUT (
{
Char msg_buf[100];
- pthread_mutex_t* mx;
- pthread_cond_t* cv;
+ vg_pthread_mutex_t* mx;
+ vg_pthread_cond_t* cv;
vg_assert(VG_(is_valid_tid)(tid)
@@ -2608,13 +2580,13 @@ void do_pthread_cond_timedwait_TIMEOUT (
vg_assert(cv != NULL);
- if (mx->__m_owner == VG_INVALID_THREADID) {
+ if (mx->__vg_m_owner == VG_INVALID_THREADID) {
/* Currently unheld; hand it out to thread tid. */
- vg_assert(mx->__m_count == 0);
+ vg_assert(mx->__vg_m_count == 0);
VG_(threads)[tid].status = VgTs_Runnable;
SET_PTHREQ_RETVAL(tid, ETIMEDOUT); /* pthread_cond_wait return value */
VG_(threads)[tid].associated_cv = NULL;
VG_(threads)[tid].associated_mx = NULL;
- mx->__m_owner = (_pthread_descr)tid;
- mx->__m_count = 1;
+ mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
+ mx->__vg_m_count = 1;
VG_TRACK( post_mutex_lock, tid, mx );
@@ -2622,5 +2594,5 @@ void do_pthread_cond_timedwait_TIMEOUT (
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf,
- "pthread_cond_timedwai cv %p: TIMEOUT with mx %p",
+ "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
cv, mx );
print_pthread_event(tid, msg_buf);
@@ -2628,5 +2600,5 @@ void do_pthread_cond_timedwait_TIMEOUT (
} else {
/* Currently held. Make thread tid be blocked on it. */
- vg_assert(mx->__m_count > 0);
+ vg_assert(mx->__vg_m_count > 0);
VG_TRACK( pre_mutex_lock, tid, mx );
@@ -2637,5 +2609,5 @@ void do_pthread_cond_timedwait_TIMEOUT (
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf,
- "pthread_cond_timedwai cv %p: TIMEOUT -> BLOCK for mx %p",
+ "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
cv, mx );
print_pthread_event(tid, msg_buf);
@@ -2647,5 +2619,5 @@ void do_pthread_cond_timedwait_TIMEOUT (
static
-void release_N_threads_waiting_on_cond ( pthread_cond_t* cond,
+void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond,
Int n_to_release,
Char* caller )
@@ -2653,5 +2625,5 @@ void release_N_threads_waiting_on_cond (
Int i;
Char msg_buf[100];
- pthread_mutex_t* mx;
+ vg_pthread_mutex_t* mx;
while (True) {
@@ -2679,12 +2651,12 @@ void release_N_threads_waiting_on_cond (
VG_TRACK( pre_mutex_lock, i, mx );
- if (mx->__m_owner == VG_INVALID_THREADID) {
+ if (mx->__vg_m_owner == VG_INVALID_THREADID) {
/* Currently unheld; hand it out to thread i. */
- vg_assert(mx->__m_count == 0);
+ vg_assert(mx->__vg_m_count == 0);
VG_(threads)[i].status = VgTs_Runnable;
VG_(threads)[i].associated_cv = NULL;
VG_(threads)[i].associated_mx = NULL;
- mx->__m_owner = (_pthread_descr)i;
- mx->__m_count = 1;
+ mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
+ mx->__vg_m_count = 1;
/* .m_edx already holds pth_cond_wait success value (0) */
@@ -2699,5 +2671,5 @@ void release_N_threads_waiting_on_cond (
} else {
/* Currently held. Make thread i be blocked on it. */
- vg_assert(mx->__m_count > 0);
+ vg_assert(mx->__vg_m_count > 0);
VG_(threads)[i].status = VgTs_WaitMX;
VG_(threads)[i].associated_cv = NULL;
@@ -2720,6 +2692,6 @@ void release_N_threads_waiting_on_cond (
static
void do_pthread_cond_wait ( ThreadId tid,
- pthread_cond_t *cond,
- pthread_mutex_t *mutex,
+ vg_pthread_cond_t *cond,
+ vg_pthread_mutex_t *mutex,
UInt ms_end )
{
@@ -2748,5 +2720,5 @@ void do_pthread_cond_wait ( ThreadId tid
/* More paranoia ... */
- switch (mutex->__m_kind) {
+ switch (mutex->__vg_m_kind) {
# ifndef GLIBC_2_1
case PTHREAD_MUTEX_TIMED_NP:
@@ -2758,5 +2730,5 @@ void do_pthread_cond_wait ( ThreadId tid
case PTHREAD_MUTEX_RECURSIVE_NP:
case PTHREAD_MUTEX_ERRORCHECK_NP:
- if (mutex->__m_count >= 0) break;
+ if (mutex->__vg_m_count >= 0) break;
/* else fall thru */
default:
@@ -2768,6 +2740,6 @@ void do_pthread_cond_wait ( ThreadId tid
/* Barf if we don't currently hold the mutex. */
- if (mutex->__m_count == 0 /* nobody holds it */
- || (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
+ if (mutex->__vg_m_count == 0 /* nobody holds it */
+ || (ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
VG_(record_pthread_error)( tid,
"pthread_cond_wait/timedwait: mutex is unlocked "
@@ -2798,5 +2770,5 @@ static
void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
Bool broadcast,
- pthread_cond_t *cond )
+ vg_pthread_cond_t *cond )
{
Char msg_buf[100];
@@ -3363,6 +3335,6 @@ void do_client_request ( ThreadId tid )
case VG_USERREQ__PTHREAD_COND_WAIT:
do_pthread_cond_wait( tid,
- (pthread_cond_t *)(arg[1]),
- (pthread_mutex_t *)(arg[2]),
+ (vg_pthread_cond_t *)(arg[1]),
+ (vg_pthread_mutex_t *)(arg[2]),
0xFFFFFFFF /* no timeout */ );
break;
@@ -3370,6 +3342,6 @@ void do_client_request ( ThreadId tid )
case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
do_pthread_cond_wait( tid,
- (pthread_cond_t *)(arg[1]),
- (pthread_mutex_t *)(arg[2]),
+ (vg_pthread_cond_t *)(arg[1]),
+ (vg_pthread_mutex_t *)(arg[2]),
arg[3] /* timeout millisecond point */ );
break;
@@ -3379,5 +3351,5 @@ void do_client_request ( ThreadId tid )
tid,
False, /* signal, not broadcast */
- (pthread_cond_t *)(arg[1]) );
+ (vg_pthread_cond_t *)(arg[1]) );
break;
@@ -3386,5 +3358,5 @@ void do_client_request ( ThreadId tid )
tid,
True, /* broadcast, not signal */
- (pthread_cond_t *)(arg[1]) );
+ (vg_pthread_cond_t *)(arg[1]) );
break;
@@ -3548,6 +3520,6 @@ static
void scheduler_sanity ( void )
{
- pthread_mutex_t* mx;
- pthread_cond_t* cv;
+ vg_pthread_mutex_t* mx;
+ vg_pthread_cond_t* cv;
Int i;
@@ -3568,7 +3540,7 @@ void scheduler_sanity ( void )
vg_assert(cv == NULL);
/* 1 */ vg_assert(mx != NULL);
- /* 2 */ vg_assert(mx->__m_count > 0);
- /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__m_owner));
- /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__m_owner);
+ /* 2 */ vg_assert(mx->__vg_m_count > 0);
+ /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
+ /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner);
} else
if (VG_(threads)[i].status == VgTs_WaitCV) {
--- valgrind/coregrind/vg_libpthread.c #1.130.2.3:1.130.2.4
@@ -76,4 +76,86 @@
/* ---------------------------------------------------------------------
+ Our own definition of types that vary between LinuxThreads and NPTL.
+ ------------------------------------------------------------------ */
+
+/* Moving from LinuxThreads to NPTL, several crucial types (eg.
+ pthread_mutex_t, pthread_mutexattr_t, etc) were changed in
+ binary-compatible, but source-incompatible, ways. We can similarly use
+ any layout we want, so long as it's binary-compatible. However, we can
+ no longer use the LinuxThreads types, because they won't work on NPTL
+ systems. Thus, we have to introduce a layer of indirection, and define
+ our own versions of these types (vg_pthread_mutex_t, etc). NPTL does
+ pretty much the same thing, and it keeps many of its internal types
+ secret.
+
+   We can lay out our types however we want, as long as we put the small
+ number of fields in the right place for binary compatibility (eg.
+ mutex->kind). To make life easy, our versions have the exact same layout
+ as the LinuxThreads ones; only the type names and field names are
+ different.
+
+ In our implementation of the pthread operations (pthread_mutex_lock(),
+ pthread_mutexattr_settype(), etc) we always cast the standard pthread
+ types to our own types, (eg. pthread_mutex_t --> vg_pthread_mutex_t),
+ before working with them.
+
+ Note that we have various mutexes (and condvars) in this file that have the
+ type pthread_mutex_t (and pthread_cond_t). That is fine, because they
+ are always only handled by calling the standard pthread functions (eg.
+ pthread_mutex_lock()) on them. Phew.
+
+ WARNING: as a result of all this, we should *never* access these standard
+ pthread types as is; they *must* be converted to the vg_pthread_foo_t
+ equivalent. XXX: how to enforce this? pre-processor hackery? (well,
+ it won't compile on any NPTL-only system if not followed...)
+*/
+
+#include <sched.h> // for 'struct __sched_param'
+
+typedef struct __vg_pthread_attr_s
+{
+ int __vg_detachstate;
+ int __vg_schedpolicy;
+ struct __sched_param __vg_schedparam;
+ int __vg_inheritsched;
+ int __vg_scope;
+ size_t __vg_guardsize;
+ int __vg_stackaddr_set;
+ void *__vg_stackaddr;
+ size_t __vg_stacksize;
+} vg_pthread_attr_t;
+
+typedef struct
+{
+ int __vg_mutexkind;
+} vg_pthread_mutexattr_t;
+
+typedef struct _vg_pthread_rwlock_t
+{
+ struct _vg_pthread_fastlock __vg_rw_lock; /* Lock to guarantee mutual exclusion */
+ int __vg_rw_readers; /* Number of readers */
+ /*_pthread_descr*/ void* __vg_rw_writer; /* Identity of writer, or NULL if none */
+ /*_pthread_descr*/ void* __vg_rw_read_waiting; /* Threads waiting for reading */
+ /*_pthread_descr*/ void* __vg_rw_write_waiting; /* Threads waiting for writing */
+ int __vg_rw_kind; /* Reader/Writer preference selection */
+ int __vg_rw_pshared; /* Shared between processes or not */
+} vg_pthread_rwlock_t;
+
+typedef struct
+{
+ int __vg_lockkind;
+ int __vg_pshared;
+} vg_pthread_rwlockattr_t;
+
+/* Converting pthread types to vg_pthread types. We always check that the
+ passed-in type is as big as ours, for safety. We also zero the pointer
+ to the original struct, to ensure we don't accidentally use it again. */
+
+#define CONVERT(foo, x, vg_x) \
+ my_assert(sizeof(*x) >= sizeof(vg_pthread_##foo##_t)); \
+ vg_x = (vg_pthread_##foo##_t*)x; \
+ x = 0; // ensure we don't accidentally use x again!
+
+/* ---------------------------------------------------------------------
Forwardses.
------------------------------------------------------------------ */
@@ -314,13 +396,20 @@ pthread_t pthread_self(void)
int pthread_attr_init(pthread_attr_t *attr)
{
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+
/* Just initialise the fields which we might look at. */
- attr->__detachstate = PTHREAD_CREATE_JOINABLE;
+ vg_attr->__vg_detachstate = PTHREAD_CREATE_JOINABLE;
/* Linuxthreads sets this field to the value __getpagesize(), so I
guess the following is OK. */
- attr->__guardsize = VKI_BYTES_PER_PAGE; return 0;
+ vg_attr->__vg_guardsize = VKI_BYTES_PER_PAGE;
+ return 0;
}
int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+
if (detachstate != PTHREAD_CREATE_JOINABLE
&& detachstate != PTHREAD_CREATE_DETACHED) {
@@ -329,5 +418,5 @@ int pthread_attr_setdetachstate(pthread_
return EINVAL;
}
- attr->__detachstate = detachstate;
+ vg_attr->__vg_detachstate = detachstate;
return 0;
}
@@ -335,5 +424,8 @@ int pthread_attr_setdetachstate(pthread_
int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
- *detachstate = attr->__detachstate;
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+
+ *detachstate = vg_attr->__vg_detachstate;
return 0;
}
@@ -425,17 +518,21 @@ int pthread_getattr_np (pthread_t thread
int detached;
size_t limit;
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+
ensure_valgrind("pthread_getattr_np");
kludged("pthread_getattr_np");
limit = VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
- 1000; /* paranoia */
- attr->__detachstate = PTHREAD_CREATE_JOINABLE;
- attr->__schedpolicy = SCHED_OTHER;
- attr->__schedparam.sched_priority = 0;
- attr->__inheritsched = PTHREAD_EXPLICIT_SCHED;
- attr->__scope = PTHREAD_SCOPE_SYSTEM;
- attr->__guardsize = VKI_BYTES_PER_PAGE;
- attr->__stackaddr = NULL;
- attr->__stackaddr_set = 0;
- attr->__stacksize = limit;
+ vg_attr->__vg_detachstate = PTHREAD_CREATE_JOINABLE;
+ vg_attr->__vg_schedpolicy = SCHED_OTHER;
+ vg_attr->__vg_schedparam.sched_priority = 0;
+ vg_attr->__vg_inheritsched = PTHREAD_EXPLICIT_SCHED;
+ vg_attr->__vg_scope = PTHREAD_SCOPE_SYSTEM;
+ vg_attr->__vg_guardsize = VKI_BYTES_PER_PAGE;
+ vg_attr->__vg_stackaddr = NULL;
+ vg_attr->__vg_stackaddr_set = 0;
+ vg_attr->__vg_stacksize = limit;
+
VALGRIND_MAGIC_SEQUENCE(detached, (-1) /* default */,
VG_USERREQ__SET_OR_GET_DETACH,
@@ -443,5 +540,5 @@ int pthread_getattr_np (pthread_t thread
my_assert(detached == 0 || detached == 1);
if (detached)
- attr->__detachstate = PTHREAD_CREATE_DETACHED;
+ vg_attr->__vg_detachstate = PTHREAD_CREATE_DETACHED;
return 0;
}
@@ -476,7 +573,9 @@ int pthread_attr_getstacksize ( const pt
int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
if (policy != SCHED_OTHER && policy != SCHED_FIFO && policy != SCHED_RR)
return EINVAL;
- attr->__schedpolicy = policy;
+ vg_attr->__vg_schedpolicy = policy;
return 0;
}
@@ -484,5 +583,7 @@ int pthread_attr_setschedpolicy(pthread_
int pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
- *policy = attr->__schedpolicy;
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+ *policy = vg_attr->__vg_schedpolicy;
return 0;
}
@@ -510,5 +611,7 @@ WEAK
int pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
- *guardsize = attr->__guardsize;
+ vg_pthread_attr_t* vg_attr;
+ CONVERT(attr, attr, vg_attr);
+ *guardsize = vg_attr->__vg_guardsize;
return 0;
}
@@ -707,4 +810,6 @@ pthread_create (pthread_t *__restrict __
int tid_child;
NewThreadInfo* info;
+ vg_pthread_attr_t* __vg_attr;
+ CONVERT(attr, __attr, __vg_attr);
ensure_valgrind("pthread_create");
@@ -719,6 +824,6 @@ pthread_create (pthread_t *__restrict __
my_assert(info != NULL);
- if (__attr)
- info->attr__detachstate = __attr->__detachstate;
+ if (__vg_attr)
+ info->attr__detachstate = __vg_attr->__vg_detachstate;
else
info->attr__detachstate = PTHREAD_CREATE_JOINABLE;
@@ -882,5 +987,7 @@ void _pthread_cleanup_pop_restore (struc
int __pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
- attr->__mutexkind = PTHREAD_MUTEX_ERRORCHECK_NP;
+ vg_pthread_mutexattr_t* vg_attr;
+ CONVERT(mutexattr, attr, vg_attr);
+ vg_attr->__vg_mutexkind = PTHREAD_MUTEX_ERRORCHECK_NP;
return 0;
}
@@ -888,4 +995,7 @@ int __pthread_mutexattr_init(pthread_mut
int __pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
+ vg_pthread_mutexattr_t* vg_attr;
+ CONVERT(mutexattr, attr, vg_attr);
+
switch (type) {
# ifndef GLIBC_2_1
@@ -898,5 +1008,5 @@ int __pthread_mutexattr_settype(pthread_
case PTHREAD_MUTEX_RECURSIVE_NP:
case PTHREAD_MUTEX_ERRORCHECK_NP:
- attr->__mutexkind = type;
+ vg_attr->__vg_mutexkind = type;
return 0;
default:
@@ -932,9 +1042,14 @@ int __pthread_mutex_init(pthread_mutex_t
const pthread_mutexattr_t *mutexattr)
{
- mutex->__m_count = 0;
- mutex->__m_owner = (_pthread_descr)VG_INVALID_THREADID;
- mutex->__m_kind = PTHREAD_MUTEX_ERRORCHECK_NP;
- if (mutexattr)
- mutex->__m_kind = mutexattr->__mutexkind;
+ vg_pthread_mutex_t* vg_mutex;
+ vg_pthread_mutexattr_t* vg_mutexattr;
+ CONVERT(mutex, mutex, vg_mutex);
+ CONVERT(mutexattr, mutexattr, vg_mutexattr);
+
+ vg_mutex->__vg_m_count = 0;
+ vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
+ vg_mutex->__vg_m_kind = PTHREAD_MUTEX_ERRORCHECK_NP;
+ if (vg_mutexattr)
+ vg_mutex->__vg_m_kind = vg_mutexattr->__vg_mutexkind;
return 0;
}
@@ -945,8 +1060,11 @@ int __pthread_mutex_lock(pthread_mutex_t
int res;
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
+
if (RUNNING_ON_VALGRIND) {
VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
VG_USERREQ__PTHREAD_MUTEX_LOCK,
- mutex, 0, 0, 0);
+ vg_mutex, 0, 0, 0);
return res;
} else {
@@ -954,7 +1072,7 @@ int __pthread_mutex_lock(pthread_mutex_t
if (0)
kludged("prehistoric lock");
- mutex->__m_owner = (_pthread_descr)1;
- mutex->__m_count = 1;
- mutex->__m_kind |= VG_PTHREAD_PREHISTORY;
+ vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)1;
+ vg_mutex->__vg_m_count = 1;
+ vg_mutex->__vg_m_kind |= VG_PTHREAD_PREHISTORY;
return 0; /* success */
}
@@ -965,9 +1083,11 @@ int __pthread_mutex_trylock(pthread_mute
{
int res;
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
if (RUNNING_ON_VALGRIND) {
VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
VG_USERREQ__PTHREAD_MUTEX_TRYLOCK,
- mutex, 0, 0, 0);
+ vg_mutex, 0, 0, 0);
return res;
} else {
@@ -975,7 +1095,7 @@ int __pthread_mutex_trylock(pthread_mute
if (0)
kludged("prehistoric trylock");
- mutex->__m_owner = (_pthread_descr)1;
- mutex->__m_count = 1;
- mutex->__m_kind |= VG_PTHREAD_PREHISTORY;
+ vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)1;
+ vg_mutex->__vg_m_count = 1;
+ vg_mutex->__vg_m_kind |= VG_PTHREAD_PREHISTORY;
return 0; /* success */
}
@@ -986,9 +1106,11 @@ int __pthread_mutex_unlock(pthread_mutex
{
int res;
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
if (RUNNING_ON_VALGRIND) {
VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
VG_USERREQ__PTHREAD_MUTEX_UNLOCK,
- mutex, 0, 0, 0);
+ vg_mutex, 0, 0, 0);
return res;
} else {
@@ -996,7 +1118,7 @@ int __pthread_mutex_unlock(pthread_mutex
if (0)
kludged("prehistoric unlock");
- mutex->__m_owner = 0;
- mutex->__m_count = 0;
- mutex->__m_kind &= ~VG_PTHREAD_PREHISTORY;
+ vg_mutex->__vg_m_owner = 0;
+ vg_mutex->__vg_m_count = 0;
+ vg_mutex->__vg_m_kind &= ~VG_PTHREAD_PREHISTORY;
return 0; /* success */
}
@@ -1006,18 +1128,20 @@ int __pthread_mutex_unlock(pthread_mutex
int __pthread_mutex_destroy(pthread_mutex_t *mutex)
{
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
+
/* Valgrind doesn't hold any resources on behalf of the mutex, so no
need to involve it. */
- if (mutex->__m_count > 0) {
+ if (vg_mutex->__vg_m_count > 0) {
/* Oh, the horror. glibc's internal use of pthreads "knows"
that destroying a lock does an implicit unlock. Make it
explicit. */
- __pthread_mutex_unlock(mutex);
- pthread_error("pthread_mutex_destroy: "
- "mutex is still in use");
+ __pthread_mutex_unlock( (pthread_mutex_t*)vg_mutex );
+ pthread_error("pthread_mutex_destroy: mutex is still in use");
return EBUSY;
}
- mutex->__m_count = 0;
- mutex->__m_owner = (_pthread_descr)VG_INVALID_THREADID;
- mutex->__m_kind = PTHREAD_MUTEX_ERRORCHECK_NP;
+ vg_mutex->__vg_m_count = 0;
+ vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
+ vg_mutex->__vg_m_kind = PTHREAD_MUTEX_ERRORCHECK_NP;
return 0;
}
@@ -1043,5 +1167,7 @@ int pthread_cond_init( pthread_cond_t *c
const pthread_condattr_t *cond_attr)
{
- cond->__c_waiting = (_pthread_descr)VG_INVALID_THREADID;
+ vg_pthread_cond_t* vg_cond;
+ CONVERT(cond, cond, vg_cond);
+ vg_cond->__vg_c_waiting = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
return 0;
}
@@ -1090,8 +1216,11 @@ int pthread_cond_wait(pthread_cond_t *co
{
int res;
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
+
ensure_valgrind("pthread_cond_wait");
VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
VG_USERREQ__PTHREAD_COND_WAIT,
- cond, mutex, 0, 0);
+ cond, vg_mutex, 0, 0);
return res;
}
@@ -1106,4 +1235,6 @@ int pthread_cond_timedwait ( pthread_con
unsigned long long int ull_ms_now_after_1970;
unsigned long long int ull_ms_end_after_1970;
+ vg_pthread_mutex_t* vg_mutex;
+ CONVERT(mutex, mutex, vg_mutex);
ensure_valgrind("pthread_cond_timedwait");
@@ -1127,5 +1258,5 @@ int pthread_cond_timedwait ( pthread_con
VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
VG_USERREQ__PTHREAD_COND_TIMEDWAIT,
- cond, mutex, ms_end, 0);
+ cond, vg_mutex, ms_end, 0);
return res;
}
@@ -2683,7 +2814,7 @@ void init_vg_rwlock ( vg_rwlock_t* vg_rw
address of our version. Further, if the LinuxThreads version
appears to have been statically initialised, do the same to the one
- we allocate here. The pthread_rwlock_t.__rw_readers field is set
- to zero by PTHREAD_RWLOCK_INITIALIZER, so we take zero as meaning
- uninitialised and non-zero meaning initialised.
+ we allocate here. The vg_pthread_rwlock_t.__vg_rw_readers field is set
+ to zero by PTHREAD_RWLOCK_INITIALIZER (as are several other fields), so
+ we take zero as meaning uninitialised and non-zero meaning initialised.
*/
static vg_rwlock_t* rw_remap ( pthread_rwlock_t* orig )
@@ -2691,4 +2822,6 @@ static vg_rwlock_t* rw_remap ( pthread_r
int res, i;
vg_rwlock_t* vg_rwl;
+ vg_pthread_rwlock_t* vg_orig;
+
res = __pthread_mutex_lock(&rw_remap_mx);
my_assert(res == 0);
@@ -2714,8 +2847,9 @@ static vg_rwlock_t* rw_remap ( pthread_r
/* Initialise the shadow, if required. */
- if (orig->__rw_readers == 0) {
- orig->__rw_readers = 1;
+ CONVERT(rwlock, orig, vg_orig);
+ if (vg_orig->__vg_rw_readers == 0) {
+ vg_orig->__vg_rw_readers = 1;
init_vg_rwlock(vg_rwl);
- if (orig->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
+ if (vg_orig->__vg_rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
vg_rwl->prefer_w = 0;
}
@@ -2729,12 +2863,17 @@ int pthread_rwlock_init ( pthread_rwlock
{
vg_rwlock_t* rwl;
+ vg_pthread_rwlock_t* vg_orig;
+ vg_pthread_rwlockattr_t* vg_attr;
+ CONVERT(rwlock, orig, vg_orig);
+ CONVERT(rwlockattr, attr, vg_attr);
+
if (0) printf ("pthread_rwlock_init\n");
/* Force the remapper to initialise the shadow. */
- orig->__rw_readers = 0;
+ vg_orig->__vg_rw_readers = 0;
/* Install the lock preference; the remapper needs to know it. */
- orig->__rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
- if (attr)
- orig->__rw_kind = attr->__lockkind;
- rwl = rw_remap ( orig );
+ vg_orig->__vg_rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
+ if (vg_attr)
+ vg_orig->__vg_rw_kind = vg_attr->__vg_lockkind;
+ rwl = rw_remap ( (pthread_rwlock_t*)vg_orig );
return 0;
}
@@ -2985,7 +3127,8 @@ int
pthread_rwlockattr_init (pthread_rwlockattr_t *attr)
{
- attr->__lockkind = 0;
- attr->__pshared = PTHREAD_PROCESS_PRIVATE;
-
+ vg_pthread_rwlockattr_t* vg_attr;
+ CONVERT(rwlockattr, attr, vg_attr);
+ vg_attr->__vg_lockkind = 0;
+ vg_attr->__vg_pshared = PTHREAD_PROCESS_PRIVATE;
return 0;
}
@@ -3002,4 +3145,7 @@ int
pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
{
+ vg_pthread_rwlockattr_t* vg_attr;
+ CONVERT(rwlockattr, attr, vg_attr);
+
if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
return EINVAL;
@@ -3009,5 +3155,5 @@ pthread_rwlockattr_setpshared (pthread_r
return ENOSYS;
- attr->__pshared = pshared;
+ vg_attr->__vg_pshared = pshared;
return 0;
--- valgrind/coregrind/vg_include.h #1.142.2.4:1.142.2.5
@@ -664,4 +664,39 @@ extern Addr VG_(do_useseg) ( UInt seg_se
/* ---------------------------------------------------------------------
+ Exports of vg_libpthread.c
+ ------------------------------------------------------------------ */
+
+/* Replacements for pthread types, shared between vg_libpthread.c and
+ vg_scheduler.c. See comment in vg_libpthread.c above the other
+ vg_pthread_*_t types for a description of how these are used. */
+
+struct _vg_pthread_fastlock
+{
+ long int __vg_status; /* "Free" or "taken" or head of waiting list */
+ int __vg_spinlock; /* Used by compare_and_swap emulation. Also,
+ adaptive SMP lock stores spin count here. */
+};
+
+typedef struct
+{
+ int __vg_m_reserved; /* Reserved for future use */
+ int __vg_m_count; /* Depth of recursive locking */
+ /*_pthread_descr*/ void* __vg_m_owner; /* Owner thread (if recursive or errcheck) */
+ int __vg_m_kind; /* Mutex kind: fast, recursive or errcheck */
+ struct _vg_pthread_fastlock __vg_m_lock; /* Underlying fast lock */
+} vg_pthread_mutex_t;
+
+typedef struct
+{
+ struct _vg_pthread_fastlock __vg_c_lock; /* Protect against concurrent access */
+ /*_pthread_descr*/ void* __vg_c_waiting; /* Threads waiting on this condition */
+ // Padding ensures the size is 48 bytes
+ char __vg_padding[48 - sizeof(struct _vg_pthread_fastlock)
+ - sizeof(void*) - sizeof(long long)];
+ long long __vg_align;
+} vg_pthread_cond_t;
+
+
+/* ---------------------------------------------------------------------
Exports of vg_scheduler.c
------------------------------------------------------------------ */
@@ -722,5 +757,5 @@ typedef
the condition variable indicated by the .associated_cv field.
In all other cases, should be NULL. */
- void* /*pthread_mutex_t* */ associated_mx;
+ vg_pthread_mutex_t* associated_mx;
/* When .status == WaitCV, points to the condition variable I am
|