|
From: Jeremy F. <je...@go...> - 2005-02-22 02:33:04
|
CVS commit by fitzhardinge:
SIGCONT has special properties with respect to interrupting/restarting syscalls.
The easiest thing to do is let the kernel handle it, so we do - only install
a handler for SIGCONT if the client asks for it.
M +11 -0 vg_signals.c 1.125
--- valgrind/coregrind/vg_signals.c #1.124:1.125
@@ -251,4 +251,15 @@ void calculate_SKSS_from_SCSS ( SKSS* ds
break;
+ case VKI_SIGCONT:
+ /* Let the kernel handle SIGCONT unless the client is actually
+ catching it. */
+ if (vg_scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
+ skss_handler = VKI_SIG_DFL;
+ else if (vg_scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
+ skss_handler = VKI_SIG_IGN;
+ else
+ skss_handler = vg_async_signalhandler;
+ break;
+
default:
if (sig == VKI_SIGVGKILL)
|
|
From: Jeremy F. <je...@go...> - 2005-02-22 02:33:59
|
CVS commit by fitzhardinge:
Always report core-dumping signals. If the client gets a fatal core-dumping
signal, always report it, since it indicates a real program bug.
M +1 -1 vg_signals.c 1.126
--- valgrind/coregrind/vg_signals.c #1.125:1.126
@@ -1296,5 +1296,5 @@ static void vg_default_action(const vki_
}
- if (VG_(clo_verbosity) != 0 && (could_core || VG_(clo_verbosity) > 1)) {
+ if (VG_(clo_verbosity) != 0 || could_core) {
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg, "Process terminating with default action of signal %d (%s)%s",
|
|
From: Jeremy F. <je...@go...> - 2005-02-22 02:34:37
|
CVS commit by fitzhardinge:
Implement sys_sched_rr_get_interval. Tested in posixtestsuite.
M +1 -0 core.h 1.89
M +12 -0 vg_syscalls.c 1.253
M +1 -1 x86-linux/syscalls.c 1.25
--- valgrind/coregrind/vg_syscalls.c #1.252:1.253
@@ -4712,4 +4712,16 @@ POST(sys_sched_getparam)
}
+PRE(sys_sched_rr_get_interval, 0)
+{
+ PRINT("sched_rr_get_interval ( %d, %p )", arg1, arg2);
+ PRE_REG_READ2(int, "sched_rr_get_interval", vki_pid_t, pid, struct vki_timespec *, tp);
+ SYS_PRE_MEM_WRITE("sched_rr_get_interval(tp)", arg2, sizeof(struct vki_timespec));
+}
+
+POST(sys_sched_rr_get_interval)
+{
+ POST_MEM_WRITE(arg2, sizeof(struct vki_timespec));
+}
+
PRE(sys_select, MayBlock)
{
--- valgrind/coregrind/core.h #1.88:1.89
@@ -1381,4 +1381,5 @@ GEN_SYSCALL_WRAPPER(sys_munlockall);
GEN_SYSCALL_WRAPPER(sys_sched_setparam);
GEN_SYSCALL_WRAPPER(sys_sched_getparam);
+GEN_SYSCALL_WRAPPER(sys_sched_rr_get_interval);
GEN_SYSCALL_WRAPPER(sys_sched_setscheduler);
GEN_SYSCALL_WRAPPER(sys_sched_getscheduler);
--- valgrind/coregrind/x86-linux/syscalls.c #1.24:1.25
@@ -894,5 +894,5 @@ const struct SyscallTableEntry VGA_(sysc
GENX_(__NR_sched_get_priority_min, sys_sched_get_priority_min),// 160
- // (__NR_sched_rr_get_interval, sys_sched_rr_get_interval), // 161 */*
+ GENXY(__NR_sched_rr_get_interval, sys_sched_rr_get_interval), // 161 */*
GENXY(__NR_nanosleep, sys_nanosleep), // 162
GENX_(__NR_mremap, sys_mremap), // 163
|
|
From: Jeremy F. <je...@go...> - 2005-02-22 03:16:10
|
CVS commit by fitzhardinge:
Don't print non-coredumping fatal signals at normal verbosity levels.
Also, only always-print coredumping signals which were sent by the kernel
(ie, don't always print ones sent by kill, etc).
M +1 -1 vg_signals.c 1.128
--- valgrind/coregrind/vg_signals.c #1.127:1.128
@@ -1296,5 +1296,5 @@ static void vg_default_action(const vki_
}
- if (VG_(clo_verbosity) != 0 || could_core) {
+ if (VG_(clo_verbosity) > 1 || (could_core && info->si_code > VKI_SI_USER)) {
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg, "Process terminating with default action of signal %d (%s)%s",
|
|
From: Jeremy F. <je...@go...> - 2005-02-23 02:22:36
|
CVS commit by fitzhardinge:
Don't loop infinitely if it looks like we've lost the siginfo from
a signal. If this happens, it generally means some other program has
consumed the user's full quota of siginfo structures, and Valgrind can't
live without them.
BUGS:99142
M +8 -0 vg_signals.c 1.129
--- valgrind/coregrind/vg_signals.c #1.128:1.129
@@ -1728,4 +1728,12 @@ void vg_sync_signalhandler ( Int sigNo,
Bool resume_sched = False;
+ if (info->_sifields._kill._pid == 0) {
+ VG_(message)(Vg_UserMsg, "Signal %d (%s) appears to have lost its siginfo; I can't go on.",
+ sigNo, signame(sigNo));
+ VG_(message)(Vg_UserMsg, " This may be because one of your programs has consumed your");
+ VG_(message)(Vg_UserMsg, " ration of siginfo structures.");
+ exit(99);
+ }
+
if (VG_(threads)[tid].status == VgTs_WaitSys) {
/* This is like a normal async signal, but we can't simply
|
|
From: Jeremy F. <je...@go...> - 2005-02-23 07:17:36
|
CVS commit by fitzhardinge:
Report lost siginfos a bit more idiomatically.
M +24 -8 vg_signals.c 1.130
--- valgrind/coregrind/vg_signals.c #1.129:1.130
@@ -1728,12 +1728,4 @@ void vg_sync_signalhandler ( Int sigNo,
Bool resume_sched = False;
- if (info->_sifields._kill._pid == 0) {
- VG_(message)(Vg_UserMsg, "Signal %d (%s) appears to have lost its siginfo; I can't go on.",
- sigNo, signame(sigNo));
- VG_(message)(Vg_UserMsg, " This may be because one of your programs has consumed your");
- VG_(message)(Vg_UserMsg, " ration of siginfo structures.");
- exit(99);
- }
-
if (VG_(threads)[tid].status == VgTs_WaitSys) {
/* This is like a normal async signal, but we can't simply
@@ -1749,4 +1741,28 @@ void vg_sync_signalhandler ( Int sigNo,
}
+ if (info->_sifields._kill._pid == 0) {
+ /* There's a per-user limit of pending siginfo signals. If
+ you exceed this, by having more than that number of
+ pending signals with siginfo, then new signals are
+ delivered without siginfo. This condition can be caused
+ by any unrelated program you're running at the same time
+ as Valgrind, if it has a large number of pending siginfo
+ signals which it isn't taking delivery of.
+
+ Since we depend on siginfo to work out why we were sent a
+ signal and what we should do about it, we really can't
+ continue unless we get it. */
+ VG_(message)(Vg_UserMsg, "Signal %d (%s) appears to have lost its siginfo; I can't go on.",
+ sigNo, signame(sigNo));
+ VG_(message)(Vg_UserMsg, " This may be because one of your programs has consumed your");
+ VG_(message)(Vg_UserMsg, " ration of siginfo structures.");
+
+ /* It's a fatal signal, so we force the default handler. */
+ VG_(set_default_handler)(sigNo);
+ VG_(deliver_signal)(tid, info);
+ VG_(resume_scheduler)(tid);
+ exit(99); /* If we can't resume, then just exit */
+ }
+
if (VG_(clo_trace_signals))
VG_(message)(Vg_DebugMsg, "Routing user-sent sync signal %d via queue; resume_sched=%d",
|
|
From: Jeremy F. <je...@go...> - 2005-02-25 05:32:25
|
CVS commit by fitzhardinge:
Fix a long-standing bug in the core-dumper, revealed by the
avoid-device-maps patch.
M +15 -12 vg_signals.c 1.132
--- valgrind/coregrind/vg_signals.c #1.131:1.132
@@ -1096,5 +1096,5 @@ static void make_coredump(ThreadId tid,
Elf32_Phdr *phdrs;
Int num_phdrs;
- Int i;
+ Int i, idx;
UInt off;
struct note *notelist, *note;
@@ -1186,13 +1186,15 @@ static void make_coredump(ThreadId tid,
off = PGROUNDUP(off);
- for(seg = VG_(first_segment)(), i = 1;
+ for(seg = VG_(first_segment)(), idx = 1;
seg != NULL;
- seg = VG_(next_segment)(seg), i++) {
+ seg = VG_(next_segment)(seg)) {
if (!may_dump(seg))
continue;
- fill_phdr(&phdrs[i], seg, off, (seg->len + off) < max_size);
+ fill_phdr(&phdrs[idx], seg, off, (seg->len + off) < max_size);
- off += phdrs[i].p_filesz;
+ off += phdrs[idx].p_filesz;
+
+ idx++;
}
@@ -1206,18 +1208,19 @@ static void make_coredump(ThreadId tid,
VG_(lseek)(core_fd, phdrs[1].p_offset, VKI_SEEK_SET);
- for(seg = VG_(first_segment)(), i = 1;
+ for(seg = VG_(first_segment)(), idx = 1;
seg != NULL;
- seg = VG_(next_segment)(seg), i++) {
+ seg = VG_(next_segment)(seg)) {
if (!should_dump(seg))
continue;
- if (phdrs[i].p_filesz > 0) {
+ if (phdrs[idx].p_filesz > 0) {
Int ret;
- vg_assert(VG_(lseek)(core_fd, phdrs[i].p_offset, VKI_SEEK_SET) == phdrs[i].p_offset);
+ vg_assert(VG_(lseek)(core_fd, phdrs[idx].p_offset, VKI_SEEK_SET) == phdrs[idx].p_offset);
+ vg_assert(seg->len >= phdrs[idx].p_filesz);
- vg_assert(seg->len >= phdrs[i].p_filesz);
- ret = VG_(write)(core_fd, (void *)seg->addr, phdrs[i].p_filesz);
+ ret = VG_(write)(core_fd, (void *)seg->addr, phdrs[idx].p_filesz);
}
+ idx++;
}
|
|
From: Jeremy F. <je...@go...> - 2005-02-25 05:33:59
|
CVS commit by fitzhardinge:
Allocate enough space for the terminal \0 when allocating space for the filename.
M +1 -1 vg_procselfmaps.c 1.19
--- valgrind/coregrind/vg_procselfmaps.c #1.18:1.19
@@ -241,5 +241,5 @@ void VG_(parse_procselfmaps) (
/* Minor hack: put a '\0' at the filename end for the call to
`record_mapping', then restore the old char with `tmp'. */
- filename = VG_(arena_malloc)(VG_AR_CORE, i_eol-i);
+ filename = VG_(arena_malloc)(VG_AR_CORE, i_eol-i+1);
VG_(memcpy)(filename, &procmap_buf[i], i_eol-i);
filename[i_eol - i] = '\0';
|
|
From: Jeremy F. <je...@go...> - 2005-02-25 05:35:46
|
CVS commit by fitzhardinge:
Close a couple of small leaks.
M +2 -0 ume.c 1.40
M +1 -0 vg_main.c 1.253
--- valgrind/coregrind/vg_main.c #1.252:1.253
@@ -810,4 +810,5 @@ static char **fix_environment(char **ori
}
+ free(inject_path);
ret[envc] = NULL;
--- valgrind/coregrind/ume.c #1.39:1.40
@@ -420,4 +420,5 @@ static int load_ELF(char *hdr, int len,
info->interp_base = (ESZ(Addr))base;
+ free(interp->p);
free(interp);
} else
@@ -429,4 +430,5 @@ static int load_ELF(char *hdr, int len,
info->init_eip = (Addr)entry;
+ free(e->p);
free(e);
|
|
From: Jeremy F. <je...@go...> - 2005-02-28 01:13:07
|
CVS commit by fitzhardinge:
Insert MALLOCLIKE/FREELIKE instrumentation into VG_(arena_*) functions.
I'm not convinced this is correct yet, but it works enough.
This change includes some fixes for problems found while running V
under memcheck.
M +11 -4 stage1.c 1.33
M +11 -3 vg_main.c 1.254
M +17 -8 vg_malloc2.c 1.35
M +1 -0 vg_signals.c 1.134
M +4 -4 vg_symtab2.c 1.104
--- valgrind/coregrind/vg_malloc2.c #1.34:1.35
@@ -32,4 +32,5 @@
#include "core.h"
+#include "memcheck/memcheck.h"
//#define DEBUG_MALLOC // turn on heavyweight debugging machinery
@@ -479,4 +480,5 @@ Superblock* newSuperblock ( Arena* a, Si
}
vg_assert(NULL != sb);
+ VALGRIND_DISCARD(VALGRIND_MAKE_WRITABLE(sb, cszB));
vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
sb->n_payload_bytes = cszB - sizeof(Superblock);
@@ -867,4 +869,5 @@ void mkFreeBlock ( Arena* a, Block* b, S
SizeT pszB = bszB_to_pszB(a, bszB);
vg_assert(b_lno == pszB_to_listNo(pszB));
+ VALGRIND_DISCARD(VALGRIND_MAKE_WRITABLE(b, bszB));
// Set the size fields and indicate not-in-use.
set_bszB_lo(b, mk_free_bszB(bszB));
@@ -896,4 +899,5 @@ void mkInuseBlock ( Arena* a, Block* b,
UInt i;
vg_assert(bszB >= min_useful_bszB(a));
+ VALGRIND_MAKE_WRITABLE(b, bszB);
set_bszB_lo(b, mk_inuse_bszB(bszB));
set_bszB_hi(b, mk_inuse_bszB(bszB));
@@ -1030,4 +1034,6 @@ void* VG_(arena_malloc) ( ArenaId aid, S
v = get_block_payload(a, b);
vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
+
+ VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False);
return v;
}
@@ -1125,4 +1131,6 @@ void VG_(arena_free) ( ArenaId aid, void
# endif
+ VALGRIND_FREELIKE_BLOCK(ptr, 0);
+
VGP_POPCC(VgpMalloc);
}
@@ -1251,4 +1259,7 @@ void* VG_(arena_malloc_aligned) ( ArenaI
vg_assert( (((Addr)align_p) % req_alignB) == 0 );
+
+ VALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
+
return align_p;
}
@@ -1269,5 +1280,4 @@ SizeT VG_(arena_payload_szB) ( ArenaId a
void* VG_(arena_calloc) ( ArenaId aid, SizeT alignB, SizeT nmemb, SizeT nbytes )
{
- UInt i;
SizeT size;
UChar* p;
@@ -1283,5 +1293,7 @@ void* VG_(arena_calloc) ( ArenaId aid, S
p = VG_(arena_malloc_aligned) ( aid, alignB, size );
- for (i = 0; i < size; i++) p[i] = 0;
+ VG_(memset)(p, 0, nbytes);
+
+ VALGRIND_MALLOCLIKE_BLOCK(p, nbytes, 0, True);
VGP_POPCC(VgpMalloc);
@@ -1296,6 +1308,5 @@ void* VG_(arena_realloc) ( ArenaId aid,
Arena* a;
SizeT old_bszB, old_pszB;
- UInt i;
- UChar *p_old, *p_new;
+ UChar *p_new;
Block* b;
@@ -1326,9 +1337,7 @@ void* VG_(arena_realloc) ( ArenaId aid,
}
- p_old = (UChar*)ptr;
- for (i = 0; i < old_pszB; i++)
- p_new[i] = p_old[i];
+ VG_(memcpy)(p_new, ptr, old_pszB);
- VG_(arena_free)(aid, p_old);
+ VG_(arena_free)(aid, ptr);
VGP_POPCC(VgpMalloc);
--- valgrind/coregrind/vg_main.c #1.253:1.254
@@ -46,4 +46,6 @@
#include <unistd.h>
+#include "memcheck/memcheck.h"
+
#ifndef AT_DCACHEBSIZE
#define AT_DCACHEBSIZE 19
@@ -1178,5 +1180,5 @@ static void load_tool( const char *tooln
{
Bool ok;
- int len = strlen(VG_(libdir)) + strlen(toolname)*2 + 16;
+ int len = strlen(VG_(libdir)) + strlen(toolname) + 16;
char buf[len];
void* handle;
@@ -2184,8 +2186,12 @@ static void build_segment_map_callback (
vg_assert(0 != r_esp);
if (is_stack_segment) {
- if (0)
- VG_(message)(Vg_DebugMsg, "invalidating stack area: %x .. %x",
+ if (0) {
+ VG_(message)(Vg_DebugMsg, "invalidating stack area: %p .. %p",
start,r_esp);
+ VG_(message)(Vg_DebugMsg, " validating stack area: %p .. %p",
+ r_esp, start+size);
+ }
VG_TRACK( die_mem_stack, start, r_esp-start );
+ VG_TRACK( post_mem_write, r_esp, (start+size)-r_esp );
}
}
@@ -2466,4 +2473,5 @@ int main(int argc, char **argv, char **e
sp_at_startup = setup_client_stack(init_sp, cl_argv, env, &info,
&client_auxv);
+ free(env);
}
--- valgrind/coregrind/stage1.c #1.32:1.33
@@ -44,4 +44,5 @@
#include "core.h"
#include "ume.h"
+#include "memcheck/memcheck.h"
static int stack[SIGSTKSZ*4];
@@ -304,8 +305,5 @@ int main(int argc, char** argv)
{
struct rlimit rlim;
- const char *cp = getenv(VALGRINDLIB);
-
- if (cp != NULL)
- valgrind_lib = cp;
+ const char *cp;
// Initial stack pointer is to argc, which is immediately before argv[0]
@@ -313,4 +311,13 @@ int main(int argc, char** argv)
init_sp = argv - 1;
+ /* The Linux libc startup sequence leaves this in an apparently
+ undefined state, but it really is defined, so mark it so. */
+ VALGRIND_MAKE_READABLE(init_sp, sizeof(int));
+
+ cp = getenv(VALGRINDLIB);
+
+ if (cp != NULL)
+ valgrind_lib = cp;
+
/* Set the address space limit as high as it will go, since we make
a lot of very large mappings. */
--- valgrind/coregrind/vg_symtab2.c #1.103:1.104
@@ -1106,5 +1106,5 @@ static
Addr find_debug_file( Char* objpath, Char* debugname, UInt crc, UInt* size )
{
- Char *objdir = VG_(strdup)(objpath);
+ Char *objdir = VG_(arena_strdup)(VG_AR_SYMTAB, objpath);
Char *objdirptr;
Char *debugpath;
@@ -1114,5 +1114,5 @@ Addr find_debug_file( Char* objpath, Cha
*objdirptr = '\0';
- debugpath = VG_(malloc)(VG_(strlen)(objdir) + VG_(strlen)(debugname) + 16);
+ debugpath = VG_(arena_malloc)(VG_AR_SYMTAB, VG_(strlen)(objdir) + VG_(strlen)(debugname) + 16);
VG_(sprintf)(debugpath, "%s/%s", objdir, debugname);
@@ -1126,6 +1126,6 @@ Addr find_debug_file( Char* objpath, Cha
}
- VG_(free)(debugpath);
- VG_(free)(objdir);
+ VG_(arena_free)(VG_AR_SYMTAB, debugpath);
+ VG_(arena_free)(VG_AR_SYMTAB, objdir);
return addr;
--- valgrind/coregrind/vg_signals.c #1.133:1.134
@@ -2080,4 +2080,5 @@ void VG_(sigstartup_actions) ( void )
tsa.ksa_handler = (void *)vg_sync_signalhandler;
tsa.sa_flags = VKI_SA_SIGINFO;
+ tsa.sa_restorer = 0;
VG_(sigfillset)(&tsa.sa_mask);
|
|
From: Jeremy F. <je...@go...> - 2005-02-28 01:07:54
|
CVS commit by fitzhardinge:
Fix the sigsuspend syscalls, by making sure the change of the signal
mask is atomic with respect to the syscall blocking. This should fix
the problems with LinuxThreads programs hanging.
M +14 -16 vg_signals.c 1.133
M +1 -5 vg_syscalls.c 1.255
--- valgrind/coregrind/vg_syscalls.c #1.254:1.255
@@ -5461,5 +5461,4 @@ PRE(sys_sigsuspend, MayBlock)
int, history0, int, history1,
vki_old_sigset_t, mask);
- convert_sigset_to_rt((vki_old_sigset_t *)arg3, &tst->eff_sig_mask);
}
@@ -5475,8 +5474,6 @@ PRE(sys_rt_sigsuspend, MayBlock)
PRINT("sys_rt_sigsuspend ( %p, %d )", arg1,arg2 );
PRE_REG_READ2(int, "rt_sigsuspend", vki_sigset_t *, mask, vki_size_t, size)
- if (arg1 != (Addr)NULL) {
+ if (arg1 != (Addr)NULL)
SYS_PRE_MEM_READ( "rt_sigsuspend(mask)", arg1, sizeof(vki_sigset_t) );
- tst->eff_sig_mask = *(vki_sigset_t *)arg1;
- }
}
--- valgrind/coregrind/vg_signals.c #1.132:1.133
@@ -1621,4 +1621,10 @@ void vg_async_signalhandler ( Int sigNo,
sigNo, tid, info->si_code);
+ /* Update the thread's effective signal mask. The only syscall
+ this should apply to is sigsuspend, which has a temporary signal
+ mask set for signals delivered while it is blocked. The signal
+ handler will restore this on signal return. */
+ tst->eff_sig_mask = uc->uc_sigmask;
+
/* Update thread state properly */
VGA_(interrupted_syscall)(tid, uc,
@@ -1729,17 +1735,11 @@ void vg_sync_signalhandler ( Int sigNo,
Action: make thread runnable, queue signal, resume scheduler
*/
- Bool resume_sched = False;
-
if (VG_(threads)[tid].status == VgTs_WaitSys) {
- /* This is like a normal async signal, but we can't simply
- call async_signalhandler because the client may actually
- have the signal blocked, so we still need to queue it. */
- resume_sched = True;
-
- VG_(set_running)(tid);
-
- /* Update thread state properly */
- VGA_(interrupted_syscall)(tid, uc,
- !!(vg_scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART));
+ /* Since this signal interrupted a syscall, it means the
+ client's signal mask was applied, so we can't get here
+ unless the client wants this signal right now. This means
+ we can simply use the async_signalhandler. */
+ vg_async_signalhandler(sigNo, info, uc);
+ VG_(core_panic)("vg_async_signalhandler returned!?\n");
}
@@ -1769,6 +1769,6 @@ void vg_sync_signalhandler ( Int sigNo,
if (VG_(clo_trace_signals))
- VG_(message)(Vg_DebugMsg, "Routing user-sent sync signal %d via queue; resume_sched=%d",
- sigNo, resume_sched);
+ VG_(message)(Vg_DebugMsg, "Routing user-sent sync signal %d via queue",
+ sigNo);
/* Since every thread has these signals unblocked, we can't rely
@@ -1780,6 +1780,4 @@ void vg_sync_signalhandler ( Int sigNo,
queue_signal(0, info); /* shared pending */
- if (resume_sched)
- VG_(resume_scheduler)(tid);
return;
}
|
|
From: Jeremy F. <je...@go...> - 2005-02-28 06:56:56
|
CVS commit by fitzhardinge:
Fix assertion; padfile can == 0
M +1 -1 vg_main.c 1.255
--- valgrind/coregrind/vg_main.c #1.254:1.255
@@ -1403,5 +1403,5 @@ void as_unpad(void *start, void *end, in
int res;
- vg_assert(padfile > 0);
+ vg_assert(padfile >= 0);
res = fstat(padfile, &padstat);
|
|
From: Jeremy F. <je...@go...> - 2005-02-28 15:59:43
|
CVS commit by fitzhardinge:
Fix leak of Valgrind-internal thread stacks. This was leaking 64k
each time a thread exited.
BUGS: 100036
M +1 -1 vg_scheduler.c 1.225
--- valgrind/coregrind/vg_scheduler.c #1.224:1.225
@@ -513,5 +513,5 @@ void mostly_clear_thread_record ( Thread
VG_(sigemptyset)(&VG_(threads)[tid].eff_sig_mask);
- VGA_(os_state_init)(&VG_(threads)[tid]);
+ VGA_(os_state_clear)(&VG_(threads)[tid]);
/* start with no altstack */
|
|
From: Tom H. <th...@cy...> - 2005-02-28 16:00:46
|
CVS commit by thughes:
First stab at getting the new thread modelling code to report errors
properly through the main error reporting system.
M +7 -0 core.h 1.92
M +17 -12 vg_errcontext.c 1.68
M +1 -0 vg_pthreadmodel.c 1.2
M +160 -10 vg_threadmodel.c 1.3
--- valgrind/coregrind/core.h #1.91:1.92
@@ -1030,4 +1030,5 @@ typedef
enum {
ThreadErr = -1, // Thread error
+ MutexErr = -2, // Mutex error
}
CoreErrorKind;
@@ -1869,4 +1870,10 @@ extern void VG_(tm_mutex_unlock) (Thread
extern Bool VG_(tm_mutex_exists) (Addr mutexp);
+extern UInt VG_(tm_error_update_extra) (Error *err);
+extern Bool VG_(tm_error_equal) (VgRes res, Error *e1, Error *e2);
+extern void VG_(tm_error_print) (Error *err);
+
+extern void VG_(tm_init) ();
+
/* ----- pthreads ----- */
extern void VG_(pthread_init) ();
--- valgrind/coregrind/vg_errcontext.c #1.67:1.68
@@ -202,10 +202,7 @@ static Bool eq_Error ( VgRes res, Error*
switch (e1->ekind) {
case ThreadErr:
+ case MutexErr:
vg_assert(VG_(needs).core_errors);
- if (e1->string == e2->string)
- return True;
- if (0 == VG_(strcmp)(e1->string, e2->string))
- return True;
- return False;
+ return VG_(tm_error_equal)(res, e1, e2);
default:
if (VG_(needs).skin_errors)
@@ -229,7 +226,7 @@ static void pp_Error ( Error* err, Bool
switch (err->ekind) {
case ThreadErr:
+ case MutexErr:
vg_assert(VG_(needs).core_errors);
- VG_(message)(Vg_UserMsg, "%s", err->string );
- VG_(pp_ExeContext)(err->where);
+ VG_(tm_error_print)(err);
break;
default:
@@ -330,5 +327,5 @@ static void gen_suppression(Error* err)
VG_(printf)(" <insert a suppression name here>\n");
- if (ThreadErr == err->ekind) {
+ if (ThreadErr == err->ekind || MutexErr == err->ekind) {
VG_(printf)(" core:PThread\n");
@@ -516,7 +513,15 @@ void VG_(maybe_record_error) ( ThreadId
*p = err;
- /* update `extra', for non-core errors (core ones don't use 'extra') */
- if (VG_(needs).skin_errors && ThreadErr != ekind) {
+ /* update `extra' */
+ if (VG_(needs).skin_errors) {
+ switch (ekind) {
+ case ThreadErr:
+ case MutexErr:
+ extra_size = VG_(tm_error_update_extra)(p);
+ break;
+ default:
extra_size = SK_(update_extra)(p);
+ break;
+ }
/* copy block pointed to by `extra', if there is one */
@@ -934,5 +939,5 @@ Bool supp_matches_error(Supp* su, Error*
switch (su->skind) {
case PThreadSupp:
- return (err->ekind == ThreadErr);
+ return (err->ekind == ThreadErr || err->ekind == MutexErr);
default:
if (VG_(needs).skin_errors) {
--- valgrind/coregrind/vg_pthreadmodel.c #1.1:1.2
@@ -461,4 +461,5 @@ void VG_(pthread_init)()
VG_(add_wrapper)("soname:libpthread.so.0", wraps[i].name, &wraps[i].wrapper);
}
+ VG_(tm_init)();
VG_(tm_thread_create)(VG_INVALID_THREADID, VG_(master_tid), True);
}
--- valgrind/coregrind/vg_threadmodel.c #1.2:1.3
@@ -114,4 +114,11 @@ enum thread_error
};
+struct thread_error_data
+{
+ enum thread_error err;
+ struct thread *th;
+ const Char *action;
+};
+
static const Char *pp_threadstate(const struct thread *th)
{
@@ -248,6 +255,7 @@ static struct thread *thread_get(ThreadI
static void thread_report(ThreadId tid, enum thread_error err, const Char *action)
{
+ Char *errstr = "?";
struct thread *th = thread_get(tid);
- const Char *errstr = "?";
+ struct thread_error_data errdata;
switch(err) {
@@ -260,6 +268,24 @@ static void thread_report(ThreadId tid,
}
- VG_(printf)("*** thread problem: tid=%d(%s) err=\"%s\" action=\"%s\"\n",
- tid, pp_threadstate(th), errstr, action);
+ errdata.err = err;
+ errdata.th = th;
+ errdata.action = action;
+
+ VG_(maybe_record_error)(tid, ThreadErr, 0, errstr, &errdata);
+}
+
+static void pp_thread_error(Error *err)
+{
+ struct thread_error_data *errdata = VG_(get_error_extra)(err);
+ struct thread *th = errdata->th;
+ Char *errstr = VG_(get_error_string)(err);
+
+ VG_(message)(Vg_UserMsg, "Found %s thread in state %s while %s\n",
+ errstr, pp_threadstate(th), errdata->action);
+ VG_(pp_ExeContext)(VG_(get_error_where)(err));
+
+ VG_(message)(Vg_UserMsg, "Thread was %s",
+ th->state == TS_Dead ? "destroyed" : "created");
+ VG_(pp_ExeContext)(th->ec_created);
}
@@ -526,5 +552,5 @@ struct mutex
};
-enum mutex_err
+enum mutex_error
{
MXE_NotExist, /* never existed */
@@ -537,4 +563,11 @@ enum mutex_err
};
+struct mutex_error_data
+{
+ enum mutex_error err;
+ struct mutex *mx;
+ const Char *action;
+};
+
static struct mutex *mx_get(Addr mutexp);
@@ -585,8 +618,9 @@ static void mutex_setstate(ThreadId tid,
}
-static void mutex_report(ThreadId tid, Addr mutexp, enum mutex_err err, const Char *action)
+static void mutex_report(ThreadId tid, Addr mutexp, enum mutex_error err, const Char *action)
{
- const Char *errstr="?";
+ Char *errstr="?";
struct mutex *mx = mx_get(mutexp);
+ struct mutex_error_data errdata;
switch(err) {
@@ -597,10 +631,45 @@ static void mutex_report(ThreadId tid, A
case MXE_Locked: errstr="locked"; break;
case MXE_NotOwner: errstr="unowned"; break;
- case MXE_Deadlock: errstr="deadlock"; break;
+ case MXE_Deadlock: errstr="deadlock on"; break;
}
- /* report a mutex-related error */
- VG_(printf)("*** mutex error: thread=%d found %s mutex %p(state=%s) while %s\n",
- tid, errstr, mutexp, mx ? pp_mutexstate(mx) : (const Char *)"-", action);
+ errdata.err = err;
+ errdata.mx = mx;
+ errdata.action = action;
+
+ VG_(maybe_record_error)(tid, MutexErr, 0, errstr, &errdata);
+}
+
+static void pp_mutex_error(Error *err)
+{
+ struct mutex_error_data *errdata = VG_(get_error_extra)(err);
+ struct mutex *mx = errdata->mx;
+ Char *errstr = VG_(get_error_string)(err);
+
+ VG_(message)(Vg_UserMsg, "Found %s mutex %p while %s\n",
+ errstr, mx ? mx->mutex : 0, errdata->action);
+ VG_(pp_ExeContext)(VG_(get_error_where)(err));
+
+ switch (mx->state) {
+ case MX_Init:
+ case MX_Dead:
+ break;
+ case MX_Locked:
+ VG_(message)(Vg_UserMsg, "Mutex was locked by thread %d", mx->owner);
+ VG_(pp_ExeContext)(mx->ec_locked);
+ break;
+ case MX_Unlocking:
+ VG_(message)(Vg_UserMsg, "Mutex being unlocked");
+ VG_(pp_ExeContext)(mx->ec_locked);
+ break;
+ case MX_Free:
+ VG_(message)(Vg_UserMsg, "Mutex was unlocked");
+ VG_(pp_ExeContext)(mx->ec_locked);
+ break;
+ }
+
+ VG_(message)(Vg_UserMsg, "Mutex was %s",
+ mx->state == MX_Dead ? "destroyed" : "created");
+ VG_(pp_ExeContext)(mx->ec_create);
}
@@ -935,2 +1004,83 @@ void VG_(tm_cond_signal)(ThreadId tid, v
}
+/* --------------------------------------------------
+ Error handling
+ -------------------------------------------------- */
+
+UInt VG_(tm_error_update_extra)(Error *err)
+{
+ switch (VG_(get_error_kind)(err)) {
+ case ThreadErr: {
+ struct thread_error_data *errdata = VG_(get_error_extra)(err);
+ struct thread *new_th = VG_(malloc)(sizeof(struct thread));
+
+ VG_(memcpy)(new_th, errdata->th, sizeof(struct thread));
+
+ errdata->th = new_th;
+
+ return sizeof(struct thread_error_data);
+ }
+
+ case MutexErr: {
+ struct mutex_error_data *errdata = VG_(get_error_extra)(err);
+ struct mutex *new_mx = VG_(malloc)(sizeof(struct mutex));
+
+ VG_(memcpy)(new_mx, errdata->mx, sizeof(struct mutex));
+
+ errdata->mx = new_mx;
+
+ return sizeof(struct mutex_error_data);
+ }
+
+ default:
+ return 0;
+ }
+}
+
+Bool VG_(tm_error_equal)(VgRes res, Error *e1, Error *e2)
+{
+ /* Guaranteed by calling function */
+ vg_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
+
+ switch (VG_(get_error_kind)(e1)) {
+ case ThreadErr: {
+ struct thread_error_data *errdata1 = VG_(get_error_extra)(e1);
+ struct thread_error_data *errdata2 = VG_(get_error_extra)(e2);
+
+ return errdata1->err == errdata2->err;
+ }
+
+ case MutexErr: {
+ struct mutex_error_data *errdata1 = VG_(get_error_extra)(e1);
+ struct mutex_error_data *errdata2 = VG_(get_error_extra)(e2);
+
+ return errdata1->err == errdata2->err;
+ }
+
+ default:
+ VG_(printf)("Error:\n unknown error code %d\n",
+ VG_(get_error_kind)(e1));
+ VG_(core_panic)("unknown error code in VG_(tm_error_equal)");
+ }
+}
+
+void VG_(tm_error_print)(Error *err)
+{
+ switch (VG_(get_error_kind)(err)) {
+ case ThreadErr:
+ pp_thread_error(err);
+ break;
+ case MutexErr:
+ pp_mutex_error(err);
+ break;
+ }
+}
+
+/* --------------------------------------------------
+ Initialisation
+ -------------------------------------------------- */
+
+void VG_(tm_init)()
+{
+ VG_(needs_core_errors)();
+}
|
|
From: Jeremy F. <je...@go...> - 2005-02-28 16:00:16
|
CVS commit by fitzhardinge:
Using nanosleep in yield was making it very slow; sched_yield seems better.
M +5 -7 vg_scheduler.c 1.226
--- valgrind/coregrind/vg_scheduler.c #1.225:1.226
@@ -403,12 +403,10 @@ void VG_(vg_yield)(void)
/*
Tell the kernel we're yielding.
-
- Use a short sleep rather than an actual sched_yield, because we
- don't want the kernel to give up on us forever.
-
- (This should probably be a no-op if we haven't started more
- than one thread, but it probably doesn't make much difference.)
*/
+ if (1)
+ VG_(do_syscall)(__NR_sched_yield);
+ else
VG_(nanosleep)(&ts);
+
VG_(set_running)(tid);
|
|
From: Jeremy F. <je...@go...> - 2005-03-01 01:17:02
|
CVS commit by fitzhardinge:
Put vg_intercept.o into the right product.
M +1 -2 Makefile.am 1.110
--- valgrind/coregrind/Makefile.am #1.109:1.110
@@ -56,5 +56,4 @@
vg_hashtable.c \
vg_instrument.c \
- vg_intercept.c \
vg_main.c \
vg_malloc2.c \
@@ -127,5 +126,5 @@
$(PERL) $(srcdir)/gen_toolint.pl struct < $(srcdir)/toolfuncs.def >> $@ || rm -f $@
-vg_inject_so_SOURCES =
+vg_inject_so_SOURCES = vg_intercept.c
vg_inject_so_CFLAGS = $(AM_CFLAGS) -fpic
vg_inject_so_LDADD = -ldl
|
|
From: Jeremy F. <je...@go...> - 2005-03-01 06:20:23
|
CVS commit by fitzhardinge:
Fix sigsuspend properly. This version should keep everyone happy. eff_sig_mask
has been replaced by tmp_sig_mask; unlike eff_sig_mask, tmp_sig_mask has a very
limit scope, and is just used to communicate the temp signal mask between the
sigsuspend syscall and any signal handlers invoked.
M +7 -6 core.h 1.93
M +1 -1 vg_scheduler.c 1.227
M +21 -22 vg_signals.c 1.135
M +12 -6 vg_syscalls.c 1.257
M +1 -1 linux/core_os.c 1.9
M +3 -3 x86/signal.c 1.16
M +2 -2 x86-linux/syscalls.c 1.26
--- valgrind/coregrind/vg_syscalls.c #1.256:1.257
@@ -1821,5 +1821,5 @@ PRE(sys_execve, Special)
;
- VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->eff_sig_mask, NULL);
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->sig_mask, NULL);
}
@@ -5499,5 +5499,4 @@ PRE(sys_pause, MayBlock)
}
-// XXX: x86-specific
PRE(sys_sigsuspend, MayBlock)
{
@@ -5515,7 +5514,7 @@ PRE(sys_sigsuspend, MayBlock)
int, history0, int, history1,
vki_old_sigset_t, mask);
+ convert_sigset_to_rt((const vki_old_sigset_t *)arg3, &tst->tmp_sig_mask);
}
-// XXX: x86-specific
PRE(sys_rt_sigsuspend, MayBlock)
{
@@ -5527,7 +5526,9 @@ PRE(sys_rt_sigsuspend, MayBlock)
*/
PRINT("sys_rt_sigsuspend ( %p, %d )", arg1,arg2 );
- PRE_REG_READ2(int, "rt_sigsuspend", vki_sigset_t *, mask, vki_size_t, size)
- if (arg1 != (Addr)NULL)
+ PRE_REG_READ2(int, "rt_sigsuspend", vki_sigset_t *, mask, vki_size_t, size);
+ if (arg1 != (Addr)NULL) {
SYS_PRE_MEM_READ( "rt_sigsuspend(mask)", arg1, sizeof(vki_sigset_t) );
+ tst->tmp_sig_mask = *(vki_sigset_t *)arg1;
+ }
}
@@ -6099,4 +6100,9 @@ void VG_(client_syscall) ( ThreadId tid
tst->syscallno = syscallno;
+ /* Make sure the tmp signal mask matches the real signal
+ mask; sigsuspend may change this. */
+ vg_assert(tst->sig_mask.sig[0] == tst->tmp_sig_mask.sig[0]);
+ vg_assert(tst->sig_mask.sig[1] == tst->tmp_sig_mask.sig[1]);
+
sys = get_syscall_entry(syscallno);
flags = *(sys->flags_ptr);
@@ -6149,5 +6155,5 @@ void VG_(client_syscall) ( ThreadId tid
PRINT(" --> ...\n");
- mask = tst->eff_sig_mask;
+ mask = tst->sig_mask;
VG_(sanitize_client_sigmask)(tid, &mask);
--- valgrind/coregrind/core.h #1.92:1.93
@@ -632,10 +632,11 @@ struct _ThreadState {
vki_sigset_t sig_mask;
- /* Effective signal mask, eff_sig_mask, is usually identical to
- sig_mask, except when running sigsuspend. sigsuspend sets a
- temporary signal mask while it runs, which is retained while any
- signal handler is run; sig_mask comes into effect once the
- handler has finished. */
- vki_sigset_t eff_sig_mask;
+ /* tmp_sig_mask is usually the same as sig_mask, and is kept in
+ sync whenever sig_mask is changed. The only time they have
+ different values is during the execution of a sigsuspend, where
+ tmp_sig_mask is the temporary mask which sigsuspend installs.
+ It is only consulted to compute the signal mask applied to a
+ signal handler. */
+ vki_sigset_t tmp_sig_mask;
/* A little signal queue for signals we can't get the kernel to
--- valgrind/coregrind/vg_signals.c #1.134:1.135
@@ -673,5 +673,5 @@ void do_setmask ( ThreadId tid,
vg_assert(VG_(is_valid_tid)(tid));
if (oldset) {
- *oldset = VG_(threads)[tid].eff_sig_mask;
+ *oldset = VG_(threads)[tid].sig_mask;
if (VG_(clo_trace_signals))
VG_(message)(Vg_DebugExtraMsg,
@@ -683,5 +683,5 @@ void do_setmask ( ThreadId tid,
VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
- VG_(threads)[tid].eff_sig_mask = VG_(threads)[tid].sig_mask;
+ VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
}
}
@@ -820,5 +820,5 @@ void vg_push_signal_frame ( ThreadId tid
vg_scss.scss_per_sig[sigNo].scss_handler,
vg_scss.scss_per_sig[sigNo].scss_flags,
- &vg_scss.scss_per_sig[sigNo].scss_mask,
+ &tst->sig_mask,
vg_scss.scss_per_sig[sigNo].scss_restorer);
}
@@ -1413,5 +1413,5 @@ static void synth_fault_common(ThreadId
/* If they're trying to block the signal, force it to be delivered */
- if (VG_(sigismember)(&VG_(threads)[tid].eff_sig_mask, VKI_SIGSEGV))
+ if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
VG_(set_default_handler)(VKI_SIGSEGV);
@@ -1503,13 +1503,18 @@ void VG_(deliver_signal) ( ThreadId tid,
}
- /* Handler gets the union of the signal's mask and the thread's
- mask. The original sigmask has already been saved in the
- signal frame, and will be restored on signal return. */
- VG_(sigaddset_from_set)(&tst->eff_sig_mask, &handler->scss_mask);
- VG_(sigaddset_from_set)(&tst->eff_sig_mask, &VG_(threads)[tid].sig_mask);
+ /* At this point:
+ tst->sig_mask is the current signal mask
+ tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
+ handler->scss_mask is the mask set by the handler
- /* also mask this signal, unless they ask us not to */
- if (!(handler->scss_flags & VKI_SA_NOMASK))
- VG_(sigaddset)(&tst->eff_sig_mask, sigNo);
+ Handler gets a mask of tmp_sig_mask|handler_mask|signo
+ */
+ tst->sig_mask = tst->tmp_sig_mask;
+ if (!(handler->scss_flags & VKI_SA_NOMASK)) {
+ VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
+ VG_(sigaddset)(&tst->sig_mask, sigNo);
+
+ tst->tmp_sig_mask = tst->sig_mask;
+ }
}
@@ -1621,10 +1626,4 @@ void vg_async_signalhandler ( Int sigNo,
sigNo, tid, info->si_code);
- /* Update the thread's effective signal mask. The only syscall
- this should apply to is sigsuspend, which has a temporary signal
- mask set for signals delivered while it is blocked. The signal
- handler will restore this on signal return. */
- tst->eff_sig_mask = uc->uc_sigmask;
-
/* Update thread state properly */
VGA_(interrupted_syscall)(tid, uc,
@@ -1859,5 +1858,5 @@ void vg_sync_signalhandler ( Int sigNo,
ThreadState *tst = VG_(get_ThreadState)(VG_(get_lwp_tid)(VG_(gettid)()));
- if (VG_(sigismember)(&tst->eff_sig_mask, sigNo)) {
+ if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
/* signal is blocked, but they're not allowed to block faults */
VG_(set_default_handler)(sigNo);
@@ -1985,5 +1984,5 @@ void VG_(poll_signals)(ThreadId tid)
/* look for all the signals this thread isn't blocking */
for(i = 0; i < _VKI_NSIG_WORDS; i++)
- pollset.sig[i] = ~tst->eff_sig_mask.sig[i];
+ pollset.sig[i] = ~tst->sig_mask.sig[i];
VG_(sigdelset)(&pollset, VKI_SIGVGCHLD); /* already dealt with */
@@ -2120,5 +2119,5 @@ void VG_(sigstartup_actions) ( void )
vg_assert(VG_(threads)[VG_(master_tid)].status == VgTs_Init);
VG_(threads)[VG_(master_tid)].sig_mask = saved_procmask;
- VG_(threads)[VG_(master_tid)].eff_sig_mask = saved_procmask;
+ VG_(threads)[VG_(master_tid)].tmp_sig_mask = saved_procmask;
/* Calculate SKSS and apply it. This also sets the initial kernel
--- valgrind/coregrind/vg_scheduler.c #1.226:1.227
@@ -509,5 +509,5 @@ void mostly_clear_thread_record ( Thread
VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
- VG_(sigemptyset)(&VG_(threads)[tid].eff_sig_mask);
+ VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask);
VGA_(os_state_clear)(&VG_(threads)[tid]);
--- valgrind/coregrind/x86-linux/syscalls.c #1.25:1.26
@@ -327,6 +327,6 @@ static Int do_clone(ThreadId ptid,
/* inherit signal mask */
- ctst->sig_mask = ptst->eff_sig_mask;
- ctst->eff_sig_mask = ctst->sig_mask;
+ ctst->sig_mask = ptst->sig_mask;
+ ctst->tmp_sig_mask = ptst->sig_mask;
/* We don't really know where the client stack is, because its
--- valgrind/coregrind/linux/core_os.c #1.8:1.9
@@ -125,5 +125,5 @@ void VGA_(final_tidyup)(ThreadId tid)
the thread's block state*/
VG_(sigprocmask)(VKI_SIG_BLOCK, NULL, &VG_(threads)[tid].sig_mask);
- VG_(threads)[tid].eff_sig_mask = VG_(threads)[tid].sig_mask;
+ VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
/* and restore handlers to default */
--- valgrind/coregrind/x86/signal.c #1.15:1.16
@@ -454,5 +454,5 @@ static Addr build_sigframe(ThreadState *
VG_(memcpy)(&frame->sigContext, &uc.uc_mcontext,
sizeof(struct vki_sigcontext));
- frame->sigContext.oldmask = tst->sig_mask.sig[0];
+ frame->sigContext.oldmask = mask->sig[0];
VG_TRACK( post_mem_write, esp, offsetof(struct sigframe, vg) );
@@ -554,5 +554,5 @@ static Bool restore_vg_sigframe(ThreadSt
tst->sig_mask = frame->mask;
- tst->eff_sig_mask = frame->mask;
+ tst->tmp_sig_mask = frame->mask;
if (VG_(needs).shadow_regs) {
|
|
From: Jeremy F. <je...@go...> - 2005-03-03 01:01:04
|
CVS commit by fitzhardinge:
Add a --trace-redir=yes option for tracing redirections and function
wrapping events.
M +2 -0 core.h 1.94
M +2 -0 vg_main.c 1.257
M +11 -14 vg_redir.c 1.7
--- valgrind/coregrind/vg_main.c #1.256:1.257
@@ -1453,4 +1453,5 @@ Bool VG_(clo_trace_syscalls) = False;
Bool VG_(clo_trace_signals) = False;
Bool VG_(clo_trace_symtab) = False;
+Bool VG_(clo_trace_redir) = False;
Bool VG_(clo_trace_sched) = False;
Bool VG_(clo_trace_pthreads) = False;
@@ -1690,4 +1691,5 @@ static void process_cmd_line_options( UI
else VG_BOOL_CLO("--trace-signals", VG_(clo_trace_signals))
else VG_BOOL_CLO("--trace-symtab", VG_(clo_trace_symtab))
+ else VG_BOOL_CLO("--trace-redir", VG_(clo_trace_redir))
else VG_BOOL_CLO("--trace-syscalls", VG_(clo_trace_syscalls))
else VG_BOOL_CLO("--trace-pthreads", VG_(clo_trace_pthreads))
--- valgrind/coregrind/core.h #1.93:1.94
@@ -276,4 +276,6 @@ extern Bool VG_(clo_trace_signals);
/* DEBUG: print symtab details? default: NO */
extern Bool VG_(clo_trace_symtab);
+/* DEBUG: print redirection details? default: NO */
+extern Bool VG_(clo_trace_redir);
/* DEBUG: print thread scheduling events? default: NO */
extern Bool VG_(clo_trace_sched);
--- valgrind/coregrind/vg_redir.c #1.6:1.7
@@ -37,7 +37,4 @@
/*------------------------------------------------------------*/
-/* Set to True for debug printing. */
-static const Bool verbose_redir = False;
-
/*
wraps and redirections, indexed by from_addr
@@ -146,5 +143,5 @@ Bool VG_(resolve_redir)(CodeRedirect *re
resolved = from_resolved(redir) && to_resolved(redir);
- if (0 && verbose_redir)
+ if (0 && VG_(clo_trace_redir))
VG_(printf)(" consider FROM binding %s:%s -> %s:%s in %s(%s)\n",
redir->from_lib, redir->from_sym,
@@ -159,5 +156,5 @@ Bool VG_(resolve_redir)(CodeRedirect *re
if (match_lib(redir->from_lib, si)) {
redir->from_addr = VG_(reverse_search_one_symtab)(si, redir->from_sym);
- if (verbose_redir && redir->from_addr != 0)
+ if (VG_(clo_trace_redir) && redir->from_addr != 0)
VG_(printf)(" bind FROM: %p = %s:%s\n",
redir->from_addr,redir->from_lib, redir->from_sym );
@@ -171,5 +168,5 @@ Bool VG_(resolve_redir)(CodeRedirect *re
if (match_lib(redir->to_lib, si)) {
redir->to_addr = VG_(reverse_search_one_symtab)(si, redir->to_sym);
- if (verbose_redir && redir->to_addr != 0)
+ if (VG_(clo_trace_redir) && redir->to_addr != 0)
VG_(printf)(" bind TO: %p = %s:%s\n",
redir->to_addr,redir->to_lib, redir->to_sym );
@@ -180,5 +177,5 @@ Bool VG_(resolve_redir)(CodeRedirect *re
resolved = from_resolved(redir) && to_resolved(redir);
- if (0 && verbose_redir)
+ if (0 && VG_(clo_trace_redir))
VG_(printf)("resolve_redir: %s:%s from=%p %s:%s to=%p\n",
redir->from_lib, redir->from_sym, redir->from_addr,
@@ -188,5 +185,5 @@ Bool VG_(resolve_redir)(CodeRedirect *re
switch(redir->type) {
case R_REDIRECT:
- if (VG_(clo_verbosity) > 2 || verbose_redir) {
+ if (VG_(clo_trace_redir)) {
VG_(message)(Vg_DebugMsg, " redir resolved (%s:%s=%p -> ",
redir->from_lib, redir->from_sym, redir->from_addr);
@@ -213,5 +210,5 @@ Bool VG_(resolve_redir)(CodeRedirect *re
translations causes complete unchaining.
*/
- if (VG_(clo_verbosity) > 2) {
+ if (VG_(clo_verbosity) > 2 && VG_(clo_trace_redir)) {
VG_(message)(Vg_UserMsg,
"Discarding translation due to redirect of already called function" );
@@ -230,5 +227,5 @@ Bool VG_(resolve_redir)(CodeRedirect *re
else {
/* XXX leak redir */
- if (verbose_redir)
+ if (VG_(clo_trace_redir))
VG_(message)(Vg_DebugMsg, " redir %s:%s:%p->%s:%s:%p duplicated\n",
redir->from_lib, redir->from_sym, redir->from_addr,
@@ -239,6 +236,6 @@ Bool VG_(resolve_redir)(CodeRedirect *re
case R_WRAPPER:
- if (VG_(clo_verbosity) > 2 || verbose_redir) {
- VG_(message)(Vg_DebugMsg, " redir resolved (%s:%s=%p -> wrapper)",
+ if (VG_(clo_trace_redir)) {
+ VG_(message)(Vg_DebugMsg, " wrapper resolved (%s:%s=%p -> wrapper)",
redir->from_lib, redir->from_sym, redir->from_addr);
}
@@ -266,5 +263,5 @@ void VG_(resolve_seg_redirs)(SegInfo *si
CodeRedirect *redir, *next;
- if (verbose_redir)
+ if (VG_(clo_trace_redir))
VG_(printf)("Considering redirs to/from %s(soname=%s)\n",
si->filename, si->soname);
@@ -299,5 +296,5 @@ static void add_redirect_sym(const Char
redir->to_addr = 0;
- if (VG_(clo_verbosity) >= 2)
+ if (VG_(clo_verbosity) >= 2 && VG_(clo_trace_redir))
VG_(message)(Vg_UserMsg,
"REDIRECT %s(%s) to %s(%s)",
|
|
From: Jeremy F. <je...@go...> - 2005-03-03 01:01:05
|
CVS commit by fitzhardinge:
This subtly changes error output. Previously it printed a thread id for
each message unless it was thread 1. This is a bit misleading, because
it's easy to mistakenly assume that a message with no tid is for thread 1.
This change makes it print the thread id whenever it starts talking
about a new thread. So if there are multiple messages pertaining to
a thread, the first is prefixed with the thread id, and the rest are
printed afterwards; if another thread emits a message, it is prefixed
with its thread id. The "last thread id printed" is initialized to 1,
so the output for single-threaded programs is unchanged.
M +4 -1 vg_errcontext.c 1.69
--- valgrind/coregrind/vg_errcontext.c #1.68:1.69
@@ -52,4 +52,5 @@ static UInt n_errs_suppressed = 0;
static Supp* is_suppressible_error ( Error* err );
+static ThreadId last_tid_printed = 1;
/*------------------------------------------------------------*/
@@ -221,6 +222,8 @@ static void pp_Error ( Error* err, Bool
if (printCount)
VG_(message)(Vg_UserMsg, "Observed %d times:", err->count );
- if (err->tid > 1)
+ if (err->tid > 0 && err->tid != last_tid_printed) {
VG_(message)(Vg_UserMsg, "Thread %d:", err->tid );
+ last_tid_printed = err->tid;
+ }
switch (err->ekind) {
|
|
From: Jeremy F. <je...@go...> - 2005-03-03 05:06:10
|
CVS commit by fitzhardinge:
Thread modelling update. This version fixes up a couple of problems
with error reporting, turns off all the debug output by default, makes an
initial stab at condition variables (though not yet enough to work), and
fails gracefully if function wrapping hasn't managed to capture all the
necessary functions.
M +9 -19 core.h 1.95
M +3 -1 vg_intercept.c 1.32
M +2 -0 vg_main.c 1.258
M +148 -39 vg_pthreadmodel.c 1.3
M +11 -4 vg_redir.c 1.8
M +32 -10 vg_symtab2.c 1.105
M +207 -59 vg_threadmodel.c 1.4
--- valgrind/coregrind/core.h #1.94:1.95
@@ -549,21 +549,4 @@ struct vg_mallocfunc_info {
extern Bool VG_(sk_malloc_called_by_scheduler);
-/* ---------------------------------------------------------------------
- Exports of vg_threadmodel.c
- ------------------------------------------------------------------ */
-
-extern void VG_(tm_threadcreate)(ThreadId creator, ThreadId tid, Bool detached);
-extern void VG_(tm_threadexit) (ThreadId tid);
-extern void VG_(tm_threadjoin) (ThreadId joiner, ThreadId joinee);
-extern void VG_(tm_switchto) (ThreadId tid);
-
-extern void VG_(tm_mutex_init) (ThreadId tid, Addr mutexp);
-extern void VG_(tm_mutex_destroy)(ThreadId tid, Addr mutexp);
-extern void VG_(tm_mutex_trylock)(ThreadId tid, Addr mutexp);
-extern void VG_(tm_mutex_giveup) (ThreadId tid, Addr mutexp);
-extern void VG_(tm_mutex_acquire)(ThreadId tid, Addr mutexp);
-extern void VG_(tm_mutex_unlock) (ThreadId tid, Addr mutexp);
-
-
/* ---------------------------------------------------------------------
@@ -1108,7 +1091,8 @@ extern Bool VG_(is_wrapper_return)(Addr
/* Primary interface for adding wrappers for client-side functions. */
-extern void VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
+extern CodeRedirect *VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
const FuncWrapper *wrapper);
+extern Bool VG_(is_resolved)(const CodeRedirect *redir);
/* ---------------------------------------------------------------------
@@ -1879,4 +1863,10 @@ extern void VG_(tm_error_print) (Error *
extern void VG_(tm_init) ();
+extern void VG_(tm_cond_init) (ThreadId tid, Addr condp);
+extern void VG_(tm_cond_destroy) (ThreadId tid, Addr condp);
+extern void VG_(tm_cond_wait) (ThreadId tid, Addr condp, Addr mutexp);
+extern void VG_(tm_cond_wakeup) (ThreadId tid, Addr condp, Addr mutexp);
+extern void VG_(tm_cond_signal) (ThreadId tid, Addr condp);
+
/* ----- pthreads ----- */
extern void VG_(pthread_init) ();
--- valgrind/coregrind/vg_threadmodel.c #1.3:1.4
@@ -67,4 +67,7 @@ struct mutex;
struct condvar;
+static const Bool debug_thread = False;
+static const Bool debug_mutex = False;
+
/* --------------------------------------------------
Thread lifetime
@@ -194,5 +197,6 @@ static void thread_setstate(struct threa
th->state = state;
-
+ if (debug_thread)
+ VG_(printf)("setting thread(%d) -> %s\n", th->tid, pp_threadstate(th));
thread_validate(th);
}
@@ -272,5 +276,5 @@ static void thread_report(ThreadId tid,
errdata.action = action;
- VG_(maybe_record_error)(tid, ThreadErr, 0, errstr, &errdata);
+ VG_(maybe_record_error)(VG_(get_running_tid)(), ThreadErr, 0, errstr, &errdata);
}
@@ -281,11 +285,13 @@ static void pp_thread_error(Error *err)
Char *errstr = VG_(get_error_string)(err);
- VG_(message)(Vg_UserMsg, "Found %s thread in state %s while %s\n",
+ VG_(message)(Vg_UserMsg, "Found %s thread in state %s while %s",
errstr, pp_threadstate(th), errdata->action);
VG_(pp_ExeContext)(VG_(get_error_where)(err));
- VG_(message)(Vg_UserMsg, "Thread was %s",
- th->state == TS_Dead ? "destroyed" : "created");
+ if (th) {
+ VG_(message)(Vg_UserMsg, " Thread %d was %s",
+ th->tid, th->state == TS_Dead ? "destroyed" : "created");
VG_(pp_ExeContext)(th->ec_created);
+ }
}
@@ -295,4 +301,5 @@ void VG_(tm_thread_create)(ThreadId crea
struct thread *th = thread_get(tid);
+ if (debug_thread)
VG_(printf)("thread %d creates %d %s\n", creator, tid, detached ? "detached" : "");
if (th != NULL) {
@@ -424,5 +431,5 @@ void VG_(tm_thread_join)(ThreadId joiner
/* now the joiner... */
if (joiner == NULL)
- thread_report(joinerid, THE_NotExist, "joining as joiner");
+ thread_report(joineeid, THE_NotExist, "joining as joiner");
else {
switch(joiner->state) {
@@ -433,5 +440,5 @@ void VG_(tm_thread_join)(ThreadId joiner
case TS_Zombie: /* back from the dead */
case TS_Dead:
- thread_report(joinerid, THE_NotAlive, "joining as joiner");
+ thread_report(joineeid, THE_NotAlive, "joining as joiner");
break;
@@ -439,11 +446,11 @@ void VG_(tm_thread_join)(ThreadId joiner
case TS_CVBlocked:
case TS_JoinBlocked:
- thread_report(joinerid, THE_Blocked, "joining as joiner");
+ thread_report(joineeid, THE_Blocked, "joining as joiner");
break;
}
if (joinee->detached)
- thread_report(joinerid, THE_Detached, "joining as joiner");
-
+ thread_report(joineeid, THE_Detached, "joining as joiner");
+ else {
/* block if the joinee hasn't exited yet */
if (joinee) {
@@ -453,5 +460,5 @@ void VG_(tm_thread_join)(ThreadId joiner
default:
- if (joinee->detached || joinee->state == TS_Zombie)
+ if (joinee->state == TS_Zombie)
do_thread_dead(joinee);
else
@@ -460,4 +467,5 @@ void VG_(tm_thread_join)(ThreadId joiner
}
}
+ }
}
@@ -570,5 +578,5 @@ struct mutex_error_data
};
-static struct mutex *mx_get(Addr mutexp);
+static struct mutex *mutex_get(Addr mutexp);
static const Char *pp_mutexstate(const struct mutex *mx)
@@ -615,4 +623,5 @@ static void mutex_setstate(ThreadId tid,
mx->state = st;
+ if (debug_mutex)
VG_(printf)("setting mutex(%p) -> %s\n", mx->mutex, pp_mutexstate(mx));
}
@@ -621,5 +630,5 @@ static void mutex_report(ThreadId tid, A
{
Char *errstr="?";
- struct mutex *mx = mx_get(mutexp);
+ struct mutex *mx = mutex_get(mutexp);
struct mutex_error_data errdata;
@@ -647,5 +656,5 @@ static void pp_mutex_error(Error *err)
Char *errstr = VG_(get_error_string)(err);
- VG_(message)(Vg_UserMsg, "Found %s mutex %p while %s\n",
+ VG_(message)(Vg_UserMsg, "Found %s mutex %p while %s",
errstr, mx ? mx->mutex : 0, errdata->action);
VG_(pp_ExeContext)(VG_(get_error_where)(err));
@@ -656,18 +665,18 @@ static void pp_mutex_error(Error *err)
break;
case MX_Locked:
- VG_(message)(Vg_UserMsg, "Mutex was locked by thread %d", mx->owner);
+ VG_(message)(Vg_UserMsg, " Mutex was locked by thread %d", mx->owner);
VG_(pp_ExeContext)(mx->ec_locked);
break;
case MX_Unlocking:
- VG_(message)(Vg_UserMsg, "Mutex being unlocked");
+ VG_(message)(Vg_UserMsg, " Mutex being unlocked");
VG_(pp_ExeContext)(mx->ec_locked);
break;
case MX_Free:
- VG_(message)(Vg_UserMsg, "Mutex was unlocked");
+ VG_(message)(Vg_UserMsg, " Mutex was unlocked");
VG_(pp_ExeContext)(mx->ec_locked);
break;
}
- VG_(message)(Vg_UserMsg, "Mutex was %s",
+ VG_(message)(Vg_UserMsg, " Mutex was %s",
mx->state == MX_Dead ? "destroyed" : "created");
VG_(pp_ExeContext)(mx->ec_create);
@@ -676,5 +685,5 @@ static void pp_mutex_error(Error *err)
static SkipList sk_mutex = SKIPLIST_INIT(struct mutex, mutex, VG_(cmp_Addr), NULL, VG_AR_CORE);
-static struct mutex *mx_get(Addr mutexp)
+static struct mutex *mutex_get(Addr mutexp)
{
return VG_(SkipList_Find_Exact)(&sk_mutex, &mutexp);
@@ -683,11 +692,13 @@ static struct mutex *mx_get(Addr mutexp)
static Bool mx_is_initialized(Addr mutexp)
{
- const struct mutex *mx = mx_get(mutexp);
+ const struct mutex *mx = mutex_get(mutexp);
return mx && mx->state != MX_Dead;
}
-static void mx_check_initialized(ThreadId tid, Addr mutexp, const Char *action)
+static struct mutex *mutex_check_initialized(ThreadId tid, Addr mutexp, const Char *action)
{
+ struct mutex *mx;
+
vg_assert(tid != VG_INVALID_THREADID);
@@ -696,9 +707,14 @@ static void mx_check_initialized(ThreadI
VG_(tm_mutex_init)(tid, mutexp);
}
+
+ mx = mutex_get(mutexp);
+ vg_assert(mx != NULL);
+
+ return mx;
}
static Bool mx_is_locked(Addr mutexp)
{
- const struct mutex *mx = mx_get(mutexp);
+ const struct mutex *mx = mutex_get(mutexp);
return mx && (mx->state == MX_Locked);
@@ -711,5 +727,5 @@ static Bool mx_is_locked(Addr mutexp)
void VG_(tm_mutex_init)(ThreadId tid, Addr mutexp)
{
- struct mutex *mx = mx_get(mutexp);
+ struct mutex *mx = mutex_get(mutexp);
if (mx == NULL) {
@@ -737,5 +753,5 @@ Bool VG_(tm_mutex_exists)(Addr mutexp)
void VG_(tm_mutex_destroy)(ThreadId tid, Addr mutexp)
{
- struct mutex *mx = mx_get(mutexp);
+ struct mutex *mx = mutex_get(mutexp);
if (mx == NULL)
@@ -774,7 +790,5 @@ void VG_(tm_mutex_trylock)(ThreadId tid,
struct mutex *mx;
- mx_check_initialized(tid, mutexp, "trylocking");
-
- mx = mx_get(mutexp);
+ mx = mutex_check_initialized(tid, mutexp, "trylocking");
thread_block_mutex(tid, mx);
@@ -793,6 +807,5 @@ void VG_(tm_mutex_giveup)(ThreadId tid,
struct mutex *mx;
- mx_check_initialized(tid, mutexp, "giving up");
- mx = mx_get(mutexp);
+ mx = mutex_check_initialized(tid, mutexp, "giving up");
thread_unblock_mutex(tid, mx, "giving up on mutex");
@@ -807,6 +820,5 @@ void VG_(tm_mutex_acquire)(ThreadId tid,
struct mutex *mx;
- mx_check_initialized(tid, mutexp, "acquiring");
- mx = mx_get(mutexp);
+ mx = mutex_check_initialized(tid, mutexp, "acquiring");
switch(mx->state) {
@@ -822,4 +834,5 @@ void VG_(tm_mutex_acquire)(ThreadId tid,
case MX_Locked:
+ if (debug_mutex)
VG_(printf)("mutex=%p mx->state=%s\n", mutexp, pp_mutexstate(mx));
VG_TRACK( post_mutex_unlock, mx->owner, (void *)mutexp );
@@ -847,7 +860,5 @@ void VG_(tm_mutex_tryunlock)(ThreadId ti
struct mutex *mx;
- mx_check_initialized(tid, mutexp, "try-unlocking");
- mx = mx_get(mutexp);
- vg_assert(mx != NULL);
+ mx = mutex_check_initialized(tid, mutexp, "try-unlocking");
th = thread_get(tid);
@@ -895,5 +906,4 @@ void VG_(tm_mutex_tryunlock)(ThreadId ti
mutex_setstate(tid, mx, MX_Unlocking);
- VG_TRACK( post_mutex_unlock, tid, (void *)mutexp );
}
@@ -909,5 +919,5 @@ void VG_(tm_mutex_unlock)(ThreadId tid,
struct thread *th;
- mx_check_initialized(tid, mutexp, "unlocking mutex");
+ mx = mutex_check_initialized(tid, mutexp, "unlocking mutex");
th = thread_get(tid);
@@ -935,7 +945,4 @@ void VG_(tm_mutex_unlock)(ThreadId tid,
}
- mx = mx_get(mutexp);
- vg_assert(mx != NULL);
-
switch(mx->state) {
case MX_Locked:
@@ -951,6 +958,6 @@ void VG_(tm_mutex_unlock)(ThreadId tid,
case MX_Unlocking:
/* OK - we need to complete the unlock */
- mutex_setstate(tid, mx, MX_Free);
VG_TRACK( post_mutex_unlock, tid, (void *)mutexp );
+ mutex_setstate(tid, mx, MX_Free);
break;
@@ -965,9 +972,90 @@ void VG_(tm_mutex_unlock)(ThreadId tid,
-------------------------------------------------- */
+struct condvar_waiter
+{
+ ThreadId waiter;
+
+ struct condvar *condvar;
+ struct mutex *mutex;
+
+ struct condvar_waiter *next;
+};
+
+struct condvar
+{
+ Addr condvar;
+
+ enum condvar_state {
+ CV_Dead,
+ CV_Alive,
+ } state;
+
+ struct condvar_waiter *waiters; // XXX skiplist?
+
+ ExeContext *ec_created; // where created
+ ExeContext *ec_signalled; // where last signalled
+};
+
+enum condvar_err {
+ CVE_NotExist,
+ CVE_NotInit,
+ CVE_ReInit,
+ CVE_Busy,
+ CVE_Blocked,
+};
+
+static SkipList sk_condvar = SKIPLIST_INIT(struct condvar, condvar, VG_(cmp_Addr),
+ NULL, VG_AR_CORE);
+
+static struct condvar *condvar_get(Addr condp)
+{
+ return VG_(SkipList_Find_Exact)(&sk_condvar, &condp);
+}
+
+static Bool condvar_is_initialized(Addr condp)
+{
+ const struct condvar *cv = condvar_get(condp);
+
+ return cv && cv->state != CV_Dead;
+}
+
+static void condvar_report(ThreadId tid, Addr condp, enum condvar_err err, const Char *action)
+{
+}
+
+static struct condvar *condvar_check_initialized(ThreadId tid, Addr condp, const Char *action)
+{
+ struct condvar *cv;
+ vg_assert(tid != VG_INVALID_THREADID);
+
+ if (!condvar_is_initialized(condp)) {
+ condvar_report(tid, condp, CVE_NotInit, action);
+ VG_(tm_cond_init)(tid, condp);
+ }
+
+ cv = condvar_get(condp);
+ vg_assert(cv != NULL);
+
+ return cv;
+}
+
/* Initialize a condition variable. Fails if:
- condp has already been initialized
*/
-void VG_(tm_cond_init)(void *condp)
+void VG_(tm_cond_init)(ThreadId tid, Addr condp)
{
+ struct condvar *cv = condvar_get(condp);
+
+ if (cv == NULL) {
+ cv = VG_(SkipNode_Alloc)(&sk_condvar);
+ cv->condvar = condp;
+ cv->waiters = NULL;
+ VG_(SkipList_Insert)(&sk_condvar, cv);
+ } else if (cv->state != CV_Dead) {
+ condvar_report(tid, condp, CVE_ReInit, "initializing");
+ /* ? what about existing waiters? */
+ }
+
+ cv->state = CV_Alive;
}
@@ -976,6 +1064,27 @@ void VG_(tm_cond_init)(void *condp)
- condp is currently being waited on
*/
-void VG_(tm_cond_destroy)(void *condp)
+void VG_(tm_cond_destroy)(ThreadId tid, Addr condp)
{
+ struct condvar *cv = condvar_get(condp);
+
+ if (cv == NULL)
+ condvar_report(tid, condp, CVE_NotExist, "destroying");
+ else {
+ if (cv->state != CV_Alive)
+ condvar_report(tid, condp, CVE_NotInit, "destroying");
+ if (cv->waiters != NULL)
+ condvar_report(tid, condp, CVE_Busy, "destroying");
+ cv->state = CV_Dead;
+ }
+}
+
+static struct condvar_waiter *get_waiter(const struct condvar *cv, ThreadId tid)
+{
+ struct condvar_waiter *w;
+
+ for(w = cv->waiters; w; w = w->next)
+ if (w->waiter == tid)
+ return w;
+ return NULL;
}
@@ -984,7 +1093,46 @@ void VG_(tm_cond_destroy)(void *condp)
- thread doesn't hold mutexp
- thread is blocked on some other object
+ - thread is already blocked on mutex
*/
-void VG_(tm_cond_wait)(ThreadId tid, void *condp, void *mutexp)
+void VG_(tm_cond_wait)(ThreadId tid, Addr condp, Addr mutexp)
{
+ struct thread *th = thread_get(tid);
+ struct mutex *mx;
+ struct condvar *cv;
+ struct condvar_waiter *waiter;
+
+ /* Condvar must exist */
+ cv = condvar_check_initialized(tid, condp, "waiting");
+
+ /* Mutex must exist */
+ mx = mutex_check_initialized(tid, mutexp, "waiting on condvar");
+
+ /* Thread must own mutex */
+ if (mx->state != MX_Locked) {
+ mutex_report(tid, mutexp, MXE_NotLocked, "waiting on condvar");
+ VG_(tm_mutex_trylock)(tid, mutexp);
+ VG_(tm_mutex_acquire)(tid, mutexp);
+ } else if (mx->owner != tid) {
+ mutex_report(tid, mutexp, MXE_NotOwner, "waiting on condvar");
+ mx->owner = tid;
+ }
+
+ /* Thread must not be already waiting for condvar */
+ waiter = get_waiter(cv, tid);
+ if (waiter != NULL)
+ condvar_report(tid, condp, CVE_Blocked, "waiting");
+ else {
+ waiter = VG_(arena_malloc)(VG_AR_CORE, sizeof(*waiter));
+ waiter->condvar = cv;
+ waiter->mutex = mx;
+ waiter->next = cv->waiters;
+ cv->waiters = waiter;
+ }
+
+ /* Thread is now blocking on condvar */
+ do_thread_block_condvar(th, cv);
+
+ /* (half) release mutex */
+ VG_(tm_mutex_tryunlock)(tid, mutexp);
}
@@ -993,5 +1141,5 @@ void VG_(tm_cond_wait)(ThreadId tid, voi
- thread is not waiting on condp
*/
-void VG_(tm_cond_wakeup)(ThreadId tid, void *condp)
+void VG_(tm_cond_wakeup)(ThreadId tid, Addr condp, Addr mutexp)
{
}
@@ -1000,5 +1148,5 @@ void VG_(tm_cond_wakeup)(ThreadId tid, v
- condp has not been initialized
*/
-void VG_(tm_cond_signal)(ThreadId tid, void *condp)
+void VG_(tm_cond_signal)(ThreadId tid, Addr condp)
{
}
--- valgrind/coregrind/vg_pthreadmodel.c #1.2:1.3
@@ -8,6 +8,6 @@
emulator for monitoring program execution on x86-Unixes.
- Copyright (C) 2000-2004 Julian Seward
- js...@ac...
+ Copyright (C) 2005 Jeremy Fitzhardinge
+ je...@go...
This program is free software; you can redistribute it and/or
@@ -57,6 +57,11 @@
#define __USE_GNU
+#define __USE_UNIX98
#include <pthread.h>
+static const Bool debug = False;
+
+static Bool check_wrappings(void);
+
#define ENTER(x) \
do { \
@@ -68,5 +73,5 @@
static const Char *pp_retval(enum return_type rt, Word retval)
{
- static Char buf[20];
+ static Char buf[50];
switch(rt) {
@@ -127,9 +132,10 @@ static ThreadId get_pthread_mapping(pthr
/* Create a mapping between a ThreadId and a pthread_t */
-void pthread_id_mapping(ThreadId tid, Addr idp, UInt idsz)
+static void pthread_id_mapping(ThreadId tid, Addr idp, UInt idsz)
{
pthread_t id = *(pthread_t *)idp;
struct pthread_map *m = VG_(SkipList_Find_Exact)(&sk_pthread_map, &id);
+ if (debug)
VG_(printf)("Thread %d maps to %p\n", tid, id);
@@ -150,4 +156,5 @@ static void check_thread_exists(ThreadId
{
if (!VG_(tm_thread_exists)(tid)) {
+ if (debug)
VG_(printf)("creating thread %d\n", tid);
VG_(tm_thread_create)(VG_INVALID_THREADID, tid, False);
@@ -174,4 +181,9 @@ static void *before_pthread_create(va_li
void *arg = va_arg(va, void *);
struct pthread_create_nonce *n;
+ struct vg_pthread_newthread_data *data;
+ ThreadState *tst;
+
+ if (!check_wrappings())
+ return NULL;
ENTER(pthread_create);
@@ -179,20 +191,25 @@ static void *before_pthread_create(va_li
/* Data is in the client heap and is freed by the client in the
startfunc_wrapper. */
- if (startfunc_wrapper != 0) {
- struct vg_pthread_newthread_data *data;
- ThreadState *tst = VG_(get_ThreadState)(VG_(get_running_tid)());
+ vg_assert(startfunc_wrapper != 0);
+
+ tst = VG_(get_ThreadState)(VG_(get_running_tid)());
VG_(sk_malloc_called_by_scheduler) = True;
data = SK_(malloc)(sizeof(*data));
VG_(sk_malloc_called_by_scheduler) = False;
+
+ VG_TRACK(pre_mem_write, Vg_CorePThread, tst->tid, "new thread data",
+ (Addr)data, sizeof(*data));
data->startfunc = start;
data->arg = arg;
+ VG_TRACK(post_mem_write, (Addr)data, sizeof(*data));
/* Substitute arguments
- XXX hack: need an API to do this.
- */
+ XXX hack: need an API to do this. */
((Word *)tst->arch.m_esp)[3] = startfunc_wrapper;
((Word *)tst->arch.m_esp)[4] = (Word)data;
- }
+
+ if (debug)
+ VG_(printf)("starting thread at wrapper %p\n", startfunc_wrapper);
n = VG_(arena_malloc)(VG_AR_CORE, sizeof(*n));
@@ -208,4 +225,7 @@ static void after_pthread_create(void *n
ThreadId tid = VG_(get_running_tid)();
+ if (n == NULL)
+ return;
+
if (rt == RT_RETURN && retval == 0) {
if (!VG_(tm_thread_exists)(tid))
@@ -227,8 +247,13 @@ static void *before_pthread_join(va_list
{
pthread_t pt_joinee = va_arg(va, pthread_t);
- ThreadId joinee = get_pthread_mapping(pt_joinee);
+ ThreadId joinee;
+
+ if (!check_wrappings())
+ return NULL;
ENTER(pthread_join);
+ joinee = get_pthread_mapping(pt_joinee);
+
VG_(tm_thread_join)(VG_(get_running_tid)(), joinee);
@@ -239,4 +264,7 @@ static void after_pthread_join(void *v,
{
/* nothing to be done? */
+ if (!check_wrappings())
+ return;
+
LEAVE(pthread_join, rt, retval);
}
@@ -249,9 +277,14 @@ static void *before_pthread_detach(va_li
{
pthread_t id = va_arg(va, pthread_t);
- struct pthread_detach_data *data = VG_(arena_malloc)(VG_AR_CORE, sizeof(*data));
- data->id = id;
+ struct pthread_detach_data *data;
+
+ if (!check_wrappings())
+ return NULL;
ENTER(pthread_detach);
+ data = VG_(arena_malloc)(VG_AR_CORE, sizeof(*data));
+ data->id = id;
+
return data;
}
@@ -260,5 +293,10 @@ static void after_pthread_detach(void *n
{
struct pthread_detach_data *data = (struct pthread_detach_data *)nonce;
- ThreadId tid = get_pthread_mapping(data->id);
+ ThreadId tid;
+
+ if (data == NULL)
+ return;
+
+ tid = get_pthread_mapping(data->id);
VG_(arena_free)(VG_AR_CORE, data);
@@ -277,4 +315,7 @@ static void *before_pthread_self(va_list
to the return value. On Linux/glibc, it's a simple scalar, so it is
returned normally. */
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_self);
@@ -287,4 +328,7 @@ static void after_pthread_self(void *non
pthread_t ret = (pthread_t)retval;
+ if (!check_wrappings())
+ return;
+
pthread_id_mapping(VG_(get_running_tid)(), (Addr)&ret, sizeof(ret));
@@ -323,4 +367,7 @@ static void *before_pthread_mutex_init(v
const pthread_mutexattr_t *attr = va_arg(va, const pthread_mutexattr_t *);
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_mutex_init);
@@ -334,4 +381,7 @@ static void *before_pthread_mutex_init(v
static void after_pthread_mutex_init(void *nonce, enum return_type rt, Word retval)
{
+ if (!check_wrappings())
+ return;
+
if (rt == RT_RETURN && retval == 0)
VG_(tm_mutex_init)(VG_(get_running_tid)(), (Addr)nonce);
@@ -344,4 +394,7 @@ static void *before_pthread_mutex_destro
pthread_mutex_t *mx = va_arg(va, pthread_mutex_t *);
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_mutex_destroy);
@@ -353,4 +406,7 @@ static void *before_pthread_mutex_destro
static void after_pthread_mutex_destroy(void *nonce, enum return_type rt, Word retval)
{
+ if (!check_wrappings())
+ return;
+
LEAVE(pthread_mutex_destroy, rt, retval);
}
@@ -360,6 +416,10 @@ static void *before_pthread_mutex_lock(v
pthread_mutex_t *mx = va_arg(va, pthread_mutex_t *);
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_mutex_lock);
+ if (debug)
VG_(printf)("%d locking %p\n", VG_(get_running_tid)(), mx);
check_thread_exists(VG_(get_running_tid)());
@@ -372,7 +432,11 @@ static void *before_pthread_mutex_lock(v
static void after_pthread_mutex_lock(void *nonce, enum return_type rt, Word retval)
{
+ if (!check_wrappings())
+ return;
+
if (rt == RT_RETURN && retval == 0)
VG_(tm_mutex_acquire)(VG_(get_running_tid)(), (Addr)nonce);
else {
+ if (debug)
VG_(printf)("after mutex_lock failed: rt=%d ret=%d\n", rt, retval);
VG_(tm_mutex_giveup)(VG_(get_running_tid)(), (Addr)nonce);
@@ -386,6 +450,10 @@ static void *before_pthread_mutex_tryloc
pthread_mutex_t *mx = va_arg(va, pthread_mutex_t *);
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_mutex_trylock);
+ if (debug)
VG_(printf)("%d trylocking %p\n", VG_(get_running_tid)(), mx);
check_thread_exists(VG_(get_running_tid)());
@@ -398,7 +466,11 @@ static void *before_pthread_mutex_tryloc
static void after_pthread_mutex_trylock(void *nonce, enum return_type rt, Word retval)
{
+ if (nonce == NULL)
+ return;
+
if (rt == RT_RETURN && retval == 0)
VG_(tm_mutex_acquire)(VG_(get_running_tid)(), (Addr)nonce);
else {
+ if (debug)
VG_(printf)("after mutex_trylock failed: rt=%d ret=%d\n", rt, retval);
VG_(tm_mutex_giveup)(VG_(get_running_tid)(), (Addr)nonce);
@@ -412,5 +484,9 @@ static void *before_pthread_mutex_unlock
pthread_mutex_t *mx = va_arg(va, pthread_mutex_t *);
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_mutex_unlock);
+
VG_(tm_mutex_tryunlock)(VG_(get_running_tid)(), (Addr)mx);
@@ -420,4 +496,7 @@ static void *before_pthread_mutex_unlock
static void after_pthread_mutex_unlock(void *nonce, enum return_type rt, Word retval)
{
+ if (nonce == NULL)
+ return;
+
if (rt == RT_RETURN && retval == 0)
VG_(tm_mutex_unlock)(VG_(get_running_tid)(), (Addr)nonce); /* complete unlock */
@@ -432,4 +511,5 @@ static struct pt_wraps {
const Char *name;
FuncWrapper wrapper;
+ const CodeRedirect *redir;
} wraps[] = {
#define WRAP(func, extra) { #func extra, { before_##func, after_##func } }
@@ -448,4 +528,33 @@ static struct pt_wraps {
};
+/* Check to see if all the wrappers are resolved */
+static Bool check_wrappings()
+{
+ Int i;
+ static Bool ok = True;
+ static Bool checked = False;
+
+ if (checked)
+ return ok;
+
+ for(i = 0; i < sizeof(wraps)/sizeof(*wraps); i++) {
+ if (!VG_(is_resolved)(wraps[i].redir)) {
+ VG_(message)(Vg_DebugMsg, "Pthread wrapper for \"%s\" is not resolved",
+ wraps[i].name);
+ ok = False;
+ }
+ }
+
+ if (startfunc_wrapper == 0) {
+ VG_(message)(Vg_DebugMsg, "Pthread wrapper for thread start function is not resolved");
+ ok = False;
+ }
+
+ if (!ok)
+ VG_(message)(Vg_DebugMsg, "Missing intercepts; model disabled");
+
+ checked = True;
+ return ok;
+}
/*
@@ -457,7 +566,7 @@ void VG_(pthread_init)()
for(i = 0; i < sizeof(wraps)/sizeof(*wraps); i++) {
- VG_(printf)("adding pthread wrapper for %s\n", wraps[i].name);
- //VG_(add_wrapper)("soname:libpthread.so.0", wraps[i].name, &wraps[i].wrapper);
- VG_(add_wrapper)("soname:libpthread.so.0", wraps[i].name, &wraps[i].wrapper);
+ //VG_(printf)("adding pthread wrapper for %s\n", wraps[i].name);
+ wraps[i].redir = VG_(add_wrapper)("soname:libpthread.so.0",
+ wraps[i].name, &wraps[i].wrapper);
}
VG_(tm_init)();
--- valgrind/coregrind/vg_intercept.c #1.31:1.32
@@ -75,5 +75,5 @@ void *VG_WRAPPER(pthread_startfunc_wrapp
static pthread_t (*pthread_selfp)(void);
- //VALGRIND_PRINTF("intercepted thread start: real start is %p(%p)\n", func, arg);
+ //VALGRIND_PRINTF("intercepted thread start: real start is %p(%p)", func, arg);
/* Do this rather than a direct call so we don't make an explicit
@@ -85,4 +85,6 @@ void *VG_WRAPPER(pthread_startfunc_wrapp
if (pthread_selfp != NULL)
(*pthread_selfp)(); /* just calling this is enough */
+ else
+ VALGRIND_PRINTF("pthread_self pointer is NULL!");
/* Free the data the before_pthread_create wrapper left for us. */
--- valgrind/coregrind/vg_redir.c #1.7:1.8
@@ -128,4 +128,9 @@ static inline Bool to_resolved(const Cod
}
+Bool VG_(is_resolved)(const CodeRedirect *redir)
+{
+ return from_resolved(redir) && to_resolved(redir);
+}
+
/* Resolve a redir using si if possible, and add it to the resolved
list */
@@ -141,5 +146,5 @@ Bool VG_(resolve_redir)(CodeRedirect *re
return False;
- resolved = from_resolved(redir) && to_resolved(redir);
+ resolved = VG_(is_resolved)(redir);
if (0 && VG_(clo_trace_redir))
@@ -335,5 +340,5 @@ void VG_(add_redirect_addr)(const Char *
}
-void VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
+CodeRedirect *VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
const FuncWrapper *wrapper)
{
@@ -363,4 +368,6 @@ void VG_(add_wrapper)(const Char *from_l
unresolved_redir = redir;
}
+
+ return redir;
}
--- valgrind/coregrind/vg_symtab2.c #1.104:1.105
@@ -409,20 +409,42 @@ static Int compare_RiSym(void *va, void
/* Two symbols have the same address. Which name do we prefer?
- The shortest. Always. Hm, well, prefer the ones with '@' symbol versioning in them.
- If they're the same length, then alphabetical.
+ The general rule is to prefer the shorter symbol name. If the
+ symbol contains a '@', which means it's versioned, then the length
+ up to the '@' is used for length comparison purposes (so
+ "foo@GLIBC_2.4.2" is considered shorter than "foobar"), but if two
+ symbols have the same length, the one with the version string is
+ preferred. If all else fails, use alphabetical ordering.
*/
static RiSym *prefersym(RiSym *a, RiSym *b)
{
- Int lena, lenb;
- Bool va = VG_(strchr)(a->name, '@') != NULL;
- Bool vb = VG_(strchr)(b->name, '@') != NULL;
+ Int lena, lenb; /* full length */
+ Int vlena, vlenb; /* length without version */
+ const Char *vpa, *vpb;
- lena = VG_(strlen)(a->name);
- lenb = VG_(strlen)(b->name);
- if (va || lena < lenb)
+ vlena = lena = VG_(strlen)(a->name);
+ vlenb = lenb = VG_(strlen)(b->name);
+
+ vpa = VG_(strchr)(a->name, '@');
+ vpb = VG_(strchr)(b->name, '@');
+
+ if (vpa)
+ vlena = vpa - a->name;
+ if (vpb)
+ vlenb = vpb - b->name;
+
+ /* Select the shortest unversioned name */
+ if (vlena < vlenb)
return a;
- else if (vb || lenb < lena)
+ else if (vlenb < vlena)
+ return b;
+
+ /* Equal lengths; select the versioned name */
+ if (vpa && !vpb)
+ return a;
+ if (vpb && !vpa)
return b;
+ /* Either both versioned or neither is versioned; select them
+ alphabetically */
if (VG_(strcmp)(a->name, b->name) < 0)
return a;
--- valgrind/coregrind/vg_main.c #1.257:1.258
@@ -2669,4 +2669,6 @@ void VG_(shutdown_actions)(ThreadId tid)
VGA_(reap_threads)(tid);
+ VG_(clo_model_pthreads) = False;
+
// Clean the client up before the final report
VGA_(final_tidyup)(tid);
|
|
From: Tom H. <th...@cy...> - 2005-03-03 19:09:52
|
CVS commit by thughes:
Store thread error details in the core pool instead of the tool pool.
M +2 -2 vg_threadmodel.c 1.5
--- valgrind/coregrind/vg_threadmodel.c #1.4:1.5
@@ -1161,5 +1161,5 @@ UInt VG_(tm_error_update_extra)(Error *e
case ThreadErr: {
struct thread_error_data *errdata = VG_(get_error_extra)(err);
- struct thread *new_th = VG_(malloc)(sizeof(struct thread));
+ struct thread *new_th = VG_(arena_malloc)(VG_AR_CORE, sizeof(struct thread));
VG_(memcpy)(new_th, errdata->th, sizeof(struct thread));
@@ -1172,5 +1172,5 @@ UInt VG_(tm_error_update_extra)(Error *e
case MutexErr: {
struct mutex_error_data *errdata = VG_(get_error_extra)(err);
- struct mutex *new_mx = VG_(malloc)(sizeof(struct mutex));
+ struct mutex *new_mx = VG_(arena_malloc)(VG_AR_CORE, sizeof(struct mutex));
VG_(memcpy)(new_mx, errdata->mx, sizeof(struct mutex));
|
|
From: Tom H. <th...@cy...> - 2005-03-03 19:09:04
|
CVS commit by thughes:
Actually make the call to VG_(tm_error_update_extra) when we decide to
save a thread error - without that call the error details aren't copied
and may be invalid when we decide to print them.
M +16 -16 vg_errcontext.c 1.70
--- valgrind/coregrind/vg_errcontext.c #1.69:1.70
@@ -517,11 +517,12 @@ void VG_(maybe_record_error) ( ThreadId
/* update `extra' */
- if (VG_(needs).skin_errors) {
switch (ekind) {
case ThreadErr:
case MutexErr:
+ vg_assert(VG_(needs).core_errors);
extra_size = VG_(tm_error_update_extra)(p);
break;
default:
+ vg_assert(VG_(needs).skin_errors);
extra_size = SK_(update_extra)(p);
break;
@@ -534,5 +535,4 @@ void VG_(maybe_record_error) ( ThreadId
p->extra = new_extra;
}
- }
p->next = vg_errors;
|
|
From: Jeremy F. <je...@go...> - 2005-03-03 21:38:28
|
CVS commit by fitzhardinge:
Generate a SIGILL for the complex forms of the ENTER instruction, rather than
getting an assertion failure.
M +4 -0 vg_to_ucode.c 1.157
--- valgrind/coregrind/vg_to_ucode.c #1.156:1.157
@@ -5336,4 +5336,8 @@ static Addr disInstr ( UCodeBlock* cb, A
abyte = getUChar(eip); eip++;
+ if (sz != 4 || abyte != 0) {
+ VG_(message)(Vg_UserMsg, "Can't handle complex forms of ENTER");
+ goto decode_failure;
+ }
vg_assert(sz == 4);
vg_assert(abyte == 0);
|
|
From: Nicholas N. <nj...@cs...> - 2005-03-04 05:37:52
|
CVS commit by nethercote:
Change things back so that suppressions still use mangled names -- maybe
we should change this, but if we do it should be documented, and the old
style should still be allowed to work.
M +2 -2 vg_errcontext.c 1.72
--- valgrind/coregrind/vg_errcontext.c #1.71:1.72
@@ -363,5 +363,5 @@ static void gen_suppression(Error* err)
if (i > 0)
eip -= MIN_INSTR_SIZE; // point to calling line
- if ( VG_(get_fnname) (eip, buf, M_VG_ERRTXT) ) {
+ if ( VG_(get_fnname_nodemangle) (eip, buf, M_VG_ERRTXT) ) {
// Stop after "main"; if main() is recursive, stop after last main().
@@ -985,5 +985,5 @@ Bool supp_matches_callers(Error* err, Su
case FunName:
// Nb: mangled names used in suppressions
- (void)VG_(get_fnname)(a, caller_name, M_VG_ERRTXT);
+ (void)VG_(get_fnname_nodemangle)(a, caller_name, M_VG_ERRTXT);
break;
default: VG_(skin_panic)("supp_matches_callers");
|
|
From: Robert W. <rj...@du...> - 2005-03-05 19:48:07
|
CVS commit by rjwalsh:
--num-callers default is now 12, so say that in the help.
M +1 -1 vg_main.c 1.262
--- valgrind/coregrind/vg_main.c #1.261:1.262
@@ -1487,5 +1487,5 @@ void usage ( Bool debug_help )
 " --log-socket=ipaddr:port log messages to socket ipaddr:port\n"
 " --demangle=no|yes automatically demangle C++ names? [yes]\n"
-" --num-callers=<number> show <num> callers in stack traces [4]\n"
+" --num-callers=<number> show <num> callers in stack traces [12]\n"
 " --error-limit=no|yes stop showing new errors if too many? [yes]\n"
 " --show-below-main=no|yes continue stack traces below main() [no]\n"
|