|
From: <sv...@va...> - 2006-10-03 19:53:06
|
Author: sewardj
Date: 2006-10-03 20:53:02 +0100 (Tue, 03 Oct 2006)
New Revision: 6148
Log:
Minor adjustments to the scheduler, mostly cosmetic.
- rename VG_(kill_thread) to VG_(get_thread_out_of_syscall), which
is what it actually does.
- Remove 'semaphore' terminology in places and use 'lock' instead.
- Give an extra 'HChar* who' arg to VG_(set_running) and
VG_(set_sleeping), which is printed when --trace-sched=yes.
This makes it easier to make sense of lock ownership changes
from the debug output.
- various other improvements to debug printing
- add a kludge to encourage the AIX scheduler to switch threads
more often when more than one is runnable (am not claiming to
understand this); otherwise CPU starvation can appear to happen
- more assertions in sema.c (the pipe-based lock); cycle the token
through 'A' to 'Z' to make strace/truss output more understandable;
fix longstanding bug wherein sema_down() tries to read two bytes
even though sema_up only writes one.
Modified:
branches/AIX5/coregrind/m_scheduler/scheduler.c
branches/AIX5/coregrind/m_scheduler/sema.c
branches/AIX5/coregrind/pub_core_scheduler.h
Modified: branches/AIX5/coregrind/m_scheduler/scheduler.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/AIX5/coregrind/m_scheduler/scheduler.c 2006-10-03 19:43:53 U=
TC (rev 6147)
+++ branches/AIX5/coregrind/m_scheduler/scheduler.c 2006-10-03 19:53:02 U=
TC (rev 6148)
@@ -58,6 +58,8 @@
*/
=20
#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h" // __NR_sched_yield
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_clreq.h" // for VG_USERREQ__*
@@ -72,7 +74,6 @@
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_replacemalloc.h"
-#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_stacks.h"
#include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
@@ -81,8 +82,8 @@
#include "pub_core_tooliface.h"
#include "pub_core_translate.h" // For VG_(translate)()
#include "pub_core_transtab.h"
-#include "vki_unistd.h"
#include "priv_sema.h"
+#include "pub_core_scheduler.h" // self
=20
/* #include "pub_core_debuginfo.h" */ // DEBUGGING HACK ONLY
=20
@@ -194,15 +195,30 @@
=20
When this returns, we'll actually be running.
*/
-void VG_(set_running)(ThreadId tid)
+void VG_(set_running)(ThreadId tid, HChar* who)
{
- ThreadState *tst =3D VG_(get_ThreadState)(tid);
+ ThreadState *tst;
=20
+#if 0
+ if (VG_(clo_trace_sched)) {
+ HChar buf[100];
+ vg_assert(VG_(strlen)(who) <=3D 100-50);
+ VG_(sprintf)(buf, "waiting for lock (%s)", who);
+ print_sched_event(tid, buf);
+ }
+#endif
+
+ /* First, acquire the lock. We can't do anything else safely prior
+ to this point. Even doing debug printing prior to this point
+ is, technically, wrong. */
+ ML_(sema_down)(&run_sema);
+
+ tst =3D VG_(get_ThreadState)(tid);
+
vg_assert(tst->status !=3D VgTs_Runnable);
=20
tst->status =3D VgTs_Runnable;
- =20
- ML_(sema_down)(&run_sema);
+
if (VG_(running_tid) !=3D VG_INVALID_THREADID)
VG_(printf)("tid %d found %d running\n", tid, VG_(running_tid));
vg_assert(VG_(running_tid) =3D=3D VG_INVALID_THREADID);
@@ -210,8 +226,12 @@
=20
VG_(unknown_SP_update)(VG_(get_SP(tid)), VG_(get_SP(tid)));
=20
- if (VG_(clo_trace_sched))
- print_sched_event(tid, "now running");
+ if (VG_(clo_trace_sched)) {
+ HChar buf[150];
+ vg_assert(VG_(strlen)(who) <=3D 150-50);
+ VG_(sprintf)(buf, " acquired lock (%s)", who);
+ print_sched_event(tid, buf);
+ }
=20
// While thre modeling is disable, issue thread_run events here
// VG_(tm_thread_switchto)(tid);
@@ -225,7 +245,7 @@
but it may mean that we remain in a Runnable state and we're just
yielding the CPU to another thread).
*/
-void VG_(set_sleeping)(ThreadId tid, ThreadStatus sleepstate)
+void VG_(set_sleeping)(ThreadId tid, ThreadStatus sleepstate, HChar* who=
)
{
ThreadState *tst =3D VG_(get_ThreadState)(tid);
=20
@@ -239,16 +259,17 @@
vg_assert(VG_(running_tid) =3D=3D tid);
VG_(running_tid) =3D VG_INVALID_THREADID;
=20
+ if (VG_(clo_trace_sched)) {
+ Char buf[200];
+ vg_assert(VG_(strlen)(who) <=3D 200-100);
+ VG_(sprintf)(buf, "releasing lock (%s) -> %s",
+ who, VG_(name_of_ThreadStatus)(sleepstate));
+ print_sched_event(tid, buf);
+ }
+
/* Release the run_sema; this will reschedule any runnable
thread. */
ML_(sema_up)(&run_sema);
-
- if (VG_(clo_trace_sched)) {
- Char buf[50];
- VG_(sprintf)(buf, "now sleeping in state %s",=20
- VG_(name_of_ThreadStatus)(sleepstate));
- print_sched_event(tid, buf);
- }
}
=20
/* Clear out the ThreadState and release the semaphore. Leaves the
@@ -266,21 +287,24 @@
/* There should still be a valid exitreason for this thread */
vg_assert(VG_(threads)[tid].exitreason !=3D VgSrc_None);
=20
+ if (VG_(clo_trace_sched))
+ print_sched_event(tid, "release lock in VG_(exit_thread)");
+
ML_(sema_up)(&run_sema);
}
=20
-/* Kill a thread. This interrupts whatever a thread is doing, and
- makes it exit ASAP. This does not set the exitreason or
- exitcode. */
-void VG_(kill_thread)(ThreadId tid)
+/* If 'tid' is blocked in a syscall, send it SIGVGKILL so as to get it
+ out of the syscall and onto doing the next thing, whatever that is.
+ If it isn't blocked in a syscall, has no effect on the thread. */
+void VG_(get_thread_out_of_syscall)(ThreadId tid)
{
vg_assert(VG_(is_valid_tid)(tid));
vg_assert(!VG_(is_running_thread)(tid));
- vg_assert(VG_(is_exiting)(tid));
=20
if (VG_(threads)[tid].status =3D=3D VgTs_WaitSys) {
if (VG_(clo_trace_signals))
- VG_(message)(Vg_DebugMsg, "kill_thread zaps tid %d lwp %d",
+ VG_(message)(Vg_DebugMsg,=20
+ "get_thread_out_of_syscall zaps tid %d lwp %d",
tid, VG_(threads)[tid].os_state.lwpid);
VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
}
@@ -291,25 +315,19 @@
*/
void VG_(vg_yield)(void)
{
- struct vki_timespec ts =3D { 0, 1 };
ThreadId tid =3D VG_(running_tid);
=20
vg_assert(tid !=3D VG_INVALID_THREADID);
vg_assert(VG_(threads)[tid].os_state.lwpid =3D=3D VG_(gettid)());
=20
- VG_(set_sleeping)(tid, VgTs_Yielding);
+ VG_(set_sleeping)(tid, VgTs_Yielding, "VG_(vg_yield)");
=20
- //VG_(printf)("tid %d yielding EIP=3D%p\n", tid, VG_(threads)[tid].ar=
ch.m_eip);
-
/*=20
Tell the kernel we're yielding.
*/
- if (1)
- VG_(do_syscall0)(__NR_sched_yield);
- else
- VG_(nanosleep)(&ts);
+ VG_(do_syscall0)(__NR_sched_yield);
=20
- VG_(set_running)(tid);
+ VG_(set_running)(tid, "VG_(vg_yield)");
}
=20
=20
@@ -339,6 +357,11 @@
{
tst->os_state.lwpid =3D 0;
tst->os_state.threadgroup =3D 0;
+# if defined(VGO_aix5)
+ tst->os_state.cancel_async =3D False;
+ tst->os_state.cancel_disabled =3D False;
+ tst->os_state.cancel_progress =3D Canc_NoRequest;
+# endif
}
=20
static void os_state_init(ThreadState *tst)
@@ -433,7 +456,6 @@
ML_(sema_init)(&run_sema);
=20
for (i =3D 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
-
/* Paranoia .. completely zero it out. */
VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) );
=20
@@ -541,19 +563,6 @@
do_pre_run_checks(tst);
/* end Paranoia */
=20
- //if (0) {
- // Char buf[100];
- // Bool ok =3D VG_(get_fnname_if_entry) ( tst->arch.vex.guest_CIA,
- // buf, 100 );
- // if (ok) {
- // Addr r2actual =3D tst->arch.vex.guest_GPR2;
- // Addr r2tocptr =3D VG_(get_tocptr)( tst->arch.vex.guest_CIA );
- // if (1) VG_(printf)("R2 act 0x%016llx toc 0x%016llx %s\n",=20
- // r2actual, r2tocptr, buf);
- // if (r2tocptr !=3D 0) vg_assert(r2actual =3D=3D r2tocptr);
- // }
- //}
-
trc =3D 0;
dispatch_ctr_SAVED =3D VG_(dispatch_ctr);
=20
@@ -573,9 +582,36 @@
VG_(threads)[tid].arch.vex.guest_RESVN =3D 0;
# endif =20
=20
+# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ /* On AIX, we need to get a plausible value for SPRG3 for this
+ thread, since it's used I think as a thread-state pointer. It
+ is presumably set by the kernel for each dispatched thread and
+ cannot be changed by user space. It therefore seems safe enough
+ to copy the host's value of it into the guest state at the point
+ the thread is dispatched.
+ (Later): Hmm, looks like SPRG3 is only used in 32-bit mode.
+ Oh well. */
+ { UWord host_sprg3;
+ __asm__ __volatile__( "mfspr %0,259\n" : "=3Db"(host_sprg3) );
+ VG_(threads)[tid].arch.vex.guest_SPRG3_RO =3D host_sprg3;
+ vg_assert(sizeof(VG_(threads)[tid].arch.vex.guest_SPRG3_RO) =3D=3D s=
izeof(void*));
+ }
+# endif
+
/* there should be no undealt-with signals */
//vg_assert(VG_(threads)[tid].siginfo.si_signo =3D=3D 0);
=20
+ if (0) {
+ vki_sigset_t m;
+ Int i, err =3D VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &m);
+ vg_assert(err =3D=3D 0);
+ VG_(printf)("tid %d: entering code with unblocked signals: ", tid)=
;
+ for (i =3D 1; i <=3D _VKI_NSIG; i++)
+ if (!VG_(sigismember)(&m, i))
+ VG_(printf)("%d ", i);
+ VG_(printf)("\n");
+ }
+
vg_assert(VG_(in_generated_code) =3D=3D False);
VG_(in_generated_code) =3D True;
=20
@@ -791,15 +827,55 @@
=20
VG_(dispatch_ctr) =3D SCHEDULING_QUANTUM + 1;
=20
- while(!VG_(is_exiting)(tid)) {
+ while (!VG_(is_exiting)(tid)) {
+
if (VG_(dispatch_ctr) =3D=3D 1) {
- /* Our slice is done, so yield the CPU to another thread. This
- doesn't sleep between sleeping and running, since that would
- take too much time. */
- VG_(set_sleeping)(tid, VgTs_Yielding);
- /* nothing */
- VG_(set_running)(tid);
=20
+# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ /* Note: count runnable threads before dropping The Lock. */
+ Int rt =3D VG_(count_runnable_threads)();
+# endif
+
+ /* Our slice is done, so yield the CPU to another thread. On
+ Linux, this doesn't sleep between sleeping and running,
+ since that would take too much time. On AIX, we have to
+ prod the scheduler to get it consider other threads; not
+ doing so appears to cause very long delays before other
+ runnable threads get rescheduled. */
+
+ /* 4 July 06: it seems that a zero-length nsleep is needed to
+ cause async thread cancellation (canceller.c) to terminate
+ in finite time; else it is in some kind of race/starvation
+ situation and completion is arbitrarily delayed (although
+ this is not a deadlock).
+
+ Unfortunately these sleeps cause MPI jobs not to terminate
+ sometimes (some kind of livelock). So sleeping once
+ every N opportunities appears to work. */
+
+ /* 3 Aug 06: doing sys__nsleep works but crashes some apps.
+ sys_yield also helps the problem, whilst not crashing apps. =
*/
+
+ VG_(set_sleeping)(tid, VgTs_Yielding,=20
+ "VG_(scheduler):timeslice");
+ /* ------------ now we don't have The Lock ------------ */
+
+# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ static Int ctr=3D0;
+ vg_assert(__NR_AIX5__nsleep !=3D __NR_AIX5_UNKNOWN);
+ vg_assert(__NR_AIX5_yield !=3D __NR_AIX5_UNKNOWN);
+ if (1 && rt > 0 && ((++ctr % 3) =3D=3D 0)) {=20
+ //struct vki_timespec ts;
+ //ts.tv_sec =3D 0;
+ //ts.tv_nsec =3D 0*1000*1000;
+ //VG_(do_syscall2)(__NR_AIX5__nsleep, (UWord)&ts, (UWord)NUL=
L);
+ VG_(do_syscall0)(__NR_AIX5_yield);
+ }
+# endif
+
+ VG_(set_running)(tid, "VG_(scheduler):timeslice");
+ /* ------------ now we do have The Lock ------------ */
+
/* OK, do some relatively expensive housekeeping stuff */
scheduler_sanity(tid);
VG_(sanity_check_general)(False);
@@ -1056,7 +1132,7 @@
VG_(threads)[tid].exitreason =3D src;
if (src =3D=3D VgSrc_FatalSig)
VG_(threads)[tid].os_state.fatalsig =3D VKI_SIGKILL;
- VG_(kill_thread)(tid);
+ VG_(get_thread_out_of_syscall)(tid);
}
}
=20
@@ -1116,7 +1192,7 @@
if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched))
VG_(message)(Vg_DebugMsg,=20
"__libc_freeres() done; really quitting!");
- VG_(threads)[tid].exitreason =3D VgSrc_ExitSyscall;
+ VG_(threads)[tid].exitreason =3D VgSrc_ExitThread;
break;
=20
default:
@@ -1299,26 +1375,55 @@
void scheduler_sanity ( ThreadId tid )
{
Bool bad =3D False;
+ static UInt lasttime =3D 0;
+ UInt now;
+ Int lwpid =3D VG_(gettid)();
=20
if (!VG_(is_running_thread)(tid)) {
VG_(message)(Vg_DebugMsg,
- "Thread %d is supposed to be running, but doesn't own run_sema (own=
ed by %d)\n",=20
+ "Thread %d is supposed to be running, "
+ "but doesn't own run_sema (owned by %d)\n",=20
tid, VG_(running_tid));
bad =3D True;
}
=20
- if (VG_(gettid)() !=3D VG_(threads)[tid].os_state.lwpid) {
+ if (lwpid !=3D VG_(threads)[tid].os_state.lwpid) {
VG_(message)(Vg_DebugMsg,
"Thread %d supposed to be in LWP %d, but we're actual=
ly %d\n",
tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)())=
;
bad =3D True;
}
+
+ if (lwpid !=3D run_sema.owner_thread) {
+ VG_(message)(Vg_DebugMsg,
+ "Thread %d doesn't own the run_sema\n",
+ tid);
+ bad =3D True;
+ }
+
+ /* Periodically show the state of all threads, for debugging
+ purposes. */
+ now =3D VG_(read_millisecond_timer)();
+ if (0 && (!bad) && (lasttime + 4000/*ms*/ <=3D now)) {
+ lasttime =3D now;
+ VG_(printf)("\n------------ Sched State at %d ms ------------\n",
+ (Int)now);
+ VG_(show_sched_status)();
+ }
+
+ /* core_panic also shows the sched status, which is why we don't
+ show it above if bad=3D=3DTrue. */
+ if (bad)
+ VG_(core_panic)("scheduler_sanity: failed");
}
=20
void VG_(sanity_check_general) ( Bool force_expensive )
{
ThreadId tid;
=20
+ static UInt next_slow_check_at =3D 1;
+ static UInt slow_check_interval =3D 25;
+
if (VG_(clo_sanity_level) < 1) return;
=20
/* --- First do all the tests that we can do quickly. ---*/
@@ -1335,11 +1440,18 @@
=20
/* --- Now some more expensive checks. ---*/
=20
- /* Once every 25 times, check some more expensive stuff. */
+ /* Once every now and again, check some more expensive stuff.
+ Gradually increase the interval between such checks so as not to
+ burden long-running programs too much. */
if ( force_expensive
- || VG_(clo_sanity_level) > 1
- || (VG_(clo_sanity_level) =3D=3D 1 && (sanity_fast_count % 25) =3D=3D=
0)) {
+ || VG_(clo_sanity_level) > 1
+ || (VG_(clo_sanity_level) =3D=3D 1=20
+ && sanity_fast_count =3D=3D next_slow_check_at)) {
=20
+ if (0) VG_(printf)("SLOW at %d\n", sanity_fast_count-1);
+
+ next_slow_check_at =3D sanity_fast_count - 1 + slow_check_interval=
;
+ slow_check_interval++;
sanity_slow_count++;
=20
if (VG_(needs).sanity_checks) {
Modified: branches/AIX5/coregrind/m_scheduler/sema.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/AIX5/coregrind/m_scheduler/sema.c 2006-10-03 19:43:53 UTC (r=
ev 6147)
+++ branches/AIX5/coregrind/m_scheduler/sema.c 2006-10-03 19:53:02 UTC (r=
ev 6148)
@@ -29,6 +29,8 @@
*/
=20
#include "pub_core_basics.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_vki.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcproc.h" // For VG_(gettid)()
@@ -39,45 +41,78 @@
pipe-based token passing scheme.
*/
=20
+/* Cycle the char passed through the pipe through 'A' .. 'Z' to make
+ it easier to make sense of strace/truss output - makes it possible
+ to see more clearly the change of ownership of the lock. Need to
+ be careful to reinitialise it at fork() time. */
+static Char sema_char =3D '!'; /* will cause assertion failures if used
+ before sema_init */
+
void ML_(sema_init)(vg_sema_t *sema)
{
- Int res;
- VG_(pipe)(sema->pipe);
+ Char buf[2];
+ Int res, r;
+ r =3D VG_(pipe)(sema->pipe);
+ vg_assert(r =3D=3D 0);
+
+ vg_assert(sema->pipe[0] !=3D sema->pipe[1]);
+
sema->pipe[0] =3D VG_(safe_fd)(sema->pipe[0]);
sema->pipe[1] =3D VG_(safe_fd)(sema->pipe[1]);
=20
+ if (0)=20
+ VG_(debugLog)(0,"zz","sema_init: %d %d\n", sema->pipe[0],=20
+ sema->pipe[1]);
+ vg_assert(sema->pipe[0] !=3D sema->pipe[1]);
+
sema->owner_thread =3D -1;
=20
/* create initial token */
- res =3D VG_(write)(sema->pipe[1], "T", 1);
+ sema_char =3D 'A';
+ buf[0] =3D sema_char;=20
+ buf[1] =3D 0;
+ sema_char++;
+ res =3D VG_(write)(sema->pipe[1], buf, 1);
vg_assert(res =3D=3D 1);
}
=20
void ML_(sema_deinit)(vg_sema_t *sema)
{
+ vg_assert(sema->owner_thread !=3D -1); /* must be initialised */
+ vg_assert(sema->pipe[0] !=3D sema->pipe[1]);
VG_(close)(sema->pipe[0]);
VG_(close)(sema->pipe[1]);
sema->pipe[0] =3D sema->pipe[1] =3D -1;
+ sema->owner_thread =3D -1;
}
=20
/* get a token */
void ML_(sema_down)(vg_sema_t *sema)
{
- Char buf[2] =3D { 'x' };
+ Char buf[2];
Int ret;
Int lwpid =3D VG_(gettid)();
=20
vg_assert(sema->owner_thread !=3D lwpid); /* can't have it already */
+ vg_assert(sema->pipe[0] !=3D sema->pipe[1]);
=20
again:
- ret =3D VG_(read)(sema->pipe[0], buf, 2);
+ buf[0] =3D buf[1] =3D 0;
+ ret =3D VG_(read)(sema->pipe[0], buf, 1);
=20
+ if (ret !=3D 1)=20
+ VG_(debugLog)(0, "scheduler",=20
+ "VG_(sema_down): read returned %d\n", ret);
+
if (ret =3D=3D -VKI_EINTR)
goto again;
=20
vg_assert(ret =3D=3D 1); /* should get exactly 1 token */
- vg_assert(buf[0] =3D=3D 'T');
+ vg_assert(buf[0] >=3D 'A' && buf[0] <=3D 'Z');
+ vg_assert(buf[1] =3D=3D 0);
=20
+ if (sema_char =3D=3D 'Z') sema_char =3D 'A'; else sema_char++;
+
sema->owner_thread =3D lwpid;
}
=20
@@ -85,12 +120,21 @@
void ML_(sema_up)(vg_sema_t *sema)
{
Int ret;
-
+ Char buf[2];
+ buf[0] =3D sema_char;=20
+ buf[1] =3D 0;
+ vg_assert(sema->owner_thread !=3D -1); /* must be initialised */
+ vg_assert(sema->pipe[0] !=3D sema->pipe[1]);
vg_assert(sema->owner_thread =3D=3D VG_(gettid)()); /* must have it *=
/
=20
sema->owner_thread =3D 0;
=20
- ret =3D VG_(write)(sema->pipe[1], "T", 1);
+ ret =3D VG_(write)(sema->pipe[1], buf, 1);
+
+ if (ret !=3D 1)=20
+ VG_(debugLog)(0, "scheduler",=20
+ "VG_(sema_up):write returned %d\n", ret);
+
vg_assert(ret =3D=3D 1);
}
=20
Modified: branches/AIX5/coregrind/pub_core_scheduler.h
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- branches/AIX5/coregrind/pub_core_scheduler.h 2006-10-03 19:43:53 UTC =
(rev 6147)
+++ branches/AIX5/coregrind/pub_core_scheduler.h 2006-10-03 19:53:02 UTC =
(rev 6148)
@@ -43,31 +43,31 @@
/* A thread exits. tid must currently be running. */
extern void VG_(exit_thread)(ThreadId tid);
=20
-/* Kill a thread. This interrupts whatever a thread is doing, and
- makes it exit ASAP. This does not set the exitreason or
- exitcode. */
-extern void VG_(kill_thread)(ThreadId tid);
+/* If 'tid' is blocked in a syscall, send it SIGVGKILL so as to get it
+ out of the syscall and onto doing the next thing, whatever that is.
+ If it isn't blocked in a syscall, has no effect on the thread. */
+extern void VG_(get_thread_out_of_syscall)(ThreadId tid);
=20
/* Nuke all threads except tid. */
extern void VG_(nuke_all_threads_except) ( ThreadId me,
VgSchedReturnCode reason );
=20
/* Make a thread the running thread. The thread must previously been
- sleeping, and not holding the CPU semaphore. This will set the
+ sleeping, and not holding the CPU lock. This will set the
thread state to VgTs_Runnable, and the thread will attempt to take
- the CPU semaphore. By the time it returns, tid will be the running
+ the CPU lock. By the time it returns, tid will be the running
thread. */
-extern void VG_(set_running) ( ThreadId tid );
+extern void VG_(set_running) ( ThreadId tid, HChar* who );
=20
/* Set a thread into a sleeping state. Before the call, the thread
- must be runnable, and holding the CPU semaphore. When this call
+ must be runnable, and holding the CPU lock. When this call
returns, the thread will be set to the specified sleeping state,
- and will not be holding the CPU semaphore. Note that another
+ and will not be holding the CPU lock. Note that another
thread could be running by the time this call returns, so the
caller must be careful not to touch any shared state. It is also
the caller's responsibility to actually block until the thread is
ready to run again. */
-extern void VG_(set_sleeping) ( ThreadId tid, ThreadStatus state );
+extern void VG_(set_sleeping) ( ThreadId tid, ThreadStatus state, HChar*=
who );
=20
/* Yield the CPU for a while */
extern void VG_(vg_yield)(void);
|