|
From: Nicholas N. <nj...@ca...> - 2004-09-10 17:42:25
|
CVS commit by nethercote:
Arch-abstraction:
- added x86-linux/core_platform.h
- factored out the getting/setting of system call arguments, putting it in the
  platform-specific part.
A x86-linux/core_platform.h 1.1 [GPL (v2+)]
M +3 -1 core.h 1.22
M +9 -8 vg_proxylwp.c 1.21
M +10 -10 vg_scheduler.c 1.178
M +7 -7 vg_signals.c 1.84
M +8 -8 vg_syscalls.c 1.135
M +1 -1 x86/core_arch.h 1.6
M +1 -0 x86-linux/Makefile.am 1.2
--- valgrind/coregrind/core.h #1.21:1.22
@@ -86,5 +86,7 @@
#include "core_asm.h" // asm stuff
#include "tool.h" // tool stuff
-#include "core_arch.h" // arch-specific stuff; eg. x86/arch.h
+#include "core_arch.h" // arch-specific stuff, eg. x86/core_arch.h
+#include "core_platform.h" // platform-specific stuff,
+ // eg. x86-linux/core_platform.h
#include "valgrind.h"
--- valgrind/coregrind/vg_proxylwp.c #1.20:1.21
@@ -431,5 +431,5 @@ void VG_(proxy_handlesig)(const vki_ksig
the proxy and machine state here. */
vg_assert(px->state == PXS_RunSyscall);
- vg_assert(px->tst->arch.m_eax == -VKI_ERESTARTSYS);
+ vg_assert(PLATFORM_SYSCALL_RET(px->tst->arch) == -VKI_ERESTARTSYS);
} else if (sys_after <= eip && eip <= sys_done) {
/* We're after the syscall. Either it was interrupted by the
@@ -440,5 +440,5 @@ void VG_(proxy_handlesig)(const vki_ksig
px->state == PXS_SysDone);
px->state = PXS_SysDone;
- px->tst->arch.m_eax = eax;
+ PLATFORM_SYSCALL_RET(px->tst->arch) = eax;
}
px_printf(" signalled in state %s\n", pxs_name(px->state));
@@ -557,5 +557,5 @@ static Int proxylwp(void *v)
reply.u.syscallno = tst->syscallno;
- tst->arch.m_eax = -VKI_ERESTARTSYS;
+ PLATFORM_SYSCALL_RET(tst->arch) = -VKI_ERESTARTSYS;
px->state = PXS_IntReply;
break;
@@ -727,5 +727,5 @@ static Int proxylwp(void *v)
px_printf("RunSyscall in SigACK: rejecting syscall %d with ERESTARTSYS\n",
reply.u.syscallno);
- tst->arch.m_eax = -VKI_ERESTARTSYS;
+ PLATFORM_SYSCALL_RET(tst->arch) = -VKI_ERESTARTSYS;
} else {
Int syscallno = tst->syscallno;
@@ -734,5 +734,5 @@ static Int proxylwp(void *v)
/* If we're interrupted before we get to the syscall
itself, we want the syscall restarted. */
- tst->arch.m_eax = -VKI_ERESTARTSYS;
+ PLATFORM_SYSCALL_RET(tst->arch) = -VKI_ERESTARTSYS;
/* set our process group ID to match parent */
@@ -1143,5 +1143,6 @@ static void sys_wait_results(Bool block,
if (VG_(clo_trace_syscalls))
VG_(message)(Vg_DebugMsg, "sys_wait_results: got PX_RunSyscall for TID %d: syscall %d result %d",
- res.tid, tst->syscallno, tst->arch.m_eax);
+ res.tid, tst->syscallno,
+ PLATFORM_SYSCALL_RET(tst->arch));
if (tst->status != VgTs_WaitSys)
@@ -1302,6 +1303,6 @@ Int VG_(sys_issue)(int tid)
req.request = PX_RunSyscall;
- tst->syscallno = tst->arch.m_eax;
- tst->arch.m_eax = -VKI_ERESTARTSYS;
+ tst->syscallno = PLATFORM_SYSCALL_NUM(tst->arch);
+ PLATFORM_SYSCALL_RET(tst->arch) = -VKI_ERESTARTSYS;
res = VG_(write)(proxy->topx, &req, sizeof(req));
--- valgrind/coregrind/vg_scheduler.c #1.177:1.178
@@ -468,10 +468,10 @@ void handle_signal_return ( ThreadId tid
if (VG_(threads)[tid].status == VgTs_Sleeping
- && VG_(threads)[tid].arch.m_eax == __NR_nanosleep) {
+ && PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_nanosleep) {
/* We interrupted a nanosleep(). The right thing to do is to
write the unused time to nanosleep's second param, but that's
too much effort ... we just say that 1 nanosecond was not
used, and return EINTR. */
- rem = (struct vki_timespec *)VG_(threads)[tid].arch.m_ecx; /* arg2 */
+ rem = (struct vki_timespec*)PLATFORM_SYSCALL_ARG2(VG_(threads)[tid].arch);
if (rem != NULL) {
rem->tv_sec = 0;
@@ -527,5 +527,5 @@ void sched_do_syscall ( ThreadId tid )
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
- syscall_no = VG_(threads)[tid].arch.m_eax; /* syscall number */
+ syscall_no = PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch);
/* Special-case nanosleep because we can. But should we?
@@ -537,5 +537,5 @@ void sched_do_syscall ( ThreadId tid )
UInt t_now, t_awaken;
struct vki_timespec* req;
- req = (struct vki_timespec*)VG_(threads)[tid].arch.m_ebx; /* arg1 */
+ req = (struct vki_timespec*)PLATFORM_SYSCALL_ARG1(VG_(threads)[tid].arch);
if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
@@ -925,10 +925,10 @@ VgSchedReturnCode do_scheduler ( Int* ex
the unprotected malloc/free system. */
- if (VG_(threads)[tid].arch.m_eax == __NR_exit
- || VG_(threads)[tid].arch.m_eax == __NR_exit_group
+ if (PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit
+ || PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_exit_group
) {
/* If __NR_exit, remember the supplied argument. */
- *exitcode = VG_(threads)[tid].arch.m_ebx; /* syscall arg1 */
+ *exitcode = PLATFORM_SYSCALL_ARG1(VG_(threads)[tid].arch);
/* Only run __libc_freeres if the tool says it's ok and
@@ -963,11 +963,11 @@ VgSchedReturnCode do_scheduler ( Int* ex
/* We've dealt with __NR_exit at this point. */
- vg_assert(VG_(threads)[tid].arch.m_eax != __NR_exit &&
- VG_(threads)[tid].arch.m_eax != __NR_exit_group);
+ vg_assert(PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit &&
+ PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) != __NR_exit_group);
/* Trap syscalls to __NR_sched_yield and just have this
thread yield instead. Not essential, just an
optimisation. */
- if (VG_(threads)[tid].arch.m_eax == __NR_sched_yield) {
+ if (PLATFORM_SYSCALL_NUM(VG_(threads)[tid].arch) == __NR_sched_yield) {
SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
goto stage1; /* find a new thread to run */
--- valgrind/coregrind/vg_signals.c #1.83:1.84
@@ -490,6 +490,6 @@ void VG_(do__NR_sigaltstack) ( ThreadId
vg_assert(VG_(is_valid_tid)(tid));
- ss = (vki_kstack_t*)(VG_(threads)[tid].arch.m_ebx);
- oss = (vki_kstack_t*)(VG_(threads)[tid].arch.m_ecx);
+ ss = (vki_kstack_t*)(PLATFORM_SYSCALL_ARG1(VG_(threads)[tid].arch));
+ oss = (vki_kstack_t*)(PLATFORM_SYSCALL_ARG2(VG_(threads)[tid].arch));
m_esp = VG_(threads)[tid].arch.m_esp;
@@ -543,7 +543,7 @@ void VG_(do__NR_sigaction) ( ThreadId ti
vg_assert(VG_(is_valid_tid)(tid));
- signo = VG_(threads)[tid].arch.m_ebx; /* int sigNo */
- new_act = (vki_ksigaction*)(VG_(threads)[tid].arch.m_ecx);
- old_act = (vki_ksigaction*)(VG_(threads)[tid].arch.m_edx);
+ signo = PLATFORM_SYSCALL_ARG1(VG_(threads)[tid].arch);
+ new_act = (vki_ksigaction*)PLATFORM_SYSCALL_ARG2(VG_(threads)[tid].arch);
+ old_act = (vki_ksigaction*)PLATFORM_SYSCALL_ARG3(VG_(threads)[tid].arch);
if (VG_(clo_trace_signals))
@@ -1868,6 +1868,6 @@ void VG_(deliver_signal) ( ThreadId tid,
if (tst->status == VgTs_WaitSys) {
/* blocked in a syscall; we assume it should be interrupted */
- if (tst->arch.m_eax == -VKI_ERESTARTSYS)
- tst->arch.m_eax = -VKI_EINTR;
+ if (PLATFORM_SYSCALL_RET(tst->arch) == -VKI_ERESTARTSYS)
+ PLATFORM_SYSCALL_RET(tst->arch) = -VKI_EINTR;
}
--- valgrind/coregrind/vg_syscalls.c #1.134:1.135
@@ -1011,12 +1011,12 @@ static Bool fd_allowed(Int fd, const Cha
POST(new) __attribute__((alias(STR(after_##old))))
-#define SYSNO (tst->arch.m_eax) /* in PRE(x) */
-#define res (tst->arch.m_eax) /* in POST(x) */
-#define arg1 (tst->arch.m_ebx)
-#define arg2 (tst->arch.m_ecx)
-#define arg3 (tst->arch.m_edx)
-#define arg4 (tst->arch.m_esi)
-#define arg5 (tst->arch.m_edi)
-#define arg6 (tst->arch.m_ebp)
+#define SYSNO PLATFORM_SYSCALL_NUM(tst->arch) // in PRE(x)
+#define res PLATFORM_SYSCALL_RET(tst->arch) // in POST(x)
+#define arg1 PLATFORM_SYSCALL_ARG1(tst->arch)
+#define arg2 PLATFORM_SYSCALL_ARG2(tst->arch)
+#define arg3 PLATFORM_SYSCALL_ARG3(tst->arch)
+#define arg4 PLATFORM_SYSCALL_ARG4(tst->arch)
+#define arg5 PLATFORM_SYSCALL_ARG5(tst->arch)
+#define arg6 PLATFORM_SYSCALL_ARG6(tst->arch)
PRE(exit_group)
--- valgrind/coregrind/x86/core_arch.h #1.5:1.6
@@ -1,5 +1,5 @@
/*--------------------------------------------------------------------*/
-/*--- Arch-specific stuff for the core. x86/core_arch.h ---*/
+/*--- x86-specific stuff for the core. x86/core_arch.h ---*/
/*--------------------------------------------------------------------*/
--- valgrind/coregrind/x86-linux/Makefile.am #1.1:1.2
@@ -5,3 +5,4 @@
noinst_HEADERS = \
+ core_platform.h \
vki_unistd.h
|