|
From: <sv...@va...> - 2005-03-30 18:43:03
|
Author: sewardj
Date: 2005-03-30 19:42:59 +0100 (Wed, 30 Mar 2005)
New Revision: 3483
Modified:
trunk/coregrind/amd64/dispatch.S
Log:
Get rid of the use of VG_(instr_ptr_offset) since we know what that is
at system-build time: OFFSET_amd64_RIP. This saves an instruction on
the fast path, and reduces the number of PIE-difficulties by one.
Modified: trunk/coregrind/amd64/dispatch.S
===================================================================
--- trunk/coregrind/amd64/dispatch.S 2005-03-30 18:26:52 UTC (rev 3482)
+++ trunk/coregrind/amd64/dispatch.S 2005-03-30 18:42:59 UTC (rev 3483)
@@ -31,6 +31,7 @@
 
#include "core_asm.h"
#include "amd64_private_asm.h"
+#include "libvex_guest_offsets.h"
 
 
/*------------------------------------------------------------*/
@@ -63,8 +64,7 @@
movq %rdi, %rbp
	
/* fetch %RIP into %rax */
- movq VG_(instr_ptr_offset), %rsi
- movq (%rbp, %rsi, 1), %rax
+ movq OFFSET_amd64_RIP(%rbp), %rax
 
/* set host FPU control word to the default mode expected 
by VEX-generated code. See comments in libvex.h for
@@ -90,8 +90,7 @@
 
dispatch_boring:
/* save the jump address in the guest state */
- movq VG_(instr_ptr_offset), %rsi
- movq %rax, (%rbp, %rsi, 1)
+ movq %rax, OFFSET_amd64_RIP(%rbp)
 
/* Are we out of timeslice? If yes, defer to scheduler. */
subl $1, VG_(dispatch_ctr)
@@ -182,10 +181,9 @@
cmpq $VG_TRC_INNER_COUNTERZERO, %rbp
jz counter_is_zero
 
- /* save %eax in %EIP and defer to sched */
- movq VG_(instr_ptr_offset), %rsi
+ /* save %rax in %RIP and defer to sched */
movq 0(%rsp), %rdi
- movq %rax, (%rdi, %rsi, 1)
+ movq %rax, OFFSET_amd64_RIP(%rdi)
movq %rbp, %rax
jmp run_innerloop_exit
 
|