You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
|
1
(26) |
2
(35) |
3
(18) |
4
(14) |
|
5
(12) |
6
(13) |
7
(11) |
8
(15) |
9
(8) |
10
(13) |
11
(25) |
|
12
(13) |
13
(24) |
14
(7) |
15
(6) |
16
(8) |
17
(6) |
18
(7) |
|
19
(8) |
20
(7) |
21
(5) |
22
(7) |
23
(6) |
24
(7) |
25
(6) |
|
26
(7) |
27
(7) |
28
(5) |
29
(5) |
30
(5) |
|
|
|
From: Nicholas N. <nj...@ca...> - 2004-09-03 23:25:50
|
CVS commit by nethercote:
Removed x86/ume_archdefs.h; moved CLIENT_BASE into x86/core_arch.h.
(CLIENT_BASE wasn't really part of UME.)
M +1 -2 stage1.c 1.19
M +4 -0 ume.c 1.22
M +0 -1 vg_main.c 1.201
M +1 -2 x86/Makefile.am 1.9
M +12 -1 x86/core_arch.h 1.2
R x86/ume_archdefs.h 1.2
--- valgrind/coregrind/stage1.c #1.18:1.19
@@ -45,5 +45,4 @@
#include "ume.h"
#include "ume_arch.h"
-#include "ume_archdefs.h"
static int stack[SIGSTKSZ*4];
@@ -210,5 +209,5 @@ static void hoops(void)
}
-int main(int argc, char **argv)
+int main(void)
{
struct rlimit rlim;
--- valgrind/coregrind/ume.c #1.21:1.22
@@ -483,4 +483,8 @@ static int load_ELF(char *hdr, int len,
}
break;
+
+ default:
+ // do nothing
+ break;
}
}
--- valgrind/coregrind/vg_main.c #1.200:1.201
@@ -34,5 +34,4 @@
#include "ume.h"
#include "ume_arch.h"
-#include "ume_archdefs.h"
#include <dirent.h>
--- valgrind/coregrind/x86/Makefile.am #1.8:1.9
@@ -6,6 +6,5 @@
noinst_HEADERS = \
core_arch.h \
- core_arch_asm.h \
- ume_archdefs.h
+ core_arch_asm.h
noinst_LIBRARIES = libarch.a
--- valgrind/coregrind/x86/core_arch.h #1.1:1.2
@@ -1,5 +1,5 @@
/*--------------------------------------------------------------------*/
-/*--- x86/core_arch.h ---*/
+/*--- Arch-specific stuff for the core. x86/core_arch.h ---*/
/*--------------------------------------------------------------------*/
@@ -87,4 +88,6 @@ typedef struct _LDT_ENTRY {
// Architecture-specific part of a ThreadState
+// XXX: eventually this should be made abstract, ie. the fields not visible
+// to the core...
typedef struct {
/* Pointer to this thread's Local (Segment) Descriptor Table.
@@ -143,4 +146,12 @@ typedef struct {
arch_thread_t;
+
+/* ---------------------------------------------------------------------
+ Constants involving memory layout
+ ------------------------------------------------------------------ */
+
+// base address of client address space
+#define CLIENT_BASE 0x00000000ul
+
#endif // __X86_CORE_ARCH_H
|
|
From: Nicholas N. <nj...@ca...> - 2004-09-03 23:09:01
|
On Thu, 2 Sep 2004, Eric Estievenart wrote: > The question is then: What, at that time, decided you > to do the big-bang mmap ? Originally we didn't use big-bang. Jeremy put it in with the FV memory layout changes; AIUI he experimented with direct-offset shadow addressing but the performance was no better, so the committed version didn't use it, even though the code to support it was all present. > The important points in this idea are: > > - No more address address space separation between > Valgrind and client This is unacceptable. > The only question I have no answer > is why was there a barrier between vg and client. I feel > it is not needed. It is needed. The separation between client and Valgrind is important, and was one of the main motivations for the FV rearrangement in the first place -- by using a segment selector in client code, we can ensure that a buggy client cannot touch any of Valgrind's memory. N |
|
From: Nicholas N. <nj...@ca...> - 2004-09-03 14:24:30
|
CVS commit by nethercote:
Removed x86/ume_archdefs.c, which just defined the never-used variable
CLIENT_START.
M +0 -1 Makefile.am 1.8
R ume_archdefs.c 1.2
--- valgrind/coregrind/x86/Makefile.am #1.7:1.8
@@ -12,5 +12,4 @@
EXTRA_DIST = \
- ume_archdefs.c \
ume_entry.S \
ume_go.c
|
|
From: Nicholas N. <nj...@ca...> - 2004-09-03 14:04:55
|
CVS commit by nethercote:
Avoid spurious warning about using posix_memalign()
M +28 -14 memalign2.c 1.2
--- valgrind/memcheck/tests/memalign2.c #1.1:1.2
@@ -1,2 +1,15 @@
+
+// These #defines attempt to ensure that posix_memalign() is declared, and
+// so no spurious warning is given about using it.
+
+// Advertise compliance of the code to the XSI (a POSIX superset that
+// defines what a system must be like to be called "UNIX")
+#undef _XOPEN_SOURCE
+#define _XOPEN_SOURCE 600
+
+// Advertise compliance to POSIX
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200112L
+
#include <stdlib.h>
#include <stdio.h>
@@ -33,22 +46,23 @@ int main ( void )
p = memalign(4097, 100); assert(0 == (long)p % 8192);
- res = posix_memalign(&p, -1,100); assert(EINVAL == res);
- res = posix_memalign(&p, 0, 100); assert(0 == res && 0 == (long)p % 8);
- res = posix_memalign(&p, 1, 100); assert(EINVAL == res);
- res = posix_memalign(&p, 2, 100); assert(EINVAL == res);
- res = posix_memalign(&p, 3, 100); assert(EINVAL == res);
- res = posix_memalign(&p, sizeof(void*), 100);
- assert(0 == res &&
- 0 == (long)p % sizeof(void*));
+ #define PM(a,b,c) posix_memalign((void**)a, b, c)
- res = posix_memalign(&p, 31, 100); assert(EINVAL == res);
- res = posix_memalign(&p, 32, 100); assert(0 == res &&
+ res = PM(&p, -1,100); assert(EINVAL == res);
+ res = PM(&p, 0, 100); assert(0 == res && 0 == (long)p % 8);
+ res = PM(&p, 1, 100); assert(EINVAL == res);
+ res = PM(&p, 2, 100); assert(EINVAL == res);
+ res = PM(&p, 3, 100); assert(EINVAL == res);
+ res = PM(&p, sizeof(void*), 100);
+ assert(0 == res && 0 == (long)p % sizeof(void*));
+
+ res = PM(&p, 31, 100); assert(EINVAL == res);
+ res = PM(&p, 32, 100); assert(0 == res &&
0 == (long)p % 32);
- res = posix_memalign(&p, 33, 100); assert(EINVAL == res);
+ res = PM(&p, 33, 100); assert(EINVAL == res);
- res = posix_memalign(&p, 4095, 100); assert(EINVAL == res);
- res = posix_memalign(&p, 4096, 100); assert(0 == res &&
+ res = PM(&p, 4095, 100); assert(EINVAL == res);
+ res = PM(&p, 4096, 100); assert(0 == res &&
0 == (long)p % 4096);
- res = posix_memalign(&p, 4097, 100); assert(EINVAL == res);
+ res = PM(&p, 4097, 100); assert(EINVAL == res);
return 0;
|
|
From: Nicholas N. <nj...@ca...> - 2004-09-03 13:45:46
|
CVS commit by nethercote:
Arch-abstraction:
- Added include/x86/: contains tool_arch.h, Makefile.am, .cvsignore.
- Added coregrind/x86/state.c. Contains some arch-specific code for dealing
with x86 registers -- eg. setting up the baseBlock, loading/saving the whole
register state. It is compiled into coregrind/x86/libarch.a and linked via
${VG_ARCH} with the core.
Relatedly, also added coregrind/x86/{core_arch.h,core_arch_asm.h}.
- Correspondingly abstracted the register state out of ThreadState. This
affected every place that touches registers, and there are a lot of them.
(Eventually all the register touching should be abstracted out in an
arch-neutral way, but not yet; one step at a time.)
- Added some declarations about register loading/saving functions to core.h;
all architectures will have to provide these functions.
- Rejigged the build system so that the arch-specific stuff is all done via
${VG_ARCH}, rather than naming e.g. x86/ directly. Appropriate -I arguments
are used so that all the headers are found, etc.
A coregrind/x86/core_arch.h 1.1 [GPL (v2+)]
A coregrind/x86/core_arch_asm.h 1.1 [GPL (v2+)]
A coregrind/x86/state.c 1.1 [GPL (v2+)]
A include/x86/.cvsignore 1.1
A include/x86/Makefile.am 1.1
A include/x86/tool_arch.h 1.1 [GPL (v2+)]
M +3 -1 Makefile.core-AM_CPPFLAGS.am 1.2
M +2 -1 Makefile.tool.am 1.2
M +1 -0 configure.in 1.124
M +1 -0 valgrind.spec.in 1.16
M +2 -2 coregrind/Makefile.am 1.82
M +12 -104 coregrind/core.h 1.9
M +2 -7 coregrind/core_asm.h 1.2
M +1 -0 coregrind/valgrind.vs 1.5
M +4 -4 coregrind/vg_execontext.c 1.18
M +24 -24 coregrind/vg_from_ucode.c 1.84
M +6 -6 coregrind/vg_ldt.c 1.18
M +16 -16 coregrind/vg_main.c 1.200
M +15 -15 coregrind/vg_proxylwp.c 1.20
M +54 -220 coregrind/vg_scheduler.c 1.174
M +78 -78 coregrind/vg_signals.c 1.83
M +8 -8 coregrind/vg_symtab2.c 1.87
M +11 -11 coregrind/vg_syscalls.c 1.134
M +7 -7 coregrind/vg_translate.c 1.90
M +12 -0 coregrind/x86/Makefile.am 1.7
M +3 -0 include/Makefile.am 1.8
M +1 -3 include/tool.h.base 1.4
M +1 -0 include/tool_asm.h 1.2
M +1 -1 memcheck/mc_main.c 1.53
--- valgrind/Makefile.core-AM_CPPFLAGS.am #1.1:1.2
@@ -1,4 +1,6 @@
add_includes = -I$(top_builddir)/coregrind -I$(top_srcdir)/coregrind \
- -I$(top_builddir)/include -I$(top_srcdir)/include
+ -I$(top_srcdir)/coregrind/$(VG_ARCH) \
+ -I$(top_builddir)/include -I$(top_srcdir)/include \
+ -I$(top_srcdir)/include/$(VG_ARCH)
AM_CPPFLAGS = $(add_includes)
--- valgrind/Makefile.tool.am #1.1:1.2
@@ -6,5 +6,6 @@
## Need $(top_builddir)/include because tool.h is built from tool.h.base;
## otherwise it will not work if builddir != srcdir.
-add_includes = -I$(top_builddir)/include -I$(top_srcdir)/include
+add_includes = -I$(top_builddir)/include -I$(top_srcdir)/include \
+ -I$(top_srcdir)/include/$(VG_ARCH)
AM_CPPFLAGS = $(add_includes)
--- valgrind/configure.in #1.123:1.124
@@ -392,4 +392,5 @@
tests/unused/Makefile
include/Makefile
+ include/x86/Makefile
auxprogs/Makefile
coregrind/Makefile
--- valgrind/valgrind.spec.in #1.15:1.16
@@ -41,4 +41,5 @@
/usr/include/valgrind/vg_kerneliface.h
/usr/include/valgrind/vg_skin.h
+/usr/include/valgrind/x86/tool_arch.h
/usr/bin/valgrind
/usr/bin/cg_annotate
--- valgrind/coregrind/Makefile.am #1.81:1.82
@@ -2,8 +2,7 @@
include $(top_srcdir)/Makefile.core-AM_CPPFLAGS.am
-SUBDIRS = x86 demangle . docs
+SUBDIRS = $(VG_ARCH) demangle . docs
AM_CPPFLAGS += -DVG_LIBDIR="\"$(valdir)"\" -I$(srcdir)/demangle \
- -I$(srcdir)/x86 \
-DKICKSTART_BASE=$(KICKSTART_BASE)
AM_CFLAGS = $(WERROR) -Winline -Wall -Wshadow -O -fno-omit-frame-pointer \
@@ -93,4 +92,5 @@
demangle/dyn-string.o \
demangle/safe-ctype.o \
+ ${VG_ARCH}/libarch.a \
-ldl
--- valgrind/coregrind/core.h #1.8:1.9
@@ -86,4 +86,5 @@
#include "core_asm.h" // asm stuff
#include "tool.h" // tool stuff
+#include "core_arch.h" // arch-specific stuff; eg. x86/arch.h
#include "valgrind.h"
@@ -575,24 +576,4 @@ struct vg_mallocfunc_info {
/* ---------------------------------------------------------------------
- Constants pertaining to the simulated CPU state, VG_(baseBlock),
- which need to go here to avoid ugly circularities.
- ------------------------------------------------------------------ */
-
-/* How big is the saved SSE/SSE2 state? Note that this subsumes the
- FPU state. On machines without SSE, we just save/restore the FPU
- state into the first part of this area. */
-/* A general comment about SSE save/restore: It appears that the 7th
- word (which is the MXCSR) has to be &ed with 0x0000FFBF in order
- that restoring from it later does not cause a GP fault (which is
- delivered as a segfault). I guess this will have to be done
- any time we do fxsave :-( 7th word means word offset 6 or byte
- offset 24 from the start address of the save area.
- */
-#define VG_SIZE_OF_SSESTATE 512
-/* ... and in words ... */
-#define VG_SIZE_OF_SSESTATE_W ((VG_SIZE_OF_SSESTATE+3)/4)
-
-
-/* ---------------------------------------------------------------------
Exports of vg_defaults.c
------------------------------------------------------------------ */
@@ -601,35 +582,4 @@ extern Bool VG_(sk_malloc_called_by_sche
-/* ---------------------------------------------------------------------
- Exports of vg_ldt.c
- ------------------------------------------------------------------ */
-
-/* This is the hardware-format for a segment descriptor, ie what the
- x86 actually deals with. It is 8 bytes long. It's ugly. */
-
-typedef struct _LDT_ENTRY {
- union {
- struct {
- UShort LimitLow;
- UShort BaseLow;
- unsigned BaseMid : 8;
- unsigned Type : 5;
- unsigned Dpl : 2;
- unsigned Pres : 1;
- unsigned LimitHi : 4;
- unsigned Sys : 1;
- unsigned Reserved_0 : 1;
- unsigned Default_Big : 1;
- unsigned Granularity : 1;
- unsigned BaseHi : 8;
- } Bits;
- struct {
- UInt word1;
- UInt word2;
- } Words;
- }
- LdtEnt;
-} VgLdtEntry;
-
/* Maximum number of LDT entries supported (by the x86). */
#define VG_M_LDT_ENTRIES 8192
@@ -894,55 +844,6 @@ typedef
vki_kstack_t altstack;
- /* Pointer to this thread's Local (Segment) Descriptor Table.
- Starts out as NULL, indicating there is no table, and we hope to
- keep it that way. If the thread does __NR_modify_ldt to create
- entries, we allocate a 8192-entry table at that point. This is
- a straight copy of the Linux kernel's scheme. Don't forget to
- deallocate this at thread exit. */
- VgLdtEntry* ldt;
-
- /* TLS table. This consists of a small number (currently 3) of
- entries from the Global Descriptor Table. */
- VgLdtEntry tls[VKI_GDT_TLS_ENTRIES];
-
- /* Saved machine context. Note the FPU state, %EIP and segment
- registers are not shadowed.
-
- Although the segment registers are 16 bits long, storage
- management here and in VG_(baseBlock) is
- simplified if we pretend they are 32 bits. */
- UInt m_cs;
- UInt m_ss;
- UInt m_ds;
- UInt m_es;
- UInt m_fs;
- UInt m_gs;
-
- UInt m_eax;
- UInt m_ebx;
- UInt m_ecx;
- UInt m_edx;
- UInt m_esi;
- UInt m_edi;
- UInt m_ebp;
- UInt m_esp;
- UInt m_eflags;
- UInt m_eip;
-
- /* The SSE/FPU state. This array does not (necessarily) have the
- required 16-byte alignment required to get stuff in/out by
- fxsave/fxrestore. So we have to do it "by hand".
- */
- UInt m_sse[VG_SIZE_OF_SSESTATE_W];
-
- UInt sh_eax;
- UInt sh_ebx;
- UInt sh_ecx;
- UInt sh_edx;
- UInt sh_esi;
- UInt sh_edi;
- UInt sh_ebp;
- UInt sh_esp;
- UInt sh_eflags;
+ /* Architecture-specific thread state */
+ arch_thread_t arch;
}
ThreadState;
@@ -1008,5 +909,5 @@ extern void VG_(scheduler_handle_fatal_s
/* Write a value to a client's thread register, and shadow (if necessary) */
#define SET_THREAD_REG( zztid, zzval, zzreg, zzREG, zzevent, zzargs... ) \
- do { VG_(threads)[zztid].m_##zzreg = (zzval); \
+ do { VG_(threads)[zztid].arch.m_##zzreg = (zzval); \
VG_TRACK( zzevent, zztid, R_##zzREG, ##zzargs ); \
} while (0)
@@ -1669,5 +1570,10 @@ extern Int VGOFF_(tls_ptr);
extern Int VGOFF_(helper_undefined_instruction);
-#endif /* ndef __CORE_H */
+// ---------------------------------------------------------------------
+// Architecture-specific things defined in eg. x86/*.c
+// ---------------------------------------------------------------------
+
+extern void VGA_(load_state) ( arch_thread_t*, ThreadId tid );
+extern void VGA_(save_state) ( arch_thread_t*, ThreadId tid );
@@ -1678,4 +1584,6 @@ extern Int VGOFF_(helper_undefined_instr
#include "config.h"
+#endif /* ndef __CORE_H */
+
/*--------------------------------------------------------------------*/
/*--- end ---*/
--- valgrind/coregrind/core_asm.h #1.1:1.2
@@ -32,5 +32,6 @@
#define __CORE_ASM_H
-#include "tool_asm.h"
+#include "tool_asm.h" // tool asm stuff
+#include "core_arch_asm.h" // arch-specific asm stuff
/* This file is included in all Valgrind source files, including
@@ -54,10 +55,4 @@
#define VG_TRC_UNRESUMABLE_SIGNAL 37 /* TRC only; got sigsegv/sigbus */
-/* size of call instruction put into generated code at jump sites */
-#define VG_PATCHME_CALLSZ 5
-
-/* size of jmp instruction which overwrites the call */
-#define VG_PATCHME_JMPSZ 5
-
/* maximum number of normal jumps which can appear in a basic block */
#define VG_MAX_JUMPS 2
--- valgrind/coregrind/valgrind.vs #1.4:1.5
@@ -5,4 +5,5 @@
vgProf_*;
vgOff_*;
+ vgArch_*;
local:
--- valgrind/coregrind/vg_execontext.c #1.17:1.18
@@ -315,7 +315,7 @@ void get_needed_regs(ThreadId tid, Addr*
/* thread in thread table */
ThreadState* tst = & VG_(threads)[ tid ];
- *eip = tst->m_eip;
- *ebp = tst->m_ebp;
- *esp = tst->m_esp;
+ *eip = tst->arch.m_eip;
+ *ebp = tst->arch.m_ebp;
+ *esp = tst->arch.m_esp;
*stack_highest_word = tst->stack_highest_word;
}
@@ -371,5 +371,5 @@ Addr VG_(get_EIP) ( ThreadId tid )
ret = VG_(baseBlock)[VGOFF_(m_eip)];
else
- ret = VG_(threads)[ tid ].m_eip;
+ ret = VG_(threads)[ tid ].arch.m_eip;
return ret;
--- valgrind/coregrind/vg_from_ucode.c #1.83:1.84
@@ -3428,12 +3428,12 @@ UInt VG_(get_thread_archreg) ( ThreadId
switch (arch) {
- case R_EAX: return tst->m_eax;
- case R_ECX: return tst->m_ecx;
- case R_EDX: return tst->m_edx;
- case R_EBX: return tst->m_ebx;
- case R_ESP: return tst->m_esp;
- case R_EBP: return tst->m_ebp;
- case R_ESI: return tst->m_esi;
- case R_EDI: return tst->m_edi;
+ case R_EAX: return tst->arch.m_eax;
+ case R_ECX: return tst->arch.m_ecx;
+ case R_EDX: return tst->arch.m_edx;
+ case R_EBX: return tst->arch.m_ebx;
+ case R_ESP: return tst->arch.m_esp;
+ case R_EBP: return tst->arch.m_ebp;
+ case R_ESI: return tst->arch.m_esi;
+ case R_EDI: return tst->arch.m_edi;
default: VG_(core_panic)( "get_thread_archreg");
}
@@ -3492,12 +3492,12 @@ UInt VG_(get_thread_shadow_archreg) ( Th
switch (archreg) {
- case R_EAX: return tst->sh_eax;
- case R_ECX: return tst->sh_ecx;
- case R_EDX: return tst->sh_edx;
- case R_EBX: return tst->sh_ebx;
- case R_ESP: return tst->sh_esp;
- case R_EBP: return tst->sh_ebp;
- case R_ESI: return tst->sh_esi;
- case R_EDI: return tst->sh_edi;
+ case R_EAX: return tst->arch.sh_eax;
+ case R_ECX: return tst->arch.sh_ecx;
+ case R_EDX: return tst->arch.sh_edx;
+ case R_EBX: return tst->arch.sh_ebx;
+ case R_ESP: return tst->arch.sh_esp;
+ case R_EBP: return tst->arch.sh_ebp;
+ case R_ESI: return tst->arch.sh_esi;
+ case R_EDI: return tst->arch.sh_edi;
default: VG_(core_panic)( "get_thread_shadow_archreg");
}
@@ -3512,12 +3512,12 @@ void VG_(set_thread_shadow_archreg) ( Th
switch (archreg) {
- case R_EAX: tst->sh_eax = val; break;
- case R_ECX: tst->sh_ecx = val; break;
- case R_EDX: tst->sh_edx = val; break;
- case R_EBX: tst->sh_ebx = val; break;
- case R_ESP: tst->sh_esp = val; break;
- case R_EBP: tst->sh_ebp = val; break;
- case R_ESI: tst->sh_esi = val; break;
- case R_EDI: tst->sh_edi = val; break;
+ case R_EAX: tst->arch.sh_eax = val; break;
+ case R_ECX: tst->arch.sh_ecx = val; break;
+ case R_EDX: tst->arch.sh_edx = val; break;
+ case R_EBX: tst->arch.sh_ebx = val; break;
+ case R_ESP: tst->arch.sh_esp = val; break;
+ case R_EBP: tst->arch.sh_ebp = val; break;
+ case R_ESI: tst->arch.sh_esi = val; break;
+ case R_EDI: tst->arch.sh_edi = val; break;
default: VG_(core_panic)( "set_thread_shadow_archreg");
}
--- valgrind/coregrind/vg_ldt.c #1.17:1.18
@@ -321,5 +321,5 @@ Int read_ldt ( ThreadId tid, UChar* ptr,
tid, ptr, bytecount );
- ldt = (Char*)(VG_(threads)[tid].ldt);
+ ldt = (Char*)(VG_(threads)[tid].arch.ldt);
err = 0;
if (ldt == NULL)
@@ -352,5 +352,5 @@ Int write_ldt ( ThreadId tid, void* ptr,
tid, ptr, bytecount, oldmode );
- ldt = VG_(threads)[tid].ldt;
+ ldt = VG_(threads)[tid].arch.ldt;
ldt_info = (struct vki_modify_ldt_ldt_s*)ptr;
@@ -373,5 +373,5 @@ Int write_ldt ( ThreadId tid, void* ptr,
if (ldt == NULL) {
ldt = VG_(allocate_LDT_for_thread)( NULL );
- VG_(threads)[tid].ldt = ldt;
+ VG_(threads)[tid].arch.ldt = ldt;
}
@@ -418,5 +418,5 @@ Int VG_(sys_set_thread_area) ( ThreadId
if (idx == -1) {
for (idx = 0; idx < VKI_GDT_TLS_ENTRIES; idx++) {
- VgLdtEntry* tls = VG_(threads)[tid].tls + idx;
+ VgLdtEntry* tls = VG_(threads)[tid].arch.tls + idx;
if (tls->LdtEnt.Words.word1 == 0 && tls->LdtEnt.Words.word2 == 0)
@@ -432,5 +432,5 @@ Int VG_(sys_set_thread_area) ( ThreadId
}
- translate_to_hw_format(info, VG_(threads)[tid].tls + idx, 0);
+ translate_to_hw_format(info, VG_(threads)[tid].arch.tls + idx, 0);
VG_TRACK( pre_mem_write, Vg_CoreSysCall, tid,
@@ -454,5 +454,5 @@ Int VG_(sys_get_thread_area) ( ThreadId
return -VKI_EINVAL;
- tls = VG_(threads)[tid].tls + idx - VKI_GDT_TLS_MIN;
+ tls = VG_(threads)[tid].arch.tls + idx - VKI_GDT_TLS_MIN;
info->base_addr = ( tls->LdtEnt.Bits.BaseHi << 24 ) |
--- valgrind/coregrind/vg_main.c #1.199:1.200
@@ -267,20 +267,20 @@ void VG_(start_debugger) ( Int tid )
ThreadState* tst = & VG_(threads)[ tid ];
- regs.cs = tst->m_cs;
- regs.ss = tst->m_ss;
- regs.ds = tst->m_ds;
- regs.es = tst->m_es;
- regs.fs = tst->m_fs;
- regs.gs = tst->m_gs;
- regs.eax = tst->m_eax;
- regs.ebx = tst->m_ebx;
- regs.ecx = tst->m_ecx;
- regs.edx = tst->m_edx;
- regs.esi = tst->m_esi;
- regs.edi = tst->m_edi;
- regs.ebp = tst->m_ebp;
- regs.esp = tst->m_esp;
- regs.eflags = tst->m_eflags;
- regs.eip = tst->m_eip;
+ regs.cs = tst->arch.m_cs;
+ regs.ss = tst->arch.m_ss;
+ regs.ds = tst->arch.m_ds;
+ regs.es = tst->arch.m_es;
+ regs.fs = tst->arch.m_fs;
+ regs.gs = tst->arch.m_gs;
+ regs.eax = tst->arch.m_eax;
+ regs.ebx = tst->arch.m_ebx;
+ regs.ecx = tst->arch.m_ecx;
+ regs.edx = tst->arch.m_edx;
+ regs.esi = tst->arch.m_esi;
+ regs.edi = tst->arch.m_edi;
+ regs.ebp = tst->arch.m_ebp;
+ regs.esp = tst->arch.m_esp;
+ regs.eflags = tst->arch.m_eflags;
+ regs.eip = tst->arch.m_eip;
}
--- valgrind/coregrind/vg_proxylwp.c #1.19:1.20
@@ -152,11 +152,11 @@ static void thread_syscall(Int syscallno
{
do_thread_syscall(syscallno, /* syscall no. */
- tst->m_ebx, /* arg 1 */
- tst->m_ecx, /* arg 2 */
- tst->m_edx, /* arg 3 */
- tst->m_esi, /* arg 4 */
- tst->m_edi, /* arg 5 */
- tst->m_ebp, /* arg 6 */
- &tst->m_eax, /* result */
+ tst->arch.m_ebx, /* arg 1 */
+ tst->arch.m_ecx, /* arg 2 */
+ tst->arch.m_edx, /* arg 3 */
+ tst->arch.m_esi, /* arg 4 */
+ tst->arch.m_edi, /* arg 5 */
+ tst->arch.m_ebp, /* arg 6 */
+ &tst->arch.m_eax, /* result */
state, /* state to update */
poststate); /* state when syscall has finished */
@@ -431,5 +431,5 @@ void VG_(proxy_handlesig)(const vki_ksig
the proxy and machine state here. */
vg_assert(px->state == PXS_RunSyscall);
- vg_assert(px->tst->m_eax == -VKI_ERESTARTSYS);
+ vg_assert(px->tst->arch.m_eax == -VKI_ERESTARTSYS);
} else if (sys_after <= eip && eip <= sys_done) {
/* We're after the syscall. Either it was interrupted by the
@@ -440,5 +440,5 @@ void VG_(proxy_handlesig)(const vki_ksig
px->state == PXS_SysDone);
px->state = PXS_SysDone;
- px->tst->m_eax = eax;
+ px->tst->arch.m_eax = eax;
}
px_printf(" signalled in state %s\n", pxs_name(px->state));
@@ -557,5 +557,5 @@ static Int proxylwp(void *v)
reply.u.syscallno = tst->syscallno;
- tst->m_eax = -VKI_ERESTARTSYS;
+ tst->arch.m_eax = -VKI_ERESTARTSYS;
px->state = PXS_IntReply;
break;
@@ -727,5 +727,5 @@ static Int proxylwp(void *v)
px_printf("RunSyscall in SigACK: rejecting syscall %d with ERESTARTSYS\n",
reply.u.syscallno);
- tst->m_eax = -VKI_ERESTARTSYS;
+ tst->arch.m_eax = -VKI_ERESTARTSYS;
} else {
Int syscallno = tst->syscallno;
@@ -734,5 +734,5 @@ static Int proxylwp(void *v)
/* If we're interrupted before we get to the syscall
itself, we want the syscall restarted. */
- tst->m_eax = -VKI_ERESTARTSYS;
+ tst->arch.m_eax = -VKI_ERESTARTSYS;
/* set our process group ID to match parent */
@@ -1143,5 +1143,5 @@ static void sys_wait_results(Bool block,
if (VG_(clo_trace_syscalls))
VG_(message)(Vg_DebugMsg, "sys_wait_results: got PX_RunSyscall for TID %d: syscall %d result %d",
- res.tid, tst->syscallno, tst->m_eax);
+ res.tid, tst->syscallno, tst->arch.m_eax);
if (tst->status != VgTs_WaitSys)
@@ -1302,6 +1302,6 @@ Int VG_(sys_issue)(int tid)
req.request = PX_RunSyscall;
- tst->syscallno = tst->m_eax;
- tst->m_eax = -VKI_ERESTARTSYS;
+ tst->syscallno = tst->arch.m_eax;
+ tst->arch.m_eax = -VKI_ERESTARTSYS;
res = VG_(write)(proxy->topx, &req, sizeof(req));
--- valgrind/coregrind/vg_scheduler.c #1.173:1.174
@@ -163,5 +163,5 @@ ThreadId VG_(first_matching_thread_stack
if (VG_(threads)[tid].status == VgTs_Empty) continue;
if (tid == tid_to_skip) continue;
- if ( p ( VG_(threads)[tid].m_esp,
+ if ( p ( VG_(threads)[tid].arch.m_esp,
VG_(threads)[tid].stack_highest_word, d ) )
return tid;
@@ -195,6 +195,6 @@ void VG_(pp_sched_status) ( void )
VG_(threads)[i].associated_cv );
VG_(pp_ExeContext)(
- VG_(get_ExeContext2)( VG_(threads)[i].m_eip, VG_(threads)[i].m_ebp,
- VG_(threads)[i].m_esp,
+ VG_(get_ExeContext2)( VG_(threads)[i].arch.m_eip, VG_(threads)[i].arch.m_ebp,
+ VG_(threads)[i].arch.m_esp,
VG_(threads)[i].stack_highest_word)
);
@@ -273,78 +273,11 @@ ThreadId VG_(get_current_or_recent_tid)
}
-static UInt insertDflag(UInt eflags, Int d)
-{
- vg_assert(d == 1 || d == -1);
- eflags &= ~EFlagD;
- if (d < 0) eflags |= EFlagD;
- return eflags;
-}
-
-static Int extractDflag(UInt eflags)
-{
- return ( eflags & EFlagD ? -1 : 1 );
-}
-
-/* Junk to fill up a thread's shadow regs with when shadow regs aren't
- being used. */
-#define VG_UNUSED_SHADOW_REG_VALUE 0x27182818
-
/* Copy the saved state of a thread into VG_(baseBlock), ready for it
to be run. */
static void load_thread_state ( ThreadId tid )
{
- Int i;
vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
- VG_(baseBlock)[VGOFF_(ldt)] = (UInt)VG_(threads)[tid].ldt;
- VG_(baseBlock)[VGOFF_(tls_ptr)] = (UInt)VG_(threads)[tid].tls;
- VG_(baseBlock)[VGOFF_(m_cs)] = VG_(threads)[tid].m_cs;
- VG_(baseBlock)[VGOFF_(m_ss)] = VG_(threads)[tid].m_ss;
- VG_(baseBlock)[VGOFF_(m_ds)] = VG_(threads)[tid].m_ds;
- VG_(baseBlock)[VGOFF_(m_es)] = VG_(threads)[tid].m_es;
- VG_(baseBlock)[VGOFF_(m_fs)] = VG_(threads)[tid].m_fs;
- VG_(baseBlock)[VGOFF_(m_gs)] = VG_(threads)[tid].m_gs;
-
- VG_(baseBlock)[VGOFF_(m_eax)] = VG_(threads)[tid].m_eax;
- VG_(baseBlock)[VGOFF_(m_ebx)] = VG_(threads)[tid].m_ebx;
- VG_(baseBlock)[VGOFF_(m_ecx)] = VG_(threads)[tid].m_ecx;
- VG_(baseBlock)[VGOFF_(m_edx)] = VG_(threads)[tid].m_edx;
- VG_(baseBlock)[VGOFF_(m_esi)] = VG_(threads)[tid].m_esi;
- VG_(baseBlock)[VGOFF_(m_edi)] = VG_(threads)[tid].m_edi;
- VG_(baseBlock)[VGOFF_(m_ebp)] = VG_(threads)[tid].m_ebp;
- VG_(baseBlock)[VGOFF_(m_esp)] = VG_(threads)[tid].m_esp;
- VG_(baseBlock)[VGOFF_(m_eflags)]
- = VG_(threads)[tid].m_eflags & ~EFlagD;
- VG_(baseBlock)[VGOFF_(m_dflag)]
- = extractDflag(VG_(threads)[tid].m_eflags);
- VG_(baseBlock)[VGOFF_(m_eip)] = VG_(threads)[tid].m_eip;
-
- for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
- VG_(baseBlock)[VGOFF_(m_ssestate) + i]
- = VG_(threads)[tid].m_sse[i];
-
- if (VG_(needs).shadow_regs) {
- VG_(baseBlock)[VGOFF_(sh_eax)] = VG_(threads)[tid].sh_eax;
- VG_(baseBlock)[VGOFF_(sh_ebx)] = VG_(threads)[tid].sh_ebx;
- VG_(baseBlock)[VGOFF_(sh_ecx)] = VG_(threads)[tid].sh_ecx;
- VG_(baseBlock)[VGOFF_(sh_edx)] = VG_(threads)[tid].sh_edx;
- VG_(baseBlock)[VGOFF_(sh_esi)] = VG_(threads)[tid].sh_esi;
- VG_(baseBlock)[VGOFF_(sh_edi)] = VG_(threads)[tid].sh_edi;
- VG_(baseBlock)[VGOFF_(sh_ebp)] = VG_(threads)[tid].sh_ebp;
- VG_(baseBlock)[VGOFF_(sh_esp)] = VG_(threads)[tid].sh_esp;
- VG_(baseBlock)[VGOFF_(sh_eflags)] = VG_(threads)[tid].sh_eflags;
- } else {
- /* Fields shouldn't be used -- check their values haven't changed. */
- vg_assert(
- VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_eax &&
- VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ebx &&
- VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ecx &&
- VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_edx &&
- VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_esi &&
- VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_edi &&
- VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_ebp &&
- VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_esp &&
- VG_UNUSED_SHADOW_REG_VALUE == VG_(threads)[tid].sh_eflags);
- }
+ VGA_(load_state)(&VG_(threads)[tid].arch, tid);
vg_tid_currently_in_baseBlock = tid;
@@ -361,106 +294,7 @@ static void load_thread_state ( ThreadId
static void save_thread_state ( ThreadId tid )
{
- Int i;
- const UInt junk = 0xDEADBEEF;
-
vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);
-
- /* We don't copy out the LDT entry, because it can never be changed
- by the normal actions of the thread, only by the modify_ldt
- syscall, in which case we will correctly be updating
- VG_(threads)[tid].ldt. This printf happens iff the following
- assertion fails. */
- if ((void*)VG_(threads)[tid].ldt != (void*)VG_(baseBlock)[VGOFF_(ldt)])
- VG_(printf)("VG_(threads)[%d].ldt=%p VG_(baseBlock)[VGOFF_(ldt)]=%p\n",
- tid, (void*)VG_(threads)[tid].ldt,
- (void*)VG_(baseBlock)[VGOFF_(ldt)]);
-
- vg_assert((void*)VG_(threads)[tid].ldt
- == (void*)VG_(baseBlock)[VGOFF_(ldt)]);
-
- /* We don't copy out the TLS entry, because it can never be changed
- by the normal actions of the thread, only by the set_thread_area
- syscall, in which case we will correctly be updating
- VG_(threads)[tid].tls. This printf happens iff the following
- assertion fails. */
- if ((void*)VG_(threads)[tid].tls != (void*)VG_(baseBlock)[VGOFF_(tls_ptr)])
- VG_(printf)("VG_(threads)[%d].tls=%p VG_(baseBlock)[VGOFF_(tls_ptr)]=%p\n",
- tid, (void*)VG_(threads)[tid].tls,
- (void*)VG_(baseBlock)[VGOFF_(tls_ptr)]);
-
- vg_assert((void*)VG_(threads)[tid].tls
- == (void*)VG_(baseBlock)[VGOFF_(tls_ptr)]);
-
- VG_(threads)[tid].m_cs = VG_(baseBlock)[VGOFF_(m_cs)];
- VG_(threads)[tid].m_ss = VG_(baseBlock)[VGOFF_(m_ss)];
- VG_(threads)[tid].m_ds = VG_(baseBlock)[VGOFF_(m_ds)];
- VG_(threads)[tid].m_es = VG_(baseBlock)[VGOFF_(m_es)];
- VG_(threads)[tid].m_fs = VG_(baseBlock)[VGOFF_(m_fs)];
- VG_(threads)[tid].m_gs = VG_(baseBlock)[VGOFF_(m_gs)];
-
- VG_(threads)[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
- VG_(threads)[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
- VG_(threads)[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
- VG_(threads)[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
- VG_(threads)[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
- VG_(threads)[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
- VG_(threads)[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
- VG_(threads)[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
- VG_(threads)[tid].m_eflags
- = insertDflag(VG_(baseBlock)[VGOFF_(m_eflags)],
- VG_(baseBlock)[VGOFF_(m_dflag)]);
- VG_(threads)[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
-
- for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
- VG_(threads)[tid].m_sse[i]
- = VG_(baseBlock)[VGOFF_(m_ssestate) + i];
-
- if (VG_(needs).shadow_regs) {
- VG_(threads)[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
- VG_(threads)[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
- VG_(threads)[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
- VG_(threads)[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
- VG_(threads)[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
- VG_(threads)[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
- VG_(threads)[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
- VG_(threads)[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
- VG_(threads)[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
- } else {
- /* Fill with recognisable junk */
- VG_(threads)[tid].sh_eax =
- VG_(threads)[tid].sh_ebx =
- VG_(threads)[tid].sh_ecx =
- VG_(threads)[tid].sh_edx =
- VG_(threads)[tid].sh_esi =
- VG_(threads)[tid].sh_edi =
- VG_(threads)[tid].sh_ebp =
- VG_(threads)[tid].sh_esp =
- VG_(threads)[tid].sh_eflags = VG_UNUSED_SHADOW_REG_VALUE;
- }
-
- /* Fill it up with junk. */
- VG_(baseBlock)[VGOFF_(ldt)] = junk;
- VG_(baseBlock)[VGOFF_(tls_ptr)] = junk;
- VG_(baseBlock)[VGOFF_(m_cs)] = junk;
- VG_(baseBlock)[VGOFF_(m_ss)] = junk;
- VG_(baseBlock)[VGOFF_(m_ds)] = junk;
- VG_(baseBlock)[VGOFF_(m_es)] = junk;
- VG_(baseBlock)[VGOFF_(m_fs)] = junk;
- VG_(baseBlock)[VGOFF_(m_gs)] = junk;
-
- VG_(baseBlock)[VGOFF_(m_eax)] = junk;
- VG_(baseBlock)[VGOFF_(m_ebx)] = junk;
- VG_(baseBlock)[VGOFF_(m_ecx)] = junk;
- VG_(baseBlock)[VGOFF_(m_edx)] = junk;
- VG_(baseBlock)[VGOFF_(m_esi)] = junk;
- VG_(baseBlock)[VGOFF_(m_edi)] = junk;
- VG_(baseBlock)[VGOFF_(m_ebp)] = junk;
- VG_(baseBlock)[VGOFF_(m_esp)] = junk;
- VG_(baseBlock)[VGOFF_(m_eflags)] = junk;
- VG_(baseBlock)[VGOFF_(m_eip)] = junk;
-
- for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
- VG_(baseBlock)[VGOFF_(m_ssestate) + i] = junk;
+ VGA_(save_state)(&VG_(threads)[tid].arch, tid);
vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
@@ -522,6 +356,6 @@ void mostly_clear_thread_record ( Thread
{
vg_assert(tid >= 0 && tid < VG_N_THREADS);
- VG_(threads)[tid].ldt = NULL;
- VG_(clear_TLS_for_thread)(VG_(threads)[tid].tls);
+ VG_(threads)[tid].arch.ldt = NULL;
+ VG_(clear_TLS_for_thread)(VG_(threads)[tid].arch.tls);
VG_(threads)[tid].tid = tid;
VG_(threads)[tid].status = VgTs_Empty;
@@ -588,5 +422,5 @@ void VG_(scheduler_init) ( void )
vg_tid_currently_in_baseBlock = tid_main;
vg_tid_last_in_baseBlock = tid_main;
- VG_(baseBlock)[VGOFF_(tls_ptr)] = (UInt)VG_(threads)[tid_main].tls;
+ VG_(baseBlock)[VGOFF_(tls_ptr)] = (UInt)VG_(threads)[tid_main].arch.tls;
save_thread_state ( tid_main );
@@ -633,10 +467,10 @@ void handle_signal_return ( ThreadId tid
if (VG_(threads)[tid].status == VgTs_Sleeping
- && VG_(threads)[tid].m_eax == __NR_nanosleep) {
+ && VG_(threads)[tid].arch.m_eax == __NR_nanosleep) {
/* We interrupted a nanosleep(). The right thing to do is to
write the unused time to nanosleep's second param, but that's
too much effort ... we just say that 1 nanosecond was not
used, and return EINTR. */
- rem = (struct vki_timespec *)VG_(threads)[tid].m_ecx; /* arg2 */
+ rem = (struct vki_timespec *)VG_(threads)[tid].arch.m_ecx; /* arg2 */
if (rem != NULL) {
rem->tv_sec = 0;
@@ -692,5 +526,5 @@ void sched_do_syscall ( ThreadId tid )
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
- syscall_no = VG_(threads)[tid].m_eax; /* syscall number */
+ syscall_no = VG_(threads)[tid].arch.m_eax; /* syscall number */
/* Special-case nanosleep because we can. But should we?
@@ -702,5 +536,5 @@ void sched_do_syscall ( ThreadId tid )
UInt t_now, t_awaken;
struct vki_timespec* req;
- req = (struct vki_timespec*)VG_(threads)[tid].m_ebx; /* arg1 */
+ req = (struct vki_timespec*)VG_(threads)[tid].arch.m_ebx; /* arg1 */
if (req->tv_sec < 0 || req->tv_nsec < 0 || req->tv_nsec >= 1000000000) {
@@ -1003,8 +837,8 @@ VgSchedReturnCode do_scheduler ( Int* ex
if (VG_(bbs_done) > 31700000 + 0) {
dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
- VG_(translate)(&VG_(threads)[tid], VG_(threads)[tid].m_eip,
+ VG_(translate)(&VG_(threads)[tid], VG_(threads)[tid].arch.m_eip,
/*debugging*/True);
}
- vg_assert(VG_(threads)[tid].m_eip != 0);
+ vg_assert(VG_(threads)[tid].arch.m_eip != 0);
# endif
@@ -1012,7 +846,7 @@ VgSchedReturnCode do_scheduler ( Int* ex
# if 0
- if (0 == VG_(threads)[tid].m_eip) {
+ if (0 == VG_(threads)[tid].arch.m_eip) {
VG_(printf)("tid = %d, dc = %llu\n", tid, VG_(bbs_done));
- vg_assert(0 != VG_(threads)[tid].m_eip);
+ vg_assert(0 != VG_(threads)[tid].arch.m_eip);
}
# endif
@@ -1026,9 +860,9 @@ VgSchedReturnCode do_scheduler ( Int* ex
/* Trivial event. Miss in the fast-cache. Do a full
lookup for it. */
- trans_addr = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
+ trans_addr = VG_(search_transtab) ( VG_(threads)[tid].arch.m_eip );
if (trans_addr == (Addr)0) {
/* Not found; we need to request a translation. */
- VG_(translate)( tid, VG_(threads)[tid].m_eip, /*debug*/False );
- trans_addr = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
+ VG_(translate)( tid, VG_(threads)[tid].arch.m_eip, /*debug*/False );
+ trans_addr = VG_(search_transtab) ( VG_(threads)[tid].arch.m_eip );
if (trans_addr == (Addr)0)
VG_(core_panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
@@ -1038,5 +872,5 @@ VgSchedReturnCode do_scheduler ( Int* ex
if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
- UInt reqno = *(UInt*)(VG_(threads)[tid].m_eax);
+ UInt reqno = *(UInt*)(VG_(threads)[tid].arch.m_eax);
/* VG_(printf)("request 0x%x\n", reqno); */
@@ -1068,5 +902,5 @@ VgSchedReturnCode do_scheduler ( Int* ex
# if 0
{ UInt* esp; Int i;
- esp=(UInt*)VG_(threads)[tid].m_esp;
+ esp=(UInt*)VG_(threads)[tid].arch.m_esp;
VG_(printf)("\nBEFORE\n");
for (i = 10; i >= -10; i--)
@@ -1086,10 +920,10 @@ VgSchedReturnCode do_scheduler ( Int* ex
the unprotected malloc/free system. */
- if (VG_(threads)[tid].m_eax == __NR_exit
- || VG_(threads)[tid].m_eax == __NR_exit_group
+ if (VG_(threads)[tid].arch.m_eax == __NR_exit
+ || VG_(threads)[tid].arch.m_eax == __NR_exit_group
) {
/* If __NR_exit, remember the supplied argument. */
- *exitcode = VG_(threads)[tid].m_ebx; /* syscall arg1 */
+ *exitcode = VG_(threads)[tid].arch.m_ebx; /* syscall arg1 */
/* Only run __libc_freeres if the tool says it's ok and
@@ -1106,5 +940,5 @@ VgSchedReturnCode do_scheduler ( Int* ex
}
VG_(nuke_all_threads_except) ( tid );
- VG_(threads)[tid].m_eip = (UInt)__libc_freeres_wrapper;
+ VG_(threads)[tid].arch.m_eip = (UInt)__libc_freeres_wrapper;
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
goto stage1; /* party on, dudes (but not for much longer :) */
@@ -1123,11 +957,11 @@ VgSchedReturnCode do_scheduler ( Int* ex
/* We've dealt with __NR_exit at this point. */
- vg_assert(VG_(threads)[tid].m_eax != __NR_exit &&
- VG_(threads)[tid].m_eax != __NR_exit_group);
+ vg_assert(VG_(threads)[tid].arch.m_eax != __NR_exit &&
+ VG_(threads)[tid].arch.m_eax != __NR_exit_group);
/* Trap syscalls to __NR_sched_yield and just have this
thread yield instead. Not essential, just an
optimisation. */
- if (VG_(threads)[tid].m_eax == __NR_sched_yield) {
+ if (VG_(threads)[tid].arch.m_eax == __NR_sched_yield) {
SET_SYSCALL_RETVAL(tid, 0); /* syscall returns with success */
goto stage1; /* find a new thread to run */
@@ -1138,5 +972,5 @@ VgSchedReturnCode do_scheduler ( Int* ex
# if 0
{ UInt* esp; Int i;
- esp=(UInt*)VG_(threads)[tid].m_esp;
+ esp=(UInt*)VG_(threads)[tid].arch.m_esp;
VG_(printf)("AFTER\n");
for (i = 10; i >= -10; i--)
@@ -1320,7 +1154,7 @@ void make_thread_jump_to_cancelhdlr ( Th
/* Push a suitable arg, and mark it as readable. */
- SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 4);
- * (UInt*)(VG_(threads)[tid].m_esp) = (UInt)PTHREAD_CANCELED;
- VG_TRACK( post_mem_write, VG_(threads)[tid].m_esp, sizeof(void*) );
+ SET_PTHREQ_ESP(tid, VG_(threads)[tid].arch.m_esp - 4);
+ * (UInt*)(VG_(threads)[tid].arch.m_esp) = (UInt)PTHREAD_CANCELED;
+ VG_TRACK( post_mem_write, VG_(threads)[tid].arch.m_esp, sizeof(void*) );
/* Push a bogus return address. It will not return, but we still
@@ -1328,9 +1162,9 @@ void make_thread_jump_to_cancelhdlr ( Th
Don't mark as readable; any attempt to read this is and internal
valgrind bug since thread_exit_wrapper should not return. */
- SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 4);
- * (UInt*)(VG_(threads)[tid].m_esp) = 0xBEADDEEF;
+ SET_PTHREQ_ESP(tid, VG_(threads)[tid].arch.m_esp - 4);
+ * (UInt*)(VG_(threads)[tid].arch.m_esp) = 0xBEADDEEF;
/* .cancel_pend will hold &thread_exit_wrapper */
- VG_(threads)[tid].m_eip = (UInt)VG_(threads)[tid].cancel_pend;
+ VG_(threads)[tid].arch.m_eip = (UInt)VG_(threads)[tid].cancel_pend;
VG_(proxy_abort_syscall)(tid);
@@ -1414,9 +1248,9 @@ void cleanup_after_thread_exited ( Threa
/* Deallocate its LDT, if it ever had one. */
- VG_(deallocate_LDT_for_thread)( VG_(threads)[tid].ldt );
- VG_(threads)[tid].ldt = NULL;
+ VG_(deallocate_LDT_for_thread)( VG_(threads)[tid].arch.ldt );
+ VG_(threads)[tid].arch.ldt = NULL;
/* Clear its TLS array. */
- VG_(clear_TLS_for_thread)( VG_(threads)[tid].tls );
+ VG_(clear_TLS_for_thread)( VG_(threads)[tid].arch.tls );
/* Not interested in the timeout anymore */
@@ -1920,17 +1754,17 @@ void do__apply_in_new_thread ( ThreadId
/* We inherit our parent's LDT. */
- if (VG_(threads)[parent_tid].ldt == NULL) {
+ if (VG_(threads)[parent_tid].arch.ldt == NULL) {
/* We hope this is the common case. */
VG_(baseBlock)[VGOFF_(ldt)] = 0;
} else {
/* No luck .. we have to take a copy of the parent's. */
- VG_(threads)[tid].ldt
- = VG_(allocate_LDT_for_thread)( VG_(threads)[parent_tid].ldt );
- VG_(baseBlock)[VGOFF_(ldt)] = (UInt)VG_(threads)[tid].ldt;
+ VG_(threads)[tid].arch.ldt
+ = VG_(allocate_LDT_for_thread)( VG_(threads)[parent_tid].arch.ldt );
+ VG_(baseBlock)[VGOFF_(ldt)] = (UInt)VG_(threads)[tid].arch.ldt;
}
/* Initialise the thread's TLS array */
- VG_(clear_TLS_for_thread)( VG_(threads)[tid].tls );
- VG_(baseBlock)[VGOFF_(tls_ptr)] = (UInt)VG_(threads)[tid].tls;
+ VG_(clear_TLS_for_thread)( VG_(threads)[tid].arch.tls );
+ VG_(baseBlock)[VGOFF_(tls_ptr)] = (UInt)VG_(threads)[tid].arch.tls;
save_thread_state(tid);
@@ -1981,23 +1815,23 @@ void do__apply_in_new_thread ( ThreadId
VG_(threads)[tid].stack_size
- VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
- VG_TRACK ( ban_mem_stack, VG_(threads)[tid].m_esp,
+ VG_TRACK ( ban_mem_stack, VG_(threads)[tid].arch.m_esp,
VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
/* push two args */
- SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 8);
+ SET_PTHREQ_ESP(tid, VG_(threads)[tid].arch.m_esp - 8);
- VG_TRACK ( new_mem_stack, (Addr)VG_(threads)[tid].m_esp, 2 * 4 );
+ VG_TRACK ( new_mem_stack, (Addr)VG_(threads)[tid].arch.m_esp, 2 * 4 );
VG_TRACK ( pre_mem_write, Vg_CorePThread, tid, "new thread: stack",
- (Addr)VG_(threads)[tid].m_esp, 2 * 4 );
+ (Addr)VG_(threads)[tid].arch.m_esp, 2 * 4 );
/* push arg and (bogus) return address */
- * (UInt*)(VG_(threads)[tid].m_esp+4) = (UInt)arg;
- * (UInt*)(VG_(threads)[tid].m_esp)
+ * (UInt*)(VG_(threads)[tid].arch.m_esp+4) = (UInt)arg;
+ * (UInt*)(VG_(threads)[tid].arch.m_esp)
= (UInt)&do__apply_in_new_thread_bogusRA;
- VG_TRACK ( post_mem_write, VG_(threads)[tid].m_esp, 2 * 4 );
+ VG_TRACK ( post_mem_write, VG_(threads)[tid].arch.m_esp, 2 * 4 );
/* this is where we start */
- VG_(threads)[tid].m_eip = (UInt)fn;
+ VG_(threads)[tid].arch.m_eip = (UInt)fn;
if (VG_(clo_trace_sched)) {
@@ -3002,5 +2836,5 @@ static
void do_client_request ( ThreadId tid )
{
- UInt* arg = (UInt*)(VG_(threads)[tid].m_eax);
+ UInt* arg = (UInt*)(VG_(threads)[tid].arch.m_eax);
UInt req_no = arg[0];
@@ -3431,5 +3265,5 @@ void scheduler_sanity ( void )
Int
stack_used = (Addr)VG_(threads)[i].stack_highest_word
- - (Addr)VG_(threads)[i].m_esp;
+ - (Addr)VG_(threads)[i].arch.m_esp;
Int
stack_avail = VG_(threads)[i].stack_size
--- valgrind/coregrind/vg_signals.c #1.82:1.83
@@ -490,7 +490,7 @@ void VG_(do__NR_sigaltstack) ( ThreadId
vg_assert(VG_(is_valid_tid)(tid));
- ss = (vki_kstack_t*)(VG_(threads)[tid].m_ebx);
- oss = (vki_kstack_t*)(VG_(threads)[tid].m_ecx);
- m_esp = VG_(threads)[tid].m_esp;
+ ss = (vki_kstack_t*)(VG_(threads)[tid].arch.m_ebx);
+ oss = (vki_kstack_t*)(VG_(threads)[tid].arch.m_ecx);
+ m_esp = VG_(threads)[tid].arch.m_esp;
if (VG_(clo_trace_signals))
@@ -507,5 +507,5 @@ void VG_(do__NR_sigaltstack) ( ThreadId
if (ss != NULL) {
- if (on_sig_stack(tid, VG_(threads)[tid].m_esp)) {
+ if (on_sig_stack(tid, VG_(threads)[tid].arch.m_esp)) {
SET_SYSCALL_RETVAL(tid, -VKI_EPERM);
return;
@@ -543,7 +543,7 @@ void VG_(do__NR_sigaction) ( ThreadId ti
vg_assert(VG_(is_valid_tid)(tid));
- signo = VG_(threads)[tid].m_ebx; /* int sigNo */
- new_act = (vki_ksigaction*)(VG_(threads)[tid].m_ecx);
- old_act = (vki_ksigaction*)(VG_(threads)[tid].m_edx);
+ signo = VG_(threads)[tid].arch.m_ebx; /* int sigNo */
+ new_act = (vki_ksigaction*)(VG_(threads)[tid].arch.m_ecx);
+ old_act = (vki_ksigaction*)(VG_(threads)[tid].arch.m_edx);
if (VG_(clo_trace_signals))
@@ -922,5 +922,5 @@ static void synth_ucontext(ThreadId tid,
uc->uc_stack = tst->altstack;
-#define SC(reg) sc->reg = tst->m_##reg
+#define SC(reg) sc->reg = tst->arch.m_##reg
SC(gs);
SC(fs);
@@ -974,5 +974,5 @@ void vg_push_signal_frame ( ThreadId tid
already using. Logic from get_sigframe in
arch/i386/kernel/signal.c. */
- sas_ss_flags(tid, tst->m_esp) == 0
+ sas_ss_flags(tid, tst->arch.m_esp) == 0
) {
esp_top_of_frame
@@ -987,5 +987,5 @@ void vg_push_signal_frame ( ThreadId tid
} else {
- esp_top_of_frame = tst->m_esp;
+ esp_top_of_frame = tst->arch.m_esp;
/* Signal delivery to tools */
@@ -1026,5 +1026,5 @@ void vg_push_signal_frame ( ThreadId tid
VG_(memcpy)(&frame->sigInfo, siginfo, sizeof(vki_ksiginfo_t));
if (sigNo == VKI_SIGFPE) {
- frame->sigInfo._sifields._sigfault._addr = (void *)tst->m_eip;
+ frame->sigInfo._sifields._sigfault._addr = (void *)tst->arch.m_eip;
}
VG_TRACK( post_mem_write, (Addr)&frame->sigInfo, sizeof(frame->sigInfo) );
@@ -1054,27 +1054,27 @@ void vg_push_signal_frame ( ThreadId tid
for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
- frame->m_sse[i] = tst->m_sse[i];
+ frame->m_sse[i] = tst->arch.m_sse[i];
- frame->m_eax = tst->m_eax;
- frame->m_ecx = tst->m_ecx;
- frame->m_edx = tst->m_edx;
- frame->m_ebx = tst->m_ebx;
- frame->m_ebp = tst->m_ebp;
- frame->m_esp = tst->m_esp;
- frame->m_esi = tst->m_esi;
- frame->m_edi = tst->m_edi;
- frame->m_eflags = tst->m_eflags;
- frame->m_eip = tst->m_eip;
+ frame->m_eax = tst->arch.m_eax;
+ frame->m_ecx = tst->arch.m_ecx;
+ frame->m_edx = tst->arch.m_edx;
+ frame->m_ebx = tst->arch.m_ebx;
+ frame->m_ebp = tst->arch.m_ebp;
+ frame->m_esp = tst->arch.m_esp;
+ frame->m_esi = tst->arch.m_esi;
+ frame->m_edi = tst->arch.m_edi;
+ frame->m_eflags = tst->arch.m_eflags;
+ frame->m_eip = tst->arch.m_eip;
if (VG_(needs).shadow_regs) {
- frame->sh_eax = tst->sh_eax;
- frame->sh_ecx = tst->sh_ecx;
- frame->sh_edx = tst->sh_edx;
- frame->sh_ebx = tst->sh_ebx;
- frame->sh_ebp = tst->sh_ebp;
- frame->sh_esp = tst->sh_esp;
- frame->sh_esi = tst->sh_esi;
- frame->sh_edi = tst->sh_edi;
- frame->sh_eflags = tst->sh_eflags;
+ frame->sh_eax = tst->arch.sh_eax;
+ frame->sh_ecx = tst->arch.sh_ecx;
+ frame->sh_edx = tst->arch.sh_edx;
+ frame->sh_ebx = tst->arch.sh_ebx;
+ frame->sh_ebp = tst->arch.sh_ebp;
+ frame->sh_esp = tst->arch.sh_esp;
+ frame->sh_esi = tst->arch.sh_esi;
+ frame->sh_edi = tst->arch.sh_edi;
+ frame->sh_eflags = tst->arch.sh_eflags;
}
@@ -1093,8 +1093,8 @@ void vg_push_signal_frame ( ThreadId tid
vg_assert(& VG_(threads)[tid] == tst);
/* Set the thread so it will next run the handler. */
- /* tst->m_esp = esp; */
+ /* tst->arch.m_esp = esp; */
SET_SIGNAL_ESP(tid, esp);
- tst->m_eip = (Addr)vg_scss.scss_per_sig[sigNo].scss_handler;
+ tst->arch.m_eip = (Addr)vg_scss.scss_per_sig[sigNo].scss_handler;
/* This thread needs to be marked runnable, but we leave that the
caller to do. */
@@ -1102,5 +1102,5 @@ void vg_push_signal_frame ( ThreadId tid
if (0)
VG_(printf)("pushed signal frame; %%ESP now = %p, next %%EBP = %p, status=%d\n",
- esp, tst->m_eip, tst->status);
+ esp, tst->arch.m_eip, tst->status);
}
@@ -1120,5 +1120,5 @@ Int vg_pop_signal_frame ( ThreadId tid )
/* Correctly reestablish the frame base address. */
- esp = tst->m_esp;
+ esp = tst->arch.m_esp;
frame = (VgSigFrame*)
(esp -4 /* because the handler's RET pops the RA */
@@ -1136,27 +1136,27 @@ Int vg_pop_signal_frame ( ThreadId tid )
/* restore machine state */
for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
- tst->m_sse[i] = frame->m_sse[i];
+ tst->arch.m_sse[i] = frame->m_sse[i];
- tst->m_eax = frame->m_eax;
- tst->m_ecx = frame->m_ecx;
- tst->m_edx = frame->m_edx;
- tst->m_ebx = frame->m_ebx;
- tst->m_ebp = frame->m_ebp;
- tst->m_esp = frame->m_esp;
- tst->m_esi = frame->m_esi;
- tst->m_edi = frame->m_edi;
- tst->m_eflags = frame->m_eflags;
- tst->m_eip = frame->m_eip;
+ tst->arch.m_eax = frame->m_eax;
+ tst->arch.m_ecx = frame->m_ecx;
+ tst->arch.m_edx = frame->m_edx;
+ tst->arch.m_ebx = frame->m_ebx;
+ tst->arch.m_ebp = frame->m_ebp;
+ tst->arch.m_esp = frame->m_esp;
+ tst->arch.m_esi = frame->m_esi;
+ tst->arch.m_edi = frame->m_edi;
+ tst->arch.m_eflags = frame->m_eflags;
+ tst->arch.m_eip = frame->m_eip;
if (VG_(needs).shadow_regs) {
- tst->sh_eax = frame->sh_eax;
- tst->sh_ecx = frame->sh_ecx;
- tst->sh_edx = frame->sh_edx;
- tst->sh_ebx = frame->sh_ebx;
- tst->sh_ebp = frame->sh_ebp;
- tst->sh_esp = frame->sh_esp;
- tst->sh_esi = frame->sh_esi;
- tst->sh_edi = frame->sh_edi;
- tst->sh_eflags = frame->sh_eflags;
+ tst->arch.sh_eax = frame->sh_eax;
+ tst->arch.sh_ecx = frame->sh_ecx;
+ tst->arch.sh_edx = frame->sh_edx;
+ tst->arch.sh_ebx = frame->sh_ebx;
+ tst->arch.sh_ebp = frame->sh_ebp;
+ tst->arch.sh_esp = frame->sh_esp;
+ tst->arch.sh_esi = frame->sh_esi;
+ tst->arch.sh_edi = frame->sh_edi;
+ tst->arch.sh_eflags = frame->sh_eflags;
}
@@ -1474,22 +1474,22 @@ static void fill_prstatus(const ThreadSt
regs->gs = VG_(baseBlock)[VGOFF_(m_gs)];
} else {
- regs->eflags = tst->m_eflags;
- regs->esp = tst->m_esp;
- regs->eip = tst->m_eip;
+ regs->eflags = tst->arch.m_eflags;
+ regs->esp = tst->arch.m_esp;
+ regs->eip = tst->arch.m_eip;
- regs->ebx = tst->m_ebx;
- regs->ecx = tst->m_ecx;
- regs->edx = tst->m_edx;
- regs->esi = tst->m_esi;
- regs->edi = tst->m_edi;
- regs->ebp = tst->m_ebp;
- regs->eax = tst->m_eax;
+ regs->ebx = tst->arch.m_ebx;
+ regs->ecx = tst->arch.m_ecx;
+ regs->edx = tst->arch.m_edx;
+ regs->esi = tst->arch.m_esi;
+ regs->edi = tst->arch.m_edi;
+ regs->ebp = tst->arch.m_ebp;
+ regs->eax = tst->arch.m_eax;
- regs->cs = tst->m_cs;
- regs->ds = tst->m_ds;
- regs->ss = tst->m_ss;
- regs->es = tst->m_es;
- regs->fs = tst->m_fs;
- regs->gs = tst->m_gs;
+ regs->cs = tst->arch.m_cs;
+ regs->ds = tst->arch.m_ds;
+ regs->ss = tst->arch.m_ss;
+ regs->es = tst->arch.m_es;
+ regs->fs = tst->arch.m_fs;
+ regs->gs = tst->arch.m_gs;
}
}
@@ -1502,5 +1502,5 @@ static void fill_fpu(const ThreadState *
from = (const Char *)&VG_(baseBlock)[VGOFF_(m_ssestate)];
} else {
- from = (const Char *)&tst->m_sse;
+ from = (const Char *)&tst->arch.m_sse;
}
@@ -1528,5 +1528,5 @@ static void fill_xfpu(const ThreadState
from = (UShort *)&VG_(baseBlock)[VGOFF_(m_ssestate)];
else
- from = (UShort *)tst->m_sse;
+ from = (UShort *)tst->arch.m_sse;
VG_(memcpy)(xfpu, from, sizeof(*xfpu));
@@ -1868,6 +1868,6 @@ void VG_(deliver_signal) ( ThreadId tid,
if (tst->status == VgTs_WaitSys) {
/* blocked in a syscall; we assume it should be interrupted */
- if (tst->m_eax == -VKI_ERESTARTSYS)
- tst->m_eax = -VKI_EINTR;
+ if (tst->arch.m_eax == -VKI_ERESTARTSYS)
+ tst->arch.m_eax = -VKI_EINTR;
}
@@ -2077,5 +2077,5 @@ void vg_sync_signalhandler ( Int sigNo,
Addr fault = (Addr)info->_sifields._sigfault._addr;
Addr esp = VG_(is_running_thread)(tid) ?
- VG_(baseBlock)[VGOFF_(m_esp)] : VG_(threads)[tid].m_esp;
+ VG_(baseBlock)[VGOFF_(m_esp)] : VG_(threads)[tid].arch.m_esp;
Segment *seg;
--- valgrind/coregrind/vg_symtab2.c #1.86:1.87
@@ -2025,12 +2025,12 @@ static UInt *regaddr(ThreadId tid, Int r
switch(regno) {
- case R_EAX: ret = &tst->m_eax; break;
- case R_ECX: ret = &tst->m_ecx; break;
- case R_EDX: ret = &tst->m_edx; break;
- case R_EBX: ret = &tst->m_ebx; break;
- case R_ESP: ret = &tst->m_esp; break;
- case R_EBP: ret = &tst->m_ebp; break;
- case R_ESI: ret = &tst->m_esi; break;
- case R_EDI: ret = &tst->m_edi; break;
+ case R_EAX: ret = &tst->arch.m_eax; break;
+ case R_ECX: ret = &tst->arch.m_ecx; break;
+ case R_EDX: ret = &tst->arch.m_edx; break;
+ case R_EBX: ret = &tst->arch.m_ebx; break;
+ case R_ESP: ret = &tst->arch.m_esp; break;
+ case R_EBP: ret = &tst->arch.m_ebp; break;
+ case R_ESI: ret = &tst->arch.m_esi; break;
+ case R_EDI: ret = &tst->arch.m_edi; break;
default:
break;
--- valgrind/coregrind/vg_syscalls.c #1.133:1.134
@@ -1011,12 +1011,12 @@ static Bool fd_allowed(Int fd, const Cha
POST(new) __attribute__((alias(STR(after_##old))))
-#define SYSNO (tst->m_eax) /* in PRE(x) */
-#define res (tst->m_eax) /* in POST(x) */
-#define arg1 (tst->m_ebx)
-#define arg2 (tst->m_ecx)
-#define arg3 (tst->m_edx)
-#define arg4 (tst->m_esi)
-#define arg5 (tst->m_edi)
-#define arg6 (tst->m_ebp)
+#define SYSNO (tst->arch.m_eax) /* in PRE(x) */
+#define res (tst->arch.m_eax) /* in POST(x) */
+#define arg1 (tst->arch.m_ebx)
+#define arg2 (tst->arch.m_ecx)
+#define arg3 (tst->arch.m_edx)
+#define arg4 (tst->arch.m_esi)
+#define arg5 (tst->arch.m_edi)
+#define arg6 (tst->arch.m_ebp)
PRE(exit_group)
@@ -6285,5 +6285,5 @@ static void restart_syscall(ThreadId tid
SYSNO = tst->syscallno;
- tst->m_eip -= 2; /* sizeof(int $0x80) */
+ tst->arch.m_eip -= 2; /* sizeof(int $0x80) */
/* Make sure our caller is actually sane, and we're really backing
@@ -6293,10 +6293,10 @@ static void restart_syscall(ThreadId tid
*/
{
- UChar *p = (UChar *)tst->m_eip;
+ UChar *p = (UChar *)tst->arch.m_eip;
if (p[0] != 0xcd || p[1] != 0x80)
VG_(message)(Vg_DebugMsg,
"?! restarting over syscall at %p %02x %02x\n",
- tst->m_eip, p[0], p[1]);
+ tst->arch.m_eip, p[0], p[1]);
vg_assert(p[0] == 0xcd && p[1] == 0x80);
--- valgrind/coregrind/vg_translate.c #1.89:1.90
@@ -1590,6 +1590,6 @@ static void vg_improve ( UCodeBlock* cb
{
Int i, j, k, m, n, ar, tr, told, actual_areg;
- Int areg_map[8];
- Bool annul_put[8];
+ Int areg_map[N_ARCH_REGS];
+ Bool annul_put[N_ARCH_REGS];
Int tempUse[VG_MAX_REGS_USED];
Bool isWrites[VG_MAX_REGS_USED];
@@ -1637,5 +1637,5 @@ static void vg_improve ( UCodeBlock* cb
{ Int q; \
/* Invalidate any old binding(s) to tempreg. */ \
- for (q = 0; q < 8; q++) \
+ for (q = 0; q < N_ARCH_REGS; q++) \
if (areg_map[q] == tempreg) areg_map[q] = -1; \
/* Add the new binding. */ \
@@ -1644,5 +1644,5 @@ static void vg_improve ( UCodeBlock* cb
/* Set up the A-reg map. */
- for (i = 0; i < 8; i++) areg_map[i] = -1;
+ for (i = 0; i < N_ARCH_REGS; i++) areg_map[i] = -1;
/* Scan insns. */
@@ -1736,5 +1736,5 @@ static void vg_improve ( UCodeBlock* cb
if (!wr) continue;
tr = tempUse[j];
- for (m = 0; m < 8; m++)
+ for (m = 0; m < N_ARCH_REGS; m++)
if (areg_map[m] == tr) areg_map[m] = -1;
}
@@ -1750,5 +1750,5 @@ static void vg_improve ( UCodeBlock* cb
actually required by other analyses (cache simulation), it's
simplest to be consistent for all end-uses. */
- for (j = 0; j < 8; j++)
+ for (j = 0; j < N_ARCH_REGS; j++)
annul_put[j] = False;
@@ -1776,5 +1776,5 @@ static void vg_improve ( UCodeBlock* cb
else if (u->opcode == JMP || u->opcode == JIFZ
|| u->opcode == CALLM) {
- for (j = 0; j < 8; j++)
+ for (j = 0; j < N_ARCH_REGS; j++)
annul_put[j] = False;
}
--- valgrind/coregrind/x86/Makefile.am #1.6:1.7
@@ -1,5 +1,14 @@
+include $(top_srcdir)/Makefile.all.am
+include $(top_srcdir)/Makefile.core-AM_CPPFLAGS.am
+
+AM_CFLAGS = $(WERROR) -Winline -Wall -Wshadow -O -fomit-frame-pointer -g
+
noinst_HEADERS = \
+ core_arch.h \
+ core_arch_asm.h \
ume_archdefs.h
+noinst_LIBRARIES = libarch.a
+
EXTRA_DIST = \
ume_archdefs.c \
@@ -10,4 +19,7 @@
CLEANFILES = stage2.lds
+libarch_a_SOURCES = \
+ state.c
+
# Extract ld's default linker script and hack it to our needs
stage2.lds: Makefile
--- valgrind/include/Makefile.am #1.7:1.8
@@ -1,2 +1,5 @@
+
+SUBDIRS = $(VG_ARCH) .
+
EXTRA_DIST = \
vg_profile.c \
--- valgrind/include/tool.h.base #1.3:1.4
@@ -36,7 +36,5 @@
#include "tool_asm.h" // asm stuff
-
-// XXX: here temporarily, will eventually go in arch-specific headers...
-#define REGPARM(x) __attribute__((regparm (x)))
+#include "tool_arch.h" // arch-specific tool stuff
/* ---------------------------------------------------------------------
--- valgrind/include/tool_asm.h #1.1:1.2
@@ -44,4 +44,5 @@
#define VGP_(str) VGAPPEND(vgProf_,str)
#define VGOFF_(str) VGAPPEND(vgOff_,str)
+#define VGA_(str) VGAPPEND(vgArch_,str)
/* Tool-specific ones. Note that final name still starts with "vg". */
--- valgrind/memcheck/mc_main.c #1.52:1.53
@@ -723,5 +723,5 @@ static void mc_post_regs_write_init ( vo
{
UInt i;
- for (i = R_EAX; i <= R_EDI; i++)
+ for (i = FIRST_ARCH_REG; i <= LAST_ARCH_REG; i++)
VG_(set_shadow_archreg)( i, VGM_WORD_VALID );
VG_(set_shadow_eflags)( VGM_EFLAGS_VALID );
|
|
From: Nicholas N. <nj...@ca...> - 2004-09-03 08:49:17
|
On Thu, 2 Sep 2004, Julian Seward wrote: > This is great! It's the first example I've seen of using > diff to convert a picture of a cube into a picture of a > 4-D hypercube. Most ingenious. (: You can thank 'diff'; the first diff I looked at just had all the additions in one part, and all the subtractions in another. But when I committed it decided to do the interleaving thing... |
|
From: Nicholas N. <nj...@ca...> - 2004-09-03 08:40:58
|
On Fri, 3 Sep 2004, Chris January wrote:
>> Now that 2.2.0 is out is there any chance of getting the
>> first of my debugging support patches into CVS?
>
> Should I treat the silence as an implicit 'no'?
No. More likely that no-one has any strong opinions, or everyone is busy
with other things at the moment.
I looked at your patch, but haven't run it. I'm confused by a few
things.
- The single_step field you added to ThreadState -- it doesn't seem to
ever be set.
- You've changed a few test conditions by one, and changed the 'jz' in the
dispatch loop to 'jle', but have not explained why this is necessary.
Some comments in the code would be helpful.
- You added 'stopped' to ThreadState, but I also can't see how it is set.
- 'stopped' is used like this:
+ if (VG_(threads)[tid_next].status == VgTs_Runnable &&
+ !VG_(threads)[tid_next].stopped)
should 'stopped' rather be folded into the 'status' type somehow? Eg.
have a VgTs_RunnableButStopped state, or similar?
- You said:
> These should be the only changes to existing parts of
> the core that are required to support live debugging. Obviously this
> patch doesn't add full debugging support in and of itself.
What more is required to get full debugging support? How much code will
it be?
- This code is unnecessarily repetitive:
+ if (trc == VG_TRC_EBP_JMP_BREAKPOINT) {
+ /* A software breakpoint. */
+ vg_tid_currently_in_baseBlock = vg_tid_last_in_baseBlock;
+ VG_(kkill) (0, VKI_SIGTRAP);
+ vg_tid_currently_in_baseBlock = 0;
+ break;
+ }
+
+ if (trc == VG_TRC_INNER_COUNTERZERO &&
+ VG_(threads)[tid].single_step) {
+ vg_tid_currently_in_baseBlock = vg_tid_last_in_baseBlock;
+ VG_(kkill) (0, VKI_SIGTRAP);
+ vg_tid_currently_in_baseBlock = 0;
+ break;
+ }
Finally, I still don't understand what the end result is intended to be
with these proposed changes -- do you start GDB, then load your program
under a Valgrind tool, then run it? Or something else?
N
|
|
From: Julian S. <js...@ac...> - 2004-09-03 08:28:54
|
> > Now that 2.2.0 is out is there any chance of getting the > > first of my debugging support patches into CVS? > > Should I treat the silence as an implicit 'no'? Not necessarily. What you should take it as is there are a bunch of other large-scale structural changes to the software which we're dealing with right now, so thinking about your patch in detail is going to be delayed by that. Your best bet is to shout again in a month or so, and see what happens then. J |
|
From: Tom H. <th...@cy...> - 2004-09-03 08:00:17
|
In message <013a01c4918b$ed40dff0$0207a8c0@avocado>
Chris January <ch...@at...> wrote:
>> Now that 2.2.0 is out is there any chance of getting the
>> first of my debugging support patches into CVS?
>
> Should I treat the silence as an implicit 'no'?
I don't have a problem with it.
Tom
--
Tom Hughes (th...@cy...)
Software Engineer, Cyberscience Corporation
http://www.cyberscience.com/
|
|
From: Chris J. <ch...@at...> - 2004-09-03 07:54:00
|
> Now that 2.2.0 is out is there any chance of getting the > first of my debugging support patches into CVS? Should I treat the silence as an implicit 'no'? Chris |
|
From: Tom H. <th...@cy...> - 2004-09-03 06:15:22
|
In message <109...@dr...>
Robert Walsh <rj...@du...> wrote:
> Do we have a good idea why there are regression test failures on certain
> platforms?
There's no single cause, equally none of the outstanding failures
is very easy to fix - the problems are generally in the tests rather
than in valgrind.
Tom
--
Tom Hughes (th...@cy...)
Software Engineer, Cyberscience Corporation
http://www.cyberscience.com/
|
|
From: Tom H. <th...@cy...> - 2004-09-03 03:14:30
|
Nightly build on standard ( Red Hat 7.2 ) started at 2004-09-03 02:00:02 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow map_unmap: valgrind ./map_unmap mq: valgrind ./mq mremap: valgrind ./mremap munmap_exe: valgrind ./munmap_exe pth_blockedsig: valgrind ./pth_blockedsig pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert rcrl: valgrind ./rcrl readline1: valgrind ./readline1 resolv: valgrind ./resolv rlimit_nofile: valgrind ./rlimit_nofile seg_override: valgrind ./seg_override sem: valgrind ./sem semlimit: valgrind ./semlimit sha1_test: valgrind ./sha1_test shortpush: valgrind ./shortpush shorts: valgrind ./shorts smc1: valgrind ./smc1 Could not read `smc1.stderr.exp' make: *** [regtest] Error 2 |
|
From: <js...@ac...> - 2004-09-03 02:55:33
|
Nightly build on phoenix ( SuSE 9.1 ) started at 2004-09-03 03:50:00 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow sem: valgrind ./sem semlimit: valgrind ./semlimit sha1_test: valgrind ./sha1_test shortpush: valgrind ./shortpush shorts: valgrind ./shorts smc1: valgrind ./smc1 susphello: valgrind ./susphello syscall-restart1: valgrind ./syscall-restart1 syscall-restart2: valgrind ./syscall-restart2 system: valgrind ./system yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 174 tests, 4 stderr failures, 0 stdout failures ================= corecheck/tests/as_mmap (stderr) corecheck/tests/fdleak_fcntl (stderr) memcheck/tests/writev (stderr) memcheck/tests/zeropage (stderr) make: *** [regtest] Error 1 |
|
From: Robert W. <rj...@du...> - 2004-09-03 02:36:21
|
Hi all, Do we have a good idea why there are regression test failures on certain platforms? Regards, Robert. --=20 Robert Walsh Amalgamated Durables, Inc. - "We don't make the things you buy." Email: rj...@du... |
|
From: Tom H. <to...@co...> - 2004-09-03 02:26:20
|
Nightly build on dunsmere ( Fedora Core 2 ) started at 2004-09-03 03:20:04 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow smc1: valgrind ./smc1 susphello: valgrind ./susphello syscall-restart1: valgrind ./syscall-restart1 syscall-restart2: valgrind ./syscall-restart2 system: valgrind ./system yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 179 tests, 8 stderr failures, 1 stdout failure ================= corecheck/tests/fdleak_cmsg (stderr) corecheck/tests/fdleak_fcntl (stderr) corecheck/tests/fdleak_ipv4 (stderr) corecheck/tests/fdleak_socketpair (stderr) memcheck/tests/buflen_check (stderr) memcheck/tests/execve (stderr) memcheck/tests/execve2 (stderr) memcheck/tests/writev (stderr) none/tests/exec-sigmask (stdout) make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2004-09-03 02:20:26
|
Nightly build on audi ( Red Hat 9 ) started at 2004-09-03 03:15:02 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow shorts: valgrind ./shorts smc1: valgrind ./smc1 susphello: valgrind ./susphello syscall-restart1: valgrind ./syscall-restart1 syscall-restart2: valgrind ./syscall-restart2 system: valgrind ./system yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 179 tests, 8 stderr failures, 0 stdout failures ================= corecheck/tests/fdleak_cmsg (stderr) corecheck/tests/fdleak_fcntl (stderr) corecheck/tests/fdleak_ipv4 (stderr) corecheck/tests/fdleak_socketpair (stderr) memcheck/tests/buflen_check (stderr) memcheck/tests/execve (stderr) memcheck/tests/execve2 (stderr) memcheck/tests/writev (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2004-09-03 02:13:22
|
Nightly build on ginetta ( Red Hat 8.0 ) started at 2004-09-03 03:10:02 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow seg_override: valgrind ./seg_override sem: valgrind ./sem semlimit: valgrind ./semlimit sha1_test: valgrind ./sha1_test shortpush: valgrind ./shortpush shorts: valgrind ./shorts smc1: valgrind ./smc1 susphello: valgrind ./susphello syscall-restart1: valgrind ./syscall-restart1 syscall-restart2: valgrind ./syscall-restart2 system: valgrind ./system yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 179 tests, 3 stderr failures, 0 stdout failures ================= helgrind/tests/race (stderr) helgrind/tests/race2 (stderr) memcheck/tests/writev (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2004-09-03 02:08:21
|
Nightly build on alvis ( Red Hat 7.3 ) started at 2004-09-03 03:05:03 BST Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow -- Finished tests in none/tests ---------------------------------------- == 179 tests, 14 stderr failures, 1 stdout failure ================= addrcheck/tests/toobig-allocs (stderr) helgrind/tests/deadlock (stderr) helgrind/tests/race (stderr) helgrind/tests/race2 (stderr) memcheck/tests/badjump (stderr) memcheck/tests/brk (stderr) memcheck/tests/brk2 (stderr) memcheck/tests/error_counts (stdout) memcheck/tests/mismatches (stderr) memcheck/tests/new_nothrow (stderr) memcheck/tests/new_override (stderr) memcheck/tests/toobig-allocs (stderr) memcheck/tests/writev (stderr) none/tests/coolo_sigaction (stderr) none/tests/gxx304 (stderr) make: *** [regtest] Error 1 |