You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
|
|
1
|
2
(13) |
3
(29) |
|
4
(18) |
5
(12) |
6
(12) |
7
(22) |
8
(9) |
9
(14) |
10
(6) |
|
11
|
12
|
13
(1) |
14
(5) |
15
(11) |
16
(7) |
17
(5) |
|
18
(1) |
19
(8) |
20
(7) |
21
(12) |
22
(5) |
23
(17) |
24
(6) |
|
25
(27) |
26
(17) |
27
(2) |
28
(10) |
29
(3) |
30
(8) |
31
(20) |
|
From: Nicholas N. <nj...@ca...> - 2004-01-26 17:25:15
|
CVS commit by nethercote:
"VG_AR_SKIN" --> "VG_AR_TOOL"
M +2 -2 vg_include.h 1.178
M +6 -6 vg_malloc2.c 1.19
M +1 -1 vg_mylibc.c 1.69
M +2 -2 vg_skiplist.c 1.3
--- valgrind/coregrind/vg_include.h #1.177:1.178
@@ -359,5 +359,5 @@ void VG_(sanity_check_needs)(void);
CORE for the core's general use.
- SKIN for the skin to use (and the only one it uses).
+ TOOL for the tool to use (and the only one it uses).
SYMTAB for Valgrind's symbol table storage.
JITTER for small storage during translation.
@@ -376,5 +376,5 @@ typedef Int ArenaId;
#define VG_AR_CORE 0
-#define VG_AR_SKIN 1
+#define VG_AR_TOOL 1
#define VG_AR_SYMTAB 2
#define VG_AR_JITTER 3
--- valgrind/coregrind/vg_malloc2.c #1.18:1.19
@@ -262,5 +262,5 @@ void ensure_mm_init ( void )
arena_init ( &vg_arena[VG_AR_CORE], "core", 1, True, 262144, False );
- arena_init ( &vg_arena[VG_AR_SKIN], "tool", 1, True, 262144, False );
+ arena_init ( &vg_arena[VG_AR_TOOL], "tool", 1, True, 262144, False );
arena_init ( &vg_arena[VG_AR_SYMTAB], "symtab", 1, True, 262144, False );
@@ -1363,25 +1363,25 @@ void* VG_(arena_realloc) ( ArenaId aid,
void* VG_(malloc) ( Int nbytes )
{
- return VG_(arena_malloc) ( VG_AR_SKIN, nbytes );
+ return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}
void VG_(free) ( void* ptr )
{
- VG_(arena_free) ( VG_AR_SKIN, ptr );
+ VG_(arena_free) ( VG_AR_TOOL, ptr );
}
void* VG_(calloc) ( Int nmemb, Int nbytes )
{
- return VG_(arena_calloc) ( VG_AR_SKIN, /*alignment*/4, nmemb, nbytes );
+ return VG_(arena_calloc) ( VG_AR_TOOL, /*alignment*/4, nmemb, nbytes );
}
void* VG_(realloc) ( void* ptr, Int size )
{
- return VG_(arena_realloc) ( VG_AR_SKIN, ptr, /*alignment*/4, size );
+ return VG_(arena_realloc) ( VG_AR_TOOL, ptr, /*alignment*/4, size );
}
void* VG_(malloc_aligned) ( Int req_alignB, Int req_pszB )
{
- return VG_(arena_malloc_aligned) ( VG_AR_SKIN, req_alignB, req_pszB );
+ return VG_(arena_malloc_aligned) ( VG_AR_TOOL, req_alignB, req_pszB );
}
--- valgrind/coregrind/vg_mylibc.c #1.68:1.69
@@ -1038,5 +1038,5 @@ __inline__ Char* VG_(arena_strdup) ( Are
Char* VG_(strdup) ( const Char* s )
{
- return VG_(arena_strdup) ( VG_AR_SKIN, s );
+ return VG_(arena_strdup) ( VG_AR_TOOL, s );
}
--- valgrind/coregrind/vg_skiplist.c #1.2:1.3
@@ -144,5 +144,5 @@ void *VG_(SkipNode_Alloc)(const SkipList
if (l->arena == -1)
- *(Short *)&l->arena = VG_AR_SKIN;
+ *(Short *)&l->arena = VG_AR_TOOL;
ret = VG_(arena_malloc)(l->arena, size);
@@ -289,5 +289,5 @@ void VG_(SkipList_Insert)(SkipList *l, v
if (l->arena == -1)
- *(Short *)&l->arena = VG_AR_SKIN;
+ *(Short *)&l->arena = VG_AR_TOOL;
l->head = VG_(arena_malloc)(l->arena, size);
|
|
From: Nicholas N. <nj...@ca...> - 2004-01-26 17:14:55
|
CVS commit by nethercote:
Made code more concise. In particular:
- introduced DIS() and DIP() macros to shorten debug printing
- introduce jmp_lit(), jcc_lit(), jmp_treg() for common UCode sequences
- replace many unnecessary dis?dis_buf:NULL tests with dis_buf, by
changing the tests in disAMode()
Overall, reduced code size by about 230 lines.
M +505 -744 vg_to_ucode.c 1.127
--- valgrind/coregrind/vg_to_ucode.c #1.126:1.127
@@ -34,9 +34,14 @@
/*------------------------------------------------------------*/
-/*--- Renamings of frequently-used global functions. ---*/
+/*--- Debugging output ---*/
/*------------------------------------------------------------*/
-#define dis VG_(print_codegen)
+#define DIP(format, args...) \
+ if (VG_(print_codegen)) \
+ VG_(printf)(format, ## args)
+#define DIS(buf, format, args...) \
+ if (VG_(print_codegen)) \
+ VG_(sprintf)(buf, format, ## args)
/*------------------------------------------------------------*/
@@ -389,4 +393,32 @@ void VG_(set_cond_field) ( UCodeBlock* c
}
+/*------------------------------------------------------------*/
+/*--- JMP helpers ---*/
+/*------------------------------------------------------------*/
+
+static __inline__
+void jmp_lit( UCodeBlock* cb, Addr d32 )
+{
+ uInstr1 (cb, JMP, 0, Literal, 0);
+ uLiteral(cb, d32);
+ uCond (cb, CondAlways);
+}
+
+static __inline__
+void jmp_treg( UCodeBlock* cb, Int t )
+{
+ uInstr1 (cb, JMP, 0, TempReg, t);
+ uCond (cb, CondAlways);
+}
+
+static __inline__
+void jcc_lit( UCodeBlock* cb, Addr d32, Condcode cond )
+{
+ uInstr1 (cb, JMP, 0, Literal, 0);
+ uLiteral (cb, d32);
+ uCond (cb, cond);
+ uFlagsRWU(cb, FlagsOSZACP, FlagsEmpty, FlagsEmpty);
+}
+
/*------------------------------------------------------------*/
@@ -443,6 +475,6 @@ void handleSegOverride ( UCodeBlock* cb,
returned, as a pair (length << 24) | temp. Note that this fn should
not be called if the R/M part of the address denotes a register
- instead of memory. If buf is non-NULL, text of the addressing mode
- is placed therein. */
+ instead of memory. If VG_(print_codegen) is true, text of the addressing
+ mode is placed therein. */
static
@@ -469,6 +501,5 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uInstr2(cb, GET, 4, ArchReg, rm, TempReg, tmp);
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s(%s)", sorbTxt(sorb),
- nameIReg(4,rm));
+ DIS(buf, "%s(%s)", sorbTxt(sorb), nameIReg(4,rm));
return (1<<24 | tmp);
}
@@ -484,8 +515,7 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uInstr2(cb, GET, 4, ArchReg, rm, TempReg, tmq);
uInstr2(cb, LEA1, 4, TempReg, tmq, TempReg, tmp);
- LAST_UINSTR(cb).lit32 = d;
+ uLiteral(cb, d);
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s%d(%s)", sorbTxt(sorb),
- d, nameIReg(4,rm));
+ DIS(buf, "%s%d(%s)", sorbTxt(sorb), d, nameIReg(4,rm));
return (2<<24 | tmp);
}
@@ -501,8 +531,7 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uInstr2(cb, GET, 4, ArchReg, rm, TempReg, tmq);
uInstr2(cb, LEA1, 4, TempReg, tmq, TempReg, tmp);
- LAST_UINSTR(cb).lit32 = d;
+ uLiteral(cb, d);
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s0x%x(%s)", sorbTxt(sorb),
- d, nameIReg(4,rm));
+ DIS(buf, "%s0x%x(%s)", sorbTxt(sorb), d, nameIReg(4,rm));
return (5<<24 | tmp);
}
@@ -521,5 +550,5 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uLiteral(cb, d);
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s(0x%x)", sorbTxt(sorb), d);
+ DIS(buf, "%s(0x%x)", sorbTxt(sorb), d);
return (5<<24 | tmp);
}
@@ -557,10 +586,9 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uInstr3(cb, LEA2, 4, TempReg, base_tmp, TempReg, index_tmp,
TempReg, tmp);
- LAST_UINSTR(cb).lit32 = 0;
+ uLiteral(cb, 0);
LAST_UINSTR(cb).extra4b = 1 << scale;
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s(%s,%s,%d)", sorbTxt(sorb),
- nameIReg(4,base_r),
- nameIReg(4,index_r),1<<scale);
+ DIS(buf, "%s(%s,%s,%d)", sorbTxt(sorb),
+ nameIReg(4,base_r), nameIReg(4,index_r), 1<<scale);
return (2<<24 | tmp);
}
@@ -574,9 +602,9 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uInstr3(cb, LEA2, 4, TempReg, tmp, TempReg, index_tmp,
TempReg, tmp);
- LAST_UINSTR(cb).lit32 = d;
+ uLiteral(cb, d);
LAST_UINSTR(cb).extra4b = 1 << scale;
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s0x%x(,%s,%d)", sorbTxt(sorb), d,
- nameIReg(4,index_r),1<<scale);
+ DIS(buf, "%s0x%x(,%s,%d)", sorbTxt(sorb), d,
+ nameIReg(4,index_r), 1<<scale);
return (6<<24 | tmp);
}
@@ -585,6 +613,5 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uInstr2(cb, GET, 4, ArchReg, base_r, TempReg, tmp);
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s(%s,,)",
- sorbTxt(sorb), nameIReg(4,base_r));
+ DIS(buf, "%s(%s,,)", sorbTxt(sorb), nameIReg(4,base_r));
return (2<<24 | tmp);
}
@@ -595,5 +622,5 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uLiteral(cb, d);
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s0x%x()", sorbTxt(sorb), d);
+ DIS(buf, "%s0x%x()", sorbTxt(sorb), d);
return (6<<24 | tmp);
}
@@ -622,8 +649,7 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uInstr2(cb, GET, 4, ArchReg, base_r, TempReg, tmq);
uInstr2(cb, LEA1, 4, TempReg, tmq, TempReg, tmp);
- LAST_UINSTR(cb).lit32 = d;
+ uLiteral(cb, d);
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s%d(%s,,)", sorbTxt(sorb),
- d, nameIReg(4,base_r));
+ DIS(buf, "%s%d(%s,,)", sorbTxt(sorb), d, nameIReg(4,base_r));
return (3<<24 | tmp);
} else {
@@ -634,10 +660,9 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uInstr3(cb, LEA2, 4, TempReg, base_tmp, TempReg, index_tmp,
TempReg, tmp);
- LAST_UINSTR(cb).lit32 = d;
+ uLiteral(cb, d);
LAST_UINSTR(cb).extra4b = 1 << scale;
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s%d(%s,%s,%d)",
- sorbTxt(sorb), d, nameIReg(4,base_r),
- nameIReg(4,index_r), 1<<scale);
+ DIS(buf, "%s%d(%s,%s,%d)", sorbTxt(sorb), d,
+ nameIReg(4,base_r), nameIReg(4,index_r), 1<<scale);
return (3<<24 | tmp);
}
@@ -665,8 +690,7 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uInstr2(cb, GET, 4, ArchReg, base_r, TempReg, tmq);
uInstr2(cb, LEA1, 4, TempReg, tmq, TempReg, tmp);
- LAST_UINSTR(cb).lit32 = d;
+ uLiteral(cb, d);
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s%d(%s,,)",
- sorbTxt(sorb), d, nameIReg(4,base_r));
+ DIS(buf, "%s%d(%s,,)", sorbTxt(sorb), d, nameIReg(4,base_r));
return (6<<24 | tmp);
} else {
@@ -677,10 +701,9 @@ UInt disAMode ( UCodeBlock* cb, UChar so
uInstr3(cb, LEA2, 4, TempReg, base_tmp, TempReg, index_tmp,
TempReg, tmp);
- LAST_UINSTR(cb).lit32 = d;
+ uLiteral(cb, d);
LAST_UINSTR(cb).extra4b = 1 << scale;
handleSegOverride(cb, sorb, tmp);
- if (buf) VG_(sprintf)(buf,"%s%d(%s,%s,%d)",
- sorbTxt(sorb), d, nameIReg(4,base_r),
- nameIReg(4,index_r), 1<<scale);
+ DIS(buf, "%s%d(%s,%s,%d)", sorbTxt(sorb), d,
+ nameIReg(4,base_r), nameIReg(4,index_r), 1<<scale);
return (6<<24 | tmp);
}
@@ -783,6 +806,5 @@ void codegen_XOR_reg_with_itself ( UCode
Int ge_reg, Int tmp )
{
- if (dis)
- VG_(printf)("xor%c %s, %s\n", nameISize(size),
+ DIP("xor%c %s, %s\n", nameISize(size),
nameIReg(size,ge_reg), nameIReg(size,ge_reg) );
uInstr2(cb, MOV, size, Literal, 0, TempReg, tmp);
@@ -857,5 +879,5 @@ Addr dis_op2_E_G ( UCodeBlock* cb,
if (keep)
uInstr2(cb, PUT, size, TempReg, tmp, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("%s%c %s,%s\n", t_x86opc, nameISize(size),
+ DIP("%s%c %s,%s\n", t_x86opc, nameISize(size),
nameIReg(size,eregOfRM(rm)),
nameIReg(size,gregOfRM(rm)));
@@ -868,5 +890,5 @@ Addr dis_op2_E_G ( UCodeBlock* cb,
? True : False;
if (reversible) {
- UInt pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip0, dis_buf);
Int tmpa = LOW24(pair);
uInstr2(cb, LOAD, size, TempReg, tmpa, TempReg, tmpa);
@@ -883,9 +905,9 @@ Addr dis_op2_E_G ( UCodeBlock* cb,
if (keep)
uInstr2(cb, PUT, size, TempReg, tmpa, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("%s%c %s,%s\n", t_x86opc, nameISize(size),
+ DIP("%s%c %s,%s\n", t_x86opc, nameISize(size),
dis_buf,nameIReg(size,gregOfRM(rm)));
return HI8(pair)+eip0;
} else {
- UInt pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip0, dis_buf);
Int tmpa = LOW24(pair);
Int tmp2 = newTemp(cb);
@@ -896,5 +918,5 @@ Addr dis_op2_E_G ( UCodeBlock* cb,
if (keep)
uInstr2(cb, PUT, size, TempReg, tmp2, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("%s%c %s,%s\n", t_x86opc, nameISize(size),
+ DIP("%s%c %s,%s\n", t_x86opc, nameISize(size),
dis_buf,nameIReg(size,gregOfRM(rm)));
return HI8(pair)+eip0;
@@ -960,5 +982,5 @@ Addr dis_op2_G_E ( UCodeBlock* cb,
if (keep)
uInstr2(cb, PUT, size, TempReg, tmp, ArchReg, eregOfRM(rm));
- if (dis) VG_(printf)("%s%c %s,%s\n", t_x86opc, nameISize(size),
+ DIP("%s%c %s,%s\n", t_x86opc, nameISize(size),
nameIReg(size,gregOfRM(rm)),
nameIReg(size,eregOfRM(rm)));
@@ -968,5 +990,5 @@ Addr dis_op2_G_E ( UCodeBlock* cb,
/* E refers to memory */
{
- UInt pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip0, dis_buf);
Int tmpa = LOW24(pair);
Int tmpv = newTemp(cb);
@@ -985,5 +1007,5 @@ Addr dis_op2_G_E ( UCodeBlock* cb,
uInstr2(cb, STORE, size, TempReg, tmpv, TempReg, tmpa);
}
- if (dis) VG_(printf)("%s%c %s,%s\n", t_x86opc, nameISize(size),
+ DIP("%s%c %s,%s\n", t_x86opc, nameISize(size),
nameIReg(size,gregOfRM(rm)), dis_buf);
return HI8(pair)+eip0;
@@ -1021,5 +1043,5 @@ Addr dis_mov_E_G ( UCodeBlock* cb,
uInstr2(cb, GET, size, ArchReg, eregOfRM(rm), TempReg, tmpv);
uInstr2(cb, PUT, size, TempReg, tmpv, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("mov%c %s,%s\n", nameISize(size),
+ DIP("mov%c %s,%s\n", nameISize(size),
nameIReg(size,eregOfRM(rm)),
nameIReg(size,gregOfRM(rm)));
@@ -1029,10 +1051,10 @@ Addr dis_mov_E_G ( UCodeBlock* cb,
/* E refers to memory */
{
- UInt pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip0, dis_buf);
Int tmpa = LOW24(pair);
Int tmpb = newTemp(cb);
uInstr2(cb, LOAD, size, TempReg, tmpa, TempReg, tmpb);
uInstr2(cb, PUT, size, TempReg, tmpb, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("mov%c %s,%s\n", nameISize(size),
+ DIP("mov%c %s,%s\n", nameISize(size),
dis_buf,nameIReg(size,gregOfRM(rm)));
return HI8(pair)+eip0;
@@ -1070,5 +1092,5 @@ Addr dis_mov_G_E ( UCodeBlock* cb,
uInstr2(cb, GET, size, ArchReg, gregOfRM(rm), TempReg, tmpv);
uInstr2(cb, PUT, size, TempReg, tmpv, ArchReg, eregOfRM(rm));
- if (dis) VG_(printf)("mov%c %s,%s\n", nameISize(size),
+ DIP("mov%c %s,%s\n", nameISize(size),
nameIReg(size,gregOfRM(rm)),
nameIReg(size,eregOfRM(rm)));
@@ -1078,10 +1100,10 @@ Addr dis_mov_G_E ( UCodeBlock* cb,
/* E refers to memory */
{
- UInt pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip0, dis_buf);
Int tmpa = LOW24(pair);
Int tmpv = newTemp(cb);
uInstr2(cb, GET, size, ArchReg, gregOfRM(rm), TempReg, tmpv);
uInstr2(cb, STORE, size, TempReg, tmpv, TempReg, tmpa);
- if (dis) VG_(printf)("mov%c %s,%s\n", nameISize(size),
+ DIP("mov%c %s,%s\n", nameISize(size),
nameIReg(size,gregOfRM(rm)), dis_buf);
return HI8(pair)+eip0;
@@ -1115,5 +1137,5 @@ Addr dis_op_imm_A ( UCodeBlock* cb,
if (keep)
uInstr2(cb, PUT, size, TempReg, tmp, ArchReg, R_EAX);
- if (dis) VG_(printf)("%s%c $0x%x, %s\n", t_x86opc, nameISize(size),
+ DIP("%s%c $0x%x, %s\n", t_x86opc, nameISize(size),
lit, nameIReg(size,R_EAX));
return eip+size;
@@ -1136,6 +1158,5 @@ Addr dis_movx_E_G ( UCodeBlock* cb,
LAST_UINSTR(cb).signed_widen = sign_extend;
uInstr2(cb, PUT, szd, TempReg, tmpv, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("mov%c%c%c %s,%s\n",
- sign_extend ? 's' : 'z',
+ DIP("mov%c%c%c %s,%s\n", sign_extend ? 's' : 'z',
nameISize(szs), nameISize(szd),
nameIReg(szs,eregOfRM(rm)),
@@ -1146,5 +1167,5 @@ Addr dis_movx_E_G ( UCodeBlock* cb,
/* E refers to memory */
{
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf);
Int tmpa = LOW24(pair);
uInstr2(cb, LOAD, szs, TempReg, tmpa, TempReg, tmpa);
@@ -1153,9 +1174,7 @@ Addr dis_movx_E_G ( UCodeBlock* cb,
LAST_UINSTR(cb).signed_widen = sign_extend;
uInstr2(cb, PUT, szd, TempReg, tmpa, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("mov%c%c%c %s,%s\n",
- sign_extend ? 's' : 'z',
+ DIP("mov%c%c%c %s,%s\n", sign_extend ? 's' : 'z',
nameISize(szs), nameISize(szd),
- dis_buf,
- nameIReg(szd,gregOfRM(rm)));
+ dis_buf, nameIReg(szd,gregOfRM(rm)));
return HI8(pair)+eip;
}
@@ -1251,10 +1270,8 @@ Addr dis_Grp1 ( UCodeBlock* cb,
uInstr2(cb, PUT, sz, TempReg, t1, ArchReg, eregOfRM(modrm));
eip += (am_sz + d_sz);
- if (dis)
- VG_(printf)("%s%c $0x%x, %s\n",
- nameGrp1(gregOfRM(modrm)), nameISize(sz), d32,
+ DIP("%s%c $0x%x, %s\n", nameGrp1(gregOfRM(modrm)), nameISize(sz), d32,
nameIReg(sz,eregOfRM(modrm)));
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL);
+ pair = disAMode ( cb, sorb, eip, dis_buf);
t1 = LOW24(pair);
t2 = newTemp(cb);
@@ -1283,8 +1300,6 @@ Addr dis_Grp1 ( UCodeBlock* cb,
uInstr2(cb, STORE, sz, TempReg, t2, TempReg, t1);
}
- if (dis)
- VG_(printf)("%s%c $0x%x, %s\n",
- nameGrp1(gregOfRM(modrm)), nameISize(sz), d32,
- dis_buf);
+ DIP("%s%c $0x%x, %s\n", nameGrp1(gregOfRM(modrm)), nameISize(sz),
+ d32, dis_buf);
}
return eip;
@@ -1338,5 +1353,5 @@ Addr dis_Grp2 ( UCodeBlock* cb,
uInstr2(cb, PUT, sz, TempReg, t1, ArchReg, eregOfRM(modrm));
eip += (am_sz + d_sz);
- if (dis) {
+ if (VG_(print_codegen)) {
if (orig_src_tag == Literal)
VG_(printf)("%s%c $0x%x, %s\n",
@@ -1350,5 +1365,5 @@ Addr dis_Grp2 ( UCodeBlock* cb,
}
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL);
+ pair = disAMode ( cb, sorb, eip, dis_buf);
t1 = LOW24(pair);
t2 = newTemp(cb);
@@ -1371,5 +1386,5 @@ Addr dis_Grp2 ( UCodeBlock* cb,
setFlagsFromUOpcode(cb, uopc);
uInstr2(cb, STORE, sz, TempReg, t2, TempReg, t1);
- if (dis) {
+ if (VG_(print_codegen)) {
if (orig_src_tag == Literal)
VG_(printf)("%s%c $0x%x, %s\n",
@@ -1424,5 +1439,5 @@ Addr dis_Grp8_BT ( UCodeBlock* cb,
Int t1, t2, t_fetched, t_mask;
UInt pair;
- UChar dis_buf[50];
+ Char dis_buf[50];
UInt v_mask;
@@ -1473,11 +1488,8 @@ Addr dis_Grp8_BT ( UCodeBlock* cb,
eip += (am_sz + 1);
- if (dis)
- VG_(printf)("%s%c $0x%x, %s\n",
- nameGrp8(gregOfRM(modrm)), nameISize(sz),
- src_val,
- nameIReg(sz,eregOfRM(modrm)));
+ DIP("%s%c $0x%x, %s\n", nameGrp8(gregOfRM(modrm)), nameISize(sz),
+ src_val, nameIReg(sz,eregOfRM(modrm)));
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL);
+ pair = disAMode ( cb, sorb, eip, dis_buf);
t1 = LOW24(pair);
t2 = newTemp(cb);
@@ -1493,8 +1505,6 @@ Addr dis_Grp8_BT ( UCodeBlock* cb,
uInstr2(cb, STORE, sz, TempReg, t2, TempReg, t1);
}
- if (dis)
- VG_(printf)("%s%c $0x%x, %s\n",
- nameGrp8(gregOfRM(modrm)), nameISize(sz), src_val,
- dis_buf);
+ DIP("%s%c $0x%x, %s\n", nameGrp8(gregOfRM(modrm)), nameISize(sz),
+ src_val, dis_buf);
}
return eip;
@@ -1540,5 +1550,5 @@ static void codegen_mul_A_D_Reg ( UCodeB
}
uInstr0(cb, CALLM_E, 0);
- if (dis) VG_(printf)("%s%c %s\n", signed_multiply ? "imul" : "mul",
+ DIP("%s%c %s\n", signed_multiply ? "imul" : "mul",
nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
@@ -1579,5 +1589,5 @@ static void codegen_mul_A_D_Temp ( UCode
}
uInstr0(cb, CALLM_E, 0);
- if (dis) VG_(printf)("%s%c %s\n", signed_multiply ? "imul" : "mul",
+ DIP("%s%c %s\n", signed_multiply ? "imul" : "mul",
nameISize(sz), dis_buf);
}
@@ -1607,6 +1617,5 @@ Addr dis_Grp3 ( UCodeBlock* cb,
uInstr2(cb, AND, sz, TempReg, tao, TempReg, t1);
setFlagsFromUOpcode(cb, AND);
- if (dis)
- VG_(printf)("test%c $0x%x, %s\n",
+ DIP("test%c $0x%x, %s\n",
nameISize(sz), d32, nameIReg(sz, eregOfRM(modrm)));
break;
@@ -1618,7 +1627,5 @@ Addr dis_Grp3 ( UCodeBlock* cb,
setFlagsFromUOpcode(cb, NOT);
uInstr2(cb, PUT, sz, TempReg, t1, ArchReg, eregOfRM(modrm));
- if (dis)
- VG_(printf)("not%c %s\n",
- nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
+ DIP("not%c %s\n", nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
break;
case 3: /* NEG */
@@ -1628,7 +1635,5 @@ Addr dis_Grp3 ( UCodeBlock* cb,
setFlagsFromUOpcode(cb, NEG);
uInstr2(cb, PUT, sz, TempReg, t1, ArchReg, eregOfRM(modrm));
- if (dis)
- VG_(printf)("neg%c %s\n",
- nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
+ DIP("neg%c %s\n", nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
break;
case 4: /* MUL */
@@ -1644,7 +1649,5 @@ Addr dis_Grp3 ( UCodeBlock* cb,
uInstr2(cb, GET, sz, ArchReg, eregOfRM(modrm), TempReg, t1);
codegen_div ( cb, sz, t1, False );
- if (dis)
- VG_(printf)("div%c %s\n", nameISize(sz),
- nameIReg(sz, eregOfRM(modrm)));
+ DIP("div%c %s\n", nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
break;
case 7: /* IDIV */
@@ -1652,7 +1655,5 @@ Addr dis_Grp3 ( UCodeBlock* cb,
uInstr2(cb, GET, sz, ArchReg, eregOfRM(modrm), TempReg, t1);
codegen_div ( cb, sz, t1, True );
- if (dis)
- VG_(printf)("idiv%c %s\n", nameISize(sz),
- nameIReg(sz, eregOfRM(modrm)));
+ DIP("idiv%c %s\n", nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
break;
default:
@@ -1662,5 +1663,5 @@ Addr dis_Grp3 ( UCodeBlock* cb,
}
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip, dis_buf );
t2 = LOW24(pair);
t1 = newTemp(cb);
@@ -1675,7 +1676,5 @@ Addr dis_Grp3 ( UCodeBlock* cb,
uInstr2(cb, AND, sz, TempReg, tao, TempReg, t1);
setFlagsFromUOpcode(cb, AND);
- if (dis)
- VG_(printf)("test%c $0x%x, %s\n",
- nameISize(sz), d32, dis_buf);
+ DIP("test%c $0x%x, %s\n", nameISize(sz), d32, dis_buf);
break;
}
@@ -1684,6 +1683,5 @@ Addr dis_Grp3 ( UCodeBlock* cb,
setFlagsFromUOpcode(cb, NOT);
uInstr2(cb, STORE, sz, TempReg, t1, TempReg, t2);
- if (dis)
- VG_(printf)("not%c %s\n", nameISize(sz), dis_buf);
+ DIP("not%c %s\n", nameISize(sz), dis_buf);
break;
case 3: /* NEG */
@@ -1691,23 +1689,20 @@ Addr dis_Grp3 ( UCodeBlock* cb,
setFlagsFromUOpcode(cb, NEG);
uInstr2(cb, STORE, sz, TempReg, t1, TempReg, t2);
- if (dis)
- VG_(printf)("neg%c %s\n", nameISize(sz), dis_buf);
+ DIP("neg%c %s\n", nameISize(sz), dis_buf);
break;
case 4: /* MUL */
codegen_mul_A_D_Temp ( cb, sz, t1, False,
- dis?dis_buf:NULL );
+ dis_buf );
break;
case 5: /* IMUL */
- codegen_mul_A_D_Temp ( cb, sz, t1, True, dis?dis_buf:NULL );
+ codegen_mul_A_D_Temp ( cb, sz, t1, True, dis_buf );
break;
case 6: /* DIV */
codegen_div ( cb, sz, t1, False );
- if (dis)
- VG_(printf)("div%c %s\n", nameISize(sz), dis_buf);
+ DIP("div%c %s\n", nameISize(sz), dis_buf);
break;
case 7: /* IDIV */
codegen_div ( cb, sz, t1, True );
- if (dis)
- VG_(printf)("idiv%c %s\n", nameISize(sz), dis_buf);
+ DIP("idiv%c %s\n", nameISize(sz), dis_buf);
break;
default:
@@ -1754,9 +1749,8 @@ Addr dis_Grp4 ( UCodeBlock* cb,
}
eip++;
- if (dis)
- VG_(printf)("%sb %s\n", nameGrp4(gregOfRM(modrm)),
+ DIP("%sb %s\n", nameGrp4(gregOfRM(modrm)),
nameIReg(1, eregOfRM(modrm)));
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip, dis_buf );
t2 = LOW24(pair);
t1 = newTemp(cb);
@@ -1779,6 +1773,5 @@ Addr dis_Grp4 ( UCodeBlock* cb,
}
eip += HI8(pair);
- if (dis)
- VG_(printf)("%sb %s\n", nameGrp4(gregOfRM(modrm)), dis_buf);
+ DIP("%sb %s\n", nameGrp4(gregOfRM(modrm)), dis_buf);
}
return eip;
@@ -1822,12 +1815,10 @@ Addr dis_Grp5 ( UCodeBlock* cb,
uLiteral(cb, eip+1);
uInstr2(cb, STORE, 4, TempReg, t4, TempReg, t3);
- uInstr1(cb, JMP, 0, TempReg, t1);
- uCond(cb, CondAlways);
+ jmp_treg(cb, t1);
LAST_UINSTR(cb).jmpkind = JmpCall;
*isEnd = True;
break;
case 4: /* jmp Ev */
- uInstr1(cb, JMP, 0, TempReg, t1);
- uCond(cb, CondAlways);
+ jmp_treg(cb, t1);
*isEnd = True;
break;
@@ -1838,9 +1829,8 @@ Addr dis_Grp5 ( UCodeBlock* cb,
}
eip++;
- if (dis)
- VG_(printf)("%s%c %s\n", nameGrp5(gregOfRM(modrm)),
+ DIP("%s%c %s\n", nameGrp5(gregOfRM(modrm)),
nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip, dis_buf );
t2 = LOW24(pair);
t1 = newTemp(cb);
@@ -1866,12 +1856,10 @@ Addr dis_Grp5 ( UCodeBlock* cb,
uLiteral(cb, eip+HI8(pair));
uInstr2(cb, STORE, 4, TempReg, t4, TempReg, t3);
- uInstr1(cb, JMP, 0, TempReg, t1);
- uCond(cb, CondAlways);
+ jmp_treg(cb, t1);
LAST_UINSTR(cb).jmpkind = JmpCall;
*isEnd = True;
break;
case 4: /* JMP Ev */
- uInstr1(cb, JMP, 0, TempReg, t1);
- uCond(cb, CondAlways);
+ jmp_treg(cb, t1);
*isEnd = True;
break;
@@ -1890,6 +1878,5 @@ Addr dis_Grp5 ( UCodeBlock* cb,
}
eip += HI8(pair);
- if (dis)
- VG_(printf)("%s%c %s\n", nameGrp5(gregOfRM(modrm)),
+ DIP("%s%c %s\n", nameGrp5(gregOfRM(modrm)),
nameISize(sz), dis_buf);
}
@@ -1897,12 +1884,4 @@ Addr dis_Grp5 ( UCodeBlock* cb,
}
-static __inline__
-void dis_JMP_d32( UCodeBlock* cb, Addr d32 )
-{
- uInstr1(cb, JMP, 0, Literal, 0);
- uLiteral(cb, d32);
- uCond(cb, CondAlways);
-}
-
/*------------------------------------------------------------*/
/*--- Disassembling string ops (including REP prefixes) ---*/
@@ -1938,5 +1917,5 @@ void dis_string_op( UCodeBlock* cb, void
dis_string_op_increment(cb, sz, t_inc);
dis_OP( cb, sz, t_inc );
- if (dis) VG_(printf)("%s%c\n", name, nameISize(sz));
+ DIP("%s%c\n", name, nameISize(sz));
}
@@ -2055,14 +2034,10 @@ void dis_REP_op ( UCodeBlock* cb, Int co
if (cond == CondAlways) {
- dis_JMP_d32 (cb, eip);
+ jmp_lit(cb, eip);
} else {
- uInstr1 (cb, JMP, 0, Literal, 0);
- uLiteral (cb, eip);
- uCond (cb, cond);
- uFlagsRWU (cb, FlagsOSZACP, FlagsEmpty, FlagsEmpty);
-
- dis_JMP_d32 (cb, eip_next);
+ jcc_lit(cb, eip, cond);
+ jmp_lit(cb, eip_next);
}
- if (dis) VG_(printf)("%s%c\n", name, nameISize(sz));
+ DIP("%s%c\n", name, nameISize(sz));
}
@@ -2092,6 +2067,5 @@ Addr dis_mul_E_G ( UCodeBlock* cb,
setFlagsFromUOpcode(cb, MUL);
uInstr2(cb, PUT, size, TempReg, tg, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("%smul%c %s, %s\n",
- signed_multiply ? "i" : "",
+ DIP("%smul%c %s, %s\n", signed_multiply ? "i" : "",
nameISize(size),
nameIReg(size,eregOfRM(rm)),
@@ -2101,5 +2075,5 @@ Addr dis_mul_E_G ( UCodeBlock* cb,
UInt pair;
vg_assert(signed_multiply);
- pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL);
+ pair = disAMode ( cb, sorb, eip0, dis_buf );
ta = LOW24(pair);
uInstr2(cb, LOAD, size, TempReg, ta, TempReg, te);
@@ -2109,8 +2083,7 @@ Addr dis_mul_E_G ( UCodeBlock* cb,
uInstr2(cb, PUT, size, TempReg, tg, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("%smul%c %s, %s\n",
- signed_multiply ? "i" : "",
+ DIP("%smul%c %s, %s\n", signed_multiply ? "i" : "",
nameISize(size),
- dis_buf,nameIReg(size,gregOfRM(rm)));
+ dis_buf, nameIReg(size,gregOfRM(rm)));
return HI8(pair)+eip0;
}
@@ -2127,5 +2100,5 @@ Addr dis_imul_I_E_G ( UCodeBlock* cb,
{
Int ta, te, tl, d32;
- UChar dis_buf[50];
+ Char dis_buf[50];
UChar rm = getUChar(eip);
ta = INVALID_TEMPREG;
@@ -2137,5 +2110,5 @@ Addr dis_imul_I_E_G ( UCodeBlock* cb,
eip++;
} else {
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf );
ta = LOW24(pair);
uInstr2(cb, LOAD, size, TempReg, ta, TempReg, te);
@@ -2152,13 +2125,7 @@ Addr dis_imul_I_E_G ( UCodeBlock* cb,
uInstr2(cb, PUT, size, TempReg, te, ArchReg, gregOfRM(rm));
- if (dis) {
- if (epartIsReg(rm)) {
- VG_(printf)("imul %d, %s, %s\n", d32, nameIReg(size,eregOfRM(rm)),
- nameIReg(size,gregOfRM(rm)));
- } else {
- VG_(printf)("imul %d, %s, %s\n", d32, dis_buf,
- nameIReg(size,gregOfRM(rm)));
- }
- }
+ DIP("imul %d, %s, %s\n", d32,
+ ( epartIsReg(rm) ? nameIReg(size,eregOfRM(rm)) : dis_buf ),
+ nameIReg(size,gregOfRM(rm)) );
return eip;
@@ -2180,5 +2147,5 @@ Addr dis_fpu_mem ( UCodeBlock* cb,
vg_assert(second_byte < 0xC0);
second_byte &= 0x38;
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip, dis_buf );
ta = LOW24(pair);
eip += HI8(pair);
@@ -2187,14 +2154,10 @@ Addr dis_fpu_mem ( UCodeBlock* cb,
(((UShort)first_byte) << 8) | ((UShort)second_byte),
TempReg, ta);
- if (dis) {
- if (is_write)
- VG_(printf)("fpu_w_%d 0x%x:0x%x, %s\n",
- size, (UInt)first_byte,
- (UInt)second_byte, dis_buf );
- else
- VG_(printf)("fpu_r_%d %s, 0x%x:0x%x\n",
- size, dis_buf,
- (UInt)first_byte,
- (UInt)second_byte );
+ if (is_write) {
+ DIP("fpu_w_%d 0x%x:0x%x, %s\n",
+ size, (UInt)first_byte, (UInt)second_byte, dis_buf );
+ } else {
+ DIP("fpu_r_%d %s, 0x%x:0x%x\n",
+ size, dis_buf, (UInt)first_byte, (UInt)second_byte );
}
return eip;
@@ -2265,6 +2228,5 @@ Addr dis_fpu_no_mem ( UCodeBlock* cb, Ad
}
- if (dis) VG_(printf)("fpu 0x%x:0x%x%s%s\n",
- (UInt)first_byte, (UInt)second_byte,
+ DIP("fpu 0x%x:0x%x%s%s\n", (UInt)first_byte, (UInt)second_byte,
uses_ZCP ? " -rZCP" : "",
sets_ZCP ? " -wZCP" : "" );
@@ -2296,5 +2258,5 @@ Addr dis_fpu ( UCodeBlock* cb,
uInstr2(cb, PUT, 2, TempReg, t1, ArchReg, R_EAX);
uInstr0(cb, CALLM_E, 0);
- if (dis) VG_(printf)("fstsw %%ax\n");
+ DIP("fstsw %%ax\n");
eip++;
return eip;
@@ -2498,11 +2460,9 @@ Addr dis_SHLRD_Gv_Ev ( UCodeBlock* cb,
uInstr1(cb, POP, sz, TempReg, t);
uInstr2(cb, PUT, sz, TempReg, t, ArchReg, eregOfRM(modrm));
- if (dis)
- VG_(printf)("sh%cd%c %%cl, %s, %s\n",
- ( left_shift ? 'l' : 'r' ),
- nameISize(sz), nameIReg(sz, gregOfRM(modrm)),
- nameIReg(sz, eregOfRM(modrm)));
+ DIP("sh%cd%c %%cl, %s, %s\n",
+ ( left_shift ? 'l' : 'r' ), nameISize(sz),
+ nameIReg(sz, gregOfRM(modrm)), nameIReg(sz, eregOfRM(modrm)));
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip, dis_buf );
ta = LOW24(pair);
eip += HI8(pair);
@@ -2513,9 +2473,6 @@ Addr dis_SHLRD_Gv_Ev ( UCodeBlock* cb,
uInstr1(cb, POP, sz, TempReg, t);
uInstr2(cb, STORE, sz, TempReg, t, TempReg, ta);
- if (dis)
- VG_(printf)("sh%cd%c %%cl, %s, %s\n",
- ( left_shift ? 'l' : 'r' ),
- nameISize(sz), nameIReg(sz, gregOfRM(modrm)),
- dis_buf);
+ DIP("sh%cd%c %%cl, %s, %s\n", ( left_shift ? 'l' : 'r' ),
+ nameISize(sz), nameIReg(sz, gregOfRM(modrm)), dis_buf);
}
@@ -2551,5 +2508,5 @@ Addr dis_bt_G_E ( UCodeBlock* cb,
{
UInt pair;
- UChar dis_buf[50];
+ Char dis_buf[50];
UChar modrm;
@@ -2592,5 +2549,5 @@ Addr dis_bt_G_E ( UCodeBlock* cb,
uInstr2(cb, AND, 4, TempReg, lit, TempReg, t_bitno);
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip, dis_buf );
t_addr = LOW24(pair);
eip += HI8(pair);
@@ -2668,17 +2625,7 @@ Addr dis_bt_G_E ( UCodeBlock* cb,
}
- if (epartIsReg(modrm)) {
- if (dis)
- VG_(printf)("bt%s%c %s, %s\n",
- nameBtOp(op),
- nameISize(sz), nameIReg(sz, gregOfRM(modrm)),
- nameIReg(sz, eregOfRM(modrm)));
- } else {
- if (dis)
- VG_(printf)("bt%s%c %s, %s\n",
- nameBtOp(op),
- nameISize(sz), nameIReg(sz, gregOfRM(modrm)),
- dis_buf);
- }
+ DIP("bt%s%c %s, %s\n",
+ nameBtOp(op), nameISize(sz), nameIReg(sz, gregOfRM(modrm)),
+ ( epartIsReg(modrm) ? nameIReg(sz, eregOfRM(modrm)) : dis_buf ) );
return eip;
@@ -2696,6 +2642,7 @@ Addr dis_bs_E_G ( UCodeBlock* cb,
Int t, t1, ta, helper;
UInt pair;
- UChar dis_buf[50];
+ Char dis_buf[50];
UChar modrm;
+ Bool isReg;
vg_assert(sz == 2 || sz == 4);
@@ -2711,23 +2658,18 @@ Addr dis_bs_E_G ( UCodeBlock* cb,
uInstr1(cb, PUSH, sz, TempReg, t1);
- if (epartIsReg(modrm)) {
+ isReg = epartIsReg(modrm);
+ if (isReg) {
eip++;
uInstr2(cb, GET, sz, ArchReg, eregOfRM(modrm), TempReg, t);
- if (dis)
- VG_(printf)("bs%c%c %s, %s\n",
- fwds ? 'f' : 'r',
- nameISize(sz), nameIReg(sz, eregOfRM(modrm)),
- nameIReg(sz, gregOfRM(modrm)));
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip, dis_buf );
ta = LOW24(pair);
eip += HI8(pair);
uInstr2(cb, LOAD, sz, TempReg, ta, TempReg, t);
- if (dis)
- VG_(printf)("bs%c%c %s, %s\n",
- fwds ? 'f' : 'r',
- nameISize(sz), dis_buf,
- nameIReg(sz, gregOfRM(modrm)));
}
+ DIP("bs%c%c %s, %s\n",
+ fwds ? 'f' : 'r', nameISize(sz),
+ ( isReg ? nameIReg(sz, eregOfRM(modrm)) : dis_buf ),
+ nameIReg(sz, gregOfRM(modrm)));
uInstr1(cb, PUSH, sz, TempReg, t);
@@ -2754,7 +2696,6 @@ void codegen_xchg_eAX_Reg ( UCodeBlock*
uInstr2(cb, PUT, sz, TempReg, t2, ArchReg, R_EAX);
uInstr2(cb, PUT, sz, TempReg, t1, ArchReg, reg);
- if (dis)
- VG_(printf)("xchg%c %s, %s\n", nameISize(sz),
- nameIReg(sz, R_EAX), nameIReg(sz, reg));
+ DIP("xchg%c %s, %s\n",
+ nameISize(sz), nameIReg(sz, R_EAX), nameIReg(sz, reg));
}
@@ -2830,14 +2771,13 @@ Addr dis_cmpxchg_G_E ( UCodeBlock* cb,
uInstr2(cb, GET, size, ArchReg, eregOfRM(rm), TempReg, dest);
eip0++;
- if (dis) VG_(printf)("cmpxchg%c %s,%s\n",
- nameISize(size),
+ DIP("cmpxchg%c %s,%s\n", nameISize(size),
nameIReg(size,gregOfRM(rm)),
nameIReg(size,eregOfRM(rm)) );
} else {
- UInt pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL );
+ UInt pair = disAMode ( cb, sorb, eip0, dis_buf );
ta = LOW24(pair);
uInstr2(cb, LOAD, size, TempReg, ta, TempReg, dest);
eip0 += HI8(pair);
- if (dis) VG_(printf)("cmpxchg%c %s,%s\n", nameISize(size),
+ DIP("cmpxchg%c %s,%s\n", nameISize(size),
nameIReg(size,gregOfRM(rm)), dis_buf);
}
@@ -2889,5 +2829,5 @@ Addr dis_cmpxchg8b ( UCodeBlock* cb,
vg_assert(!epartIsReg(rm));
- pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip0, dis_buf );
tal = LOW24(pair);
tah = newTemp(cb);
@@ -2896,5 +2836,5 @@ Addr dis_cmpxchg8b ( UCodeBlock* cb,
uLiteral(cb, 4);
eip0 += HI8(pair);
- if (dis) VG_(printf)("cmpxchg8b %s\n", dis_buf);
+ DIP("cmpxchg8b %s\n", dis_buf);
uInstr0(cb, CALLM_S, 0);
@@ -2972,6 +2912,5 @@ Addr dis_cmov_E_G ( UCodeBlock* cb,
uFlagsRWU(cb, FlagsOSZACP, FlagsEmpty, FlagsEmpty);
uInstr2(cb, PUT, size, TempReg, tmpd, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("cmov%c%s %s,%s\n",
- nameISize(size),
+ DIP("cmov%c%s %s,%s\n", nameISize(size),
VG_(name_UCondcode)(cond),
nameIReg(size,eregOfRM(rm)),
@@ -2982,5 +2921,5 @@ Addr dis_cmov_E_G ( UCodeBlock* cb,
/* E refers to memory */
{
- UInt pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip0, dis_buf );
Int tmpa = LOW24(pair);
uInstr2(cb, LOAD, size, TempReg, tmpa, TempReg, tmps);
@@ -2990,6 +2929,5 @@ Addr dis_cmov_E_G ( UCodeBlock* cb,
uFlagsRWU(cb, FlagsOSZACP, FlagsEmpty, FlagsEmpty);
uInstr2(cb, PUT, size, TempReg, tmpd, ArchReg, gregOfRM(rm));
- if (dis) VG_(printf)("cmov%c%s %s,%s\n",
- nameISize(size),
+ DIP("cmov%c%s %s,%s\n", nameISize(size),
VG_(name_UCondcode)(cond),
dis_buf,
@@ -3019,11 +2957,9 @@ Addr dis_xadd_G_E ( UCodeBlock* cb,
uInstr2(cb, PUT, sz, TempReg, tmpt, ArchReg, eregOfRM(rm));
uInstr2(cb, PUT, sz, TempReg, tmpd, ArchReg, gregOfRM(rm));
- if (dis)
- VG_(printf)("xadd%c %s, %s\n", nameISize(sz),
- nameIReg(sz,gregOfRM(rm)),
- nameIReg(sz,eregOfRM(rm)));
+ DIP("xadd%c %s, %s\n",
+ nameISize(sz), nameIReg(sz,gregOfRM(rm)), nameIReg(sz,eregOfRM(rm)));
return 1+eip0;
} else {
- UInt pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip0, dis_buf );
Int tmpa = LOW24(pair);
uInstr2(cb, LOAD, sz, TempReg, tmpa, TempReg, tmpd);
@@ -3033,8 +2969,6 @@ Addr dis_xadd_G_E ( UCodeBlock* cb,
uInstr2(cb, STORE, sz, TempReg, tmpt, TempReg, tmpa);
uInstr2(cb, PUT, sz, TempReg, tmpd, ArchReg, gregOfRM(rm));
- if (dis)
- VG_(printf)("xadd%c %s, %s\n", nameISize(sz),
- nameIReg(sz,gregOfRM(rm)),
- dis_buf);
+ DIP("xadd%c %s, %s\n",
+ nameISize(sz), nameIReg(sz,gregOfRM(rm)), dis_buf);
return HI8(pair)+eip0;
}
@@ -3070,7 +3004,5 @@ Addr dis_mov_Ew_Sw ( UCodeBlock* cb,
uInstr2(cb, GET, 2, ArchReg, eregOfRM(rm), TempReg, tmpv);
uInstr2(cb, PUTSEG, 2, TempReg, tmpv, ArchRegS, gregOfRM(rm));
- if (dis) VG_(printf)("movw %s,%s\n",
- nameIReg(2,eregOfRM(rm)),
- nameSReg(gregOfRM(rm)));
+ DIP("movw %s,%s\n", nameIReg(2,eregOfRM(rm)), nameSReg(gregOfRM(rm)));
return 1+eip0;
}
@@ -3078,11 +3010,10 @@ Addr dis_mov_Ew_Sw ( UCodeBlock* cb,
/* E refers to memory */
{
- UInt pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip0, dis_buf );
Int tmpa = LOW24(pair);
Int tmpb = newTemp(cb);
uInstr2(cb, LOAD, 2, TempReg, tmpa, TempReg, tmpb);
uInstr2(cb, PUTSEG, 2, TempReg, tmpb, ArchRegS, gregOfRM(rm));
- if (dis) VG_(printf)("movw %s,%s\n",
- dis_buf,nameSReg(gregOfRM(rm)));
+ DIP("movw %s,%s\n", dis_buf,nameSReg(gregOfRM(rm)));
return HI8(pair)+eip0;
}
@@ -3118,7 +3049,5 @@ Addr dis_mov_Sw_Ew ( UCodeBlock* cb,
uInstr2(cb, GETSEG, 2, ArchRegS, gregOfRM(rm), TempReg, tmpv);
uInstr2(cb, PUT, 2, TempReg, tmpv, ArchReg, eregOfRM(rm));
- if (dis) VG_(printf)("movw %s,%s\n",
- nameSReg(gregOfRM(rm)),
- nameIReg(2,eregOfRM(rm)));
+ DIP("movw %s,%s\n", nameSReg(gregOfRM(rm)), nameIReg(2,eregOfRM(rm)));
return 1+eip0;
}
@@ -3126,11 +3055,10 @@ Addr dis_mov_Sw_Ew ( UCodeBlock* cb,
/* E refers to memory */
{
- UInt pair = disAMode ( cb, sorb, eip0, dis?dis_buf:NULL);
+ UInt pair = disAMode ( cb, sorb, eip0, dis_buf );
Int tmpa = LOW24(pair);
Int tmpv = newTemp(cb);
uInstr2(cb, GETSEG, 2, ArchRegS, gregOfRM(rm), TempReg, tmpv);
uInstr2(cb, STORE, 2, TempReg, tmpv, TempReg, tmpa);
- if (dis) VG_(printf)("mov %s,%s\n",
- nameSReg(gregOfRM(rm)), dis_buf);
+ DIP("mov %s,%s\n", nameSReg(gregOfRM(rm)), dis_buf);
return HI8(pair)+eip0;
}
@@ -3153,20 +3081,15 @@ Addr dis_MMXop_regmem_to_reg ( UCodeBloc
Bool show_granularity )
{
- UChar dis_buf[50];
- UChar modrm;
- modrm = getUChar(eip);
- if (epartIsReg(modrm)) {
+ Char dis_buf[50];
+ UChar modrm = getUChar(eip);
+ Bool isReg = epartIsReg(modrm);
+
+ if (isReg) {
eip++;
uInstr1(cb, MMX2, 0,
Lit16,
(((UShort)(opc)) << 8) | ((UShort)modrm) );
- if (dis)
- VG_(printf)("%s%s %s, %s\n",
- name,
- show_granularity ? nameMMXGran(opc & 3) : (Char*)"",
- nameMMXReg(eregOfRM(modrm)),
- nameMMXReg(gregOfRM(modrm)));
} else {
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf );
Int tmpa = LOW24(pair);
eip += HI8(pair);
@@ -3175,11 +3098,11 @@ Addr dis_MMXop_regmem_to_reg ( UCodeBloc
(((UShort)(opc)) << 8) | ((UShort)modrm),
TempReg, tmpa);
- if (dis)
- VG_(printf)("%s%s %s, %s\n",
- name,
- show_granularity ? nameMMXGran(opc & 3) : (Char*)"",
- dis_buf,
- nameMMXReg(gregOfRM(modrm)));
}
+
+ DIP("%s%s %s, %s\n",
+ name, show_granularity ? nameMMXGran(opc & 3) : (Char*)"",
+ ( isReg ? nameMMXReg(eregOfRM(modrm)) : dis_buf ),
+ nameMMXReg(gregOfRM(modrm)) );
+
return eip;
}
@@ -3204,18 +3127,16 @@ Addr dis_SSE3_reg_or_mem ( UCodeBlock* c
UChar opc3 )
{
- UChar dis_buf[50];
+ Char dis_buf[50];
UChar modrm = getUChar(eip);
- if (epartIsReg(modrm)) {
+ Bool isReg = epartIsReg(modrm);
+
+ if (isReg) {
/* Completely internal SSE insn. */
uInstr2(cb, SSE4, 0, /* ignore sz for internal ops */
Lit16, (((UShort)opc1) << 8) | (UShort)opc2,
Lit16, (((UShort)opc3) << 8) | (UShort)modrm );
- if (dis)
- VG_(printf)("%s %s, %s\n", name,
- nameXMMReg(eregOfRM(modrm)),
- nameXMMReg(gregOfRM(modrm)) );
eip++;
} else {
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf );
Int tmpa = LOW24(pair);
eip += HI8(pair);
@@ -3224,10 +3145,11 @@ Addr dis_SSE3_reg_or_mem ( UCodeBlock* c
Lit16, (((UShort)(opc3)) << 8) | ((UShort)modrm),
TempReg, tmpa);
- if (dis)
- VG_(printf)("%s %s, %s\n",
- name,
- dis_buf,
- nameXMMReg(gregOfRM(modrm)));
}
+
+ DIP("%s %s, %s\n",
+ name,
+ ( isReg ? nameXMMReg(eregOfRM(modrm)) : dis_buf ),
+ nameXMMReg(gregOfRM(modrm)) );
+
return eip;
}
@@ -3250,18 +3172,16 @@ Addr dis_SSE2_reg_or_mem ( UCodeBlock* c
UChar opc2 )
{
- UChar dis_buf[50];
+ Char dis_buf[50];
UChar modrm = getUChar(eip);
- if (epartIsReg(modrm)) {
+ Bool isReg = epartIsReg(modrm);
+
+ if (isReg) {
/* Completely internal SSE insn. */
uInstr2(cb, SSE3, 0, /* ignore sz for internal ops */
Lit16, (((UShort)opc1) << 8) | (UShort)opc2,
Lit16, (UShort)modrm );
- if (dis)
- VG_(printf)("%s %s, %s\n", name,
- nameXMMReg(eregOfRM(modrm)),
- nameXMMReg(gregOfRM(modrm)) );
eip++;
} else {
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf );
Int tmpa = LOW24(pair);
eip += HI8(pair);
@@ -3270,10 +3190,10 @@ Addr dis_SSE2_reg_or_mem ( UCodeBlock* c
Lit16, (UShort)modrm,
TempReg, tmpa);
- if (dis)
- VG_(printf)("%s %s, %s\n",
- name,
- dis_buf,
- nameXMMReg(gregOfRM(modrm)));
}
+ DIP("%s %s, %s\n",
+ name,
+ ( isReg ? nameXMMReg(eregOfRM(modrm)) : dis_buf ),
+ nameXMMReg(gregOfRM(modrm)) );
+
return eip;
}
@@ -3296,8 +3216,10 @@ Addr dis_SSE2_reg_or_mem_Imm8 ( UCodeBlo
UChar opc2 )
{
- UChar dis_buf[50];
+ Char dis_buf[50];
UChar modrm = getUChar(eip);
UChar imm8;
- if (epartIsReg(modrm)) {
+ Bool isReg = epartIsReg(modrm);
+
+ if (isReg) {
/* Completely internal SSE insn. */
eip++;
@@ -3306,11 +3228,7 @@ Addr dis_SSE2_reg_or_mem_Imm8 ( UCodeBlo
Lit16, (((UShort)opc1) << 8) | (UShort)opc2,
Lit16, (((UShort)modrm) << 8) | (UShort)imm8 );
- if (dis)
- VG_(printf)("%s %s, %s, $%d\n", name,
- nameXMMReg(eregOfRM(modrm)),
- nameXMMReg(gregOfRM(modrm)), (Int)imm8 );
eip++;
} else {
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf );
Int tmpa = LOW24(pair);
eip += HI8(pair);
@@ -3321,10 +3239,8 @@ Addr dis_SSE2_reg_or_mem_Imm8 ( UCodeBlo
Lit16, (((UShort)(modrm)) << 8) | ((UShort)imm8),
TempReg, tmpa);
- if (dis)
- VG_(printf)("%s %s, %s, $%d\n",
- name,
- dis_buf,
- nameXMMReg(gregOfRM(modrm)), (Int)imm8 );
}
+ DIP("%s %s, %s, $%d\n",
+ name, ( isReg ? nameXMMReg(eregOfRM(modrm)) : dis_buf ),
+ nameXMMReg(gregOfRM(modrm)), (Int)imm8 );
return eip;
}
@@ -3348,8 +3264,10 @@ Addr dis_SSE3_reg_or_mem_Imm8 ( UCodeBlo
UChar opc3 )
{
- UChar dis_buf[50];
+ Char dis_buf[50];
UChar modrm = getUChar(eip);
UChar imm8;
- if (epartIsReg(modrm)) {
+ Bool isReg = epartIsReg(modrm);
+
+ if (isReg) {
/* Completely internal SSE insn. */
eip++;
@@ -3359,11 +3277,7 @@ Addr dis_SSE3_reg_or_mem_Imm8 ( UCodeBlo
Lit16, (((UShort)opc3) << 8) | (UShort)modrm,
Lit16, (UShort)imm8 );
- if (dis)
- VG_(printf)("%s %s, %s, $%d\n", name,
- nameXMMReg(eregOfRM(modrm)),
- nameXMMReg(gregOfRM(modrm)), (Int)imm8 );
eip++;
} else {
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf );
Int tmpa = LOW24(pair);
eip += HI8(pair);
@@ -3375,10 +3289,8 @@ Addr dis_SSE3_reg_or_mem_Imm8 ( UCodeBlo
TempReg, tmpa);
uLiteral(cb, imm8);
- if (dis)
- VG_(printf)("%s %s, %s, $%d\n",
- name,
- dis_buf,
- nameXMMReg(gregOfRM(modrm)), (Int)imm8 );
}
+ DIP("%s %s, %s, $%d\n",
+ name, ( isReg ? nameXMMReg(eregOfRM(modrm)) : dis_buf ),
+ nameXMMReg(gregOfRM(modrm)), (Int)imm8 );
return eip;
}
@@ -3400,10 +3312,11 @@ Addr dis_SSE3_load_store_or_mov ( UCodeB
UChar insn2 )
{
- UChar dis_buf[50];
- UChar modrm;
+ Char dis_buf[50];
+ UChar modrm = getUChar(eip);
+ Bool isReg = epartIsReg(modrm);
UInt pair;
Int t1;
- modrm = getUChar(eip);
- if (epartIsReg(modrm)) {
+
+ if (isReg) {
/* Completely internal; we can issue SSE4. */
eip++;
@@ -3411,14 +3324,6 @@ Addr dis_SSE3_load_store_or_mov ( UCodeB
Lit16, (((UShort)insn0) << 8) | (UShort)insn1,
Lit16, (((UShort)insn2) << 8) | (UShort)modrm );
- if (dis && is_store)
- VG_(printf)("%s %s, %s\n", name,
- nameXMMReg(gregOfRM(modrm)),
- nameXMMReg(eregOfRM(modrm)) );
- if (dis && !is_store)
- VG_(printf)("%s %s, %s\n", name,
- nameXMMReg(eregOfRM(modrm)),
- nameXMMReg(gregOfRM(modrm)) );
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip, dis_buf );
t1 = LOW24(pair);
eip += HI8(pair);
@@ -3427,10 +3332,16 @@ Addr dis_SSE3_load_store_or_mov ( UCodeB
Lit16, (((UShort)insn2) << 8) | (UShort)modrm,
TempReg, t1 );
- if (dis && is_store)
- VG_(printf)("%s %s, %s\n", name,
- nameXMMReg(gregOfRM(modrm)), dis_buf );
- if (dis && !is_store)
- VG_(printf)("%s %s, %s\n", name,
- dis_buf, nameXMMReg(gregOfRM(modrm)) );
+ }
+
+ if (is_store) {
+ DIP("%s %s, %s\n",
+ name,
+ nameXMMReg(gregOfRM(modrm)),
+ ( isReg ? nameXMMReg(eregOfRM(modrm)) : dis_buf ) );
+ } else {
+ DIP("%s %s, %s\n",
+ name,
+ ( isReg ? nameXMMReg(eregOfRM(modrm)) : dis_buf ),
+ nameXMMReg(gregOfRM(modrm)) );
}
return eip;
@@ -3451,10 +3362,11 @@ Addr dis_SSE2_load_store_or_mov ( UCodeB
UChar insn1 )
{
- UChar dis_buf[50];
- UChar modrm;
+ Char dis_buf[50];
+ UChar modrm = getUChar(eip);
+ Bool isReg = epartIsReg(modrm);
UInt pair;
Int t1;
- modrm = getUChar(eip);
- if (epartIsReg(modrm)) {
+
+ if (isReg) {
/* Completely internal; we can issue SSE3. */
eip++;
@@ -3462,14 +3374,6 @@ Addr dis_SSE2_load_store_or_mov ( UCodeB
Lit16, (((UShort)insn0) << 8) | (UShort)insn1,
Lit16, (UShort)modrm );
- if (dis && is_store)
- VG_(printf)("%s %s, %s\n", name,
- nameXMMReg(gregOfRM(modrm)),
- nameXMMReg(eregOfRM(modrm)) );
- if (dis && !is_store)
- VG_(printf)("%s %s, %s\n", name,
- nameXMMReg(eregOfRM(modrm)),
- nameXMMReg(gregOfRM(modrm)) );
} else {
- pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip, dis_buf );
t1 = LOW24(pair);
eip += HI8(pair);
@@ -3478,10 +3382,16 @@ Addr dis_SSE2_load_store_or_mov ( UCodeB
Lit16, (UShort)modrm,
TempReg, t1 );
- if (dis && is_store)
- VG_(printf)("%s %s, %s\n", name,
- nameXMMReg(gregOfRM(modrm)), dis_buf );
- if (dis && !is_store)
- VG_(printf)("%s %s, %s\n", name,
- dis_buf, nameXMMReg(gregOfRM(modrm)) );
+ }
+
+ if (is_store) {
+ DIP("%s %s, %s\n",
+ name,
+ nameXMMReg(gregOfRM(modrm)),
+ ( isReg ? nameXMMReg(eregOfRM(modrm)) : dis_buf ) );
+ } else {
+ DIP("%s %s, %s\n",
+ name,
+ ( isReg ? nameXMMReg(eregOfRM(modrm)) : dis_buf ),
+ nameXMMReg(gregOfRM(modrm)) );
}
return eip;
@@ -3512,11 +3422,9 @@ Addr dis_SSE2_to_MMX ( UCodeBlock *cb,
Lit16, (((UShort)opc1) << 8) | (UShort)opc2,
Lit16, (UShort)modrm );
- if (dis)
- VG_(printf)("%s %s, %s\n", name,
- nameXMMReg(eregOfRM(modrm)),
- nameMMXReg(gregOfRM(modrm)) );
+ DIP("%s %s, %s\n",
+ name, nameXMMReg(eregOfRM(modrm)), nameMMXReg(gregOfRM(modrm)) );
eip++;
} else {
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf );
Int tmpa = LOW24(pair);
eip += HI8(pair);
@@ -3525,9 +3433,5 @@ Addr dis_SSE2_to_MMX ( UCodeBlock *cb,
Lit16, ((UShort)modrm),
TempReg, tmpa);
- if (dis)
- VG_(printf)("%s %s, %s\n",
- name,
- dis_buf,
- nameMMXReg(gregOfRM(modrm)));
+ DIP("%s %s, %s\n", name, dis_buf, nameMMXReg(gregOfRM(modrm)));
}
return eip;
@@ -3558,11 +3462,9 @@ Addr dis_SSE2_from_MMX ( UCodeBlock *cb,
Lit16, (((UShort)opc1) << 8) | (UShort)opc2,
Lit16, (UShort)modrm );
- if (dis)
- VG_(printf)("%s %s, %s\n", name,
- nameMMXReg(eregOfRM(modrm)),
- nameXMMReg(gregOfRM(modrm)) );
+ DIP("%s %s, %s\n",
+ name, nameMMXReg(eregOfRM(modrm)), nameXMMReg(gregOfRM(modrm)) );
eip++;
} else {
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf );
Int tmpa = LOW24(pair);
eip += HI8(pair);
@@ -3571,9 +3473,5 @@ Addr dis_SSE2_from_MMX ( UCodeBlock *cb,
Lit16, ((UShort)modrm),
TempReg, tmpa);
- if (dis)
- VG_(printf)("%s %s, %s\n",
- name,
- dis_buf,
- nameXMMReg(gregOfRM(modrm)));
+ DIP("%s %s, %s\n", name, dis_buf, nameXMMReg(gregOfRM(modrm)));
}
return eip;
@@ -3605,11 +3503,9 @@ Addr dis_SSE3_to_MMX ( UCodeBlock *cb,
Lit16, (((UShort)opc1) << 8) | (UShort)opc2,
Lit16, (((UShort)opc3) << 8) | (UShort)modrm );
- if (dis)
- VG_(printf)("%s %s, %s\n", name,
- nameXMMReg(eregOfRM(modrm)),
- nameMMXReg(gregOfRM(modrm)) );
+ DIP("%s %s, %s\n",
+ name, nameXMMReg(eregOfRM(modrm)), nameMMXReg(gregOfRM(modrm)) );
eip++;
} else {
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf );
Int tmpa = LOW24(pair);
eip += HI8(pair);
@@ -3618,9 +3514,5 @@ Addr dis_SSE3_to_MMX ( UCodeBlock *cb,
Lit16, (((UShort)(opc3)) << 8) | ((UShort)modrm),
TempReg, tmpa);
- if (dis)
- VG_(printf)("%s %s, %s\n",
- name,
- dis_buf,
- nameMMXReg(gregOfRM(modrm)));
+ DIP("%s %s, %s\n", name, dis_buf, nameMMXReg(gregOfRM(modrm)));
}
return eip;
@@ -3652,11 +3544,9 @@ Addr dis_SSE3_from_MMX ( UCodeBlock *cb,
Lit16, (((UShort)opc1) << 8) | (UShort)opc2,
Lit16, (((UShort)opc3) << 8) | (UShort)modrm );
- if (dis)
- VG_(printf)("%s %s, %s\n", name,
- nameMMXReg(eregOfRM(modrm)),
- nameXMMReg(gregOfRM(modrm)) );
+ DIP("%s %s, %s\n",
+ name, nameMMXReg(eregOfRM(modrm)), nameXMMReg(gregOfRM(modrm)) );
eip++;
} else {
- UInt pair = disAMode ( cb, sorb, eip, dis?dis_buf:NULL );
+ UInt pair = disAMode ( cb, sorb, eip, dis_buf );
Int tmpa = LOW24(pair);
eip += HI8(pair);
@@ -3665,9 +3555,5 @@ Addr dis_SSE3_from_MMX ( UCodeBlock *cb,
Lit16, (((UShort)(opc3)) << 8) | ((UShort)modrm),
TempReg, tmpa);
- if (dis)
- VG_(printf)("%s %s, %s\n",
- name,
- dis_buf,
- nameXMMReg(gregOfRM(modrm)));
+ DIP("%s %s, %s\n", name, dis_buf, nameXMMReg(gregOfRM(modrm)));
}
return eip;
@@ -3686,6 +3572,5 @@ void dis_push_segreg ( UCodeBlock* cb, U
uInstr2(cb, PUT, 4, TempReg, t2, ArchReg, R_ESP);
uInstr2(cb, STORE, 2, TempReg, t1, TempReg, t2);
- if (dis)
- VG_(printf)("push %s\n", VG_(name_of_seg_reg)(sreg));
+ DIP("push %s\n", VG_(name_of_seg_reg)(sreg));
}
@@ -3701,6 +3586,5 @@ void dis_pop_segreg ( UCodeBlock* cb, UI
uInstr2(cb, PUT, 4, TempReg, t2, ArchReg, R_ESP);
uInstr2(cb, PUTSEG, 2, TempReg, t1, ArchRegS, sreg);
- if (dis)
- VG_(printf)("pop %s\n", VG_(name_of_seg_reg)(sreg));
+ DIP("pop %s\n", VG_(name_of_seg_reg)(sreg));
}
@@ -3711,5 +3595,5 @@ void dis_pop_segreg ( UCodeBlock* cb, UI
/* Disassemble a single instruction into ucode, returning the updated
eip, and setting *isEnd to True if this is the last insn in a basic
- block. Also do debug printing if (dis). */
+ block. Also do debug printing if necessary. */
static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
@@ -3739,5 +3623,5 @@ static Addr disInstr ( UCodeBlock* cb, A
t1 = t2 = t3 = t4 = INVALID_TEMPREG;
- if (dis) VG_(printf)("\t0x%x: ", eip);
+ DIP("\t0x%x: ", eip);
/* Spot the client-request magic sequence. */
@@ -3760,11 +3644,8 @@ static Addr disInstr ( UCodeBlock* cb, A
) {
eip += 18;
- uInstr1(cb, JMP, 0, Literal, 0);
- uLiteral(cb, eip);
- uCond(cb, CondAlways);
+ jmp_lit(cb, eip);
LAST_UINSTR(cb).jmpkind = JmpClientReq;
*isEnd = True;
- if (dis)
- VG_(printf)("%%edx = client_request ( %%eax )\n");
+ DIP("%%edx = client_request ( %%eax )\n");
return eip;
}
@@ -3829,5 +3710,5 @@ static Addr disInstr ( UCodeBlock* cb, A
Bool store = gregOfRM(insn[2]) == 0;
vg_assert(sz == 4);
- pair = disAMode ( cb, sorb, eip+2, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip+2, dis_buf );
t1 = LOW24(pair);
eip += 2+HI8(pair);
@@ -3836,6 +3717,5 @@ static Addr disInstr ( UCodeBlock* cb, A
Lit16, (UShort)insn[2],
TempReg, t1 );
- if (dis)
- VG_(printf)("fx%s %s\n", store ? "save" : "rstor", dis_buf );
+ DIP("fx%s %s\n", store ? "save" : "rstor", dis_buf );
goto decode_success;
}
@@ -3847,5 +3727,5 @@ static Addr disInstr ( UCodeBlock* cb, A
Bool store = gregOfRM(insn[2]) == 3;
vg_assert(sz == 4);
- pair = disAMode ( cb, sorb, eip+2, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip+2, dis_buf );
t1 = LOW24(pair);
eip += 2+HI8(pair);
@@ -3854,6 +3734,5 @@ static Addr disInstr ( UCodeBlock* cb, A
Lit16, (UShort)insn[2],
TempReg, t1 );
- if (dis)
- VG_(printf)("%smxcsr %s\n", store ? "st" : "ld", dis_buf );
+ DIP("%smxcsr %s\n", store ? "st" : "ld", dis_buf );
goto decode_success;
}
@@ -3869,6 +3748,5 @@ static Addr disInstr ( UCodeBlock* cb, A
Lit16, (((UShort)0x0F) << 8) | (UShort)0xAE,
Lit16, (UShort)insn[2] );
- if (dis)
- VG_(printf)("sfence\n");
+ DIP("sfence\n");
goto decode_success;
}
@@ -3880,5 +3758,5 @@ static Addr disInstr ( UCodeBlock* cb, A
{
vg_assert(sz == 4);
- pair = disAMode ( cb, sorb, eip+2, dis?dis_buf:NULL );
+ pair = disAMode ( cb, sorb, eip+2, dis_buf );
t1 = LOW24(pair);
eip += 2+HI8(pair);
@@ -3887,6 +3765,5 @@ static Addr disInstr ( UCodeBlock* cb, A
Lit16, (UShort)insn[2],
TempReg, t1 );
- if (dis)
- VG_(printf)("clflush %s\n", dis_buf);
+ DIP("clflush %s\n", dis_buf);
goto decode_success;
}
@@ -3953,8 +3830,6 @@ static Addr disInstr ( UCodeBlock* cb, A
TempReg, t1 );
uInstr2(cb, PUT, 4, TempReg, t1, ArchReg, gregOfRM(modrm));
- if (dis)
- VG_(printf)("cvt{t}s{s,d}2si %s, %s\n",
- nameXMMReg(eregOfRM(modrm)),
- nameIReg(4,gregOfRM(modrm))...
[truncated message content] |
|
From: Nicholas N. <nj...@ca...> - 2004-01-26 17:10:43
|
CVS commit by nethercote:
Patch from Tom Hughes:
Patch to provide a proper environment to the debugger
Although this patch isn't strictly needed to allow alternative debuggers to
be used, it is needed if you want to use an X based debugger such as ups
(and presumably ddd) as VG_(system) has until now passed an empty
environment when starting the debugger, but that causes DISPLAY to be lost.
This patch causes VG_(system) to pass a copy of the client environment
instead, with the necessary mashing done to clean up the LD_xxx variables.
M +5 -0 vg_include.h 1.177
M +62 -2 vg_mylibc.c 1.68
M +8 -8 vg_syscalls.c 1.82
--- valgrind/coregrind/vg_include.h #1.176:1.177
@@ -1088,4 +1088,5 @@ extern Int VG_(connect_via_socket)( UCha
/* Environment manipulations */
+extern Char **VG_(env_clone) ( Char **oldenv );
extern Char* VG_(env_getenv) ( Char **env, Char* varname );
extern Char **VG_(env_setenv) ( Char ***envp, const Char* varname, const Char *val );
@@ -1414,4 +1415,8 @@ extern Bool VG_(sysinfo_page_exists);
extern Addr VG_(sysinfo_page_addr);
+/* Walk through a colon separated list variable, removing entries
+ which match pattern. */
+extern void VG_(mash_colon_env)(Char *varp, const Char *pattern);
+
/* Something of a function looking for a home ... start up debugger. */
extern void VG_(start_debugger) ( Int tid );
--- valgrind/coregrind/vg_mylibc.c #1.67:1.68
@@ -1344,4 +1344,30 @@ Bool VG_(getcwd_alloc) ( Char** out )
------------------------------------------------------------------ */
+/* clone the environment */
+Char **VG_(env_clone) ( Char **oldenv )
+{
+ Char **oldenvp;
+ Char **newenvp;
+ Char **newenv;
+ Int envlen;
+
+ for (oldenvp = oldenv; oldenvp && *oldenvp; oldenvp++);
+
+ envlen = oldenvp - oldenv + 1;
+
+ newenv = VG_(arena_malloc)(VG_AR_CORE, envlen * sizeof(Char **));
+
+ oldenvp = oldenv;
+ newenvp = newenv;
+
+ while (oldenvp && *oldenvp) {
+ *newenvp++ = *oldenvp++;
+ }
+
+ *newenvp = *oldenvp;
+
+ return newenv;
+}
+
void VG_(env_unsetenv) ( Char **env, const Char *varname )
{
@@ -1497,5 +1523,4 @@ Int VG_(system) ( Char* cmd )
{
Int pid, res;
- void* environ[1] = { NULL };
if (cmd == NULL)
return 1;
@@ -1505,5 +1530,38 @@ Int VG_(system) ( Char* cmd )
if (pid == 0) {
/* child */
+ static Char** envp = NULL;
Char* argv[4];
+
+ if (envp == NULL) {
+ Int i;
+ Char* ld_preload_str = NULL;
+ Char* ld_library_path_str = NULL;
+ Char* buf;
+
+ envp = VG_(env_clone)(VG_(client_envp));
+
+ for (i = 0; envp[i] != NULL; i++) {
+ if (VG_(strncmp)(envp[i], "LD_PRELOAD=", 11) == 0)
+ ld_preload_str = &envp[i][11];
+ if (VG_(strncmp)(envp[i], "LD_LIBRARY_PATH=", 16) == 0)
+ ld_library_path_str = &envp[i][16];
+ }
+
+ buf = VG_(arena_malloc)(VG_AR_CORE, VG_(strlen)(VG_(libdir)) + 20);
+
+ VG_(sprintf)(buf, "%s*/vg_inject.so", VG_(libdir));
+ VG_(mash_colon_env)(ld_preload_str, buf);
+
+ VG_(sprintf)(buf, "%s*/vgpreload_*.so", VG_(libdir));
+ VG_(mash_colon_env)(ld_preload_str, buf);
+
+ VG_(sprintf)(buf, "%s*", VG_(libdir));
+ VG_(mash_colon_env)(ld_library_path_str, buf);
+
+ VG_(env_unsetenv)(envp, VALGRINDCLO);
+
+ VG_(arena_free)(VG_AR_CORE, buf);
+ }
+
argv[0] = "/bin/sh";
argv[1] = "-c";
@@ -1510,6 +1568,8 @@ Int VG_(system) ( Char* cmd )
argv[2] = cmd;
argv[3] = 0;
+
(void)VG_(do_syscall)(__NR_execve,
- (UInt)"/bin/sh", (UInt)argv, (UInt)&environ);
+ (UInt)"/bin/sh", (UInt)argv, (UInt)envp);
+
/* If we're still alive here, execve failed. */
return -1;
--- valgrind/coregrind/vg_syscalls.c #1.81:1.82
@@ -164,5 +164,5 @@ static Bool valid_client_addr(Addr start
delimited by ':' are considered to be '.' in a path.
*/
-static void mash_colon_env(Char *varp, const Char *remove_pattern)
+void VG_(mash_colon_env)(Char *varp, const Char *remove_pattern)
{
Char *const start = varp;
@@ -1870,11 +1870,11 @@ PRE(execve)
VG_(sprintf)(buf, "%s*/vg_inject.so", VG_(libdir));
- mash_colon_env(ld_preload_str, buf);
+ VG_(mash_colon_env)(ld_preload_str, buf);
VG_(sprintf)(buf, "%s*/vgpreload_*.so", VG_(libdir));
- mash_colon_env(ld_preload_str, buf);
+ VG_(mash_colon_env)(ld_preload_str, buf);
VG_(sprintf)(buf, "%s*", VG_(libdir));
- mash_colon_env(ld_library_path_str, buf);
+ VG_(mash_colon_env)(ld_library_path_str, buf);
VG_(env_unsetenv)(envp, VALGRINDCLO);
|
|
From: Nicholas N. <nj...@ca...> - 2004-01-26 16:48:38
|
CVS commit by nethercote:
Patch from Tom Hughes:
Patch to allow debuggers other than GDB to be used
The patch replaces --gdb-attach and --gdb-path with --db-attach and
--db-command which are more general. The --db-command switch takes a
command string that can contain one or more instances of %p and %f markers.
The %p marker is replaced with the PID of the process to attach to and the
%f marker with the filename of the executable being attached to.
The default command is "gdb -nw %f %p" which gives the same result as
currently.
M +10 -10 vg_errcontext.c 1.53
M +9 -6 vg_include.h 1.176
M +60 -25 vg_main.c 1.142
M +2 -2 vg_signals.c 1.59
M +21 -16 docs/coregrind_core.html 1.24
--- valgrind/coregrind/vg_errcontext.c #1.52:1.53
@@ -112,5 +112,5 @@ static void pp_Error ( Error* err, Bool
}
-/* Figure out if we want to attach for GDB for this error, possibly
+/* Figure out if we want to perform a given action for this error, possibly
by asking the user. */
Bool VG_(is_action_requested) ( Char* action, Bool* clo )
@@ -248,12 +248,12 @@ static void gen_suppression(Error* err)
static
-void do_actions_on_error(Error* err, Bool allow_GDB_attach)
+void do_actions_on_error(Error* err, Bool allow_db_attach)
{
- /* Perhaps we want a GDB attach at this point? */
- if (allow_GDB_attach &&
- VG_(is_action_requested)( "Attach to GDB", & VG_(clo_GDB_attach) ))
+ /* Perhaps we want a debugger attach at this point? */
+ if (allow_db_attach &&
+ VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) ))
{
- VG_(printf)("starting gdb\n");
- VG_(start_GDB)( err->tid );
+ VG_(printf)("starting debugger\n");
+ VG_(start_debugger)( err->tid );
}
/* Or maybe we want to generate the error's suppression? */
@@ -413,5 +413,5 @@ void VG_(maybe_record_error) ( ThreadId
is_first_shown_context = False;
vg_n_errs_shown++;
- do_actions_on_error(p, /*allow_GDB_attach*/True);
+ do_actions_on_error(p, /*allow_db_attach*/True);
} else {
vg_n_errs_suppressed++;
@@ -429,5 +429,5 @@ void VG_(maybe_record_error) ( ThreadId
Bool VG_(unique_error) ( ThreadId tid, ErrorKind ekind, Addr a, Char* s,
void* extra, ExeContext* where, Bool print_error,
- Bool allow_GDB_attach, Bool count_error )
+ Bool allow_db_attach, Bool count_error )
{
Error err;
@@ -454,5 +454,5 @@ Bool VG_(unique_error) ( ThreadId tid, E
is_first_shown_context = False;
}
- do_actions_on_error(&err, allow_GDB_attach);
+ do_actions_on_error(&err, allow_db_attach);
return False;
--- valgrind/coregrind/vg_include.h #1.175:1.176
@@ -164,4 +164,7 @@
#define VG_CLO_MAX_SFILES 10
+/* Default debugger command. */
+#define VG_CLO_DEFAULT_DBCOMMAND GDB_PATH " -nw %f %p"
+
/* Describes where logging output is to be sent. */
typedef
@@ -183,8 +186,8 @@ extern Int VG_(max_fd);
/* Should we stop collecting errors if too many appear? default: YES */
extern Bool VG_(clo_error_limit);
-/* Enquire about whether to attach to GDB at errors? default: NO */
-extern Bool VG_(clo_GDB_attach);
-/* The path to GDB? default: whatever ./configure found */
-extern Char* VG_(clo_GDB_path);
+/* Enquire about whether to attach to a debugger at errors? default: NO */
+extern Bool VG_(clo_db_attach);
+/* The debugger command? default: whatever gdb ./configure found */
+extern Char* VG_(clo_db_command);
/* Enquire about generating a suppression for each error? default: NO */
extern Bool VG_(clo_gen_suppressions);
@@ -1411,6 +1414,6 @@ extern Bool VG_(sysinfo_page_exists);
extern Addr VG_(sysinfo_page_addr);
-/* Something of a function looking for a home ... start up GDB. */
-extern void VG_(start_GDB) ( Int tid );
+/* Something of a function looking for a home ... start up debugger. */
+extern void VG_(start_debugger) ( Int tid );
/* VG_(bbs_done) in include/vg_skin.h */
--- valgrind/coregrind/vg_main.c #1.141:1.142
@@ -293,10 +293,10 @@ static void show_counts ( void )
/*====================================================================*/
-/* Start GDB and get it to attach to this process. Called if the user
- requests this service after an error has been shown, so she can
+/* Start debugger and get it to attach to this process. Called if the
+ user requests this service after an error has been shown, so she can
poke around and look at parameters, memory, etc. You can't
- meaningfully get GDB to continue the program, though; to continue,
- quit GDB. */
-void VG_(start_GDB) ( Int tid )
+ meaningfully get the debugger to continue the program, though; to
+ continue, quit the debugger. */
+void VG_(start_debugger) ( Int tid )
{
Int pid;
@@ -353,14 +353,49 @@ void VG_(start_GDB) ( Int tid )
ptrace(PTRACE_SETREGS, pid, NULL, ®s) == 0 &&
ptrace(PTRACE_DETACH, pid, NULL, SIGSTOP) == 0) {
- UChar buf[VG_(strlen)(VG_(clo_GDB_path)) + 100];
+ Char pidbuf[15];
+ Char file[30];
+ Char buf[100];
+ Char *bufptr;
+ Char *cmdptr;
- VG_(sprintf)(buf, "%s -nw /proc/%d/fd/%d %d",
- VG_(clo_GDB_path), VG_(main_pid), VG_(clexecfd), pid);
- VG_(message)(Vg_UserMsg, "starting GDB with cmd: %s", buf);
+ VG_(sprintf)(pidbuf, "%d", pid);
+ VG_(sprintf)(file, "/proc/%d/fd/%d", pid, VG_(clexecfd));
+
+ bufptr = buf;
+ cmdptr = VG_(clo_db_command);
+
+ while (*cmdptr) {
+ switch (*cmdptr) {
+ case '%':
+ switch (*++cmdptr) {
+ case 'f':
+ VG_(memcpy)(bufptr, file, VG_(strlen)(file));
+ bufptr += VG_(strlen)(file);
+ cmdptr++;
+ break;
+ case 'p':
+ VG_(memcpy)(bufptr, pidbuf, VG_(strlen)(pidbuf));
+ bufptr += VG_(strlen)(pidbuf);
+ cmdptr++;
+ break;
+ default:
+ *bufptr++ = *cmdptr++;
+ break;
+ }
+ break;
+ default:
+ *bufptr++ = *cmdptr++;
+ break;
+ }
+ }
+
+ *bufptr++ = '\0';
+
+ VG_(message)(Vg_UserMsg, "starting debugger with cmd: %s", buf);
res = VG_(system)(buf);
if (res == 0) {
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg,
- "GDB has detached. Valgrind regains control. We continue.");
+ "Debugger has detached. Valgrind regains control. We continue.");
} else {
VG_(message)(Vg_UserMsg, "Apparently failed!");
@@ -1364,6 +1399,6 @@ static void load_client(char* cl_argv[],
/* Define, and set defaults. */
Bool VG_(clo_error_limit) = True;
-Bool VG_(clo_GDB_attach) = False;
-Char* VG_(clo_GDB_path) = GDB_PATH;
+Bool VG_(clo_db_attach) = False;
+Char* VG_(clo_db_command) = VG_CLO_DEFAULT_DBCOMMAND;
Bool VG_(clo_gen_suppressions) = False;
Int VG_(sanity_level) = 1;
@@ -1473,7 +1508,7 @@ void usage ( void )
" --gen-suppressions=no|yes print suppressions for errors detected [no]\n"
-" --gdb-attach=no|yes start GDB when errors detected? [no]\n"
-" --gdb-path=/path/to/gdb path to the GDB to use [/usr/bin/gdb]\n"
-" --input-fd=<number> file descriptor for (gdb) input [0=stdin]\n"
+" --db-attach=no|yes start debugger when errors detected? [no]\n"
+" --db-command=<command> command to start debugger [gdb -nw %%f %%p]\n"
+" --input-fd=<number> file descriptor for input [0=stdin]\n"
"\n";
@@ -1656,11 +1691,11 @@ static void process_cmd_line_options
VG_(clo_error_limit) = False;
- else if (VG_CLO_STREQ(arg, "--gdb-attach=yes"))
- VG_(clo_GDB_attach) = True;
- else if (VG_CLO_STREQ(arg, "--gdb-attach=no"))
- VG_(clo_GDB_attach) = False;
+ else if (VG_CLO_STREQ(arg, "--db-attach=yes"))
+ VG_(clo_db_attach) = True;
+ else if (VG_CLO_STREQ(arg, "--db-attach=no"))
+ VG_(clo_db_attach) = False;
- else if (VG_CLO_STREQN(11,arg, "--gdb-path="))
- VG_(clo_GDB_path) = &arg[11];
+ else if (VG_CLO_STREQN(13,arg, "--db-command="))
+ VG_(clo_db_command) = &arg[13];
else if (VG_CLO_STREQ(arg, "--gen-suppressions=yes"))
@@ -1849,11 +1884,11 @@ static void process_cmd_line_options
VG_(clo_verbosity) = 0;
- if (VG_(clo_GDB_attach) && VG_(clo_trace_children)) {
+ if (VG_(clo_db_attach) && VG_(clo_trace_children)) {
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg,
- "--gdb-attach=yes conflicts with --trace-children=yes");
+ "--db-attach=yes conflicts with --trace-children=yes");
VG_(message)(Vg_UserMsg,
"Please choose one or the other, but not both.");
- VG_(bad_option)("--gdb-attach=yes and --trace-children=yes");
+ VG_(bad_option)("--db-attach=yes and --trace-children=yes");
}
--- valgrind/coregrind/vg_signals.c #1.58:1.59
@@ -1392,6 +1392,6 @@ static void vg_default_action(const vki_
}
- if (VG_(is_action_requested)( "Attach to GDB", & VG_(clo_GDB_attach) )) {
- VG_(start_GDB)( tid );
+ if (VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) )) {
+ VG_(start_debugger)( tid );
}
--- valgrind/coregrind/docs/coregrind_core.html #1.23:1.24
@@ -617,5 +617,5 @@
<code>---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----</code>
<p>
- The prompt's behaviour is the same as for the <code>--gdb-attach</code>
+ The prompt's behaviour is the same as for the <code>--db-attach</code>
option.
<p>
@@ -647,26 +647,26 @@
<br><p>
- <li><code>--gdb-attach=no</code> [default]<br>
- <code>--gdb-attach=yes</code>
+ <li><code>--db-attach=no</code> [default]<br>
+ <code>--db-attach=yes</code>
<p>When enabled, Valgrind will pause after every error shown,
and print the line
<br>
- <code>---- Attach to GDB ? --- [Return/N/n/Y/y/C/c] ----</code>
+ <code>---- Attach to debugger ? --- [Return/N/n/Y/y/C/c] ----</code>
<p>
Pressing <code>Ret</code>, or <code>N</code> <code>Ret</code>
or <code>n</code> <code>Ret</code>, causes Valgrind not to
- start GDB for this error.
+ start a debugger for this error.
<p>
<code>Y</code> <code>Ret</code>
or <code>y</code> <code>Ret</code> causes Valgrind to
- start GDB, for the program at this point. When you have
- finished with GDB, quit from it, and the program will continue.
- Trying to continue from inside GDB doesn't work.
+ start a debugger, for the program at this point. When you have
+ finished with the debugger, quit from it, and the program will continue.
+ Trying to continue from inside the debugger doesn't work.
<p>
<code>C</code> <code>Ret</code>
or <code>c</code> <code>Ret</code> causes Valgrind not to
- start GDB, and not to ask again.
+ start a debugger, and not to ask again.
<p>
- <code>--gdb-attach=yes</code> conflicts with
+ <code>--db-attach=yes</code> conflicts with
<code>--trace-children=yes</code>. You can't use them together.
Valgrind refuses to start up in this situation. 1 May 2002:
@@ -679,14 +679,19 @@
</li><br><p>
- <li><code>--gdb-path=/path/to/gdb</code>
- <p>This specifies how Valgrind will invoke GDB. By default, it
- will use whatever GDB is detected at build time,
+ <li><code>--db-command=<command></code> [default: gdb -nw %f %p]<br>
+ <p>This specifies how Valgrind will invoke the debugger. By
+ default it will use whatever GDB is detected at build time,
which is usually <code>/usr/bin/gdb</code>. Using this command,
- you can specify some alternative path to the GDB you want to
- use.
+ you can specify some alternative command to invoke the debugger
+ you want to use.
+ <p>
+ The command string given can include one or more instances of the
+ %p and %f expansions. Each instance of %p expands to the PID of
+ the process to be debugged and each instance of %f expands to
+ the path to the executable for the process to be debugged.
</li><br><p>
<li><code>--input-fd=<number></code> [default=0, stdin]<br>
- <p>When using <code>--gdb-attach=yes</code> and
+ <p>When using <code>--db-attach=yes</code> and
<code>--gen-suppressions=yes</code>, Valgrind will stop
so as to read keyboard input from you, when each error occurs.
|
|
From: Nicholas N. <nj...@ca...> - 2004-01-26 14:52:14
|
CVS commit by nethercote:
Remove some small unnecessary, out-of-date bits
M +0 -15 vg_default.c 1.21
--- valgrind/coregrind/vg_default.c #1.20:1.21
@@ -32,8 +32,4 @@
-/* These functions aren't intended to be run. Replacement functions used by
- * the chosen tool are substituted by compiling the tool into a .so and
- * LD_PRELOADing it. Nasty :) */
-
#include "vg_include.h"
@@ -67,15 +63,4 @@ void malloc_panic ( const Char* fn )
}
-#define MALLOC(proto) \
-__attribute__((weak)) \
-proto \
-{ \
- malloc_panic(__PRETTY_FUNCTION__); \
-}
-
-/* ---------------------------------------------------------------------
- Default functions
- ------------------------------------------------------------------ */
-
/*------------------------------------------------------------*/
/*--- Replacing malloc et al ---*/
|
|
From: Nicholas N. <nj...@ca...> - 2004-01-26 14:51:16
|
CVS commit by nethercote:
Fix comment
M +2 -2 vg_scheduler.c 1.140
--- valgrind/coregrind/vg_scheduler.c #1.139:1.140
@@ -2964,6 +2964,6 @@ void do_client_request ( ThreadId tid )
the replacement versions. For those that don't, we want to call
VG_(cli_malloc)() et al. We do this by calling SK_(malloc)(), which
- malloc-replacing tools must replace, but have its default definition
- call */
+ malloc-replacing tools must replace, but have the default definition
+ of SK_(malloc)() call VG_(cli_malloc)(). */
/* Note: for MALLOC and FREE, must set the appropriate "lock"... see
|
|
From: Josef W. <Jos...@gm...> - 2004-01-26 14:02:56
|
On Sunday 25 January 2004 16:53, Nicholas Nethercote wrote: > Josef, > > The topic of tracking function entry/exit has come up a few times on the > mailing lists recently. My usual answer is that it's difficult to do > correctly. However, you seem to do it with Calltree. I looked at the > source code a bit, and it looks like you are doing some reasonably > complicated things to get it right, eg. unwinding the stack. How robust > is your approach? Can you briefly explain how it works? A note before describing the mechanism: I need to have a helper call at start of every BB anyway, so I use this helper to do the tracking. This of course has some overhead, and perhaps can be avoided, but it seems to add to the robustness. I have a bug fix here for reentrent entering of a signal handler (2 bug reports). Otherwise I have no bug reports, so I assume that the mechanism to be quite robust. I have a shadow call stack for every thread. For signal handlers of a thread, I first PUSH a separation marker on the shadow stack, and use the stack as normal. The marker is used for unwinding when leaving the signal handler. This is fine as there is no scheduling among signal handlers of one thread. Instrumentation of calltree: * Store at the end of each basic block the jmpkind into a tool-global, static variable. * At the start of every BB, jump to a helper function. 
The helper function does the following regarding function call tracking: - for a control transfer to another ELF object/ELF section, override jmpkind with a CALL (*1) - for a control transfer to the 1st basic block of a function, override jmpkind with a CALL (*2) - do unwinding if needed (i.e, POPs of the shadow call stack) - if jmpkind is RET and there was no unwinding/POP: - if our call stack is empty, simulate a CALL lasting from beginning (with Valgrind 2.1.x, this is not needed any more, as we run on simulated CPU from first client instruction) - otherwise this is a JMP using a RET instruction (typically used in the runtime linker). Do a POP, setting previous BB address to call site and override jmpkind with a CALL. By this, you get 2 function calls from a calling site. - when jmpkind is a CALL, push new function call from previous BB to current BB on shadow call stack. - Save current BB address to be available for call to handler in next BB. Special care is needed at thread switches and enter/leave of signal handlers, as we need separate shadow call stacks. Known bug: We should check for the need of unwinding when ESP is explicitly written to. I hope this doesn't create too much overhead. Remarks: (*1) Jumps between ELF objects are function calls to a shared library. This is mainly done to catch the JMP from PLT code. (*2) This is what your function tracking skin/tool does. It is needed here mainly to catch tail recursion. In general, for functions doing a "return otherfunction()", GCC produces JMPs with -O2. Additional points: - If I need a name for a function, but there is no debug info, I use the instruction address minus the load offset of the corresponding ELF object (if there is one) to get a relative address for that ELF object. This offset can be used with objdump later in postprocessing tools (e.g. objdump). I would suggest this change even for cachegrind instead of a "???". - I introduced the ability to specify functions to be "skipped". 
This means that execution of these functions is attributed to the calling function. The default is to skip all functions located in PLT sections. Thus, in effect, costs of PLT functions are attributed to callers, and the call to a shared library function starts directly with code in the other ELF object. - As Vg 2.1.x does pointerchecking, the instrumentation can't write to memory space of Valgrind any longer. Currently, my tool needs "--pointercheck=no" to be able to run. Jeremy and me already agreed on replacing current LD/ST with a CLD/CST (Client Load/Store) with pointer check and keep original LD/ST for tool usage without pointerchecking. Looking at these things, it seems possible to do function tracking at end of a basic block instead of the beginning of the next BB. This way, we can perhaps avoid calls to helpers at every BB. From my point of view, it would be great to integrate optional function tracking into Valgrind core with some hooks. Josef |
|
From: Jeremy F. <je...@go...> - 2004-01-26 09:40:08
|
On Sun, 2004-01-25 at 16:57, Dirk Mueller wrote: > On Monday 26 January 2004 01:16, Jeremy Fitzhardinge wrote: > > > The old code was incorrect, because it just masked out a bit without > > checking the vendor ID. That bit is only defined for AMD CPUs. > > Exactly thats why the code was correct. The application that we're emulating > has to check before calling cpuid(0x80000001) if it is in fact a AMD CPU. If > they don't, well, they get undefined behaviour anyway. No, because other vendors can use 0x80000001 for other purposes. Intel has this reserved, for example. Just clearing the bit means you could be clearing some other feature flag for another vendor. > Besides that, the behaviour is defined: those cpus which don't support the > extension return 0x00 in all registers. masking out a bit there doesn't hurt > at all. Yes it does, because other vendors can (and do) have their own definitions for those operations. > Correct. IMHO, we should only expose those which exist at all on the original > host cpu. So basically we have to blacklist a few extensions which we don't > support. But that makes no sense. At any point, one of the currently reserved bits could come into use, and a piece of software will try to use instructions which are associated with that bit - requiring an upgrade to Valgrind to make any progress at all. If we clear all the unknown bits, then the program will either behave as it would on an older CPU, or crash anyway because it uses the new instructions without checking (as it would on any other CPU which doesn't implement that extension). After all, Valgrind's intent is not to emulate your actual host CPU, but the common useful subset of ia32 implementation - which your host CPU happens to be one of. 
> > For the latter, they're only relevent to kernels, and not > > user-mode code, since they don't functionally change the CPU's > > characteristics for user-mode - or if they do, then we don't emulate > > that change, and so should suppress the bit. > > euhm? when does invoking the cpuid instruction change the CPU behavior? Got an > example for that? It doesn't, but it does indicate CPU behavioural changes. The only ones which user-mode programs care about are instruction-set extensions. > I think its fine for an application to be able to query CPU characteristics, > like cache sizes etc even when running under valgrind. Yes, I agree. But that isn't part of the feature bitmasks. > IMHO as long as the characteristics doesn't affect valgrind emulation in any > way: don't hide the information. Instruction set changes do affect the emulation. All the others are a big "don't care". It's trivial to copy them through. > > results. I'm very uneasy about CPUID returning partially mangled > > undefined results; we should only return stuff which we *know* to be > > true, or at worst irrelevant. > > I think it should return all the information the "real" CPU would do too, > except for those bits what we *know* to be *false* on our CPU. Well, we don't know anything about the reserved bits, since they could come to mean anything at all in future. So we have to assume that they will indicate something we can't handle when they become defined. > > The old code was fragile and broken, because it always assumed > > 0x80000001 was returning AMD feature flags. > > Ok, on which CPU does it return something else? Intel reserve that output, so it could mean anything else. > Thats why we disabled the 3dnow! capabilities. We didn't disable the SSE ones > for that reason because a lot of code uses significantly different codepaths > nowadays when the CPU supports SSE. And if those codepaths trigger a bug in > your application, you do want to be able to find this bug using valgrind. 
If > running under valgrind makes your application avoid those code paths that > trigger the bug, then valgrind is pointless. And we don't want that, do > we? :-) Yes, enough people use SSE that it is worthwhile supporting it. Practically nobody uses 3dnow, so nobody made the effort. For any future extensions, we'll assume that they're useless until enough people make a fuss about them to want them implemented. One program which happens to use them isn't enough. J |
|
From: Dirk M. <dm...@gm...> - 2004-01-26 00:57:27
|
On Monday 26 January 2004 01:16, Jeremy Fitzhardinge wrote: > The old code was incorrect, because it just masked out a bit without > checking the vendor ID. That bit is only defined for AMD CPUs. Exactly thats why the code was correct. The application that we're emulating has to check before calling cpuid(0x80000001) if it is in fact a AMD CPU. If they don't, well, they get undefined behaviour anyway. Besides that, the behaviour is defined: those cpus which don't support the extension return 0x00 in all registers. masking out a bit there doesn't hurt at all. Looking at the kernel sources, I don't see any other CPU implementing this cpuid feature extension. according to sandpile.org no vendor is documented to support it in a different way either, and in addition, this bit has no overloaded meaning for other vendors besides AMD. I agree though that there are bits which indicate different things depending on the vendor, but this is not the case here. But you're right, the old cpuid check could have been more careful to only mask out when the cpu was in fact a AMD one. easy fix though, no need to change the design. > There are two kinds of features: those which indicate a CPU instruction > set extension, and those which indicate some other non-ISA change. In > the first case, we know exactly which instruction-set extensions we > support, so those are the only ones which we should expose to the > client. Correct. IMHO, we should only expose those which exist at all on the original host cpu. So basically we have to blacklist a few extensions which we don't support. > For the latter, they're only relevent to kernels, and not > user-mode code, since they don't functionally change the CPU's > characteristics for user-mode - or if they do, then we don't emulate > that change, and so should suppress the bit. euhm? when does invoking the cpuid instruction change the CPU behavior? Got an example for that? 
I think its fine for an application to be able to query CPU characteristics, like cache sizes etc even when running under valgrind. IMHO as long as the characteristics doesn't affect valgrind emulation in any way: don't hide the information. > results. I'm very uneasy about CPUID returning partially mangled > undefined results; we should only return stuff which we *know* to be > true, or at worst irrelevant. I think it should return all the information the "real" CPU would do too, except for those bits what we *know* to be *false* on our CPU. > The old code was fragile and broken, because it always assumed > 0x80000001 was returning AMD feature flags. Ok, on which CPU does it return something else? I'm fine with disabling the manipulation of 0x80000001 when the host CPU is not an AMD one (of course). Just a matter of either another cpuid instruction, or querying a global variable which we fill during initialization. > emulation, but in the meantime the correct thing to do is say we don't > support it. Thats why we disabled the 3dnow! capabilities. We didn't disable the SSE ones for that reason because a lot of code uses significantly different codepaths nowadays when the CPU supports SSE. And if those codepaths trigger a bug in your application, you do want to be able to find this bug using valgrind. If running under valgrind makes your application avoid those code paths that trigger the bug, then valgrind is pointless. And we don't want that, do we? :-) |
|
From: Jeremy F. <je...@go...> - 2004-01-26 00:16:37
|
On Sun, 2004-01-25 at 14:53, Dirk Mueller wrote: > No of course not, but thats not what your patch was about, was it?. For > example, up to the best of my knowledge we unmasked the 3dnow! feature for > example before already. So no application correctly using cpuid was ever > going to run into 3dnow! instructions. The old code was incorrect, because it just masked out a bit without checking the vendor ID. That bit is only defined for AMD CPUs. > > If we claim > > to support SSE3 just because the underlying CPU supports it, how does > > that help anyone? > > It doesn't. But do you want to "blacklist" features which we for sure not > support, or do you want to "whitelist" features which we support? > > since its hard to say which kind of features some obscure CPU might have, I > don't think whitelisting makes sense. We know the handful of features we > don't support, so blacklist them. Leave the rest alone. Don't disturb the > emulation when there is no urgent need to do so. There are two kinds of features: those which indicate a CPU instruction set extension, and those which indicate some other non-ISA change. In the first case, we know exactly which instruction-set extensions we support, so those are the only ones which we should expose to the client. For the latter, they're only relevent to kernels, and not user-mode code, since they don't functionally change the CPU's characteristics for user-mode - or if they do, then we don't emulate that change, and so should suppress the bit. Since most of CPUID is defined in a vendor-specific way, we don't really know what it's telling the client without explicitly parsing all the results. I'm very uneasy about CPUID returning partially mangled undefined results; we should only return stuff which we *know* to be true, or at worst irrelevant. 
> Coincidentally, this was exactly what the old code was doing, and I'd have > liked to see some discussion first before we radically change such a > decision, especially when there was apparently no concrete bug fix pending. The old code was fragile and broken, because it always assumed 0x80000001 was returning AMD feature flags. We may as well replace it with something which does the job properly, in a moderately extensible way. Every interpretation and manipulation of the output of CPUID must be done in the context of a (vendor ID, operation code) tuple. > > The specific thing I wanted to address here was not reporting CPU > > capabilities which Valgrind doesn't implement. Since there's a new wave > > of CPUs being released with new feature bits (most importantly Intel's > > Prescott, but also AMD's Athlon64), we're going to see new feature flags > > appearing, and they need to be handled correctly. > > Sure, when they cause a known problem, which we can't fix otherwise right now, > we disable them. I for one would like to know about those problems first for > deciding if its better to disable the feature in the cpuid, or instead trying > to implement the feature in the valgrind emulation. Well, if we can get away from adding yet another instruction set extension, we should do so where-ever possible. If it's enough to say we don't support it, then that's the correct fix. It's the correct fix for extensions we do know about but don't care about (3dnow), and its the correct fix for any future extension (SSE3, etc). If we see a significant number of programs requiring, say, SSE3, then we can add the emulation, but in the meantime the correct thing to do is say we don't support it. > When you're disabling the feature in the first place, you'll never get told > about if that feature is working or not, or what you have to look at to get > it working. Disabling it is always the right first implementation. J |
|
From: Tom H. <th...@cy...> - 2004-01-25 22:56:15
|
In message <107...@ix...>
Jeremy Fitzhardinge <je...@go...> wrote:
> On Sun, 2004-01-25 at 02:12, Tom Hughes wrote:
> > We should really handle extended flags and set the MMXEXT bit as we
> > do support all the MMXEXT instructions on Athlons.
>
> Oh, I hadn't realized we were supporting some of the AMD extensions.
> Well, I guess we need to leave the vendor name unmolested.
Well it's only MMXEXT at the moment, which is actually a subset of
the original SSE extensions, but earlier Athlons had MMXEXT but not
full SSE support.
Tom
--
Tom Hughes (th...@cy...)
Software Engineer, Cyberscience Corporation
http://www.cyberscience.com/
|
|
From: Dirk M. <dm...@gm...> - 2004-01-25 22:54:05
|
On Sunday 25 January 2004 22:59, Jeremy Fitzhardinge wrote: > Those are just typos. The real issue is that should we claim to support > instruction set features which we don't actually support. No of course not, but thats not what your patch was about, was it?. For example, up to the best of my knowledge we unmasked the 3dnow! feature for example before already. So no application correctly using cpuid was ever going to run into 3dnow! instructions. > If we claim > to support SSE3 just because the underlying CPU supports it, how does > that help anyone? It doesn't. But do you want to "blacklist" features which we for sure not support, or do you want to "whitelist" features which we support? since its hard to say which kind of features some obscure CPU might have, I don't think whitelisting makes sense. We know the handful of features we don't support, so blacklist them. Leave the rest alone. Don't disturb the emulation when there is no urgent need to do so. Coincidentally, this was exactly what the old code was doing, and I'd have liked to see some discussion first before we radically change such a decision, especially when there was apparently no concrete bug fix pending. > The specific thing I wanted to address here was not reporting CPU > capabilities which Valgrind doesn't implement. Since there's a new wave > of CPUs being released with new feature bits (most importantly Intel's > Prescott, but also AMD's Athlon64), we're going to see new feature flags > appearing, and they need to be handled correctly. Sure, when they cause a known problem, which we can't fix otherwise right now, we disable them. I for one would like to know about those problems first for deciding if its better to disable the feature in the cpuid, or instead trying to implement the feature in the valgrind emulation. When you're disabling the feature in the first place, you'll never get told about if that feature is working or not, or what you have to look at to get it working. Dirk |
|
From: Eyal L. <ey...@ey...> - 2004-01-25 22:48:31
|
Pawel Kot wrote: > > On Thu, 22 Jan 2004, Marcelo Tosatti wrote: > > > What happened to this? > > > > Was valgrind fixed or its compilation is still broken? > > I have successfully built valgrind versions 2.0.0 and 2.1.0 with 2.4.24 > includes. I have explicitely copied the includes from the sources to be > sure it is correct. > > pkot@laptok:~$ gcc -v > [...] > gcc version 3.2.2 Sure. The problem only started with 2.4.25-pre. It was clear from the start that linux headers changed. [BTW, it really started with 2.4.24-pre, but this series was withdrawn and became 2.4.25-pre, hence the 2.4.24 in the title which I now changed]. The question is "who is at fault"? Are we (vg) using kernel headers incorrectly and were now caught, or did the linux people really break their headers. Is any valgrind developer testing with 2.4.25-pre? This is not an area I am familiar with, it is best handled by a vg person who knows about the use of asm/timex.h. -- Eyal Lebedinsky (ey...@ey...) <http://samba.org/eyal/> |
|
From: Jeremy F. <je...@go...> - 2004-01-25 21:59:33
|
On Sun, 2004-01-25 at 13:27, Dirk Mueller wrote: > On Sunday 25 January 2004 18:46, Julian Seward wrote: > > > I think a case can be made for both points of view. However, it > > seems to me that Jeremy's approach is reasonable enough -- in fact > > I quite like the sound of it. > > I complain less about the Vendorname change (though I find it pretty > pointless, it just introduces an emulation breakage without any good reason > for it as far as I can see), but about the "we only tell the user about those > flags we know about". There are so many cpu's out there, and so far the only > problem we had was that we don't support 3dnow!. I think it is wrong trying > to artificially limit the "features" we advertise from those of the host we > run on. Tom already found two features that were disabled with this patch > which we support just fine - there might be more. Those are just typos. The real issue is that should we claim to support instruction set features which we don't actually support. If we claim to support SSE3 just because the underlying CPU supports it, how does that help anyone? All the other feature flags are things which just don't matter to user-mode programs. They don't represent any kind of instruction set extension; they're related to other things like page-table format, power management, etc. We could pass them through, but there's no strong reason to do so. > Personally, I find it rather frustrating during debugging that whatever I'm > trying to look at is "healed" by running under valgrind. This is just another > change that purposefully breaks the emulation for (apparently) no good > reason. There are a few that are nasty, and are remaining: like syscalls > being terrible fast compared to the "normal" case, and the resulting "timing" > based races. Hm? What's this? > I haven't tested yet. But to ask another question: which problems does this > change fix? Be specific, please. 
> > Just give me one concrete bug that can't be fixed without that patch, and I'll > shut up. The specific thing I wanted to address here was not reporting CPU capabilities which Valgrind doesn't implement. Since there's a new wave of CPUs being released with new feature bits (most importantly Intel's Prescott, but also AMD's Athlon64), we're going to see new feature flags appearing, and they need to be handled correctly. The ValgrindVCPU thing was more a spur of the moment thing, and I'll think I'll back it out, since it obscures real information. J |
|
From: Jeremy F. <je...@go...> - 2004-01-25 21:53:19
|
On Sun, 2004-01-25 at 08:06, Nicholas Nethercote wrote: > "ValgrindVCPU" seems ok to me, for two reasons: > > 1. Theoretical: The difference between Valgrind's VCPU and the underlying > one are greater than just timing. For example, my Athlon supports 3dNow! > instructions, Valgrind doesn't. > > 2. Practical: It's hard to imagine anyone actually using the vendor string > ID in a real program; or certainly not in a way that changes any code > paths taken. Well, that's not quite true. The vendorname is what you need to look at to see what vendor-specific extensions are available (requests >0x80000000). One of the reasons for changing the vendor name is that it gives us scope to add our own vendor extensions. Unfortunately, if we're implementing vendor-specific extensions like AMD's MMXext, then we can't play this game. We need to pass through the vendor string so that clients can know what parts of the CPUID request space they can use. I still think its a good idea to suppress all the extensions which we either don't know about, or aren't relevant to user-mode programs. Also, things like the cache/TLB details are (naturally) very implementation specific, so the vendor ID will play a role there. We'll care about that when we get to self-virtualizing. J |
|
From: Jeremy F. <je...@go...> - 2004-01-25 21:41:36
|
On Sun, 2004-01-25 at 02:12, Tom Hughes wrote: > We should really handle extended flags and set the MMXEXT bit as we > do support all the MMXEXT instructions on Athlons. Oh, I hadn't realized we were supporting some of the AMD extensions. Well, I guess we need to leave the vendor name unmolested. J |
|
From: Dirk M. <dm...@gm...> - 2004-01-25 21:27:11
|
On Sunday 25 January 2004 18:46, Julian Seward wrote: > I think a case can be made for both points of view. However, it > seems to me that Jeremy's approach is reasonable enough -- in fact > I quite like the sound of it. I complain less about the Vendorname change (though I find it pretty pointless, it just introduces an emulation breakage without any good reason for it as far as I can see), but about the "we only tell the user about those flags we know about". There are so many cpu's out there, and so far the only problem we had was that we don't support 3dnow!. I think it is wrong trying to artificially limit the "features" we advertise from those of the host we run on. Tom already found two features that were disabled with this patch which we support just fine - there might be more. > If we get a lot of people complaining, > we can always back it out and/or modify it. How many people do you think will be able to track back that whatever weird behaviour they see when not running under valgrind, and is gone when running under valgrind, to exactly this change? So how do we expect to get a lot of complains? Personally, I find it rather frustrating during debugging that whatever I'm trying to look at is "healed" by running under valgrind. This is just another change that purposefully breaks the emulation for (apparently) no good reason. There are a few that are nasty, and are remaining: like syscalls being terrible fast compared to the "normal" case, and the resulting "timing" based races. Adding another one on top doesn't make things better. > > What do you gain by breaking code which you don't have the sources of > > (like for example the nvidia dri stuff) ? > Be specific -- what problem(s) is this change giving you? I haven't tested yet. But to ask another question: which problems does this change fix? Be specific, please. Just give me one concrete bug that can't be fixed without that patch, and I'll shut up. Dirk "if it ain't broken, don't fix it" |
|
From: Jeremy F. <je...@go...> - 2004-01-25 20:52:49
|
On Sun, 2004-01-25 at 10:35, Nicholas Nethercote wrote: > Attached is a my attempt to add epoll support to the CVS HEAD. I'm not > sure about the use of the "#ifdef KERNEL_2_6" in vg_syscalls.c, however. > It was my attempt to cope with "struct epoll_event" not being available in > 2.4 and earlier kernels. I would be inclined to copy the definition into vg_kerneliface.h, and make it all unconditional. If the underlying kernel doesn't support the syscall, then it will fail properly. As much as possible, I'd like to eliminate compile-time dependencies on particular kernel versions. J |
|
From: Nicholas N. <nj...@ca...> - 2004-01-25 20:50:49
|
CVS commit by nethercote:
Update description of Robert's patches.
M +2 -2 related.html 1.5
--- devel-home/valgrind/related.html #1.4:1.5
@@ -34,6 +34,6 @@
<li>Robert Walsh has two useful
<a href="http://www.durables.org/software/valgrind/">patches</a>. One
- adds watchpoints on memory locations, the other adds file descriptor
- leak checking.
+ adds watchpoints on memory locations, the other adds support for pool-based
+ allocators.
<p>
</ul>
|
|
From: Nicholas N. <nj...@ca...> - 2004-01-25 20:49:13
|
CVS commit by nethercote:
Include results of 2 late surveys.
M +29 -26 survey-summary 1.2
--- devel-home/valgrind/survey-summary #1.1:1.2
@@ -5,7 +5,7 @@
directly in the past 18 months (10 of those bounced).
-Got 114 full responses (plus 2 or 3 that gave no useful info).
+Got 116 full responses (plus 2 or 3 that gave no useful info).
-113 were in English. 1 was in French. Fortunately the French was pretty easy.
+115 were in English. 1 was in French. Fortunately the French was pretty easy.
Nationalities of the 226 people directly contacted (based on email suffixes;
@@ -156,5 +156,5 @@
private: lots of small personal projects.
-other: file format translator, job scheduling system.
+other: CAD, file format translator, job scheduling system.
unexpected: helped one guy learning C++, esp. for understanding destructors.
@@ -165,11 +165,12 @@
tell if they use both, or just consider them equivalent.
-C 54
-C++ 50
+C 56
+C++ 52
Fortran 6
Java 3
-asm 2
+asm 3
+Python 2
+TCL/TK 1
Objective C 1
-Python 1
Pike 1
ExaScript 1
@@ -184,5 +185,5 @@
two 7
~5 12
-~10 9
+~10 10
~15 4
~20 2
@@ -205,5 +206,5 @@
Raw figures:
-Memcheck 100% - 47, 99% - 5, 97% - 1, 95% - 11, 90% - 13, 80% - 8, 75% - 1,
+Memcheck 100% - 48, 99% - 5, 97% - 1, 95% - 11, 90% - 14, 80% - 8, 75% - 1,
70% - 3, 60% - 3, 50% - 2, 40% - 1, 30% - 1, 33% - 1, 25% - 1,
20% - 3, 10% - 2, used - 1
@@ -212,5 +213,5 @@
5% - 4
-Calltree 100% - 1, 80% - 2, 50% - 1, 40% - 1, 25% - 2, 20% - 7, 10% - 7,
+Calltree 100% - 1, 80% - 2, 50% - 1, 40% - 1, 25% - 2, 20% - 7, 10% - 8,
5% - 6, 1% - 1, used - 2
@@ -232,6 +233,6 @@
sum %
--- -
-Memcheck: 8920 85%
-Calltree: 641 6%
+Memcheck: 9110 85%
+Calltree: 651 6%
Addrcheck: 583 6%
Cachegrind: 234 2%
@@ -295,7 +296,7 @@
Event-based:
- when a bug occurs/suspected 41
- before releases 17
- on big changes 8
+ when a bug occurs/suspected 42
+ before releases 19
+ on big changes 9
Calltree/KCachegrind when I'm bored 2
on every change 1
@@ -319,6 +320,6 @@
command line, or via a script.
-in automated testing 11
-manually 102
+in automated testing 13
+manually 103
via script/makefile
(to avoid long command lines) 11
@@ -406,5 +407,5 @@
purify (?):
Valgrind pros:
- V easier to run 12
+ V easier to run 13
V is better 7
V has no horrible licence server 4
@@ -425,5 +426,5 @@
V finds free/mismatch errors 1
Purify pros:
- P GUI is nicer 7
+ P GUI is nicer 8
P faster 3
P allows interactive leak checks 2
@@ -661,5 +662,5 @@
usage:
ease of use/no recompilation 40
- "it works" 13
+ "it works"/"it just works" 14
more convenient to run than GDB 1
programs sometimes seg fault normally,
@@ -698,4 +699,5 @@
finds most bugs 2
finds bugs I wouldn't otherwise know about 2
+ finds uninitialised errors 2
find bugs more easily than with traditional tools 1
finds memory overruns 1
@@ -707,5 +709,4 @@
full code coverage 1
tests thing no other free software can test 1
- only way to find uninitialised errors 1
bit-level accuracy is good 1
@@ -897,4 +898,5 @@
threading/syscall msgs could be better 1
lack of type information in error messages 1
+ some errors could give more information 1
skins, usage:
@@ -915,4 +917,5 @@
code is complex 1
reinvents the wheel (viz. bochs, QEMU) 1
+ KCachegrind takes some understanding 1
10 had no complaints, 6 didn't answer, which presumably means no complaints.
@@ -936,5 +939,5 @@
# Good things about non-software stuff
-generally happy 71
+generally happy 72
[Ie. answered "yes" to the "are you happy with the way Valgrind is developed"
question. Some also had extra comments/quibbles.]
@@ -1075,6 +1078,6 @@
generally yes 5
3,2,1 total
-win32/2000/XP 3,16,15 56
-Solaris 5,12, 8 47
+win32/2000/XP 4,16,15 59
+Solaris 6,13, 8 52
OS X/Darwin 2, 5, 6 22
FreeBSD 1, 8, 4 21
@@ -1121,8 +1124,8 @@
generally yes 6
+SPARC 0, 17, 6 40
PowerPC 5, 7, 10 39
-SPARC 0, 16, 6 38
AMD-64 1, 11, 5 30
-ia64 1, 4, 4 15
+ia64 1, 5, 4 17
Power(4) 2, 3, 1 13
something 64-bit 1, 3, 0 9
|
|
From: Nicholas N. <nj...@ca...> - 2004-01-25 20:34:24
|
CVS commit by nethercote:
staticalise
M +1 -1 vg_errcontext.c 1.52
--- valgrind/coregrind/vg_errcontext.c #1.51:1.52
@@ -192,5 +192,5 @@ void construct_error ( Error* err, Threa
}
-void gen_suppression(Error* err)
+static void gen_suppression(Error* err)
{
Int i;
|
|
From: Dirk M. <dm...@gm...> - 2004-01-25 19:53:40
|
On Sunday 25 January 2004 20:30, Nicholas Nethercote wrote: > Anti-globalisation > -void VG_(gen_suppression)(Error* err) > +void gen_suppression(Error* err) static? |
|
From: Nicholas N. <nj...@ca...> - 2004-01-25 19:30:59
|
CVS commit by nethercote:
Anti-globalisation
M +2 -2 vg_errcontext.c 1.51
M +0 -2 vg_include.h 1.175
--- valgrind/coregrind/vg_errcontext.c #1.50:1.51
@@ -192,5 +192,5 @@ void construct_error ( Error* err, Threa
}
-void VG_(gen_suppression)(Error* err)
+void gen_suppression(Error* err)
{
Int i;
@@ -260,5 +260,5 @@ void do_actions_on_error(Error* err, Boo
if (VG_(is_action_requested)( "Print suppression",
& VG_(clo_gen_suppressions) )) {
- VG_(gen_suppression)(err);
+ gen_suppression(err);
}
}
--- valgrind/coregrind/vg_include.h #1.174:1.175
@@ -1282,6 +1282,4 @@ extern void VG_(show_all_errors) (
extern Bool VG_(is_action_requested) ( Char* action, Bool* clo );
-extern void VG_(gen_suppression) ( Error* err );
-
extern UInt VG_(n_errs_found);
|
|
From: Nicholas N. <nj...@ca...> - 2004-01-25 19:07:10
|
On Sat, 24 Jan 2004, Julian Seward wrote: > I think we should remove support for --stop-after. Attached patch does so. N |
|
From: Nicholas N. <nj...@ca...> - 2004-01-25 18:35:08
|
On Tue, 20 Jan 2004, Tom Hughes wrote: > Mukund has confirmed to me that it is only normal kernel locking that > he is talking about, so I think it is only epoll_wait that we need to > consider as blocking. Attached is a my attempt to add epoll support to the CVS HEAD. I'm not sure about the use of the "#ifdef KERNEL_2_6" in vg_syscalls.c, however. It was my attempt to cope with "struct epoll_event" not being available in 2.4 and earlier kernels. Also, I'm not super confident about the checking of the arguments; my patch is based on the patch at www.fefe.de/diffs/, but I changed the argument checking a little bit, because I don't think it was correct. Finally, I haven't actually tried it because my machine is still running a 2.4 kernel... N |