You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
|
|
|
|
1
(6) |
|
2
(4) |
3
(9) |
4
(11) |
5
(16) |
6
(6) |
7
(1) |
8
(11) |
|
9
(11) |
10
(6) |
11
(10) |
12
(23) |
13
(23) |
14
(6) |
15
(10) |
|
16
(5) |
17
(13) |
18
(9) |
19
(4) |
20
(6) |
21
(16) |
22
(3) |
|
23
(5) |
24
(7) |
25
(6) |
26
(4) |
27
(8) |
28
|
29
(3) |
|
30
(2) |
31
(17) |
|
|
|
|
|
|
From: <sv...@va...> - 2015-08-04 21:26:18
|
Author: florian
Date: Tue Aug 4 22:26:10 2015
New Revision: 15486
Log:
Fix printf format inconsistencies as pointed out by gcc -Wformat-signedness.
Modified:
trunk/memcheck/mc_errors.c
trunk/memcheck/mc_leakcheck.c
trunk/memcheck/mc_main.c
trunk/memcheck/mc_malloc_wrappers.c
Modified: trunk/memcheck/mc_errors.c
==============================================================================
--- trunk/memcheck/mc_errors.c (original)
+++ trunk/memcheck/mc_errors.c Tue Aug 4 22:26:10 2015
@@ -375,8 +375,8 @@
lr->num_blocks, d_num_blocks,
str_leak_lossmode(lr->key.state),
n_this_record, n_total_records );
- emit( " <leakedbytes>%ld</leakedbytes>\n", lr->szB);
- emit( " <leakedblocks>%d</leakedblocks>\n", lr->num_blocks);
+ emit( " <leakedbytes>%lu</leakedbytes>\n", lr->szB);
+ emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks);
emit( " </xwhat>\n" );
}
VG_(pp_ExeContext)(lr->key.allocated_at);
@@ -439,7 +439,7 @@
MC_(any_value_errors) = True;
if (xml) {
emit( " <kind>UninitValue</kind>\n" );
- emit( " <what>Use of uninitialised value of size %ld</what>\n",
+ emit( " <what>Use of uninitialised value of size %lu</what>\n",
extra->Err.Value.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.Value.origin_ec)
@@ -448,7 +448,7 @@
} else {
/* Could also show extra->Err.Cond.otag if debugging origin
tracking */
- emit( "Use of uninitialised value of size %ld\n",
+ emit( "Use of uninitialised value of size %lu\n",
extra->Err.Value.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.Value.origin_ec)
@@ -594,7 +594,7 @@
if (xml) {
emit( " <kind>Invalid%s</kind>\n",
extra->Err.Addr.isWrite ? "Write" : "Read" );
- emit( " <what>Invalid %s of size %ld</what>\n",
+ emit( " <what>Invalid %s of size %lu</what>\n",
extra->Err.Addr.isWrite ? "write" : "read",
extra->Err.Addr.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
@@ -602,7 +602,7 @@
&extra->Err.Addr.ai,
extra->Err.Addr.maybe_gcc );
} else {
- emit( "Invalid %s of size %ld\n",
+ emit( "Invalid %s of size %lu\n",
extra->Err.Addr.isWrite ? "write" : "read",
extra->Err.Addr.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
Modified: trunk/memcheck/mc_leakcheck.c
==============================================================================
--- trunk/memcheck/mc_leakcheck.c (original)
+++ trunk/memcheck/mc_leakcheck.c Tue Aug 4 22:26:10 2015
@@ -1875,11 +1875,11 @@
&& addr_in_reg < searched_wpa + searched_szB) {
if (addr_in_reg == searched_wpa)
VG_(umsg)
- ("tid %d register %s pointing at %#lx\n",
+ ("tid %u register %s pointing at %#lx\n",
tid, regname, searched_wpa);
else
VG_(umsg)
- ("tid %d register %s interior pointing %lu bytes inside %#lx\n",
+ ("tid %u register %s interior pointing %lu bytes inside %#lx\n",
tid, regname, (long unsigned) addr_in_reg - searched_wpa,
searched_wpa);
}
Modified: trunk/memcheck/mc_main.c
==============================================================================
--- trunk/memcheck/mc_main.c (original)
+++ trunk/memcheck/mc_main.c Tue Aug 4 22:26:10 2015
@@ -6122,7 +6122,7 @@
VG_(printf) ("\n");
if (unaddressable) {
VG_(printf)
- ("Address %p len %ld has %d bytes unaddressable\n",
+ ("Address %p len %lu has %d bytes unaddressable\n",
(void *)address, szB, unaddressable);
}
}
@@ -6272,11 +6272,11 @@
case -1: break;
case 0: /* addressable */
if (is_mem_addressable ( address, szB, &bad_addr ))
- VG_(printf) ("Address %p len %ld addressable\n",
+ VG_(printf) ("Address %p len %lu addressable\n",
(void *)address, szB);
else
VG_(printf)
- ("Address %p len %ld not addressable:\nbad address %p\n",
+ ("Address %p len %lu not addressable:\nbad address %p\n",
(void *)address, szB, (void *) bad_addr);
MC_(pp_describe_addr) (address);
break;
@@ -6284,7 +6284,7 @@
res = is_mem_defined ( address, szB, &bad_addr, &otag );
if (MC_AddrErr == res)
VG_(printf)
- ("Address %p len %ld not addressable:\nbad address %p\n",
+ ("Address %p len %lu not addressable:\nbad address %p\n",
(void *)address, szB, (void *) bad_addr);
else if (MC_ValueErr == res) {
okind = otag & 3;
@@ -6300,7 +6300,7 @@
default: tl_assert(0);
}
VG_(printf)
- ("Address %p len %ld not defined:\n"
+ ("Address %p len %lu not defined:\n"
"Uninitialised value at %p%s\n",
(void *)address, szB, (void *) bad_addr, src);
ecu = otag & ~3;
@@ -6310,7 +6310,7 @@
}
}
else
- VG_(printf) ("Address %p len %ld defined\n",
+ VG_(printf) ("Address %p len %lu defined\n",
(void *)address, szB);
MC_(pp_describe_addr) (address);
break;
@@ -6390,7 +6390,7 @@
gdb_xb (address + szB - szB % 8, szB % 8, res);
if (unaddressable) {
VG_(printf)
- ("Address %p len %ld has %d bytes unaddressable\n",
+ ("Address %p len %lu has %d bytes unaddressable\n",
(void *)address, szB, unaddressable);
}
}
@@ -7392,7 +7392,7 @@
static void print_SM_info(const HChar* type, Int n_SMs)
{
VG_(message)(Vg_DebugMsg,
- " memcheck: SMs: %s = %d (%ldk, %ldM)\n",
+ " memcheck: SMs: %s = %d (%luk, %luM)\n",
type,
n_SMs,
n_SMs * sizeof(SecMap) / 1024UL,
@@ -7409,18 +7409,18 @@
" memcheck: sanity checks: %d cheap, %d expensive\n",
n_sanity_cheap, n_sanity_expensive );
VG_(message)(Vg_DebugMsg,
- " memcheck: auxmaps: %lld auxmap entries (%lldk, %lldM) in use\n",
+ " memcheck: auxmaps: %llu auxmap entries (%lluk, %lluM) in use\n",
n_auxmap_L2_nodes,
n_auxmap_L2_nodes * 64,
n_auxmap_L2_nodes / 16 );
VG_(message)(Vg_DebugMsg,
- " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10\n",
+ " memcheck: auxmaps_L1: %llu searches, %llu cmps, ratio %llu:10\n",
n_auxmap_L1_searches, n_auxmap_L1_cmps,
(10ULL * n_auxmap_L1_cmps)
/ (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
);
VG_(message)(Vg_DebugMsg,
- " memcheck: auxmaps_L2: %lld searches, %lld nodes\n",
+ " memcheck: auxmaps_L2: %llu searches, %llu nodes\n",
n_auxmap_L2_searches, n_auxmap_L2_nodes
);
@@ -7444,7 +7444,7 @@
max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
VG_(message)(Vg_DebugMsg,
- " memcheck: max sec V bit nodes: %d (%ldk, %ldM)\n",
+ " memcheck: max sec V bit nodes: %d (%luk, %luM)\n",
max_secVBit_nodes, max_secVBit_szB / 1024,
max_secVBit_szB / (1024 * 1024));
VG_(message)(Vg_DebugMsg,
@@ -7452,7 +7452,7 @@
sec_vbits_new_nodes + sec_vbits_updates,
sec_vbits_new_nodes, sec_vbits_updates );
VG_(message)(Vg_DebugMsg,
- " memcheck: max shadow mem size: %ldk, %ldM\n",
+ " memcheck: max shadow mem size: %luk, %luM\n",
max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
if (MC_(clo_mc_level) >= 3) {
@@ -7472,8 +7472,8 @@
stats_ocacheL1_found_at_N,
stats_ocacheL1_movefwds );
VG_(message)(Vg_DebugMsg,
- " ocacheL1: %'12lu sizeB %'12u useful\n",
- (UWord)sizeof(OCache),
+ " ocacheL1: %'12lu sizeB %'12d useful\n",
+ (SizeT)sizeof(OCache),
4 * OC_W32S_PER_LINE * OC_LINES_PER_SET * OC_N_SETS );
VG_(message)(Vg_DebugMsg,
" ocacheL2: %'12lu refs %'12lu misses\n",
Modified: trunk/memcheck/mc_malloc_wrappers.c
==============================================================================
--- trunk/memcheck/mc_malloc_wrappers.c (original)
+++ trunk/memcheck/mc_malloc_wrappers.c Tue Aug 4 22:26:10 2015
@@ -680,7 +680,7 @@
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
+ VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %u, %d)\n",
pool, rzB, is_zeroed);
VG_(get_and_pp_StackTrace)
(VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
@@ -779,7 +779,7 @@
}
VG_(message)(Vg_UserMsg,
- "Total mempools active: %d pools, %d chunks\n",
+ "Total mempools active: %u pools, %u chunks\n",
total_pools, total_chunks);
tick = 0;
}
@@ -792,7 +792,7 @@
for (i = 0; i < n_chunks-1; i++) {
if (chunks[i]->data > chunks[i+1]->data) {
VG_(message)(Vg_UserMsg,
- "Mempool chunk %d / %d is out of order "
+ "Mempool chunk %u / %u is out of order "
"wrt. its successor\n",
i+1, n_chunks);
bad = 1;
@@ -803,7 +803,7 @@
for (i = 0; i < n_chunks-1; i++) {
if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
VG_(message)(Vg_UserMsg,
- "Mempool chunk %d / %d overlaps with its successor\n",
+ "Mempool chunk %u / %u overlaps with its successor\n",
i+1, n_chunks);
bad = 1;
}
@@ -811,11 +811,11 @@
if (bad) {
VG_(message)(Vg_UserMsg,
- "Bad mempool (%d chunks), dumping chunks for inspection:\n",
+ "Bad mempool (%u chunks), dumping chunks for inspection:\n",
n_chunks);
for (i = 0; i < n_chunks; ++i) {
VG_(message)(Vg_UserMsg,
- "Mempool chunk %d / %d: %ld bytes "
+ "Mempool chunk %u / %u: %lu bytes "
"[%lx,%lx), allocated:\n",
i+1,
n_chunks,
@@ -834,7 +834,7 @@
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
+ VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
pool, addr, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@@ -884,7 +884,7 @@
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg,
- "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
+ "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
pool, addr, mc->szB + 0UL);
}
@@ -902,7 +902,7 @@
VgHashNode** chunks;
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
+ VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
pool, addr, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@@ -1036,7 +1036,7 @@
ThreadId tid = VG_(get_running_tid)();
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
+ VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
pool, addrA, addrB, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
|
|
From: Florian K. <fl...@ei...> - 2015-08-04 20:58:29
|
On 04.08.2015 20:22, Matthias Schwarzott wrote: > Am 04.08.2015 um 17:58 schrieb sv...@va...: >> Author: florian >> Date: Tue Aug 4 16:58:41 2015 >> New Revision: 15482 >> >> Log: >> Rewrite memcheck's event counter machinery. >> - unique event counters >> - simplify PROF_EVENT >> - do not allocate more event counters than needed >> - compile cleanly >> >> Modified: >> trunk/memcheck/mc_include.h >> trunk/memcheck/mc_leakcheck.c >> trunk/memcheck/mc_main.c >> > > Hi Florian, > > this is definitely a nice solution. > If you do not like the two lists - enum constant definition and name > assignment - to be maintained, there is a possible improvement: > Have one header file with custom macro calls to define the events and > include it twice with different macro definitions. > Matthias, yes, that would have been an alternative implementation, more concise. Now that you mention it I remember having seen this trick being used in the GCC source tree. But I did not think of it at the time. And now I'm too lazy to change it. :) That being said, I'm not opposed to receiving a patch. Note, there is a safe guard in the code. Should somebody add an enumerator and forget to define a corresponding event counter name, there will be an assert. Florian |
|
From: Philippe W. <phi...@sk...> - 2015-08-04 19:27:20
|
On Tue, 2015-08-04 at 08:35 +0200, Matthias Schwarzott wrote: > * Fix missing compiler options: [PATCH 1/2] Fix compilation of libvex > tests when additional compiler options are needed. This is committed as revision 15485. Thanks for the patch. Philippe |
|
From: <sv...@va...> - 2015-08-04 19:26:00
|
Author: philippe
Date: Tue Aug 4 20:25:53 2015
New Revision: 15485
Log:
Ensure libvex tests in none are compiling on amd64, when configuring for 32 bits only
Patch from Matthias Schwarzott
Modified:
trunk/none/tests/Makefile.am
Modified: trunk/none/tests/Makefile.am
==============================================================================
--- trunk/none/tests/Makefile.am (original)
+++ trunk/none/tests/Makefile.am Tue Aug 4 20:25:53 2015
@@ -276,10 +276,10 @@
nestedfns_CFLAGS = $(AM_CFLAGS)
mq_LDADD = -lrt
endif
-libvex_test_CFLAGS = @FLAG_FSANITIZE@
+libvex_test_CFLAGS = $(AM_CFLAGS) @FLAG_FSANITIZE@
libvex_test_LDADD = ../../VEX/libvex-@VGCONF_ARCH_PRI@-@VGCONF_OS@.a \
@LIB_UBSAN@
-libvexmultiarch_test_CFLAGS= @FLAG_FSANITIZE@
+libvexmultiarch_test_CFLAGS= $(AM_CFLAGS) @FLAG_FSANITIZE@
libvexmultiarch_test_LDADD = \
../../VEX/libvexmultiarch-@VGCONF_ARCH_PRI@-@VGCONF_OS@.a \
../../VEX/libvex-@VGCONF_ARCH_PRI@-@VGCONF_OS@.a @LIB_UBSAN@
|
|
From: <sv...@va...> - 2015-08-04 19:12:12
|
Author: philippe
Date: Tue Aug 4 20:12:05 2015
New Revision: 15484
Log:
Refine NEWS entry for memcheck xb monitor command
Modified:
trunk/NEWS
Modified: trunk/NEWS
==============================================================================
--- trunk/NEWS (original)
+++ trunk/NEWS Tue Aug 4 20:12:05 2015
@@ -16,9 +16,9 @@
- A new monitor command 'xb <addr> <len>' shows the validity bits
of <len> bytes at <addr>. Below the validity bits, the byte
values are shown using a layout similar to the GDB command
- 'x /<len>xb <addr>'. The monitor command 'xb' is easier to use
- (in particular on little endian computers) when you need to associate
- byte data value with their corresponding validity bits.
+ 'x /<len>xb <addr>'. The monitor command 'xb' is easier to use than
+ get_vbits (in particular on little endian computers) when you need to
+ associate byte data value with their corresponding validity bits.
* Massif:
- New monitor command 'all_snapshots <filename>' that dumps all snapshots
|
|
From: <sv...@va...> - 2015-08-04 19:11:11
|
Author: philippe
Date: Tue Aug 4 20:11:03 2015
New Revision: 15483
Log:
* Memcheck:
- A new monitor command 'xb <addr> <len>' shows the validity bits
of <len> bytes at <addr>. Below the validity bits, the byte
values are shown using a layout similar to the GDB command
'x /<len>xb <addr>'. The monitor command 'xb' is easier to use
(in particular on little endian computers) when you need to associate
byte data value with their corresponding validity bits.
Modified:
trunk/NEWS
trunk/gdbserver_tests/mchelp.stdoutB.exp
trunk/memcheck/docs/mc-manual.xml
trunk/memcheck/mc_main.c
Modified: trunk/NEWS
==============================================================================
--- trunk/NEWS (original)
+++ trunk/NEWS Tue Aug 4 20:11:03 2015
@@ -13,6 +13,12 @@
* ==================== TOOL CHANGES ====================
* Memcheck:
+ - A new monitor command 'xb <addr> <len>' shows the validity bits
+ of <len> bytes at <addr>. Below the validity bits, the byte
+ values are shown using a layout similar to the GDB command
+ 'x /<len>xb <addr>'. The monitor command 'xb' is easier to use
+ (in particular on little endian computers) when you need to associate
+ byte data value with their corresponding validity bits.
* Massif:
- New monitor command 'all_snapshots <filename>' that dumps all snapshots
Modified: trunk/gdbserver_tests/mchelp.stdoutB.exp
==============================================================================
--- trunk/gdbserver_tests/mchelp.stdoutB.exp (original)
+++ trunk/gdbserver_tests/mchelp.stdoutB.exp Tue Aug 4 20:11:03 2015
@@ -13,10 +13,14 @@
v.set merge-recursive-frames <num> : merge recursive calls in max <num> frames
v.set vgdb-error <errornr> : debug me at error >= <errornr>
memcheck monitor commands:
- get_vbits <addr> [<len>]
- returns validity bits for <len> (or 1) bytes at <addr>
+ xb <addr> [<len>]
+ prints validity bits for <len> (or 1) bytes at <addr>
bit values 0 = valid, 1 = invalid, __ = unaddressable byte
- Example: get_vbits 0x........ 10
+ Then prints the bytes values below the corresponding validity bits
+ in a layout similar to the gdb command 'x /<len>xb <addr>'
+ Example: xb 0x........ 10
+ get_vbits <addr> [<len>]
+ Similar to xb, but only prints the validity bytes by group of 4.
make_memory [noaccess|undefined
|defined|Definedifaddressable] <addr> [<len>]
mark <len> (or 1) bytes at <addr> with the given accessibility
@@ -73,10 +77,14 @@
(default traceflags 0b00100000 : show after instrumentation)
An additional flag 0b100000000 allows to show gdbserver instrumentation
memcheck monitor commands:
- get_vbits <addr> [<len>]
- returns validity bits for <len> (or 1) bytes at <addr>
+ xb <addr> [<len>]
+ prints validity bits for <len> (or 1) bytes at <addr>
bit values 0 = valid, 1 = invalid, __ = unaddressable byte
- Example: get_vbits 0x........ 10
+ Then prints the bytes values below the corresponding validity bits
+ in a layout similar to the gdb command 'x /<len>xb <addr>'
+ Example: xb 0x........ 10
+ get_vbits <addr> [<len>]
+ Similar to xb, but only prints the validity bytes by group of 4.
make_memory [noaccess|undefined
|defined|Definedifaddressable] <addr> [<len>]
mark <len> (or 1) bytes at <addr> with the given accessibility
Modified: trunk/memcheck/docs/mc-manual.xml
==============================================================================
--- trunk/memcheck/docs/mc-manual.xml (original)
+++ trunk/memcheck/docs/mc-manual.xml Tue Aug 4 20:11:03 2015
@@ -1648,36 +1648,51 @@
<itemizedlist>
<listitem>
- <para><varname>get_vbits <addr> [<len>]</varname>
- shows the definedness (V) bits for <len> (default 1) bytes
- starting at <addr>. The definedness of each byte in the
- range is given using two hexadecimal digits. These hexadecimal
- digits encode the validity of each bit of the corresponding byte,
- using 0 if the bit is defined and 1 if the bit is undefined.
- If a byte is not addressable, its validity bits are replaced
- by <varname>__</varname> (a double underscore).
+ <para><varname>xb <addr> [<len>]</varname>
+ shows the definedness (V) bits and values for <len> (default 1)
+ bytes starting at <addr>.
+ For each 8 bytes, two lines are output.
+ </para>
+ <para>
+ The first line shows the validity bits for 8 bytes.
+ The definedness of each byte in the range is given using two hexadecimal
+ digits. These hexadecimal digits encode the validity of each bit of the
+ corresponding byte,
+ using 0 if the bit is defined and 1 if the bit is undefined.
+ If a byte is not addressable, its validity bits are replaced
+ by <varname>__</varname> (a double underscore).
+ </para>
+ <para>
+ The second line shows the values of the bytes below the corresponding
+ validity bits. The format used to show the bytes data is similar to the
+ GDB command 'x /<len>xb <addr>'. The value for a non-
+ addressable byte is shown as ?? (two question marks).
</para>
<para>
- In the following example, <varname>string10</varname> is an array
- of 10 characters, in which the even numbered bytes are
- undefined. In the below example, the byte corresponding
- to <varname>string10[5]</varname> is not addressable.
+ In the following example, <varname>string10</varname> is an array
+ of 10 characters, in which the even numbered bytes are
+ undefined. In the below example, the byte corresponding
+ to <varname>string10[5]</varname> is not addressable.
</para>
<programlisting><![CDATA[
(gdb) p &string10
-$4 = (char (*)[10]) 0x8049e28
-(gdb) monitor get_vbits 0x8049e28 10
-ff00ff00 ff__ff00 ff00
-(gdb)
+$4 = (char (*)[10]) 0x804a2f0
+(gdb) mo xb 0x804a2f0 10
+ ff 00 ff 00 ff __ ff 00
+0x804A2F0: 0x3f 0x6e 0x3f 0x65 0x3f 0x?? 0x3f 0x65
+ ff 00
+0x804A2F8: 0x3f 0x00
+Address 0x804A2F0 len 10 has 1 bytes unaddressable
+(gdb)
]]></programlisting>
- <para> The command get_vbits cannot be used with registers. To get
- the validity bits of a register, you must start Valgrind with the
- option <option>--vgdb-shadow-registers=yes</option>. The validity
- bits of a register can be obtained by printing the 'shadow 1'
- corresponding register. In the below x86 example, the register
- eax has all its bits undefined, while the register ebx is fully
- defined.
+ <para> The command xb cannot be used with registers. To get
+ the validity bits of a register, you must start Valgrind with the
+ option <option>--vgdb-shadow-registers=yes</option>. The validity
+ bits of a register can then be obtained by printing the 'shadow 1'
+ corresponding register. In the below x86 example, the register
+ eax has all its bits undefined, while the register ebx is fully
+ defined.
</para>
<programlisting><![CDATA[
(gdb) p /x $eaxs1
@@ -1690,6 +1705,31 @@
</listitem>
<listitem>
+ <para><varname>get_vbits <addr> [<len>]</varname>
+ shows the definedness (V) bits for <len> (default 1) bytes
+ starting at <addr> using the same convention as the
+ <varname>xb</varname> command. <varname>get_vbits</varname> only
+ shows the V bits (grouped by 4 bytes). It does not show the values.
+ If you want to associate V bits with the corresponding byte values, the
+ <varname>xb</varname> command will be easier to use, in particular
+ on little endian computers when associating undefined parts of an integer
+ with their V bits values.
+ </para>
+ <para>
+ The following example shows the result of <varname>get_vbits</varname>
+ on the <varname>string10</varname> used in the <varname>xb</varname>
+ command explanation.
+ </para>
+<programlisting><![CDATA[
+(gdb) monitor get_vbits 0x804a2f0 10
+ff00ff00 ff__ff00 ff00
+Address 0x804A2F0 len 10 has 1 bytes unaddressable
+(gdb)
+]]></programlisting>
+
+ </listitem>
+
+ <listitem>
<para><varname>make_memory
[noaccess|undefined|defined|Definedifaddressable] <addr>
[<len>]</varname> marks the range of <len> (default 1)
Modified: trunk/memcheck/mc_main.c
==============================================================================
--- trunk/memcheck/mc_main.c (original)
+++ trunk/memcheck/mc_main.c Tue Aug 4 20:11:03 2015
@@ -6010,10 +6010,14 @@
(
"\n"
"memcheck monitor commands:\n"
-" get_vbits <addr> [<len>]\n"
-" returns validity bits for <len> (or 1) bytes at <addr>\n"
+" xb <addr> [<len>]\n"
+" prints validity bits for <len> (or 1) bytes at <addr>\n"
" bit values 0 = valid, 1 = invalid, __ = unaddressable byte\n"
-" Example: get_vbits 0x8049c78 10\n"
+" Then prints the bytes values below the corresponding validity bits\n"
+" in a layout similar to the gdb command 'x /<len>xb <addr>'\n"
+" Example: xb 0x8049c78 10\n"
+" get_vbits <addr> [<len>]\n"
+" Similar to xb, but only prints the validity bytes by group of 4.\n"
" make_memory [noaccess|undefined\n"
" |defined|Definedifaddressable] <addr> [<len>]\n"
" mark <len> (or 1) bytes at <addr> with the given accessibility\n"
@@ -6043,6 +6047,28 @@
"\n");
}
+/* Print szB bytes at address, with a format similar to the gdb command
+ x /<szB>xb address.
+ res[i] == 1 indicates the corresponding byte is addressable. */
+static void gdb_xb (Addr address, SizeT szB, Int res[])
+{
+ UInt i;
+
+ for (i = 0; i < szB; i++) {
+ UInt bnr = i % 8;
+ if (bnr == 0) {
+ if (i != 0)
+ VG_(printf) ("\n"); // Terminate previous line
+ VG_(printf) ("%p:", (void*)(address+i));
+ }
+ if (res[i] == 1)
+ VG_(printf) ("\t0x%02x", *(UChar*)(address+i));
+ else
+ VG_(printf) ("\t0x??");
+ }
+ VG_(printf) ("\n"); // Terminate previous line
+}
+
/* return True if request recognised, False otherwise */
static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
{
@@ -6058,7 +6084,7 @@
command. This ensures a shorter abbreviation for the user. */
switch (VG_(keyword_id)
("help get_vbits leak_check make_memory check_memory "
- "block_list who_points_at",
+ "block_list who_points_at xb",
wcmd, kwd_report_duplicated_matches)) {
case -2: /* multiple matches */
return True;
@@ -6191,8 +6217,8 @@
else if (int_value > 0)
lcp.max_loss_records_output = (UInt) int_value;
else
- VG_(gdb_printf) ("max_loss_records_output must be >= 1, got %d\n",
- int_value);
+ VG_(gdb_printf) ("max_loss_records_output must be >= 1,"
+ " got %d\n", int_value);
break;
}
default:
@@ -6303,7 +6329,8 @@
if (wl == NULL || *endptr != '\0') {
VG_(gdb_printf) ("malformed or missing integer\n");
} else {
- // lr_nr-1 as what is shown to the user is 1 more than the index in lr_array.
+ /* lr_nr-1 as what is shown to the user is 1 more than the index
+ in lr_array. */
if (lr_nr == 0 || ! MC_(print_block_list) (lr_nr-1))
VG_(gdb_printf) ("invalid loss record nr\n");
}
@@ -6324,6 +6351,52 @@
return True;
}
+ case 7: { /* xb */
+ Addr address;
+ SizeT szB = 1;
+ if (VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr)) {
+ UChar vbits[8];
+ Int res[8];
+ Int i;
+ Int unaddressable = 0;
+ for (i = 0; i < szB; i++) {
+ Int bnr = i % 8;
+ res[bnr] = mc_get_or_set_vbits_for_client
+ (address+i, (Addr) &vbits[bnr], 1,
+ False, /* get them */
+ False /* is client request */ );
+ /* We are going to print the first vabits of a new line.
+ Terminate the previous line if needed: prints a line with the
+ address and the data. */
+ if (bnr == 0) {
+ if (i != 0) {
+ VG_(printf) ("\n");
+ gdb_xb (address + i - 8, 8, res);
+ }
+ VG_(printf) ("\t"); // To align VABITS with gdb_xb layout
+ }
+ if (res[bnr] == 1) {
+ VG_(printf) ("\t %02x", vbits[bnr]);
+ } else {
+ tl_assert(3 == res[bnr]);
+ unaddressable++;
+ VG_(printf) ("\t __");
+ }
+ }
+ VG_(printf) ("\n");
+ if (szB % 8 == 0 && szB > 0)
+ gdb_xb (address + szB - 8, 8, res);
+ else
+ gdb_xb (address + szB - szB % 8, szB % 8, res);
+ if (unaddressable) {
+ VG_(printf)
+ ("Address %p len %ld has %d bytes unaddressable\n",
+ (void *)address, szB, unaddressable);
+ }
+ }
+ return True;
+ }
+
default:
tl_assert(0);
return False;
|
|
From: Matthias S. <zz...@ge...> - 2015-08-04 18:23:02
|
Am 04.08.2015 um 17:58 schrieb sv...@va...:
> Author: florian
> Date: Tue Aug 4 16:58:41 2015
> New Revision: 15482
>
> Log:
> Rewrite memcheck's event counter machinery.
> - unique event counters
> - simplify PROF_EVENT
> - do not allocate more event counters than needed
> - compile cleanly
>
> Modified:
> trunk/memcheck/mc_include.h
> trunk/memcheck/mc_leakcheck.c
> trunk/memcheck/mc_main.c
>
Hi Florian,
this is definitely a nice solution.
If you do not like the two lists - enum constant definition and name
assignment - to be maintained, there is a possible improvement:
Have one header file with custom macro calls to define the events and
include it twice with different macro definitions.
The alternative version with only one macro argument is added in comments.
mc_prof_events.h:
DEFINE_PROF_EVENT(MCPE_LOADV8, "LOADV8")
// or: DEFINE_PROF_EVENT(LOADV8)
...
mc_include.h:
#define DEFINE_PROF_EVENT(a,b) a,
// or: #define DEFINE_PROF_EVENT(a) MCPE_##a,
enum {
# include "mc_prof_events.h"
/* Do not add enumerators past this line. */
MCPE_LAST
};
#undef DEFINE_PROF_EVENT
mc_main.c:
#define DEFINE_PROF_EVENT(a,b) [a] = b,
// or: #define DEFINE_PROF_EVENT(a) [MCPE_##a] = #a,
static const HChar* MC_(event_ctr_name)[MCPE_LAST] =
{
# include "mc_prof_events.h"
};
#undef DEFINE_PROF_EVENT
Regards
Matthias
|
|
From: <sv...@va...> - 2015-08-04 15:58:51
|
Author: florian
Date: Tue Aug 4 16:58:41 2015
New Revision: 15482
Log:
Rewrite memcheck's event counter machinery.
- unique event counters
- simplify PROF_EVENT
- do not allocate more event counters than needed
- compile cleanly
Modified:
trunk/memcheck/mc_include.h
trunk/memcheck/mc_leakcheck.c
trunk/memcheck/mc_main.c
Modified: trunk/memcheck/mc_include.h
==============================================================================
--- trunk/memcheck/mc_include.h (original)
+++ trunk/memcheck/mc_include.h Tue Aug 4 16:58:41 2015
@@ -207,26 +207,129 @@
/* Define to collect detailed performance info. */
/* #define MC_PROFILE_MEMORY */
-
#ifdef MC_PROFILE_MEMORY
-# define N_PROF_EVENTS 500
-
-UInt MC_(event_ctr)[N_PROF_EVENTS];
-HChar* MC_(event_ctr_name)[N_PROF_EVENTS];
-# define PROF_EVENT(ev, name) \
- do { tl_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
- /* crude and inaccurate check to ensure the same */ \
- /* event isn't being used with > 1 name */ \
- if (MC_(event_ctr_name)[ev]) \
- tl_assert(name == MC_(event_ctr_name)[ev]); \
- MC_(event_ctr)[ev]++; \
- MC_(event_ctr_name)[ev] = (name); \
+/* Order of enumerators does not matter. But MCPE_LAST has to be the
+ last entry in the list as it is used as an array bound. */
+enum {
+ MCPE_LOADV8,
+ MCPE_LOADV8_SLOW1,
+ MCPE_LOADV8_SLOW2,
+ MCPE_LOADV16,
+ MCPE_LOADV16_SLOW1,
+ MCPE_LOADV16_SLOW2,
+ MCPE_LOADV32,
+ MCPE_LOADV32_SLOW1,
+ MCPE_LOADV32_SLOW2,
+ MCPE_LOADV64,
+ MCPE_LOADV64_SLOW1,
+ MCPE_LOADV64_SLOW2,
+ MCPE_LOADV_128_OR_256,
+ MCPE_LOADV_128_OR_256_SLOW_LOOP,
+ MCPE_LOADV_128_OR_256_SLOW1,
+ MCPE_LOADV_128_OR_256_SLOW2,
+ MCPE_LOADVN_SLOW,
+ MCPE_LOADVN_SLOW_LOOP,
+ MCPE_STOREV8,
+ MCPE_STOREV8_SLOW1,
+ MCPE_STOREV8_SLOW2,
+ MCPE_STOREV8_SLOW3,
+ MCPE_STOREV8_SLOW4,
+ MCPE_STOREV16,
+ MCPE_STOREV16_SLOW1,
+ MCPE_STOREV16_SLOW2,
+ MCPE_STOREV16_SLOW3,
+ MCPE_STOREV16_SLOW4,
+ MCPE_STOREV32,
+ MCPE_STOREV32_SLOW1,
+ MCPE_STOREV32_SLOW2,
+ MCPE_STOREV32_SLOW3,
+ MCPE_STOREV32_SLOW4,
+ MCPE_STOREV64,
+ MCPE_STOREV64_SLOW1,
+ MCPE_STOREV64_SLOW2,
+ MCPE_STOREV64_SLOW3,
+ MCPE_STOREV64_SLOW4,
+ MCPE_STOREVN_SLOW,
+ MCPE_STOREVN_SLOW_LOOP,
+ MCPE_MAKE_ALIGNED_WORD32_UNDEFINED,
+ MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW,
+ MCPE_MAKE_ALIGNED_WORD64_UNDEFINED,
+ MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW,
+ MCPE_MAKE_ALIGNED_WORD32_NOACCESS,
+ MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW,
+ MCPE_MAKE_ALIGNED_WORD64_NOACCESS,
+ MCPE_MAKE_ALIGNED_WORD64_NOACCESS_SLOW,
+ MCPE_MAKE_MEM_NOACCESS,
+ MCPE_MAKE_MEM_UNDEFINED,
+ MCPE_MAKE_MEM_UNDEFINED_W_OTAG,
+ MCPE_MAKE_MEM_DEFINED,
+ MCPE_CHEAP_SANITY_CHECK,
+ MCPE_EXPENSIVE_SANITY_CHECK,
+ MCPE_COPY_ADDRESS_RANGE_STATE,
+ MCPE_COPY_ADDRESS_RANGE_STATE_LOOP1,
+ MCPE_COPY_ADDRESS_RANGE_STATE_LOOP2,
+ MCPE_CHECK_MEM_IS_NOACCESS,
+ MCPE_CHECK_MEM_IS_NOACCESS_LOOP,
+ MCPE_IS_MEM_ADDRESSABLE,
+ MCPE_IS_MEM_ADDRESSABLE_LOOP,
+ MCPE_IS_MEM_DEFINED,
+ MCPE_IS_MEM_DEFINED_LOOP,
+ MCPE_IS_MEM_DEFINED_COMPREHENSIVE,
+ MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP,
+ MCPE_IS_DEFINED_ASCIIZ,
+ MCPE_IS_DEFINED_ASCIIZ_LOOP,
+ MCPE_FIND_CHUNK_FOR_OLD,
+ MCPE_FIND_CHUNK_FOR_OLD_LOOP,
+ MCPE_SET_ADDRESS_RANGE_PERMS,
+ MCPE_SET_ADDRESS_RANGE_PERMS_SINGLE_SECMAP,
+ MCPE_SET_ADDRESS_RANGE_PERMS_STARTOF_SECMAP,
+ MCPE_SET_ADDRESS_RANGE_PERMS_MULTIPLE_SECMAPS,
+ MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1,
+ MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2,
+ MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1_QUICK,
+ MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2_QUICK,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1A,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1B,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1C,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K_FREE_DIST_SM,
+ MCPE_NEW_MEM_STACK,
+ MCPE_NEW_MEM_STACK_4,
+ MCPE_NEW_MEM_STACK_8,
+ MCPE_NEW_MEM_STACK_12,
+ MCPE_NEW_MEM_STACK_16,
+ MCPE_NEW_MEM_STACK_32,
+ MCPE_NEW_MEM_STACK_112,
+ MCPE_NEW_MEM_STACK_128,
+ MCPE_NEW_MEM_STACK_144,
+ MCPE_NEW_MEM_STACK_160,
+ MCPE_DIE_MEM_STACK,
+ MCPE_DIE_MEM_STACK_4,
+ MCPE_DIE_MEM_STACK_8,
+ MCPE_DIE_MEM_STACK_12,
+ MCPE_DIE_MEM_STACK_16,
+ MCPE_DIE_MEM_STACK_32,
+ MCPE_DIE_MEM_STACK_112,
+ MCPE_DIE_MEM_STACK_128,
+ MCPE_DIE_MEM_STACK_144,
+ MCPE_DIE_MEM_STACK_160,
+ /* Do not add enumerators past this line. */
+ MCPE_LAST
+};
+
+extern UInt MC_(event_ctr)[MCPE_LAST];
+
+# define PROF_EVENT(ev) \
+ do { tl_assert((ev) >= 0 && (ev) < MCPE_LAST); \
+ MC_(event_ctr)[ev]++; \
} while (False);
#else
-# define PROF_EVENT(ev, name) /* */
+# define PROF_EVENT(ev) /* */
#endif /* MC_PROFILE_MEMORY */
Modified: trunk/memcheck/mc_leakcheck.c
==============================================================================
--- trunk/memcheck/mc_leakcheck.c (original)
+++ trunk/memcheck/mc_leakcheck.c Tue Aug 4 16:58:41 2015
@@ -265,9 +265,9 @@
{
Int i;
Addr a_lo, a_hi;
- PROF_EVENT(70, "find_chunk_for_OLD");
+ PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD);
for (i = 0; i < n_chunks; i++) {
- PROF_EVENT(71, "find_chunk_for_OLD(loop)");
+ PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD_LOOP);
a_lo = chunks[i]->data;
a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
if (a_lo <= ptr && ptr < a_hi)
Modified: trunk/memcheck/mc_main.c
==============================================================================
--- trunk/memcheck/mc_main.c (original)
+++ trunk/memcheck/mc_main.c Tue Aug 4 16:58:41 2015
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
@@ -1235,7 +1236,7 @@
ULong pessim64 = V_BITS64_DEFINED;
UWord long_index = byte_offset_w(szL, bigendian, j);
for (i = 8-1; i >= 0; i--) {
- PROF_EVENT(29, "mc_LOADV_128_or_256_slow(loop)");
+ PROF_EVENT(MCPE_LOADV_128_OR_256_SLOW_LOOP);
ai = a + 8*long_index + byte_offset_w(8, bigendian, i);
ok = get_vbits8(ai, &vbits8);
vbits64 <<= 8;
@@ -1316,7 +1317,7 @@
this function may get called from hand written assembly. */
ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
{
- PROF_EVENT(30, "mc_LOADVn_slow");
+ PROF_EVENT(MCPE_LOADVN_SLOW);
/* ------------ BEGIN semi-fast cases ------------ */
/* These deal quickly-ish with the common auxiliary primary map
@@ -1370,7 +1371,7 @@
info can be gleaned from pessim64) but is used as a
cross-check. */
for (i = szB-1; i >= 0; i--) {
- PROF_EVENT(31, "mc_LOADVn_slow(loop)");
+ PROF_EVENT(MCPE_LOADVN_SLOW_LOOP);
ai = a + byte_offset_w(szB, bigendian, i);
ok = get_vbits8(ai, &vbits8);
vbits64 <<= 8;
@@ -1468,7 +1469,7 @@
Addr ai;
Bool ok;
- PROF_EVENT(35, "mc_STOREVn_slow");
+ PROF_EVENT(MCPE_STOREVN_SLOW);
/* ------------ BEGIN semi-fast cases ------------ */
/* These deal quickly-ish with the common auxiliary primary map
@@ -1530,7 +1531,7 @@
/* Dump vbytes in memory, iterating from least to most significant
byte. At the same time establish addressibility of the location. */
for (i = 0; i < szB; i++) {
- PROF_EVENT(36, "mc_STOREVn_slow(loop)");
+ PROF_EVENT(MCPE_STOREVN_SLOW_LOOP);
ai = a + byte_offset_w(szB, bigendian, i);
vbits8 = vbytes & 0xff;
ok = set_vbits8(ai, vbits8);
@@ -1559,7 +1560,7 @@
SecMap** sm_ptr;
SecMap* example_dsm;
- PROF_EVENT(150, "set_address_range_perms");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS);
/* Check the V+A bits make sense. */
tl_assert(VA_BITS16_NOACCESS == vabits16 ||
@@ -1637,19 +1638,19 @@
len_to_next_secmap = aNext - a;
if ( lenT <= len_to_next_secmap ) {
// Range entirely within one sec-map. Covers almost all cases.
- PROF_EVENT(151, "set_address_range_perms-single-secmap");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_SINGLE_SECMAP);
lenA = lenT;
lenB = 0;
} else if (is_start_of_sm(a)) {
// Range spans at least one whole sec-map, and starts at the beginning
// of a sec-map; skip to Part 2.
- PROF_EVENT(152, "set_address_range_perms-startof-secmap");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_STARTOF_SECMAP);
lenA = 0;
lenB = lenT;
goto part2;
} else {
// Range spans two or more sec-maps, first one is partial.
- PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_MULTIPLE_SECMAPS);
lenA = len_to_next_secmap;
lenB = lenT - lenA;
}
@@ -1666,11 +1667,11 @@
if (is_distinguished_sm(*sm_ptr)) {
if (*sm_ptr == example_dsm) {
// Sec-map already has the V+A bits that we want, so skip.
- PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1_QUICK);
a = aNext;
lenA = 0;
} else {
- PROF_EVENT(155, "set_address_range_perms-dist-sm1");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1);
*sm_ptr = copy_for_writing(*sm_ptr);
}
}
@@ -1680,7 +1681,7 @@
while (True) {
if (VG_IS_8_ALIGNED(a)) break;
if (lenA < 1) break;
- PROF_EVENT(156, "set_address_range_perms-loop1a");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1A);
sm_off = SM_OFF(a);
insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
a += 1;
@@ -1689,7 +1690,7 @@
// 8-aligned, 8 byte steps
while (True) {
if (lenA < 8) break;
- PROF_EVENT(157, "set_address_range_perms-loop8a");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A);
sm_off16 = SM_OFF_16(a);
((UShort*)(sm->vabits8))[sm_off16] = vabits16;
a += 8;
@@ -1698,7 +1699,7 @@
// 1 byte steps
while (True) {
if (lenA < 1) break;
- PROF_EVENT(158, "set_address_range_perms-loop1b");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1B);
sm_off = SM_OFF(a);
insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
a += 1;
@@ -1719,10 +1720,10 @@
while (True) {
if (lenB < SM_SIZE) break;
tl_assert(is_start_of_sm(a));
- PROF_EVENT(159, "set_address_range_perms-loop64K");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K);
sm_ptr = get_secmap_ptr(a);
if (!is_distinguished_sm(*sm_ptr)) {
- PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K_FREE_DIST_SM);
// Free the non-distinguished sec-map that we're replacing. This
// case happens moderately often, enough to be worthwhile.
SysRes sres = VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
@@ -1750,10 +1751,10 @@
if (is_distinguished_sm(*sm_ptr)) {
if (*sm_ptr == example_dsm) {
// Sec-map already has the V+A bits that we want, so stop.
- PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2_QUICK);
return;
} else {
- PROF_EVENT(162, "set_address_range_perms-dist-sm2");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2);
*sm_ptr = copy_for_writing(*sm_ptr);
}
}
@@ -1762,7 +1763,7 @@
// 8-aligned, 8 byte steps
while (True) {
if (lenB < 8) break;
- PROF_EVENT(163, "set_address_range_perms-loop8b");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B);
sm_off16 = SM_OFF_16(a);
((UShort*)(sm->vabits8))[sm_off16] = vabits16;
a += 8;
@@ -1771,7 +1772,7 @@
// 1 byte steps
while (True) {
if (lenB < 1) return;
- PROF_EVENT(164, "set_address_range_perms-loop1c");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1C);
sm_off = SM_OFF(a);
insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
a += 1;
@@ -1784,7 +1785,7 @@
void MC_(make_mem_noaccess) ( Addr a, SizeT len )
{
- PROF_EVENT(40, "MC_(make_mem_noaccess)");
+ PROF_EVENT(MCPE_MAKE_MEM_NOACCESS);
DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
if (UNLIKELY( MC_(clo_mc_level) == 3 ))
@@ -1793,14 +1794,14 @@
static void make_mem_undefined ( Addr a, SizeT len )
{
- PROF_EVENT(41, "make_mem_undefined");
+ PROF_EVENT(MCPE_MAKE_MEM_UNDEFINED);
DEBUG("make_mem_undefined(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
}
void MC_(make_mem_undefined_w_otag) ( Addr a, SizeT len, UInt otag )
{
- PROF_EVENT(43, "MC_(make_mem_undefined)");
+ PROF_EVENT(MCPE_MAKE_MEM_UNDEFINED_W_OTAG);
DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
if (UNLIKELY( MC_(clo_mc_level) == 3 ))
@@ -1837,7 +1838,7 @@
void MC_(make_mem_defined) ( Addr a, SizeT len )
{
- PROF_EVENT(42, "MC_(make_mem_defined)");
+ PROF_EVENT(MCPE_MAKE_MEM_DEFINED);
DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
if (UNLIKELY( MC_(clo_mc_level) == 3 ))
@@ -1897,7 +1898,7 @@
Bool aligned, nooverlap;
DEBUG("MC_(copy_address_range_state)\n");
- PROF_EVENT(50, "MC_(copy_address_range_state)");
+ PROF_EVENT(MCPE_COPY_ADDRESS_RANGE_STATE);
if (len == 0 || src == dst)
return;
@@ -1947,7 +1948,7 @@
/* We have to do things the slow way */
if (src < dst) {
for (i = 0, j = len-1; i < len; i++, j--) {
- PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
+ PROF_EVENT(MCPE_COPY_ADDRESS_RANGE_STATE_LOOP1);
vabits2 = get_vabits2( src+j );
set_vabits2( dst+j, vabits2 );
if (VA_BITS2_PARTDEFINED == vabits2) {
@@ -1958,7 +1959,7 @@
if (src > dst) {
for (i = 0; i < len; i++) {
- PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
+ PROF_EVENT(MCPE_COPY_ADDRESS_RANGE_STATE_LOOP2);
vabits2 = get_vabits2( src+i );
set_vabits2( dst+i, vabits2 );
if (VA_BITS2_PARTDEFINED == vabits2) {
@@ -2606,7 +2607,7 @@
static INLINE void make_aligned_word32_undefined ( Addr a )
{
- PROF_EVENT(300, "make_aligned_word32_undefined");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD32_UNDEFINED);
#ifndef PERF_FAST_STACK2
make_mem_undefined(a, 4);
@@ -2616,7 +2617,7 @@
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
- PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW);
make_mem_undefined(a, 4);
return;
}
@@ -2649,7 +2650,7 @@
static INLINE
void make_aligned_word32_noaccess ( Addr a )
{
- PROF_EVENT(310, "make_aligned_word32_noaccess");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD32_NOACCESS);
#ifndef PERF_FAST_STACK2
MC_(make_mem_noaccess)(a, 4);
@@ -2659,7 +2660,7 @@
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
- PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW);
MC_(make_mem_noaccess)(a, 4);
return;
}
@@ -2690,7 +2691,7 @@
static INLINE void make_aligned_word64_undefined ( Addr a )
{
- PROF_EVENT(320, "make_aligned_word64_undefined");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD64_UNDEFINED);
#ifndef PERF_FAST_STACK2
make_mem_undefined(a, 8);
@@ -2700,7 +2701,7 @@
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
- PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW);
make_mem_undefined(a, 8);
return;
}
@@ -2734,7 +2735,7 @@
static INLINE
void make_aligned_word64_noaccess ( Addr a )
{
- PROF_EVENT(330, "make_aligned_word64_noaccess");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD64_NOACCESS);
#ifndef PERF_FAST_STACK2
MC_(make_mem_noaccess)(a, 8);
@@ -2744,7 +2745,7 @@
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
- PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD64_NOACCESS_SLOW);
MC_(make_mem_noaccess)(a, 8);
return;
}
@@ -2786,7 +2787,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_4_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(110, "new_mem_stack_4");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_4);
if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
} else {
@@ -2797,7 +2798,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
{
- PROF_EVENT(110, "new_mem_stack_4");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_4);
if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
} else {
@@ -2808,7 +2809,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
{
- PROF_EVENT(120, "die_mem_stack_4");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_4);
if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
} else {
@@ -2822,7 +2823,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_8_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(111, "new_mem_stack_8");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_8);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
} else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
@@ -2836,7 +2837,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
{
- PROF_EVENT(111, "new_mem_stack_8");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_8);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
} else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
@@ -2850,7 +2851,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
{
- PROF_EVENT(121, "die_mem_stack_8");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_8);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
} else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
@@ -2867,7 +2868,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_12_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(112, "new_mem_stack_12");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_12);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
@@ -2885,7 +2886,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
{
- PROF_EVENT(112, "new_mem_stack_12");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_12);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
@@ -2903,7 +2904,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
{
- PROF_EVENT(122, "die_mem_stack_12");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_12);
/* Note the -12 in the test */
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {
/* We have 8-alignment at -12, hence ok to do 8 at -12 and 4 at
@@ -2927,7 +2928,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_16_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(113, "new_mem_stack_16");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_16);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
@@ -2946,7 +2947,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
{
- PROF_EVENT(113, "new_mem_stack_16");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_16);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
@@ -2965,7 +2966,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
{
- PROF_EVENT(123, "die_mem_stack_16");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_16);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Have 8-alignment at +0, hence do 8 at -16 and 8 at -8. */
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
@@ -2986,7 +2987,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_32_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(114, "new_mem_stack_32");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_32);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Straightforward */
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
@@ -3009,7 +3010,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
{
- PROF_EVENT(114, "new_mem_stack_32");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_32);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Straightforward */
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
@@ -3032,7 +3033,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
{
- PROF_EVENT(124, "die_mem_stack_32");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_32);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Straightforward */
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
@@ -3058,7 +3059,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_112_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(115, "new_mem_stack_112");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_112);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8 , otag );
@@ -3082,7 +3083,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
{
- PROF_EVENT(115, "new_mem_stack_112");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_112);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
@@ -3106,7 +3107,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
{
- PROF_EVENT(125, "die_mem_stack_112");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_112);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
@@ -3133,7 +3134,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_128_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(116, "new_mem_stack_128");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_128);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8 , otag );
@@ -3159,7 +3160,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
{
- PROF_EVENT(116, "new_mem_stack_128");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_128);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
@@ -3185,7 +3186,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
{
- PROF_EVENT(126, "die_mem_stack_128");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_128);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
@@ -3214,7 +3215,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_144_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(117, "new_mem_stack_144");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_144);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
@@ -3242,7 +3243,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
{
- PROF_EVENT(117, "new_mem_stack_144");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_144);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
@@ -3270,7 +3271,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
{
- PROF_EVENT(127, "die_mem_stack_144");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_144);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
@@ -3301,7 +3302,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_160_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(118, "new_mem_stack_160");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_160);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
@@ -3331,7 +3332,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
{
- PROF_EVENT(118, "new_mem_stack_160");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_160);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
@@ -3361,7 +3362,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
{
- PROF_EVENT(128, "die_mem_stack_160");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_160);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
@@ -3393,19 +3394,19 @@
static void mc_new_mem_stack_w_ECU ( Addr a, SizeT len, UInt ecu )
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(115, "new_mem_stack_w_otag");
+ PROF_EVENT(MCPE_NEW_MEM_STACK);
MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + a, len, otag );
}
static void mc_new_mem_stack ( Addr a, SizeT len )
{
- PROF_EVENT(115, "new_mem_stack");
+ PROF_EVENT(MCPE_NEW_MEM_STACK);
make_mem_undefined ( -VG_STACK_REDZONE_SZB + a, len );
}
static void mc_die_mem_stack ( Addr a, SizeT len )
{
- PROF_EVENT(125, "die_mem_stack");
+ PROF_EVENT(MCPE_DIE_MEM_STACK);
MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
}
@@ -3759,9 +3760,9 @@
SizeT i;
UWord vabits2;
- PROF_EVENT(60, "check_mem_is_noaccess");
+ PROF_EVENT(MCPE_CHECK_MEM_IS_NOACCESS);
for (i = 0; i < len; i++) {
- PROF_EVENT(61, "check_mem_is_noaccess(loop)");
+ PROF_EVENT(MCPE_CHECK_MEM_IS_NOACCESS_LOOP);
vabits2 = get_vabits2(a);
if (VA_BITS2_NOACCESS != vabits2) {
if (bad_addr != NULL) *bad_addr = a;
@@ -3778,9 +3779,9 @@
SizeT i;
UWord vabits2;
- PROF_EVENT(62, "is_mem_addressable");
+ PROF_EVENT(MCPE_IS_MEM_ADDRESSABLE);
for (i = 0; i < len; i++) {
- PROF_EVENT(63, "is_mem_addressable(loop)");
+ PROF_EVENT(MCPE_IS_MEM_ADDRESSABLE_LOOP);
vabits2 = get_vabits2(a);
if (VA_BITS2_NOACCESS == vabits2) {
if (bad_addr != NULL) *bad_addr = a;
@@ -3798,13 +3799,13 @@
SizeT i;
UWord vabits2;
- PROF_EVENT(64, "is_mem_defined");
+ PROF_EVENT(MCPE_IS_MEM_DEFINED);
DEBUG("is_mem_defined\n");
if (otag) *otag = 0;
if (bad_addr) *bad_addr = 0;
for (i = 0; i < len; i++) {
- PROF_EVENT(65, "is_mem_defined(loop)");
+ PROF_EVENT(MCPE_IS_MEM_DEFINED_LOOP);
vabits2 = get_vabits2(a);
if (VA_BITS2_DEFINED != vabits2) {
// Error! Nb: Report addressability errors in preference to
@@ -3851,13 +3852,13 @@
UWord vabits2;
Bool already_saw_errV = False;
- PROF_EVENT(64, "is_mem_defined"); // fixme
+ PROF_EVENT(MCPE_IS_MEM_DEFINED_COMPREHENSIVE);
DEBUG("is_mem_defined_comprehensive\n");
tl_assert(!(*errorV || *errorA));
for (i = 0; i < len; i++) {
- PROF_EVENT(65, "is_mem_defined(loop)"); // fixme
+ PROF_EVENT(MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP);
vabits2 = get_vabits2(a);
switch (vabits2) {
case VA_BITS2_DEFINED:
@@ -3896,13 +3897,13 @@
{
UWord vabits2;
- PROF_EVENT(66, "mc_is_defined_asciiz");
+ PROF_EVENT(MCPE_IS_DEFINED_ASCIIZ);
DEBUG("mc_is_defined_asciiz\n");
if (otag) *otag = 0;
if (bad_addr) *bad_addr = 0;
while (True) {
- PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
+ PROF_EVENT(MCPE_IS_DEFINED_ASCIIZ_LOOP);
vabits2 = get_vabits2(a);
if (VA_BITS2_DEFINED != vabits2) {
// Error! Nb: Report addressability errors in preference to
@@ -4436,7 +4437,7 @@
void mc_LOADV_128_or_256 ( /*OUT*/ULong* res,
Addr a, SizeT nBits, Bool isBigEndian )
{
- PROF_EVENT(200, "mc_LOADV_128_or_256");
+ PROF_EVENT(MCPE_LOADV_128_OR_256);
#ifndef PERF_FAST_LOADV
mc_LOADV_128_or_256_slow( res, a, nBits, isBigEndian );
@@ -4449,7 +4450,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,nBits) )) {
- PROF_EVENT(201, "mc_LOADV_128_or_256-slow1");
+ PROF_EVENT(MCPE_LOADV_128_OR_256_SLOW1);
mc_LOADV_128_or_256_slow( res, a, nBits, isBigEndian );
return;
}
@@ -4470,7 +4471,7 @@
} else {
/* Slow case: some block of 8 bytes are not all-defined or
all-undefined. */
- PROF_EVENT(202, "mc_LOADV_128_or_256-slow2");
+ PROF_EVENT(MCPE_LOADV_128_OR_256_SLOW2);
mc_LOADV_128_or_256_slow( res, a, nBits, isBigEndian );
return;
}
@@ -4505,7 +4506,7 @@
static INLINE
ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
{
- PROF_EVENT(200, "mc_LOADV64");
+ PROF_EVENT(MCPE_LOADV64);
#ifndef PERF_FAST_LOADV
return mc_LOADVn_slow( a, 64, isBigEndian );
@@ -4515,7 +4516,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,64) )) {
- PROF_EVENT(201, "mc_LOADV64-slow1");
+ PROF_EVENT(MCPE_LOADV64_SLOW1);
return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
}
@@ -4532,7 +4533,7 @@
return V_BITS64_UNDEFINED;
} else {
/* Slow case: the 8 bytes are not all-defined or all-undefined. */
- PROF_EVENT(202, "mc_LOADV64-slow2");
+ PROF_EVENT(MCPE_LOADV64_SLOW2);
return mc_LOADVn_slow( a, 64, isBigEndian );
}
}
@@ -4636,7 +4637,7 @@
static INLINE
void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
{
- PROF_EVENT(210, "mc_STOREV64");
+ PROF_EVENT(MCPE_STOREV64);
#ifndef PERF_FAST_STOREV
// XXX: this slow case seems to be marginally faster than the fast case!
@@ -4648,7 +4649,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,64) )) {
- PROF_EVENT(211, "mc_STOREV64-slow1");
+ PROF_EVENT(MCPE_STOREV64_SLOW1);
mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
return;
}
@@ -4667,7 +4668,7 @@
((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
return;
}
- PROF_EVENT(232, "mc_STOREV64-slow2");
+ PROF_EVENT(MCPE_STOREV64_SLOW2);
mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
return;
}
@@ -4679,12 +4680,12 @@
((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
return;
}
- PROF_EVENT(232, "mc_STOREV64-slow3");
+ PROF_EVENT(MCPE_STOREV64_SLOW3);
mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
return;
}
- PROF_EVENT(212, "mc_STOREV64-slow4");
+ PROF_EVENT(MCPE_STOREV64_SLOW4);
mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
}
#endif
@@ -4706,7 +4707,7 @@
static INLINE
UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
{
- PROF_EVENT(220, "mc_LOADV32");
+ PROF_EVENT(MCPE_LOADV32);
#ifndef PERF_FAST_LOADV
return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
@@ -4716,7 +4717,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,32) )) {
- PROF_EVENT(221, "mc_LOADV32-slow1");
+ PROF_EVENT(MCPE_LOADV32_SLOW1);
return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
}
@@ -4735,7 +4736,7 @@
return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
} else {
/* Slow case: the 4 bytes are not all-defined or all-undefined. */
- PROF_EVENT(222, "mc_LOADV32-slow2");
+ PROF_EVENT(MCPE_LOADV32_SLOW2);
return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
}
}
@@ -4832,7 +4833,7 @@
static INLINE
void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
{
- PROF_EVENT(230, "mc_STOREV32");
+ PROF_EVENT(MCPE_STOREV32);
#ifndef PERF_FAST_STOREV
mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
@@ -4842,7 +4843,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,32) )) {
- PROF_EVENT(231, "mc_STOREV32-slow1");
+ PROF_EVENT(MCPE_STOREV32_SLOW1);
mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
return;
}
@@ -4861,7 +4862,7 @@
sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
return;
}
- PROF_EVENT(232, "mc_STOREV32-slow2");
+ PROF_EVENT(MCPE_STOREV32_SLOW2);
mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
return;
}
@@ -4873,12 +4874,12 @@
sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
return;
}
- PROF_EVENT(233, "mc_STOREV32-slow3");
+ PROF_EVENT(MCPE_STOREV32_SLOW3);
mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
return;
}
- PROF_EVENT(234, "mc_STOREV32-slow4");
+ PROF_EVENT(MCPE_STOREV32_SLOW4);
mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
}
#endif
@@ -4900,7 +4901,7 @@
static INLINE
UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
{
- PROF_EVENT(240, "mc_LOADV16");
+ PROF_EVENT(MCPE_LOADV16);
#ifndef PERF_FAST_LOADV
return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
@@ -4910,7 +4911,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,16) )) {
- PROF_EVENT(241, "mc_LOADV16-slow1");
+ PROF_EVENT(MCPE_LOADV16_SLOW1);
return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
}
@@ -4930,7 +4931,7 @@
else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
else {
/* Slow case: the two bytes are not all-defined or all-undefined. */
- PROF_EVENT(242, "mc_LOADV16-slow2");
+ PROF_EVENT(MCPE_LOADV16_SLOW2);
return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
}
}
@@ -5070,7 +5071,7 @@
static INLINE
void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
{
- PROF_EVENT(250, "mc_STOREV16");
+ PROF_EVENT(MCPE_STOREV16);
#ifndef PERF_FAST_STOREV
mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
@@ -5080,7 +5081,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,16) )) {
- PROF_EVENT(251, "mc_STOREV16-slow1");
+ PROF_EVENT(MCPE_STOREV16_SLOW1);
mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
return;
}
@@ -5101,7 +5102,7 @@
&(sm->vabits8[sm_off]) );
return;
}
- PROF_EVENT(232, "mc_STOREV16-slow2");
+ PROF_EVENT(MCPE_STOREV16_SLOW2);
mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
}
if (V_BITS16_UNDEFINED == vbits16) {
@@ -5114,12 +5115,12 @@
&(sm->vabits8[sm_off]) );
return;
}
- PROF_EVENT(233, "mc_STOREV16-slow3");
+ PROF_EVENT(MCPE_STOREV16_SLOW3);
mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
return;
}
- PROF_EVENT(234, "mc_STOREV16-slow4");
+ PROF_EVENT(MCPE_STOREV16_SLOW4);
mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
}
#endif
@@ -5238,7 +5239,7 @@
VG_REGPARM(1)
UWord MC_(helperc_LOADV8) ( Addr a )
{
- PROF_EVENT(260, "mc_LOADV8");
+ PROF_EVENT(MCPE_LOADV8);
#ifndef PERF_FAST_LOADV
return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
@@ -5248,7 +5249,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,8) )) {
- PROF_EVENT(261, "mc_LOADV8-slow1");
+ PROF_EVENT(MCPE_LOADV8_SLOW1);
return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
}
@@ -5268,7 +5269,7 @@
else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
else {
/* Slow case: the byte is not all-defined or all-undefined. */
- PROF_EVENT(262, "mc_LOADV8-slow2");
+ PROF_EVENT(MCPE_LOADV8_SLOW2);
return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
}
}
@@ -5284,7 +5285,7 @@
VG_REGPARM(2)
void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
{
- PROF_EVENT(270, "mc_STOREV8");
+ PROF_EVENT(MCPE_STOREV8);
#ifndef PERF_FAST_STOREV
mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
@@ -5294,7 +5295,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,8) )) {
- PROF_EVENT(271, "mc_STOREV8-slow1");
+ PROF_EVENT(MCPE_STOREV8_SLOW1);
mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
return;
}
@@ -5350,7 +5351,7 @@
&(sm->vabits8[sm_off]) );
return;
}
- PROF_EVENT(232, "mc_STOREV8-slow2");
+ PROF_EVENT(MCPE_STOREV8_SLOW2);
mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
return;
}
@@ -5366,13 +5367,13 @@
&(sm->vabits8[sm_off]) );
return;
}
- PROF_EVENT(233, "mc_STOREV8-slow3");
+ PROF_EVENT(MCPE_STOREV8_SLOW3);
mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
return;
}
// Partially defined word
- PROF_EVENT(234, "mc_STOREV8-slow4");
+ PROF_EVENT(MCPE_STOREV8_SLOW4);
mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
}
#endif
@@ -5583,7 +5584,7 @@
static Bool mc_cheap_sanity_check ( void )
{
n_sanity_cheap++;
- PROF_EVENT(490, "cheap_sanity_check");
+ PROF_EVENT(MCPE_CHEAP_SANITY_CHECK);
/* Check for sane operating level */
if (MC_(clo_mc_level) < 1 || MC_(clo_mc_level) > 3)
return False;
@@ -5603,7 +5604,7 @@
if (0) return True;
n_sanity_expensive++;
- PROF_EVENT(491, "expensive_sanity_check");
+ PROF_EVENT(MCPE_EXPENSIVE_SANITY_CHECK);
/* Check for sane operating level */
if (MC_(clo_mc_level) < 1 || MC_(clo_mc_level) > 3)
@@ -6672,33 +6673,159 @@
#ifdef MC_PROFILE_MEMORY
-UInt MC_(event_ctr)[N_PROF_EVENTS];
-HChar* MC_(event_ctr_name)[N_PROF_EVENTS];
+UInt MC_(event_ctr)[MCPE_LAST];
+
+/* Event counter names. Use the name of the function that increases the
+ event counter. Drop any MC_() and mc_ prefices. */
+static const HChar* MC_(event_ctr_name)[MCPE_LAST] = {
+ [MCPE_LOADVN_SLOW] = "LOADVn_slow",
+ [MCPE_LOADVN_SLOW_LOOP] = "LOADVn_slow_loop",
+ [MCPE_STOREVN_SLOW] = "STOREVn_slow",
+ [MCPE_STOREVN_SLOW_LOOP] = "STOREVn_slow(loop)",
+ [MCPE_MAKE_ALIGNED_WORD32_UNDEFINED] = "make_aligned_word32_undefined",
+ [MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW] =
+ "make_aligned_word32_undefined_slow",
+ [MCPE_MAKE_ALIGNED_WORD64_UNDEFINED] = "make_aligned_word64_undefined",
+ [MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW] =
+ "make_aligned_word64_undefined_slow",
+ [MCPE_MAKE_ALIGNED_WORD32_NOACCESS] = "make_aligned_word32_noaccess",
+ [MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW] =
+ "make_aligned_word32_noaccess_slow",
+ [MCPE_MAKE_ALIGNED_WORD64_NOACCESS] = "make_aligned_word64_noaccess",
+ [MCPE_MAKE_ALIGNED_WORD64_NOACCESS_SLOW] =
+ "make_aligned_word64_noaccess_slow",
+ [MCPE_MAKE_MEM_NOACCESS] = "make_mem_noaccess",
+ [MCPE_MAKE_MEM_UNDEFINED] = "make_mem_undefined",
+ [MCPE_MAKE_MEM_UNDEFINED_W_OTAG] = "make_mem_undefined_w_otag",
+ [MCPE_MAKE_MEM_DEFINED] = "make_mem_defined",
+ [MCPE_CHEAP_SANITY_CHECK] = "cheap_sanity_check",
+ [MCPE_EXPENSIVE_SANITY_CHECK] = "expensive_sanity_check",
+ [MCPE_COPY_ADDRESS_RANGE_STATE] = "copy_address_range_state",
+ [MCPE_COPY_ADDRESS_RANGE_STATE_LOOP1] = "copy_address_range_state(loop1)",
+ [MCPE_COPY_ADDRESS_RANGE_STATE_LOOP2] = "copy_address_range_state(loop2)",
+ [MCPE_CHECK_MEM_IS_NOACCESS] = "check_mem_is_noaccess",
+ [MCPE_CHECK_MEM_IS_NOACCESS_LOOP] = "check_mem_is_noaccess(loop)",
+ [MCPE_IS_MEM_ADDRESSABLE] = "is_mem_addressable",
+ [MCPE_IS_MEM_ADDRESSABLE_LOOP] = "is_mem_addressable(loop)",
+ [MCPE_IS_MEM_DEFINED] = "is_mem_defined",
+ [MCPE_IS_MEM_DEFINED_LOOP] = "is_mem_defined(loop)",
+ [MCPE_IS_MEM_DEFINED_COMPREHENSIVE] = "is_mem_defined_comprehensive",
+ [MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP] =
+ "is_mem_defined_comprehensive(loop)",
+ [MCPE_IS_DEFINED_ASCIIZ] = "is_defined_asciiz",
+ [MCPE_IS_DEFINED_ASCIIZ_LOOP] = "is_defined_asciiz(loop)",
+ [MCPE_FIND_CHUNK_FOR_OLD] = "find_chunk_for_OLD",
+ [MCPE_FIND_CHUNK_FOR_OLD_LOOP] = "find_chunk_for_OLD(loop)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS] = "set_address_range_perms",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_SINGLE_SECMAP] =
+ "set_address_range_perms(single-secmap)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_STARTOF_SECMAP] =
+ "set_address_range_perms(startof-secmap)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_MULTIPLE_SECMAPS] =
+ "set_address_range_perms(multiple-secmaps)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1] =
+ "set_address_range_perms(dist-sm1)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2] =
+ "set_address_range_perms(dist-sm2)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1_QUICK] =
+ "set_address_range_perms(dist-sm1-quick)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2_QUICK] =
+ "set_address_range_perms(dist-sm2-quick)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1A] = "set_address_range_perms(loop1a)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1B] = "set_address_range_perms(loop1b)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1C] = "set_address_range_perms(loop1c)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A] = "set_address_range_perms(loop8a)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B] = "set_address_range_perms(loop8b)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K] = "set_address_range_perms(loop64K)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K_FREE_DIST_SM] =
+ "set_address_range_perms(loop64K-free-dist-sm)",
+ [MCPE_LOADV_128_OR_256_SLOW_LOOP] = "LOADV_128_or_256_slow(loop)",
+ [MCPE_LOADV_128_OR_256] = "LOADV_128_or_256",
+ [MCPE_LOADV_128_OR_256_SLOW1] = "LOADV_128_or_256-slow1",
+ [MCPE_LOADV_128_OR_256_SLOW2] = "LOADV_128_or_256-slow2",
+ [MCPE_LOADV64] = "LOADV64",
+ [MCPE_LOADV64_SLOW1] = "LOADV64-slow1",
+ [MCPE_LOADV64_SLOW2] = "LOADV64-slow2",
+ [MCPE_STOREV64] = "STOREV64",
+ [MCPE_STOREV64_SLOW1] = "STOREV64-slow1",
+ [MCPE_STOREV64_SLOW2] = "STOREV64-slow2",
+ [MCPE_STOREV64_SLOW3] = "STOREV64-slow3",
+ [MCPE_STOREV64_SLOW4] = "STOREV64-slow4",
+ [MCPE_LOADV32] = "LOADV32",
+ [MCPE_LOADV32_SLOW1] = "LOADV32-slow1",
+ [MCPE_LOADV32_SLOW2] = "LOADV32-slow2",
+ [MCPE_STOREV32] = "STOREV32",
+ [MCPE_STOREV32_SLOW1] = "STOREV32-slow1",
+ [MCPE_STOREV32_SLOW2] = "STOREV32-slow2",
+ [MCPE_STOREV32_SLOW3] = "STOREV32-slow3",
+ [MCPE_STOREV32_SLOW4] = "STOREV32-slow4",
+ [MCPE_LOADV16] = "LOADV16",
+ [MCPE_LOADV16_SLOW1] = "LOADV16-slow1",
+ [MCPE_LOADV16_SLOW2] = "LOADV16-slow2",
+ [MCPE_STOREV16] = "STOREV16",
+ [MCPE_STOREV16_SLOW1] = "STOREV16-slow1",
+ [MCPE_STOREV16_SLOW2] = "STOREV16-slow2",
+ [MCPE_STOREV16_SLOW3] = "STOREV16-slow3",
+ [MCPE_STOREV16_SLOW4] = "STOREV16-slow4",
+ [MCPE_LOADV8] = "LOADV8",
+ [MCPE_LOADV8_SLOW1] = "LOADV8-slow1",
+ [MCPE_LOADV8_SLOW2] = "LOADV8-slow2",
+ [MCPE_STOREV8] = "STOREV8",
+ [MCPE_STOREV8_SLOW1] = "STOREV8-slow1",
+ [MCPE_STOREV8_SLOW2] = "STOREV8-slow2",
+ [MCPE_STOREV8_SLOW3] = "STOREV8-slow3",
+ [MCPE_STOREV8_SLOW4] = "STOREV8-slow4",
+ [MCPE_NEW_MEM_STACK_4] = "new_mem_stack_4",
+ [MCPE_NEW_MEM_STACK_8] = "new_mem_stack_8",
+ [MCPE_NEW_MEM_STACK_12] = "new_mem_stack_12",
+ [MCPE_NEW_MEM_STACK_16] = "new_mem_stack_16",
+ [MCPE_NEW_MEM_STACK_32] = "new_mem_stack_32",
+ [MCPE_NEW_MEM_STACK_112] = "new_mem_stack_112",
+ [MCPE_NEW_MEM_STACK_128] = "new_mem_stack_128",
+ [MCPE_NEW_MEM_STACK_144] = "new_mem_stack_144",
+ [MCPE_NEW_MEM_STACK_160] = "new_mem_stack_160",
+ [MCPE_DIE_MEM_STACK_4] = "die_mem_stack_4",
+ [MCPE_DIE_MEM_STACK_8] = "die_mem_stack_8",
+ [MCPE_DIE_MEM_STACK_12] = "die_mem_stack_12",
+ [MCPE_DIE_MEM_STACK_16] = "die_mem_stack_16",
+ [MCPE_DIE_MEM_STACK_32] = "die_mem_stack_32",
+ [MCPE_DIE_MEM_STACK_112] = "die_mem_stack_112",
+ [MCPE_DIE_MEM_STACK_128] = "die_mem_stack_128",
+ [MCPE_DIE_MEM_STACK_144] = "die_mem_stack_144",
+ [MCPE_DIE_MEM_STACK_160] = "die_mem_stack_160",
+ [MCPE_NEW_MEM_STACK] = "new_mem_stack",
+ [MCPE_DIE_MEM_STACK] = "die_mem_stack",
+};
static void init_prof_mem ( void )
{
- Int i;
- for (i = 0; i < N_PROF_EVENTS; i++) {
+ Int i, name_count = 0;
+
+ for (i = 0; i < MCPE_LAST; i++) {
MC_(event_ctr)[i] = 0;
- MC_(event_ctr_name)[i] = NULL;
+ if (MC_(event_ctr_name)[i] != NULL)
+ ++name_count;
}
+
+ /* Make sure every profiling event has a name */
+ tl_assert(name_count == MCPE_LAST);
}
static void done_prof_mem ( void )
{
- Int i;
+ Int i, n;
Bool spaced = False;
- for (i = 0; i < N_PROF_EVENTS; i++) {
- if (!spaced && (i % 10) == 0) {
+ for (i = n = 0; i < MCPE_LAST; i++) {
+ if (!spaced && (n % 10) == 0) {
VG_(printf)("\n");
spaced = True;
}
if (MC_(event_ctr)[i] > 0) {
spaced = False;
- VG_(printf)( "prof mem event %3d: %9d %s\n",
+ ++n;
+ VG_(printf)( "prof mem event %3d: %9u %s\n",
i, MC_(event_ctr)[i],
- MC_(event_ctr_name)[i]
- ? MC_(event_ctr_name)[i] : "unnamed");
+ MC_(event_ctr_name)[i]);
}
}
}
|
|
From: Florian K. <fl...@ei...> - 2015-08-04 07:08:03
|
On 04.08.2015 03:37, Zhi-Gang Liu wrote:
> Sorry, it failed for the Makefile consistency check. I will fix it.
Yes, that has been fixed. However, I also said:
> On Mon, Aug 3, 2015 at 5:33 AM, Florian Krohm <fl...@ei...>
wrote:
>
>>
>> Also make sure that "make dist BUILD_ALL_DOCS=no" runs successfully
to completion.
This is broken:
$ make dist BUILD_ALL_DOCS=no
make dist-bzip2 dist-gzip am__post_remove_distdir='@:'
make[1]: Entering directory `/home/florian/valgrind/orig'
....
(cd mips64 && make top_distdir=../../../valgrind-3.11.0.SVN
distdir=../../../valgrind-3.11.0.SVN/none/tests/mips64 \
am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=:
distdir)
make[4]: Entering directory `/home/florian/valgrind/orig/none/tests/mips64'
make[4]: Leaving directory `/home/florian/valgrind/orig/none/tests/mips64'
(cd tilegx && make top_distdir=../../../valgrind-3.11.0.SVN
distdir=../../../valgrind-3.11.0.SVN/none/tests/tilegx \
am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=:
distdir)
make[4]: Entering directory `/home/florian/valgrind/orig/none/tests/tilegx'
make[4]: *** No rule to make target `insn_test_add_X0.c', needed by
`distdir'. Stop.
make[4]: Leaving directory `/home/florian/valgrind/orig/none/tests/tilegx'
make[3]: *** [distdir] Error 1
make[3]: Leaving directory `/home/florian/valgrind/orig/none/tests'
make[2]: *** [distdir] Error 1
make[2]: Leaving directory `/home/florian/valgrind/orig/none'
make[1]: *** [distdir] Error 1
make[1]: Leaving directory `/home/florian/valgrind/orig'
make: *** [dist] Error 2
|
|
From: Matthias S. <zz...@ge...> - 2015-08-04 06:35:55
|
Am 10.07.2015 um 11:56 schrieb Julian Seward:
>
> Greetings.
>
Hi!
> It'll soon be time for another X.Y.0 release. Personally, I'd prefer
> to call it 4.0.0 rather than 3.11.0, in keeping with recent
> rationalisation of the Linux kernel and GCC numbering schemes.
>
> I'd like to propose the following:
>
> 7 August 2015: feature freeze
> 1 September 2015: release
>
> This puts it at roughly a year since 3.10, and also ties in
> conveniently with the tentative Fedora 23 schedule.
>
> On the whole I think the tree is in a pretty good shape. Items I'd
> like to see completed:
>
> Make sure that gcc 5.1 and glibc 2.22 support is solid
> Fix as many bugs in docs/internals/3_10_BUGSTATUS.txt as possible
> Initial support for MacOSX 10.11
> Finish reviewing and merge the Solaris port, if feasible
>
> Comments on the timing? Other stuff people want get in?

My wishlist is:

Bugfixes:
* Fix missing compiler options:
  [PATCH 1/2] Fix compilation of libvex tests when additional compiler options are needed.
* Fix profiling numbers that trigger assertion fails:
  [PATCH 2/2] memcheck: Fix prof_event numbers
* Fix signedness of profile prints:
  Re: [Valgrind-developers] [PATCH 2/2] memcheck: Fix prof_event numbers

Features:
* Write callstack of fatal signal to xml output:
  https://bugs.kde.org/show_bug.cgi?id=191069
* Not implemented yet: Let exit from replaced functions not call exit, but
  either a client request or raise(SIGXXXX). What about this?
* Print more callstacks for mempool errors:
  https://bugs.kde.org/show_bug.cgi?id=322256

Regards
Matthias
|
From: <sv...@va...> - 2015-08-04 04:59:49
|
Author: zliu
Date: Tue Aug 4 05:59:41 2015
New Revision: 15481
Log:
Fix the "make regtest" error caused by the new tilegx instruction tests
Modified:
trunk/none/tests/tilegx/Makefile.am
Modified: trunk/none/tests/tilegx/Makefile.am
==============================================================================
--- trunk/none/tests/tilegx/Makefile.am (original)
+++ trunk/none/tests/tilegx/Makefile.am Tue Aug 4 05:59:41 2015
@@ -4,7 +4,10 @@
dist_noinst_SCRIPTS = \
filter_stderr
-EXTRA_DIST = \
+EXTRA_DIST =
+
+if VGCONF_ARCHS_INCLUDE_TILEGX
+EXTRA_DIST += \
insn_test_move_X0.stdout.exp insn_test_move_X0.stderr.exp \
insn_test_move_X0.vgtest \
insn_test_move_X1.stdout.exp insn_test_move_X1.stderr.exp \
@@ -1058,10 +1061,14 @@
insn_test_xori_X0.vgtest \
insn_test_xori_X1.stdout.exp insn_test_xori_X1.stderr.exp \
insn_test_xori_X1.vgtest
+endif
bin_PROGRAMS = gen_insn_test
-insn_tests = \
+insn_tests =
+
+if VGCONF_ARCHS_INCLUDE_TILEGX
+insn_tests += \
insn_test_move_X0 \
insn_test_move_X1 \
insn_test_move_Y0 \
@@ -1577,6 +1584,7 @@
insn_test_xor_Y1 \
insn_test_xori_X0 \
insn_test_xori_X1
+endif
check_PROGRAMS = \
allexec \
|