You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(32) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
|
|
|
|
1
(6) |
|
2
(4) |
3
(9) |
4
(11) |
5
(16) |
6
(6) |
7
(1) |
8
(11) |
|
9
(11) |
10
(6) |
11
(10) |
12
(23) |
13
(23) |
14
(6) |
15
(10) |
|
16
(5) |
17
(13) |
18
(9) |
19
(4) |
20
(6) |
21
(16) |
22
(3) |
|
23
(5) |
24
(7) |
25
(6) |
26
(4) |
27
(8) |
28
|
29
(3) |
|
30
(2) |
31
(17) |
|
|
|
|
|
|
From: <sv...@va...> - 2015-08-05 12:10:02
|
Author: florian
Date: Wed Aug 5 13:09:55 2015
New Revision: 15491
Log:
The number of elements in an OSet cannot be negative.
Let the return type of VG_(OSetGen_Size) reflect that.
Also fix a few casts.
Modified:
trunk/cachegrind/cg_main.c
trunk/coregrind/m_oset.c
trunk/include/pub_tool_oset.h
trunk/memcheck/mc_leakcheck.c
Modified: trunk/cachegrind/cg_main.c
==============================================================================
--- trunk/cachegrind/cg_main.c (original)
+++ trunk/cachegrind/cg_main.c Wed Aug 5 13:09:55 2015
@@ -1698,11 +1698,11 @@
VG_(dmsg)("cachegrind: with zero info:%6.1f%% (%d)\n",
no_debugs * 100.0 / debug_lookups, no_debugs);
- VG_(dmsg)("cachegrind: string table size: %lu\n",
+ VG_(dmsg)("cachegrind: string table size: %u\n",
VG_(OSetGen_Size)(stringTable));
- VG_(dmsg)("cachegrind: CC table size: %lu\n",
+ VG_(dmsg)("cachegrind: CC table size: %u\n",
VG_(OSetGen_Size)(CC_table));
- VG_(dmsg)("cachegrind: InstrInfo table size: %lu\n",
+ VG_(dmsg)("cachegrind: InstrInfo table size: %u\n",
VG_(OSetGen_Size)(instrInfoTable));
}
}
Modified: trunk/coregrind/m_oset.c
==============================================================================
--- trunk/coregrind/m_oset.c (original)
+++ trunk/coregrind/m_oset.c Wed Aug 5 13:09:55 2015
@@ -117,7 +117,7 @@
OSetFree_t free_fn; // deallocator
PoolAlloc* node_pa; // (optional) pool allocator for nodes.
SizeT maxEltSize; // for node_pa, must be > 0. Otherwise unused.
- Word nElems; // number of elements in the tree
+ UInt nElems; // number of elements in the tree
AvlNode* root; // root node
AvlNode* nodeStack[STACK_MAX]; // Iterator node stack
@@ -912,7 +912,7 @@
/*--- Miscellaneous operations ---*/
/*--------------------------------------------------------------------*/
-Word VG_(OSetGen_Size)(const AvlTree* t)
+UInt VG_(OSetGen_Size)(const AvlTree* t)
{
vg_assert(t);
return t->nElems;
Modified: trunk/include/pub_tool_oset.h
==============================================================================
--- trunk/include/pub_tool_oset.h (original)
+++ trunk/include/pub_tool_oset.h Wed Aug 5 13:09:55 2015
@@ -298,7 +298,7 @@
// they will return NULL if VG_(OSetGen_Next)() is called without an
// intervening call to VG_(OSetGen_ResetIter)().
-extern Word VG_(OSetGen_Size) ( const OSet* os );
+extern UInt VG_(OSetGen_Size) ( const OSet* os );
extern void VG_(OSetGen_Insert) ( OSet* os, void* elem );
extern Bool VG_(OSetGen_Contains) ( const OSet* os, const void* key );
extern void* VG_(OSetGen_Lookup) ( const OSet* os, const void* key );
Modified: trunk/memcheck/mc_leakcheck.c
==============================================================================
--- trunk/memcheck/mc_leakcheck.c (original)
+++ trunk/memcheck/mc_leakcheck.c Wed Aug 5 13:09:55 2015
@@ -899,11 +899,10 @@
if (VG_DEBUG_CLIQUE) {
if (ex->IorC.indirect_szB > 0)
VG_(printf)(" clique %d joining clique %d adding %lu+%lu\n",
- ch_no, clique, (unsigned long)ch->szB,
- (unsigned long)ex->IorC.indirect_szB);
+ ch_no, clique, (SizeT)ch->szB, ex->IorC.indirect_szB);
else
VG_(printf)(" block %d joining clique %d adding %lu\n",
- ch_no, clique, (unsigned long)ch->szB);
+ ch_no, clique, (SizeT)ch->szB);
}
lc_extras[clique].IorC.indirect_szB += ch->szB;
@@ -1178,8 +1177,8 @@
// allocates or reallocates lr_array, and set its elements to the loss records
// contains in lr_table.
-static Int get_lr_array_from_lr_table(void) {
- Int i, n_lossrecords;
+static UInt get_lr_array_from_lr_table(void) {
+ UInt i, n_lossrecords;
LossRecord* lr;
n_lossrecords = VG_(OSetGen_Size)(lr_table);
@@ -1321,7 +1320,7 @@
if (VG_DEBUG_LEAKCHECK)
VG_(printf)("heuristic %s %#lx len %lu\n",
pp_heuristic(ex->heuristic),
- ch->data, (unsigned long)ch->szB);
+ ch->data, (SizeT)ch->szB);
}
old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
@@ -1511,7 +1510,7 @@
static void print_clique (Int clique, UInt level)
{
Int ind;
- Int i, n_lossrecords;;
+ UInt i, n_lossrecords;
n_lossrecords = VG_(OSetGen_Size)(lr_table);
@@ -1522,7 +1521,7 @@
MC_Chunk* ind_ch = lc_chunks[ind];
LossRecord* ind_lr;
LossRecordKey ind_lrkey;
- Int lr_i;
+ UInt lr_i;
ind_lrkey.state = ind_ex->state;
ind_lrkey.allocated_at = MC_(allocated_at)(ind_ch);
ind_lr = VG_(OSetGen_Lookup)(lr_table, &ind_lrkey);
@@ -1531,13 +1530,13 @@
break;
for (i = 0; i < level; i++)
VG_(umsg)(" ");
- VG_(umsg)("%p[%lu] indirect loss record %d\n",
- (void *)ind_ch->data, (unsigned long)ind_ch->szB,
+ VG_(umsg)("%p[%lu] indirect loss record %u\n",
+ (void *)ind_ch->data, (SizeT)ind_ch->szB,
lr_i+1); // lr_i+1 for user numbering.
if (lr_i >= n_lossrecords)
VG_(umsg)
("error: no indirect loss record found for %p[%lu]?????\n",
- (void *)ind_ch->data, (unsigned long)ind_ch->szB);
+ (void *)ind_ch->data, (SizeT)ind_ch->szB);
print_clique(ind, level+1);
}
}
@@ -1545,7 +1544,7 @@
Bool MC_(print_block_list) ( UInt loss_record_nr)
{
- Int i, n_lossrecords;
+ UInt i, n_lossrecords;
LossRecord* lr;
if (lr_table == NULL || lc_chunks == NULL || lc_extras == NULL) {
@@ -1584,7 +1583,7 @@
// If this is the loss record we are looking for, output the pointer.
if (old_lr == lr_array[loss_record_nr]) {
VG_(umsg)("%p[%lu]\n",
- (void *)ch->data, (unsigned long) ch->szB);
+ (void *)ch->data, (SizeT)ch->szB);
if (ex->state != Reachable) {
// We can print the clique in all states, except Reachable.
// In Unreached state, lc_chunk[i] is the clique leader.
@@ -1598,7 +1597,7 @@
} else {
// No existing loss record matches this chunk ???
VG_(umsg)("error: no loss record found for %p[%lu]?????\n",
- (void *)ch->data, (unsigned long) ch->szB);
+ (void *)ch->data, (SizeT)ch->szB);
}
}
return True;
|
Author: florian
Date: Wed Aug 5 12:26:10 2015
New Revision: 15490
Log:
The number of elements in a hash table cannot be negative.
Let the return type of VG_(HT_count_nodes) reflect that.
Modified:
trunk/coregrind/m_deduppoolalloc.c
trunk/coregrind/m_gdbserver/m_gdbserver.c
trunk/coregrind/m_hashtable.c
trunk/helgrind/hg_main.c
trunk/include/pub_tool_hashtable.h
Modified: trunk/coregrind/m_deduppoolalloc.c
==============================================================================
--- trunk/coregrind/m_deduppoolalloc.c (original)
+++ trunk/coregrind/m_deduppoolalloc.c Wed Aug 5 12:26:10 2015
@@ -201,7 +201,7 @@
static void print_stats (DedupPoolAlloc *ddpa)
{
VG_(message)(Vg_DebugMsg,
- "dedupPA:%s %ld allocs (%d uniq)"
+ "dedupPA:%s %ld allocs (%u uniq)"
" %ld pools (%ld bytes free in last pool)\n",
ddpa->cc,
(long int) ddpa->nr_alloc_calls,
Modified: trunk/coregrind/m_gdbserver/m_gdbserver.c
==============================================================================
--- trunk/coregrind/m_gdbserver/m_gdbserver.c (original)
+++ trunk/coregrind/m_gdbserver/m_gdbserver.c Wed Aug 5 12:26:10 2015
@@ -542,7 +542,7 @@
int i;
dlog(1,
- "clear_gdbserved_addresses: scanning hash table nodes %d\n",
+ "clear_gdbserved_addresses: scanning hash table nodes %u\n",
VG_(HT_count_nodes) (gs_addresses));
ag = (GS_Address**) VG_(HT_to_array) (gs_addresses, &n_elems);
for (i = 0; i < n_elems; i++)
Modified: trunk/coregrind/m_hashtable.c
==============================================================================
--- trunk/coregrind/m_hashtable.c (original)
+++ trunk/coregrind/m_hashtable.c Wed Aug 5 12:26:10 2015
@@ -82,7 +82,7 @@
return table;
}
-Int VG_(HT_count_nodes) ( const VgHashTable *table )
+UInt VG_(HT_count_nodes) ( const VgHashTable *table )
{
return table->n_elements;
}
Modified: trunk/helgrind/hg_main.c
==============================================================================
--- trunk/helgrind/hg_main.c (original)
+++ trunk/helgrind/hg_main.c Wed Aug 5 12:26:10 2015
@@ -5653,7 +5653,7 @@
HG_(stats__LockN_to_P_queries),
HG_(stats__LockN_to_P_get_map_size)() );
- VG_(printf)("client malloc-ed blocks: %'8d\n",
+ VG_(printf)("client malloc-ed blocks: %'8u\n",
VG_(HT_count_nodes)(hg_mallocmeta_table));
VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
Modified: trunk/include/pub_tool_hashtable.h
==============================================================================
--- trunk/include/pub_tool_hashtable.h (original)
+++ trunk/include/pub_tool_hashtable.h Wed Aug 5 12:26:10 2015
@@ -58,7 +58,7 @@
extern VgHashTable *VG_(HT_construct) ( const HChar* name );
/* Count the number of nodes in a table. */
-extern Int VG_(HT_count_nodes) ( const VgHashTable *table );
+extern UInt VG_(HT_count_nodes) ( const VgHashTable *table );
/* Add a node to the table. Duplicate keys are permitted. */
extern void VG_(HT_add_node) ( VgHashTable *t, void* node );
|
|
From: <sv...@va...> - 2015-08-05 07:48:40
|
Author: florian
Date: Wed Aug 5 08:48:32 2015
New Revision: 15489
Log:
Add missing header files.
Modified:
trunk/include/Makefile.am
Modified: trunk/include/Makefile.am
==============================================================================
--- trunk/include/Makefile.am (original)
+++ trunk/include/Makefile.am Wed Aug 5 08:48:32 2015
@@ -84,8 +84,11 @@
vki/vki-xen-hvm.h \
vki/vki-xen-memory.h \
vki/vki-xen-mmuext.h \
+ vki/vki-xen-physdev.h \
+ vki/vki-xen-schedop.h \
vki/vki-xen-sysctl.h \
vki/vki-xen-tmem.h \
vki/vki-xen-version.h \
+ vki/vki-xen-xsm.h \
vki/vki-xen-x86.h \
vki/vki-linux-drm.h
|
|
From: Bart V. A. <bva...@ac...> - 2015-08-05 02:40:46
|
On 08/04/15 12:25, sv...@va... wrote: > Modified: trunk/none/tests/Makefile.am > ============================================================================== > --- trunk/none/tests/Makefile.am (original) > +++ trunk/none/tests/Makefile.am Tue Aug 4 20:25:53 2015 > -libvex_test_CFLAGS = @FLAG_FSANITIZE@ > +libvex_test_CFLAGS = $(AM_CFLAGS)@FLAG_FSANITIZE@ Hi Philippe, It looks like a space is missing between $(AM_CFLAGS) and @FLAG_FSANITIZE@ ? Bart. |
|
From: <sv...@va...> - 2015-08-05 02:38:17
|
Author: zliu
Date: Wed Aug 5 03:38:10 2015
New Revision: 15488
Log:
Let "make distclean" remove generated tilegx instruction test files
Modified:
trunk/none/tests/tilegx/Makefile.am
trunk/none/tests/tilegx/gen_test.sh
Modified: trunk/none/tests/tilegx/Makefile.am
==============================================================================
--- trunk/none/tests/tilegx/Makefile.am (original)
+++ trunk/none/tests/tilegx/Makefile.am Wed Aug 5 03:38:10 2015
@@ -1597,19 +1597,15 @@
gen_insn_test_LDADD = ../../../VEX/priv/tilegx_disasm.o
$(addsuffix .c, $(insn_tests)) : gen_insn_test
- @echo $@
- ./gen_test.sh $@
+ @./gen_test.sh $@
$(addsuffix .stdout.exp, $(insn_tests)) : $(insn_tests)
- @echo "Generate $@"
./$(basename $(basename $@)) > $@
$(addsuffix .stderr.exp, $(insn_tests)) :
- @echo "Generate $@"
touch $@
$(addsuffix .vgtest, $(insn_tests)) :
- @echo "Generate $@"
echo -e "prog: $(basename $@)\nvgopts: -q" > $@
check-am : $(addsuffix .stdout.exp, $(insn_tests)) $(addsuffix .stderr.exp, $(insn_tests)) $(addsuffix .vgtest, $(insn_tests))
@@ -1618,3 +1614,4 @@
@rm -f *.stderr.exp *.stdout.exp *.vgtest $(addsuffix .c, $(insn_tests)) $(addsuffix .o, $(insn_tests)) $(insn_tests)
@rm -f *.o $(bin_PROGRAMS)
+distclean-am : clean-am
Modified: trunk/none/tests/tilegx/gen_test.sh
==============================================================================
--- trunk/none/tests/tilegx/gen_test.sh (original)
+++ trunk/none/tests/tilegx/gen_test.sh Wed Aug 5 03:38:10 2015
@@ -573,13 +573,12 @@
if [ $# -gt 0 ]; then
#fname = "$1"
-echo "generate $1"
+
for f in "${FILES[@]}"
do
array=(${f// / })
if [ ${array[2]} = $1 ]; then
./gen_insn_test ${array[0]} ${array[1]} > ${array[2]}
-# cat hello.c > ${array[2]}
exit 0
fi
done
|
|
From: <sv...@va...> - 2015-08-05 01:51:08
|
Author: zliu
Date: Wed Aug 5 02:50:59 2015
New Revision: 15487
Log:
Fix issue of "make dist BUILD_ALL_DOCS=no"
Modified:
trunk/none/tests/tilegx/Makefile.am
trunk/none/tests/tilegx/gen_test.sh (props changed)
Modified: trunk/none/tests/tilegx/Makefile.am
==============================================================================
--- trunk/none/tests/tilegx/Makefile.am (original)
+++ trunk/none/tests/tilegx/Makefile.am Wed Aug 5 02:50:59 2015
@@ -1067,7 +1067,6 @@
insn_tests =
-if VGCONF_ARCHS_INCLUDE_TILEGX
insn_tests += \
insn_test_move_X0 \
insn_test_move_X1 \
@@ -1584,7 +1583,6 @@
insn_test_xor_Y1 \
insn_test_xori_X0 \
insn_test_xori_X1
-endif
check_PROGRAMS = \
allexec \
|
|
From: <sv...@va...> - 2015-08-04 21:26:18
|
Author: florian
Date: Tue Aug 4 22:26:10 2015
New Revision: 15486
Log:
Fix printf format inconsistencies as pointed out by gcc -Wformat-signedness.
Modified:
trunk/memcheck/mc_errors.c
trunk/memcheck/mc_leakcheck.c
trunk/memcheck/mc_main.c
trunk/memcheck/mc_malloc_wrappers.c
Modified: trunk/memcheck/mc_errors.c
==============================================================================
--- trunk/memcheck/mc_errors.c (original)
+++ trunk/memcheck/mc_errors.c Tue Aug 4 22:26:10 2015
@@ -375,8 +375,8 @@
lr->num_blocks, d_num_blocks,
str_leak_lossmode(lr->key.state),
n_this_record, n_total_records );
- emit( " <leakedbytes>%ld</leakedbytes>\n", lr->szB);
- emit( " <leakedblocks>%d</leakedblocks>\n", lr->num_blocks);
+ emit( " <leakedbytes>%lu</leakedbytes>\n", lr->szB);
+ emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks);
emit( " </xwhat>\n" );
}
VG_(pp_ExeContext)(lr->key.allocated_at);
@@ -439,7 +439,7 @@
MC_(any_value_errors) = True;
if (xml) {
emit( " <kind>UninitValue</kind>\n" );
- emit( " <what>Use of uninitialised value of size %ld</what>\n",
+ emit( " <what>Use of uninitialised value of size %lu</what>\n",
extra->Err.Value.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.Value.origin_ec)
@@ -448,7 +448,7 @@
} else {
/* Could also show extra->Err.Cond.otag if debugging origin
tracking */
- emit( "Use of uninitialised value of size %ld\n",
+ emit( "Use of uninitialised value of size %lu\n",
extra->Err.Value.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.Value.origin_ec)
@@ -594,7 +594,7 @@
if (xml) {
emit( " <kind>Invalid%s</kind>\n",
extra->Err.Addr.isWrite ? "Write" : "Read" );
- emit( " <what>Invalid %s of size %ld</what>\n",
+ emit( " <what>Invalid %s of size %lu</what>\n",
extra->Err.Addr.isWrite ? "write" : "read",
extra->Err.Addr.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
@@ -602,7 +602,7 @@
&extra->Err.Addr.ai,
extra->Err.Addr.maybe_gcc );
} else {
- emit( "Invalid %s of size %ld\n",
+ emit( "Invalid %s of size %lu\n",
extra->Err.Addr.isWrite ? "write" : "read",
extra->Err.Addr.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
Modified: trunk/memcheck/mc_leakcheck.c
==============================================================================
--- trunk/memcheck/mc_leakcheck.c (original)
+++ trunk/memcheck/mc_leakcheck.c Tue Aug 4 22:26:10 2015
@@ -1875,11 +1875,11 @@
&& addr_in_reg < searched_wpa + searched_szB) {
if (addr_in_reg == searched_wpa)
VG_(umsg)
- ("tid %d register %s pointing at %#lx\n",
+ ("tid %u register %s pointing at %#lx\n",
tid, regname, searched_wpa);
else
VG_(umsg)
- ("tid %d register %s interior pointing %lu bytes inside %#lx\n",
+ ("tid %u register %s interior pointing %lu bytes inside %#lx\n",
tid, regname, (long unsigned) addr_in_reg - searched_wpa,
searched_wpa);
}
Modified: trunk/memcheck/mc_main.c
==============================================================================
--- trunk/memcheck/mc_main.c (original)
+++ trunk/memcheck/mc_main.c Tue Aug 4 22:26:10 2015
@@ -6122,7 +6122,7 @@
VG_(printf) ("\n");
if (unaddressable) {
VG_(printf)
- ("Address %p len %ld has %d bytes unaddressable\n",
+ ("Address %p len %lu has %d bytes unaddressable\n",
(void *)address, szB, unaddressable);
}
}
@@ -6272,11 +6272,11 @@
case -1: break;
case 0: /* addressable */
if (is_mem_addressable ( address, szB, &bad_addr ))
- VG_(printf) ("Address %p len %ld addressable\n",
+ VG_(printf) ("Address %p len %lu addressable\n",
(void *)address, szB);
else
VG_(printf)
- ("Address %p len %ld not addressable:\nbad address %p\n",
+ ("Address %p len %lu not addressable:\nbad address %p\n",
(void *)address, szB, (void *) bad_addr);
MC_(pp_describe_addr) (address);
break;
@@ -6284,7 +6284,7 @@
res = is_mem_defined ( address, szB, &bad_addr, &otag );
if (MC_AddrErr == res)
VG_(printf)
- ("Address %p len %ld not addressable:\nbad address %p\n",
+ ("Address %p len %lu not addressable:\nbad address %p\n",
(void *)address, szB, (void *) bad_addr);
else if (MC_ValueErr == res) {
okind = otag & 3;
@@ -6300,7 +6300,7 @@
default: tl_assert(0);
}
VG_(printf)
- ("Address %p len %ld not defined:\n"
+ ("Address %p len %lu not defined:\n"
"Uninitialised value at %p%s\n",
(void *)address, szB, (void *) bad_addr, src);
ecu = otag & ~3;
@@ -6310,7 +6310,7 @@
}
}
else
- VG_(printf) ("Address %p len %ld defined\n",
+ VG_(printf) ("Address %p len %lu defined\n",
(void *)address, szB);
MC_(pp_describe_addr) (address);
break;
@@ -6390,7 +6390,7 @@
gdb_xb (address + szB - szB % 8, szB % 8, res);
if (unaddressable) {
VG_(printf)
- ("Address %p len %ld has %d bytes unaddressable\n",
+ ("Address %p len %lu has %d bytes unaddressable\n",
(void *)address, szB, unaddressable);
}
}
@@ -7392,7 +7392,7 @@
static void print_SM_info(const HChar* type, Int n_SMs)
{
VG_(message)(Vg_DebugMsg,
- " memcheck: SMs: %s = %d (%ldk, %ldM)\n",
+ " memcheck: SMs: %s = %d (%luk, %luM)\n",
type,
n_SMs,
n_SMs * sizeof(SecMap) / 1024UL,
@@ -7409,18 +7409,18 @@
" memcheck: sanity checks: %d cheap, %d expensive\n",
n_sanity_cheap, n_sanity_expensive );
VG_(message)(Vg_DebugMsg,
- " memcheck: auxmaps: %lld auxmap entries (%lldk, %lldM) in use\n",
+ " memcheck: auxmaps: %llu auxmap entries (%lluk, %lluM) in use\n",
n_auxmap_L2_nodes,
n_auxmap_L2_nodes * 64,
n_auxmap_L2_nodes / 16 );
VG_(message)(Vg_DebugMsg,
- " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10\n",
+ " memcheck: auxmaps_L1: %llu searches, %llu cmps, ratio %llu:10\n",
n_auxmap_L1_searches, n_auxmap_L1_cmps,
(10ULL * n_auxmap_L1_cmps)
/ (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
);
VG_(message)(Vg_DebugMsg,
- " memcheck: auxmaps_L2: %lld searches, %lld nodes\n",
+ " memcheck: auxmaps_L2: %llu searches, %llu nodes\n",
n_auxmap_L2_searches, n_auxmap_L2_nodes
);
@@ -7444,7 +7444,7 @@
max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
VG_(message)(Vg_DebugMsg,
- " memcheck: max sec V bit nodes: %d (%ldk, %ldM)\n",
+ " memcheck: max sec V bit nodes: %d (%luk, %luM)\n",
max_secVBit_nodes, max_secVBit_szB / 1024,
max_secVBit_szB / (1024 * 1024));
VG_(message)(Vg_DebugMsg,
@@ -7452,7 +7452,7 @@
sec_vbits_new_nodes + sec_vbits_updates,
sec_vbits_new_nodes, sec_vbits_updates );
VG_(message)(Vg_DebugMsg,
- " memcheck: max shadow mem size: %ldk, %ldM\n",
+ " memcheck: max shadow mem size: %luk, %luM\n",
max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
if (MC_(clo_mc_level) >= 3) {
@@ -7472,8 +7472,8 @@
stats_ocacheL1_found_at_N,
stats_ocacheL1_movefwds );
VG_(message)(Vg_DebugMsg,
- " ocacheL1: %'12lu sizeB %'12u useful\n",
- (UWord)sizeof(OCache),
+ " ocacheL1: %'12lu sizeB %'12d useful\n",
+ (SizeT)sizeof(OCache),
4 * OC_W32S_PER_LINE * OC_LINES_PER_SET * OC_N_SETS );
VG_(message)(Vg_DebugMsg,
" ocacheL2: %'12lu refs %'12lu misses\n",
Modified: trunk/memcheck/mc_malloc_wrappers.c
==============================================================================
--- trunk/memcheck/mc_malloc_wrappers.c (original)
+++ trunk/memcheck/mc_malloc_wrappers.c Tue Aug 4 22:26:10 2015
@@ -680,7 +680,7 @@
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
+ VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %u, %d)\n",
pool, rzB, is_zeroed);
VG_(get_and_pp_StackTrace)
(VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
@@ -779,7 +779,7 @@
}
VG_(message)(Vg_UserMsg,
- "Total mempools active: %d pools, %d chunks\n",
+ "Total mempools active: %u pools, %u chunks\n",
total_pools, total_chunks);
tick = 0;
}
@@ -792,7 +792,7 @@
for (i = 0; i < n_chunks-1; i++) {
if (chunks[i]->data > chunks[i+1]->data) {
VG_(message)(Vg_UserMsg,
- "Mempool chunk %d / %d is out of order "
+ "Mempool chunk %u / %u is out of order "
"wrt. its successor\n",
i+1, n_chunks);
bad = 1;
@@ -803,7 +803,7 @@
for (i = 0; i < n_chunks-1; i++) {
if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
VG_(message)(Vg_UserMsg,
- "Mempool chunk %d / %d overlaps with its successor\n",
+ "Mempool chunk %u / %u overlaps with its successor\n",
i+1, n_chunks);
bad = 1;
}
@@ -811,11 +811,11 @@
if (bad) {
VG_(message)(Vg_UserMsg,
- "Bad mempool (%d chunks), dumping chunks for inspection:\n",
+ "Bad mempool (%u chunks), dumping chunks for inspection:\n",
n_chunks);
for (i = 0; i < n_chunks; ++i) {
VG_(message)(Vg_UserMsg,
- "Mempool chunk %d / %d: %ld bytes "
+ "Mempool chunk %u / %u: %lu bytes "
"[%lx,%lx), allocated:\n",
i+1,
n_chunks,
@@ -834,7 +834,7 @@
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
+ VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
pool, addr, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@@ -884,7 +884,7 @@
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg,
- "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
+ "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
pool, addr, mc->szB + 0UL);
}
@@ -902,7 +902,7 @@
VgHashNode** chunks;
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
+ VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
pool, addr, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@@ -1036,7 +1036,7 @@
ThreadId tid = VG_(get_running_tid)();
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
+ VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
pool, addrA, addrB, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
|
|
From: Florian K. <fl...@ei...> - 2015-08-04 20:58:29
|
On 04.08.2015 20:22, Matthias Schwarzott wrote: > Am 04.08.2015 um 17:58 schrieb sv...@va...: >> Author: florian >> Date: Tue Aug 4 16:58:41 2015 >> New Revision: 15482 >> >> Log: >> Rewrite memcheck's event counter machinery. >> - unique event counters >> - simplify PROF_EVENT >> - do not allocate more event counters than needed >> - compile cleanly >> >> Modified: >> trunk/memcheck/mc_include.h >> trunk/memcheck/mc_leakcheck.c >> trunk/memcheck/mc_main.c >> > > Hi Florian, > > this is definitely a nice solution. > If you do not like the two lists - enum constant definition and name > assignment - to be maintained, there is a possible improvement: > Have one header file with custom macro calls to define the events and > include it twice with different macro definitions. > Matthias, yes, that would have been an alternative implementation, more concise. Now that you mention it I remember having seen this trick being used in the GCC source tree. But I did not think of it at the time. And now I'm too lazy to change it. :) That being said, I'm not opposed to receiving a patch. Note, there is a safe guard in the code. Should somebody add an enumerator and forget to define a corresponding event counter name, there will be an assert. Florian |
|
From: Philippe W. <phi...@sk...> - 2015-08-04 19:27:20
|
On Tue, 2015-08-04 at 08:35 +0200, Matthias Schwarzott wrote: > * Fix missing compiler options: [PATCH 1/2] Fix compilation of libvex > tests when additional compiler options are needed. This is committed as revision 15485. Thanks for the patch. Philippe |
|
From: <sv...@va...> - 2015-08-04 19:26:00
|
Author: philippe
Date: Tue Aug 4 20:25:53 2015
New Revision: 15485
Log:
Ensure libvex tests in none are compiling on amd64, when configuring for 32 bits only
Patch from Matthias Schwarzott
Modified:
trunk/none/tests/Makefile.am
Modified: trunk/none/tests/Makefile.am
==============================================================================
--- trunk/none/tests/Makefile.am (original)
+++ trunk/none/tests/Makefile.am Tue Aug 4 20:25:53 2015
@@ -276,10 +276,10 @@
nestedfns_CFLAGS = $(AM_CFLAGS)
mq_LDADD = -lrt
endif
-libvex_test_CFLAGS = @FLAG_FSANITIZE@
+libvex_test_CFLAGS = $(AM_CFLAGS)@FLAG_FSANITIZE@
libvex_test_LDADD = ../../VEX/libvex-@VGCONF_ARCH_PRI@-@VGCONF_OS@.a \
@LIB_UBSAN@
-libvexmultiarch_test_CFLAGS= @FLAG_FSANITIZE@
+libvexmultiarch_test_CFLAGS= $(AM_CFLAGS) @FLAG_FSANITIZE@
libvexmultiarch_test_LDADD = \
../../VEX/libvexmultiarch-@VGCONF_ARCH_PRI@-@VGCONF_OS@.a \
../../VEX/libvex-@VGCONF_ARCH_PRI@-@VGCONF_OS@.a @LIB_UBSAN@
|
|
From: <sv...@va...> - 2015-08-04 19:12:12
|
Author: philippe
Date: Tue Aug 4 20:12:05 2015
New Revision: 15484
Log:
Refine NEWS entry for memcheck xb monitor command
Modified:
trunk/NEWS
Modified: trunk/NEWS
==============================================================================
--- trunk/NEWS (original)
+++ trunk/NEWS Tue Aug 4 20:12:05 2015
@@ -16,9 +16,9 @@
- A new monitor command 'xb <addr> <len>' shows the validity bits
of <len> bytes at <addr>. Below the validity bits, the byte
values are shown using a layout similar to the GDB command
- 'x /<len>xb <addr>'. The monitor command 'xb' is easier to use
- (in particular on little endian computers) when you need to associate
- byte data value with their corresponding validity bits.
+ 'x /<len>xb <addr>'. The monitor command 'xb' is easier to use than
+ get_vbits (in particular on little endian computers) when you need to
+ associate byte data value with their corresponding validity bits.
* Massif:
- New monitor command 'all_snapshots <filename>' that dumps all snapshots
|
|
From: <sv...@va...> - 2015-08-04 19:11:11
|
Author: philippe
Date: Tue Aug 4 20:11:03 2015
New Revision: 15483
Log:
* Memcheck:
- A new monitor command 'xb <addr> <len>' shows the validity bits
of <len> bytes at <addr>. Below the validity bits, the byte
values are shown using a layout similar to the GDB command
'x /<len>xb <addr>'. The monitor command 'xb' is easier to use
(in particular on little endian computers) when you need to associate
byte data value with their corresponding validity bits.
Modified:
trunk/NEWS
trunk/gdbserver_tests/mchelp.stdoutB.exp
trunk/memcheck/docs/mc-manual.xml
trunk/memcheck/mc_main.c
Modified: trunk/NEWS
==============================================================================
--- trunk/NEWS (original)
+++ trunk/NEWS Tue Aug 4 20:11:03 2015
@@ -13,6 +13,12 @@
* ==================== TOOL CHANGES ====================
* Memcheck:
+ - A new monitor command 'xb <addr> <len>' shows the validity bits
+ of <len> bytes at <addr>. Below the validity bits, the byte
+ values are shown using a layout similar to the GDB command
+ 'x /<len>xb <addr>'. The monitor command 'xb' is easier to use
+ (in particular on little endian computers) when you need to associate
+ byte data value with their corresponding validity bits.
* Massif:
- New monitor command 'all_snapshots <filename>' that dumps all snapshots
Modified: trunk/gdbserver_tests/mchelp.stdoutB.exp
==============================================================================
--- trunk/gdbserver_tests/mchelp.stdoutB.exp (original)
+++ trunk/gdbserver_tests/mchelp.stdoutB.exp Tue Aug 4 20:11:03 2015
@@ -13,10 +13,14 @@
v.set merge-recursive-frames <num> : merge recursive calls in max <num> frames
v.set vgdb-error <errornr> : debug me at error >= <errornr>
memcheck monitor commands:
- get_vbits <addr> [<len>]
- returns validity bits for <len> (or 1) bytes at <addr>
+ xb <addr> [<len>]
+ prints validity bits for <len> (or 1) bytes at <addr>
bit values 0 = valid, 1 = invalid, __ = unaddressable byte
- Example: get_vbits 0x........ 10
+ Then prints the bytes values below the corresponding validity bits
+ in a layout similar to the gdb command 'x /<len>xb <addr>'
+ Example: xb 0x........ 10
+ get_vbits <addr> [<len>]
+ Similar to xb, but only prints the validity bytes by group of 4.
make_memory [noaccess|undefined
|defined|Definedifaddressable] <addr> [<len>]
mark <len> (or 1) bytes at <addr> with the given accessibility
@@ -73,10 +77,14 @@
(default traceflags 0b00100000 : show after instrumentation)
An additional flag 0b100000000 allows to show gdbserver instrumentation
memcheck monitor commands:
- get_vbits <addr> [<len>]
- returns validity bits for <len> (or 1) bytes at <addr>
+ xb <addr> [<len>]
+ prints validity bits for <len> (or 1) bytes at <addr>
bit values 0 = valid, 1 = invalid, __ = unaddressable byte
- Example: get_vbits 0x........ 10
+ Then prints the bytes values below the corresponding validity bits
+ in a layout similar to the gdb command 'x /<len>xb <addr>'
+ Example: xb 0x........ 10
+ get_vbits <addr> [<len>]
+ Similar to xb, but only prints the validity bytes by group of 4.
make_memory [noaccess|undefined
|defined|Definedifaddressable] <addr> [<len>]
mark <len> (or 1) bytes at <addr> with the given accessibility
Modified: trunk/memcheck/docs/mc-manual.xml
==============================================================================
--- trunk/memcheck/docs/mc-manual.xml (original)
+++ trunk/memcheck/docs/mc-manual.xml Tue Aug 4 20:11:03 2015
@@ -1648,36 +1648,51 @@
<itemizedlist>
<listitem>
- <para><varname>get_vbits <addr> [<len>]</varname>
- shows the definedness (V) bits for <len> (default 1) bytes
- starting at <addr>. The definedness of each byte in the
- range is given using two hexadecimal digits. These hexadecimal
- digits encode the validity of each bit of the corresponding byte,
- using 0 if the bit is defined and 1 if the bit is undefined.
- If a byte is not addressable, its validity bits are replaced
- by <varname>__</varname> (a double underscore).
+ <para><varname>xb <addr> [<len>]</varname>
+ shows the definedness (V) bits and values for <len> (default 1)
+ bytes starting at <addr>.
+ For each 8 bytes, two lines are output.
+ </para>
+ <para>
+ The first line shows the validity bits for 8 bytes.
+ The definedness of each byte in the range is given using two hexadecimal
+ digits. These hexadecimal digits encode the validity of each bit of the
+ corresponding byte,
+ using 0 if the bit is defined and 1 if the bit is undefined.
+ If a byte is not addressable, its validity bits are replaced
+ by <varname>__</varname> (a double underscore).
+ </para>
+ <para>
+ The second line shows the values of the bytes below the corresponding
+ validity bits. The format used to show the bytes data is similar to the
+ GDB command 'x /<len>xb <addr>'. The value for a non
+ addressable byte is shown as ?? (two question marks).
</para>
<para>
- In the following example, <varname>string10</varname> is an array
- of 10 characters, in which the even numbered bytes are
- undefined. In the below example, the byte corresponding
- to <varname>string10[5]</varname> is not addressable.
+ In the following example, <varname>string10</varname> is an array
+ of 10 characters, in which the even numbered bytes are
+ undefined. In the below example, the byte corresponding
+ to <varname>string10[5]</varname> is not addressable.
</para>
<programlisting><![CDATA[
(gdb) p &string10
-$4 = (char (*)[10]) 0x8049e28
-(gdb) monitor get_vbits 0x8049e28 10
-ff00ff00 ff__ff00 ff00
-(gdb)
+$4 = (char (*)[10]) 0x804a2f0
+(gdb) mo xb 0x804a2f0 10
+ ff 00 ff 00 ff __ ff 00
+0x804A2F0: 0x3f 0x6e 0x3f 0x65 0x3f 0x?? 0x3f 0x65
+ ff 00
+0x804A2F8: 0x3f 0x00
+Address 0x804A2F0 len 10 has 1 bytes unaddressable
+(gdb)
]]></programlisting>
- <para> The command get_vbits cannot be used with registers. To get
- the validity bits of a register, you must start Valgrind with the
- option <option>--vgdb-shadow-registers=yes</option>. The validity
- bits of a register can be obtained by printing the 'shadow 1'
- corresponding register. In the below x86 example, the register
- eax has all its bits undefined, while the register ebx is fully
- defined.
+ <para> The command xb cannot be used with registers. To get
+ the validity bits of a register, you must start Valgrind with the
+ option <option>--vgdb-shadow-registers=yes</option>. The validity
+ bits of a register can then be obtained by printing the 'shadow 1'
+ corresponding register. In the below x86 example, the register
+ eax has all its bits undefined, while the register ebx is fully
+ defined.
</para>
<programlisting><![CDATA[
(gdb) p /x $eaxs1
@@ -1690,6 +1705,31 @@
</listitem>
<listitem>
+ <para><varname>get_vbits <addr> [<len>]</varname>
+ shows the definedness (V) bits for <len> (default 1) bytes
+ starting at <addr> using the same convention as the
+ <varname>xb</varname> command. <varname>get_vbits</varname> only
+ shows the V bits (grouped by 4 bytes). It does not show the values.
+ If you want to associate V bits with the corresponding byte values, the
+ <varname>xb</varname> command will be easier to use, in particular
+ on little endian computers when associating undefined parts of an integer
+ with their V bits values.
+ </para>
+ <para>
+ The following example shows the result of <varname>get_vbits</varname>
+ on the <varname>string10</varname> used in the <varname>xb</varname>
+ command explanation.
+ </para>
+<programlisting><![CDATA[
+(gdb) monitor get_vbits 0x804a2f0 10
+ff00ff00 ff__ff00 ff00
+Address 0x804A2F0 len 10 has 1 bytes unaddressable
+(gdb)
+]]></programlisting>
+
+ </listitem>
+
+ <listitem>
<para><varname>make_memory
[noaccess|undefined|defined|Definedifaddressable] <addr>
[<len>]</varname> marks the range of <len> (default 1)
Modified: trunk/memcheck/mc_main.c
==============================================================================
--- trunk/memcheck/mc_main.c (original)
+++ trunk/memcheck/mc_main.c Tue Aug 4 20:11:03 2015
@@ -6010,10 +6010,14 @@
(
"\n"
"memcheck monitor commands:\n"
-" get_vbits <addr> [<len>]\n"
-" returns validity bits for <len> (or 1) bytes at <addr>\n"
+" xb <addr> [<len>]\n"
+" prints validity bits for <len> (or 1) bytes at <addr>\n"
" bit values 0 = valid, 1 = invalid, __ = unaddressable byte\n"
-" Example: get_vbits 0x8049c78 10\n"
+" Then prints the bytes values below the corresponding validity bits\n"
+" in a layout similar to the gdb command 'x /<len>xb <addr>'\n"
+" Example: xb 0x8049c78 10\n"
+" get_vbits <addr> [<len>]\n"
+" Similar to xb, but only prints the validity bytes by group of 4.\n"
" make_memory [noaccess|undefined\n"
" |defined|Definedifaddressable] <addr> [<len>]\n"
" mark <len> (or 1) bytes at <addr> with the given accessibility\n"
@@ -6043,6 +6047,28 @@
"\n");
}
+/* Print szB bytes at address, with a format similar to the gdb command
+ x /<szB>xb address.
+ res[i] == 1 indicates the corresponding byte is addressable. */
+static void gdb_xb (Addr address, SizeT szB, Int res[])
+{
+ UInt i;
+
+ for (i = 0; i < szB; i++) {
+ UInt bnr = i % 8;
+ if (bnr == 0) {
+ if (i != 0)
+ VG_(printf) ("\n"); // Terminate previous line
+ VG_(printf) ("%p:", (void*)(address+i));
+ }
+ if (res[i] == 1)
+ VG_(printf) ("\t0x%02x", *(UChar*)(address+i));
+ else
+ VG_(printf) ("\t0x??");
+ }
+ VG_(printf) ("\n"); // Terminate previous line
+}
+
/* return True if request recognised, False otherwise */
static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
{
@@ -6058,7 +6084,7 @@
command. This ensures a shorter abbreviation for the user. */
switch (VG_(keyword_id)
("help get_vbits leak_check make_memory check_memory "
- "block_list who_points_at",
+ "block_list who_points_at xb",
wcmd, kwd_report_duplicated_matches)) {
case -2: /* multiple matches */
return True;
@@ -6191,8 +6217,8 @@
else if (int_value > 0)
lcp.max_loss_records_output = (UInt) int_value;
else
- VG_(gdb_printf) ("max_loss_records_output must be >= 1, got %d\n",
- int_value);
+ VG_(gdb_printf) ("max_loss_records_output must be >= 1,"
+ " got %d\n", int_value);
break;
}
default:
@@ -6303,7 +6329,8 @@
if (wl == NULL || *endptr != '\0') {
VG_(gdb_printf) ("malformed or missing integer\n");
} else {
- // lr_nr-1 as what is shown to the user is 1 more than the index in lr_array.
+ /* lr_nr-1 as what is shown to the user is 1 more than the index
+ in lr_array. */
if (lr_nr == 0 || ! MC_(print_block_list) (lr_nr-1))
VG_(gdb_printf) ("invalid loss record nr\n");
}
@@ -6324,6 +6351,52 @@
return True;
}
+ case 7: { /* xb */
+ Addr address;
+ SizeT szB = 1;
+ if (VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr)) {
+ UChar vbits[8];
+ Int res[8];
+ Int i;
+ Int unaddressable = 0;
+ for (i = 0; i < szB; i++) {
+ Int bnr = i % 8;
+ res[bnr] = mc_get_or_set_vbits_for_client
+ (address+i, (Addr) &vbits[bnr], 1,
+ False, /* get them */
+ False /* is client request */ );
+ /* We are going to print the first vabits of a new line.
+ Terminate the previous line if needed: prints a line with the
+ address and the data. */
+ if (bnr == 0) {
+ if (i != 0) {
+ VG_(printf) ("\n");
+ gdb_xb (address + i - 8, 8, res);
+ }
+ VG_(printf) ("\t"); // To align VABITS with gdb_xb layout
+ }
+ if (res[bnr] == 1) {
+ VG_(printf) ("\t %02x", vbits[bnr]);
+ } else {
+ tl_assert(3 == res[bnr]);
+ unaddressable++;
+ VG_(printf) ("\t __");
+ }
+ }
+ VG_(printf) ("\n");
+ if (szB % 8 == 0 && szB > 0)
+ gdb_xb (address + szB - 8, 8, res);
+ else
+ gdb_xb (address + szB - szB % 8, szB % 8, res);
+ if (unaddressable) {
+ VG_(printf)
+ ("Address %p len %ld has %d bytes unaddressable\n",
+ (void *)address, szB, unaddressable);
+ }
+ }
+ return True;
+ }
+
default:
tl_assert(0);
return False;
|
|
From: Matthias S. <zz...@ge...> - 2015-08-04 18:23:02
|
Am 04.08.2015 um 17:58 schrieb sv...@va...:
> Author: florian
> Date: Tue Aug 4 16:58:41 2015
> New Revision: 15482
>
> Log:
> Rewrite memcheck's event counter machinery.
> - unique event counters
> - simplify PROF_EVENT
> - do not allocate more event counters than needed
> - compile cleanly
>
> Modified:
> trunk/memcheck/mc_include.h
> trunk/memcheck/mc_leakcheck.c
> trunk/memcheck/mc_main.c
>
Hi Florian,
this is definitely a nice solution.
If you do not like the two lists - enum constant definition and name
assignment - to be maintained, there is a possible improvement:
Have one header file with custom macro calls to define the events and
include it twice with different macro definitions.
The alternative version with only one macro argument is added in comments.
mc_prof_events.h:
DEFINE_PROF_EVENT(MCPE_LOADV8, "LOADV8")
// or: DEFINE_PROF_EVENT(LOADV8)
...
mc_include.h:
#define DEFINE_PROF_EVENT(a,b) a,
// or: #define DEFINE_PROF_EVENT(a) MCPE_##a,
enum {
# include "mc_prof_events.h"
/* Do not add enumerators past this line. */
MCPE_LAST
};
#undef DEFINE_PROF_EVENT
mc_main.c:
#define DEFINE_PROF_EVENT(a,b) [a] = b,
// or: #define DEFINE_PROF_EVENT(a) [MCPE_##a] = #a,
static const HChar* MC_(event_ctr_name)[MCPE_LAST] =
{
# include "mc_prof_events.h"
};
#undef DEFINE_PROF_EVENT
Regards
Matthias
|
|
From: <sv...@va...> - 2015-08-04 15:58:51
|
Author: florian
Date: Tue Aug 4 16:58:41 2015
New Revision: 15482
Log:
Rewrite memcheck's event counter machinery.
- unique event counters
- simplify PROF_EVENT
- do not allocate more event counters than needed
- compile cleanly
Modified:
trunk/memcheck/mc_include.h
trunk/memcheck/mc_leakcheck.c
trunk/memcheck/mc_main.c
Modified: trunk/memcheck/mc_include.h
==============================================================================
--- trunk/memcheck/mc_include.h (original)
+++ trunk/memcheck/mc_include.h Tue Aug 4 16:58:41 2015
@@ -207,26 +207,129 @@
/* Define to collect detailed performance info. */
/* #define MC_PROFILE_MEMORY */
-
#ifdef MC_PROFILE_MEMORY
-# define N_PROF_EVENTS 500
-
-UInt MC_(event_ctr)[N_PROF_EVENTS];
-HChar* MC_(event_ctr_name)[N_PROF_EVENTS];
-# define PROF_EVENT(ev, name) \
- do { tl_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
- /* crude and inaccurate check to ensure the same */ \
- /* event isn't being used with > 1 name */ \
- if (MC_(event_ctr_name)[ev]) \
- tl_assert(name == MC_(event_ctr_name)[ev]); \
- MC_(event_ctr)[ev]++; \
- MC_(event_ctr_name)[ev] = (name); \
+/* Order of enumerators does not matter. But MCPE_LAST has to be the
+ last entry in the list as it is used as an array bound. */
+enum {
+ MCPE_LOADV8,
+ MCPE_LOADV8_SLOW1,
+ MCPE_LOADV8_SLOW2,
+ MCPE_LOADV16,
+ MCPE_LOADV16_SLOW1,
+ MCPE_LOADV16_SLOW2,
+ MCPE_LOADV32,
+ MCPE_LOADV32_SLOW1,
+ MCPE_LOADV32_SLOW2,
+ MCPE_LOADV64,
+ MCPE_LOADV64_SLOW1,
+ MCPE_LOADV64_SLOW2,
+ MCPE_LOADV_128_OR_256,
+ MCPE_LOADV_128_OR_256_SLOW_LOOP,
+ MCPE_LOADV_128_OR_256_SLOW1,
+ MCPE_LOADV_128_OR_256_SLOW2,
+ MCPE_LOADVN_SLOW,
+ MCPE_LOADVN_SLOW_LOOP,
+ MCPE_STOREV8,
+ MCPE_STOREV8_SLOW1,
+ MCPE_STOREV8_SLOW2,
+ MCPE_STOREV8_SLOW3,
+ MCPE_STOREV8_SLOW4,
+ MCPE_STOREV16,
+ MCPE_STOREV16_SLOW1,
+ MCPE_STOREV16_SLOW2,
+ MCPE_STOREV16_SLOW3,
+ MCPE_STOREV16_SLOW4,
+ MCPE_STOREV32,
+ MCPE_STOREV32_SLOW1,
+ MCPE_STOREV32_SLOW2,
+ MCPE_STOREV32_SLOW3,
+ MCPE_STOREV32_SLOW4,
+ MCPE_STOREV64,
+ MCPE_STOREV64_SLOW1,
+ MCPE_STOREV64_SLOW2,
+ MCPE_STOREV64_SLOW3,
+ MCPE_STOREV64_SLOW4,
+ MCPE_STOREVN_SLOW,
+ MCPE_STOREVN_SLOW_LOOP,
+ MCPE_MAKE_ALIGNED_WORD32_UNDEFINED,
+ MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW,
+ MCPE_MAKE_ALIGNED_WORD64_UNDEFINED,
+ MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW,
+ MCPE_MAKE_ALIGNED_WORD32_NOACCESS,
+ MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW,
+ MCPE_MAKE_ALIGNED_WORD64_NOACCESS,
+ MCPE_MAKE_ALIGNED_WORD64_NOACCESS_SLOW,
+ MCPE_MAKE_MEM_NOACCESS,
+ MCPE_MAKE_MEM_UNDEFINED,
+ MCPE_MAKE_MEM_UNDEFINED_W_OTAG,
+ MCPE_MAKE_MEM_DEFINED,
+ MCPE_CHEAP_SANITY_CHECK,
+ MCPE_EXPENSIVE_SANITY_CHECK,
+ MCPE_COPY_ADDRESS_RANGE_STATE,
+ MCPE_COPY_ADDRESS_RANGE_STATE_LOOP1,
+ MCPE_COPY_ADDRESS_RANGE_STATE_LOOP2,
+ MCPE_CHECK_MEM_IS_NOACCESS,
+ MCPE_CHECK_MEM_IS_NOACCESS_LOOP,
+ MCPE_IS_MEM_ADDRESSABLE,
+ MCPE_IS_MEM_ADDRESSABLE_LOOP,
+ MCPE_IS_MEM_DEFINED,
+ MCPE_IS_MEM_DEFINED_LOOP,
+ MCPE_IS_MEM_DEFINED_COMPREHENSIVE,
+ MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP,
+ MCPE_IS_DEFINED_ASCIIZ,
+ MCPE_IS_DEFINED_ASCIIZ_LOOP,
+ MCPE_FIND_CHUNK_FOR_OLD,
+ MCPE_FIND_CHUNK_FOR_OLD_LOOP,
+ MCPE_SET_ADDRESS_RANGE_PERMS,
+ MCPE_SET_ADDRESS_RANGE_PERMS_SINGLE_SECMAP,
+ MCPE_SET_ADDRESS_RANGE_PERMS_STARTOF_SECMAP,
+ MCPE_SET_ADDRESS_RANGE_PERMS_MULTIPLE_SECMAPS,
+ MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1,
+ MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2,
+ MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1_QUICK,
+ MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2_QUICK,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1A,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1B,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1C,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K,
+ MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K_FREE_DIST_SM,
+ MCPE_NEW_MEM_STACK,
+ MCPE_NEW_MEM_STACK_4,
+ MCPE_NEW_MEM_STACK_8,
+ MCPE_NEW_MEM_STACK_12,
+ MCPE_NEW_MEM_STACK_16,
+ MCPE_NEW_MEM_STACK_32,
+ MCPE_NEW_MEM_STACK_112,
+ MCPE_NEW_MEM_STACK_128,
+ MCPE_NEW_MEM_STACK_144,
+ MCPE_NEW_MEM_STACK_160,
+ MCPE_DIE_MEM_STACK,
+ MCPE_DIE_MEM_STACK_4,
+ MCPE_DIE_MEM_STACK_8,
+ MCPE_DIE_MEM_STACK_12,
+ MCPE_DIE_MEM_STACK_16,
+ MCPE_DIE_MEM_STACK_32,
+ MCPE_DIE_MEM_STACK_112,
+ MCPE_DIE_MEM_STACK_128,
+ MCPE_DIE_MEM_STACK_144,
+ MCPE_DIE_MEM_STACK_160,
+ /* Do not add enumerators past this line. */
+ MCPE_LAST
+};
+
+extern UInt MC_(event_ctr)[MCPE_LAST];
+
+# define PROF_EVENT(ev) \
+ do { tl_assert((ev) >= 0 && (ev) < MCPE_LAST); \
+ MC_(event_ctr)[ev]++; \
} while (False);
#else
-# define PROF_EVENT(ev, name) /* */
+# define PROF_EVENT(ev) /* */
#endif /* MC_PROFILE_MEMORY */
Modified: trunk/memcheck/mc_leakcheck.c
==============================================================================
--- trunk/memcheck/mc_leakcheck.c (original)
+++ trunk/memcheck/mc_leakcheck.c Tue Aug 4 16:58:41 2015
@@ -265,9 +265,9 @@
{
Int i;
Addr a_lo, a_hi;
- PROF_EVENT(70, "find_chunk_for_OLD");
+ PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD);
for (i = 0; i < n_chunks; i++) {
- PROF_EVENT(71, "find_chunk_for_OLD(loop)");
+ PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD_LOOP);
a_lo = chunks[i]->data;
a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
if (a_lo <= ptr && ptr < a_hi)
Modified: trunk/memcheck/mc_main.c
==============================================================================
--- trunk/memcheck/mc_main.c (original)
+++ trunk/memcheck/mc_main.c Tue Aug 4 16:58:41 2015
@@ -1,3 +1,4 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
@@ -1235,7 +1236,7 @@
ULong pessim64 = V_BITS64_DEFINED;
UWord long_index = byte_offset_w(szL, bigendian, j);
for (i = 8-1; i >= 0; i--) {
- PROF_EVENT(29, "mc_LOADV_128_or_256_slow(loop)");
+ PROF_EVENT(MCPE_LOADV_128_OR_256_SLOW_LOOP);
ai = a + 8*long_index + byte_offset_w(8, bigendian, i);
ok = get_vbits8(ai, &vbits8);
vbits64 <<= 8;
@@ -1316,7 +1317,7 @@
this function may get called from hand written assembly. */
ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
{
- PROF_EVENT(30, "mc_LOADVn_slow");
+ PROF_EVENT(MCPE_LOADVN_SLOW);
/* ------------ BEGIN semi-fast cases ------------ */
/* These deal quickly-ish with the common auxiliary primary map
@@ -1370,7 +1371,7 @@
info can be gleaned from pessim64) but is used as a
cross-check. */
for (i = szB-1; i >= 0; i--) {
- PROF_EVENT(31, "mc_LOADVn_slow(loop)");
+ PROF_EVENT(MCPE_LOADVN_SLOW_LOOP);
ai = a + byte_offset_w(szB, bigendian, i);
ok = get_vbits8(ai, &vbits8);
vbits64 <<= 8;
@@ -1468,7 +1469,7 @@
Addr ai;
Bool ok;
- PROF_EVENT(35, "mc_STOREVn_slow");
+ PROF_EVENT(MCPE_STOREVN_SLOW);
/* ------------ BEGIN semi-fast cases ------------ */
/* These deal quickly-ish with the common auxiliary primary map
@@ -1530,7 +1531,7 @@
/* Dump vbytes in memory, iterating from least to most significant
byte. At the same time establish addressibility of the location. */
for (i = 0; i < szB; i++) {
- PROF_EVENT(36, "mc_STOREVn_slow(loop)");
+ PROF_EVENT(MCPE_STOREVN_SLOW_LOOP);
ai = a + byte_offset_w(szB, bigendian, i);
vbits8 = vbytes & 0xff;
ok = set_vbits8(ai, vbits8);
@@ -1559,7 +1560,7 @@
SecMap** sm_ptr;
SecMap* example_dsm;
- PROF_EVENT(150, "set_address_range_perms");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS);
/* Check the V+A bits make sense. */
tl_assert(VA_BITS16_NOACCESS == vabits16 ||
@@ -1637,19 +1638,19 @@
len_to_next_secmap = aNext - a;
if ( lenT <= len_to_next_secmap ) {
// Range entirely within one sec-map. Covers almost all cases.
- PROF_EVENT(151, "set_address_range_perms-single-secmap");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_SINGLE_SECMAP);
lenA = lenT;
lenB = 0;
} else if (is_start_of_sm(a)) {
// Range spans at least one whole sec-map, and starts at the beginning
// of a sec-map; skip to Part 2.
- PROF_EVENT(152, "set_address_range_perms-startof-secmap");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_STARTOF_SECMAP);
lenA = 0;
lenB = lenT;
goto part2;
} else {
// Range spans two or more sec-maps, first one is partial.
- PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_MULTIPLE_SECMAPS);
lenA = len_to_next_secmap;
lenB = lenT - lenA;
}
@@ -1666,11 +1667,11 @@
if (is_distinguished_sm(*sm_ptr)) {
if (*sm_ptr == example_dsm) {
// Sec-map already has the V+A bits that we want, so skip.
- PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1_QUICK);
a = aNext;
lenA = 0;
} else {
- PROF_EVENT(155, "set_address_range_perms-dist-sm1");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1);
*sm_ptr = copy_for_writing(*sm_ptr);
}
}
@@ -1680,7 +1681,7 @@
while (True) {
if (VG_IS_8_ALIGNED(a)) break;
if (lenA < 1) break;
- PROF_EVENT(156, "set_address_range_perms-loop1a");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1A);
sm_off = SM_OFF(a);
insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
a += 1;
@@ -1689,7 +1690,7 @@
// 8-aligned, 8 byte steps
while (True) {
if (lenA < 8) break;
- PROF_EVENT(157, "set_address_range_perms-loop8a");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A);
sm_off16 = SM_OFF_16(a);
((UShort*)(sm->vabits8))[sm_off16] = vabits16;
a += 8;
@@ -1698,7 +1699,7 @@
// 1 byte steps
while (True) {
if (lenA < 1) break;
- PROF_EVENT(158, "set_address_range_perms-loop1b");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1B);
sm_off = SM_OFF(a);
insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
a += 1;
@@ -1719,10 +1720,10 @@
while (True) {
if (lenB < SM_SIZE) break;
tl_assert(is_start_of_sm(a));
- PROF_EVENT(159, "set_address_range_perms-loop64K");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K);
sm_ptr = get_secmap_ptr(a);
if (!is_distinguished_sm(*sm_ptr)) {
- PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K_FREE_DIST_SM);
// Free the non-distinguished sec-map that we're replacing. This
// case happens moderately often, enough to be worthwhile.
SysRes sres = VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
@@ -1750,10 +1751,10 @@
if (is_distinguished_sm(*sm_ptr)) {
if (*sm_ptr == example_dsm) {
// Sec-map already has the V+A bits that we want, so stop.
- PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2_QUICK);
return;
} else {
- PROF_EVENT(162, "set_address_range_perms-dist-sm2");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2);
*sm_ptr = copy_for_writing(*sm_ptr);
}
}
@@ -1762,7 +1763,7 @@
// 8-aligned, 8 byte steps
while (True) {
if (lenB < 8) break;
- PROF_EVENT(163, "set_address_range_perms-loop8b");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B);
sm_off16 = SM_OFF_16(a);
((UShort*)(sm->vabits8))[sm_off16] = vabits16;
a += 8;
@@ -1771,7 +1772,7 @@
// 1 byte steps
while (True) {
if (lenB < 1) return;
- PROF_EVENT(164, "set_address_range_perms-loop1c");
+ PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1C);
sm_off = SM_OFF(a);
insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
a += 1;
@@ -1784,7 +1785,7 @@
void MC_(make_mem_noaccess) ( Addr a, SizeT len )
{
- PROF_EVENT(40, "MC_(make_mem_noaccess)");
+ PROF_EVENT(MCPE_MAKE_MEM_NOACCESS);
DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
if (UNLIKELY( MC_(clo_mc_level) == 3 ))
@@ -1793,14 +1794,14 @@
static void make_mem_undefined ( Addr a, SizeT len )
{
- PROF_EVENT(41, "make_mem_undefined");
+ PROF_EVENT(MCPE_MAKE_MEM_UNDEFINED);
DEBUG("make_mem_undefined(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
}
void MC_(make_mem_undefined_w_otag) ( Addr a, SizeT len, UInt otag )
{
- PROF_EVENT(43, "MC_(make_mem_undefined)");
+ PROF_EVENT(MCPE_MAKE_MEM_UNDEFINED_W_OTAG);
DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
if (UNLIKELY( MC_(clo_mc_level) == 3 ))
@@ -1837,7 +1838,7 @@
void MC_(make_mem_defined) ( Addr a, SizeT len )
{
- PROF_EVENT(42, "MC_(make_mem_defined)");
+ PROF_EVENT(MCPE_MAKE_MEM_DEFINED);
DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
if (UNLIKELY( MC_(clo_mc_level) == 3 ))
@@ -1897,7 +1898,7 @@
Bool aligned, nooverlap;
DEBUG("MC_(copy_address_range_state)\n");
- PROF_EVENT(50, "MC_(copy_address_range_state)");
+ PROF_EVENT(MCPE_COPY_ADDRESS_RANGE_STATE);
if (len == 0 || src == dst)
return;
@@ -1947,7 +1948,7 @@
/* We have to do things the slow way */
if (src < dst) {
for (i = 0, j = len-1; i < len; i++, j--) {
- PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
+ PROF_EVENT(MCPE_COPY_ADDRESS_RANGE_STATE_LOOP1);
vabits2 = get_vabits2( src+j );
set_vabits2( dst+j, vabits2 );
if (VA_BITS2_PARTDEFINED == vabits2) {
@@ -1958,7 +1959,7 @@
if (src > dst) {
for (i = 0; i < len; i++) {
- PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
+ PROF_EVENT(MCPE_COPY_ADDRESS_RANGE_STATE_LOOP2);
vabits2 = get_vabits2( src+i );
set_vabits2( dst+i, vabits2 );
if (VA_BITS2_PARTDEFINED == vabits2) {
@@ -2606,7 +2607,7 @@
static INLINE void make_aligned_word32_undefined ( Addr a )
{
- PROF_EVENT(300, "make_aligned_word32_undefined");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD32_UNDEFINED);
#ifndef PERF_FAST_STACK2
make_mem_undefined(a, 4);
@@ -2616,7 +2617,7 @@
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
- PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW);
make_mem_undefined(a, 4);
return;
}
@@ -2649,7 +2650,7 @@
static INLINE
void make_aligned_word32_noaccess ( Addr a )
{
- PROF_EVENT(310, "make_aligned_word32_noaccess");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD32_NOACCESS);
#ifndef PERF_FAST_STACK2
MC_(make_mem_noaccess)(a, 4);
@@ -2659,7 +2660,7 @@
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
- PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW);
MC_(make_mem_noaccess)(a, 4);
return;
}
@@ -2690,7 +2691,7 @@
static INLINE void make_aligned_word64_undefined ( Addr a )
{
- PROF_EVENT(320, "make_aligned_word64_undefined");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD64_UNDEFINED);
#ifndef PERF_FAST_STACK2
make_mem_undefined(a, 8);
@@ -2700,7 +2701,7 @@
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
- PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW);
make_mem_undefined(a, 8);
return;
}
@@ -2734,7 +2735,7 @@
static INLINE
void make_aligned_word64_noaccess ( Addr a )
{
- PROF_EVENT(330, "make_aligned_word64_noaccess");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD64_NOACCESS);
#ifndef PERF_FAST_STACK2
MC_(make_mem_noaccess)(a, 8);
@@ -2744,7 +2745,7 @@
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
- PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD64_NOACCESS_SLOW);
MC_(make_mem_noaccess)(a, 8);
return;
}
@@ -2786,7 +2787,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_4_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(110, "new_mem_stack_4");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_4);
if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
} else {
@@ -2797,7 +2798,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
{
- PROF_EVENT(110, "new_mem_stack_4");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_4);
if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
} else {
@@ -2808,7 +2809,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
{
- PROF_EVENT(120, "die_mem_stack_4");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_4);
if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
} else {
@@ -2822,7 +2823,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_8_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(111, "new_mem_stack_8");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_8);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
} else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
@@ -2836,7 +2837,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
{
- PROF_EVENT(111, "new_mem_stack_8");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_8);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
} else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
@@ -2850,7 +2851,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
{
- PROF_EVENT(121, "die_mem_stack_8");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_8);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
} else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
@@ -2867,7 +2868,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_12_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(112, "new_mem_stack_12");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_12);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
@@ -2885,7 +2886,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
{
- PROF_EVENT(112, "new_mem_stack_12");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_12);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
@@ -2903,7 +2904,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
{
- PROF_EVENT(122, "die_mem_stack_12");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_12);
/* Note the -12 in the test */
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {
/* We have 8-alignment at -12, hence ok to do 8 at -12 and 4 at
@@ -2927,7 +2928,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_16_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(113, "new_mem_stack_16");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_16);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
@@ -2946,7 +2947,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
{
- PROF_EVENT(113, "new_mem_stack_16");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_16);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
@@ -2965,7 +2966,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
{
- PROF_EVENT(123, "die_mem_stack_16");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_16);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Have 8-alignment at +0, hence do 8 at -16 and 8 at -8. */
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
@@ -2986,7 +2987,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_32_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(114, "new_mem_stack_32");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_32);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Straightforward */
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
@@ -3009,7 +3010,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
{
- PROF_EVENT(114, "new_mem_stack_32");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_32);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Straightforward */
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
@@ -3032,7 +3033,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
{
- PROF_EVENT(124, "die_mem_stack_32");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_32);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
/* Straightforward */
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
@@ -3058,7 +3059,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_112_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(115, "new_mem_stack_112");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_112);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8 , otag );
@@ -3082,7 +3083,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
{
- PROF_EVENT(115, "new_mem_stack_112");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_112);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
@@ -3106,7 +3107,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
{
- PROF_EVENT(125, "die_mem_stack_112");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_112);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
@@ -3133,7 +3134,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_128_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(116, "new_mem_stack_128");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_128);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8 , otag );
@@ -3159,7 +3160,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
{
- PROF_EVENT(116, "new_mem_stack_128");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_128);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
@@ -3185,7 +3186,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
{
- PROF_EVENT(126, "die_mem_stack_128");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_128);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
@@ -3214,7 +3215,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_144_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(117, "new_mem_stack_144");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_144);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
@@ -3242,7 +3243,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
{
- PROF_EVENT(117, "new_mem_stack_144");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_144);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
@@ -3270,7 +3271,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
{
- PROF_EVENT(127, "die_mem_stack_144");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_144);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
@@ -3301,7 +3302,7 @@
static void VG_REGPARM(2) mc_new_mem_stack_160_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(118, "new_mem_stack_160");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_160);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
@@ -3331,7 +3332,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
{
- PROF_EVENT(118, "new_mem_stack_160");
+ PROF_EVENT(MCPE_NEW_MEM_STACK_160);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
@@ -3361,7 +3362,7 @@
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
{
- PROF_EVENT(128, "die_mem_stack_160");
+ PROF_EVENT(MCPE_DIE_MEM_STACK_160);
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
@@ -3393,19 +3394,19 @@
static void mc_new_mem_stack_w_ECU ( Addr a, SizeT len, UInt ecu )
{
UInt otag = ecu | MC_OKIND_STACK;
- PROF_EVENT(115, "new_mem_stack_w_otag");
+ PROF_EVENT(MCPE_NEW_MEM_STACK);
MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + a, len, otag );
}
static void mc_new_mem_stack ( Addr a, SizeT len )
{
- PROF_EVENT(115, "new_mem_stack");
+ PROF_EVENT(MCPE_NEW_MEM_STACK);
make_mem_undefined ( -VG_STACK_REDZONE_SZB + a, len );
}
static void mc_die_mem_stack ( Addr a, SizeT len )
{
- PROF_EVENT(125, "die_mem_stack");
+ PROF_EVENT(MCPE_DIE_MEM_STACK);
MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
}
@@ -3759,9 +3760,9 @@
SizeT i;
UWord vabits2;
- PROF_EVENT(60, "check_mem_is_noaccess");
+ PROF_EVENT(MCPE_CHECK_MEM_IS_NOACCESS);
for (i = 0; i < len; i++) {
- PROF_EVENT(61, "check_mem_is_noaccess(loop)");
+ PROF_EVENT(MCPE_CHECK_MEM_IS_NOACCESS_LOOP);
vabits2 = get_vabits2(a);
if (VA_BITS2_NOACCESS != vabits2) {
if (bad_addr != NULL) *bad_addr = a;
@@ -3778,9 +3779,9 @@
SizeT i;
UWord vabits2;
- PROF_EVENT(62, "is_mem_addressable");
+ PROF_EVENT(MCPE_IS_MEM_ADDRESSABLE);
for (i = 0; i < len; i++) {
- PROF_EVENT(63, "is_mem_addressable(loop)");
+ PROF_EVENT(MCPE_IS_MEM_ADDRESSABLE_LOOP);
vabits2 = get_vabits2(a);
if (VA_BITS2_NOACCESS == vabits2) {
if (bad_addr != NULL) *bad_addr = a;
@@ -3798,13 +3799,13 @@
SizeT i;
UWord vabits2;
- PROF_EVENT(64, "is_mem_defined");
+ PROF_EVENT(MCPE_IS_MEM_DEFINED);
DEBUG("is_mem_defined\n");
if (otag) *otag = 0;
if (bad_addr) *bad_addr = 0;
for (i = 0; i < len; i++) {
- PROF_EVENT(65, "is_mem_defined(loop)");
+ PROF_EVENT(MCPE_IS_MEM_DEFINED_LOOP);
vabits2 = get_vabits2(a);
if (VA_BITS2_DEFINED != vabits2) {
// Error! Nb: Report addressability errors in preference to
@@ -3851,13 +3852,13 @@
UWord vabits2;
Bool already_saw_errV = False;
- PROF_EVENT(64, "is_mem_defined"); // fixme
+ PROF_EVENT(MCPE_IS_MEM_DEFINED_COMPREHENSIVE);
DEBUG("is_mem_defined_comprehensive\n");
tl_assert(!(*errorV || *errorA));
for (i = 0; i < len; i++) {
- PROF_EVENT(65, "is_mem_defined(loop)"); // fixme
+ PROF_EVENT(MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP);
vabits2 = get_vabits2(a);
switch (vabits2) {
case VA_BITS2_DEFINED:
@@ -3896,13 +3897,13 @@
{
UWord vabits2;
- PROF_EVENT(66, "mc_is_defined_asciiz");
+ PROF_EVENT(MCPE_IS_DEFINED_ASCIIZ);
DEBUG("mc_is_defined_asciiz\n");
if (otag) *otag = 0;
if (bad_addr) *bad_addr = 0;
while (True) {
- PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
+ PROF_EVENT(MCPE_IS_DEFINED_ASCIIZ_LOOP);
vabits2 = get_vabits2(a);
if (VA_BITS2_DEFINED != vabits2) {
// Error! Nb: Report addressability errors in preference to
@@ -4436,7 +4437,7 @@
void mc_LOADV_128_or_256 ( /*OUT*/ULong* res,
Addr a, SizeT nBits, Bool isBigEndian )
{
- PROF_EVENT(200, "mc_LOADV_128_or_256");
+ PROF_EVENT(MCPE_LOADV_128_OR_256);
#ifndef PERF_FAST_LOADV
mc_LOADV_128_or_256_slow( res, a, nBits, isBigEndian );
@@ -4449,7 +4450,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,nBits) )) {
- PROF_EVENT(201, "mc_LOADV_128_or_256-slow1");
+ PROF_EVENT(MCPE_LOADV_128_OR_256_SLOW1);
mc_LOADV_128_or_256_slow( res, a, nBits, isBigEndian );
return;
}
@@ -4470,7 +4471,7 @@
} else {
/* Slow case: some block of 8 bytes are not all-defined or
all-undefined. */
- PROF_EVENT(202, "mc_LOADV_128_or_256-slow2");
+ PROF_EVENT(MCPE_LOADV_128_OR_256_SLOW2);
mc_LOADV_128_or_256_slow( res, a, nBits, isBigEndian );
return;
}
@@ -4505,7 +4506,7 @@
static INLINE
ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
{
- PROF_EVENT(200, "mc_LOADV64");
+ PROF_EVENT(MCPE_LOADV64);
#ifndef PERF_FAST_LOADV
return mc_LOADVn_slow( a, 64, isBigEndian );
@@ -4515,7 +4516,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,64) )) {
- PROF_EVENT(201, "mc_LOADV64-slow1");
+ PROF_EVENT(MCPE_LOADV64_SLOW1);
return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
}
@@ -4532,7 +4533,7 @@
return V_BITS64_UNDEFINED;
} else {
/* Slow case: the 8 bytes are not all-defined or all-undefined. */
- PROF_EVENT(202, "mc_LOADV64-slow2");
+ PROF_EVENT(MCPE_LOADV64_SLOW2);
return mc_LOADVn_slow( a, 64, isBigEndian );
}
}
@@ -4636,7 +4637,7 @@
static INLINE
void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
{
- PROF_EVENT(210, "mc_STOREV64");
+ PROF_EVENT(MCPE_STOREV64);
#ifndef PERF_FAST_STOREV
// XXX: this slow case seems to be marginally faster than the fast case!
@@ -4648,7 +4649,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,64) )) {
- PROF_EVENT(211, "mc_STOREV64-slow1");
+ PROF_EVENT(MCPE_STOREV64_SLOW1);
mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
return;
}
@@ -4667,7 +4668,7 @@
((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
return;
}
- PROF_EVENT(232, "mc_STOREV64-slow2");
+ PROF_EVENT(MCPE_STOREV64_SLOW2);
mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
return;
}
@@ -4679,12 +4680,12 @@
((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
return;
}
- PROF_EVENT(232, "mc_STOREV64-slow3");
+ PROF_EVENT(MCPE_STOREV64_SLOW3);
mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
return;
}
- PROF_EVENT(212, "mc_STOREV64-slow4");
+ PROF_EVENT(MCPE_STOREV64_SLOW4);
mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
}
#endif
@@ -4706,7 +4707,7 @@
static INLINE
UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
{
- PROF_EVENT(220, "mc_LOADV32");
+ PROF_EVENT(MCPE_LOADV32);
#ifndef PERF_FAST_LOADV
return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
@@ -4716,7 +4717,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,32) )) {
- PROF_EVENT(221, "mc_LOADV32-slow1");
+ PROF_EVENT(MCPE_LOADV32_SLOW1);
return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
}
@@ -4735,7 +4736,7 @@
return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
} else {
/* Slow case: the 4 bytes are not all-defined or all-undefined. */
- PROF_EVENT(222, "mc_LOADV32-slow2");
+ PROF_EVENT(MCPE_LOADV32_SLOW2);
return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
}
}
@@ -4832,7 +4833,7 @@
static INLINE
void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
{
- PROF_EVENT(230, "mc_STOREV32");
+ PROF_EVENT(MCPE_STOREV32);
#ifndef PERF_FAST_STOREV
mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
@@ -4842,7 +4843,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,32) )) {
- PROF_EVENT(231, "mc_STOREV32-slow1");
+ PROF_EVENT(MCPE_STOREV32_SLOW1);
mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
return;
}
@@ -4861,7 +4862,7 @@
sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
return;
}
- PROF_EVENT(232, "mc_STOREV32-slow2");
+ PROF_EVENT(MCPE_STOREV32_SLOW2);
mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
return;
}
@@ -4873,12 +4874,12 @@
sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
return;
}
- PROF_EVENT(233, "mc_STOREV32-slow3");
+ PROF_EVENT(MCPE_STOREV32_SLOW3);
mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
return;
}
- PROF_EVENT(234, "mc_STOREV32-slow4");
+ PROF_EVENT(MCPE_STOREV32_SLOW4);
mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
}
#endif
@@ -4900,7 +4901,7 @@
static INLINE
UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
{
- PROF_EVENT(240, "mc_LOADV16");
+ PROF_EVENT(MCPE_LOADV16);
#ifndef PERF_FAST_LOADV
return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
@@ -4910,7 +4911,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,16) )) {
- PROF_EVENT(241, "mc_LOADV16-slow1");
+ PROF_EVENT(MCPE_LOADV16_SLOW1);
return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
}
@@ -4930,7 +4931,7 @@
else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
else {
/* Slow case: the two bytes are not all-defined or all-undefined. */
- PROF_EVENT(242, "mc_LOADV16-slow2");
+ PROF_EVENT(MCPE_LOADV16_SLOW2);
return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
}
}
@@ -5070,7 +5071,7 @@
static INLINE
void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
{
- PROF_EVENT(250, "mc_STOREV16");
+ PROF_EVENT(MCPE_STOREV16);
#ifndef PERF_FAST_STOREV
mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
@@ -5080,7 +5081,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,16) )) {
- PROF_EVENT(251, "mc_STOREV16-slow1");
+ PROF_EVENT(MCPE_STOREV16_SLOW1);
mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
return;
}
@@ -5101,7 +5102,7 @@
&(sm->vabits8[sm_off]) );
return;
}
- PROF_EVENT(232, "mc_STOREV16-slow2");
+ PROF_EVENT(MCPE_STOREV16_SLOW2);
mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
}
if (V_BITS16_UNDEFINED == vbits16) {
@@ -5114,12 +5115,12 @@
&(sm->vabits8[sm_off]) );
return;
}
- PROF_EVENT(233, "mc_STOREV16-slow3");
+ PROF_EVENT(MCPE_STOREV16_SLOW3);
mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
return;
}
- PROF_EVENT(234, "mc_STOREV16-slow4");
+ PROF_EVENT(MCPE_STOREV16_SLOW4);
mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
}
#endif
@@ -5238,7 +5239,7 @@
VG_REGPARM(1)
UWord MC_(helperc_LOADV8) ( Addr a )
{
- PROF_EVENT(260, "mc_LOADV8");
+ PROF_EVENT(MCPE_LOADV8);
#ifndef PERF_FAST_LOADV
return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
@@ -5248,7 +5249,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,8) )) {
- PROF_EVENT(261, "mc_LOADV8-slow1");
+ PROF_EVENT(MCPE_LOADV8_SLOW1);
return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
}
@@ -5268,7 +5269,7 @@
else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
else {
/* Slow case: the byte is not all-defined or all-undefined. */
- PROF_EVENT(262, "mc_LOADV8-slow2");
+ PROF_EVENT(MCPE_LOADV8_SLOW2);
return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
}
}
@@ -5284,7 +5285,7 @@
VG_REGPARM(2)
void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
{
- PROF_EVENT(270, "mc_STOREV8");
+ PROF_EVENT(MCPE_STOREV8);
#ifndef PERF_FAST_STOREV
mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
@@ -5294,7 +5295,7 @@
SecMap* sm;
if (UNLIKELY( UNALIGNED_OR_HIGH(a,8) )) {
- PROF_EVENT(271, "mc_STOREV8-slow1");
+ PROF_EVENT(MCPE_STOREV8_SLOW1);
mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
return;
}
@@ -5350,7 +5351,7 @@
&(sm->vabits8[sm_off]) );
return;
}
- PROF_EVENT(232, "mc_STOREV8-slow2");
+ PROF_EVENT(MCPE_STOREV8_SLOW2);
mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
return;
}
@@ -5366,13 +5367,13 @@
&(sm->vabits8[sm_off]) );
return;
}
- PROF_EVENT(233, "mc_STOREV8-slow3");
+ PROF_EVENT(MCPE_STOREV8_SLOW3);
mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
return;
}
// Partially defined word
- PROF_EVENT(234, "mc_STOREV8-slow4");
+ PROF_EVENT(MCPE_STOREV8_SLOW4);
mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
}
#endif
@@ -5583,7 +5584,7 @@
static Bool mc_cheap_sanity_check ( void )
{
n_sanity_cheap++;
- PROF_EVENT(490, "cheap_sanity_check");
+ PROF_EVENT(MCPE_CHEAP_SANITY_CHECK);
/* Check for sane operating level */
if (MC_(clo_mc_level) < 1 || MC_(clo_mc_level) > 3)
return False;
@@ -5603,7 +5604,7 @@
if (0) return True;
n_sanity_expensive++;
- PROF_EVENT(491, "expensive_sanity_check");
+ PROF_EVENT(MCPE_EXPENSIVE_SANITY_CHECK);
/* Check for sane operating level */
if (MC_(clo_mc_level) < 1 || MC_(clo_mc_level) > 3)
@@ -6672,33 +6673,159 @@
#ifdef MC_PROFILE_MEMORY
-UInt MC_(event_ctr)[N_PROF_EVENTS];
-HChar* MC_(event_ctr_name)[N_PROF_EVENTS];
+UInt MC_(event_ctr)[MCPE_LAST];
+
+/* Event counter names. Use the name of the function that increases the
+ event counter. Drop any MC_() and mc_ prefices. */
+static const HChar* MC_(event_ctr_name)[MCPE_LAST] = {
+ [MCPE_LOADVN_SLOW] = "LOADVn_slow",
+ [MCPE_LOADVN_SLOW_LOOP] = "LOADVn_slow_loop",
+ [MCPE_STOREVN_SLOW] = "STOREVn_slow",
+ [MCPE_STOREVN_SLOW_LOOP] = "STOREVn_slow(loop)",
+ [MCPE_MAKE_ALIGNED_WORD32_UNDEFINED] = "make_aligned_word32_undefined",
+ [MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW] =
+ "make_aligned_word32_undefined_slow",
+ [MCPE_MAKE_ALIGNED_WORD64_UNDEFINED] = "make_aligned_word64_undefined",
+ [MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW] =
+ "make_aligned_word64_undefined_slow",
+ [MCPE_MAKE_ALIGNED_WORD32_NOACCESS] = "make_aligned_word32_noaccess",
+ [MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW] =
+ "make_aligned_word32_noaccess_slow",
+ [MCPE_MAKE_ALIGNED_WORD64_NOACCESS] = "make_aligned_word64_noaccess",
+ [MCPE_MAKE_ALIGNED_WORD64_NOACCESS_SLOW] =
+ "make_aligned_word64_noaccess_slow",
+ [MCPE_MAKE_MEM_NOACCESS] = "make_mem_noaccess",
+ [MCPE_MAKE_MEM_UNDEFINED] = "make_mem_undefined",
+ [MCPE_MAKE_MEM_UNDEFINED_W_OTAG] = "make_mem_undefined_w_otag",
+ [MCPE_MAKE_MEM_DEFINED] = "make_mem_defined",
+ [MCPE_CHEAP_SANITY_CHECK] = "cheap_sanity_check",
+ [MCPE_EXPENSIVE_SANITY_CHECK] = "expensive_sanity_check",
+ [MCPE_COPY_ADDRESS_RANGE_STATE] = "copy_address_range_state",
+ [MCPE_COPY_ADDRESS_RANGE_STATE_LOOP1] = "copy_address_range_state(loop1)",
+ [MCPE_COPY_ADDRESS_RANGE_STATE_LOOP2] = "copy_address_range_state(loop2)",
+ [MCPE_CHECK_MEM_IS_NOACCESS] = "check_mem_is_noaccess",
+ [MCPE_CHECK_MEM_IS_NOACCESS_LOOP] = "check_mem_is_noaccess(loop)",
+ [MCPE_IS_MEM_ADDRESSABLE] = "is_mem_addressable",
+ [MCPE_IS_MEM_ADDRESSABLE_LOOP] = "is_mem_addressable(loop)",
+ [MCPE_IS_MEM_DEFINED] = "is_mem_defined",
+ [MCPE_IS_MEM_DEFINED_LOOP] = "is_mem_defined(loop)",
+ [MCPE_IS_MEM_DEFINED_COMPREHENSIVE] = "is_mem_defined_comprehensive",
+ [MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP] =
+ "is_mem_defined_comprehensive(loop)",
+ [MCPE_IS_DEFINED_ASCIIZ] = "is_defined_asciiz",
+ [MCPE_IS_DEFINED_ASCIIZ_LOOP] = "is_defined_asciiz(loop)",
+ [MCPE_FIND_CHUNK_FOR_OLD] = "find_chunk_for_OLD",
+ [MCPE_FIND_CHUNK_FOR_OLD_LOOP] = "find_chunk_for_OLD(loop)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS] = "set_address_range_perms",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_SINGLE_SECMAP] =
+ "set_address_range_perms(single-secmap)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_STARTOF_SECMAP] =
+ "set_address_range_perms(startof-secmap)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_MULTIPLE_SECMAPS] =
+ "set_address_range_perms(multiple-secmaps)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1] =
+ "set_address_range_perms(dist-sm1)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2] =
+ "set_address_range_perms(dist-sm2)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1_QUICK] =
+ "set_address_range_perms(dist-sm1-quick)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2_QUICK] =
+ "set_address_range_perms(dist-sm2-quick)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1A] = "set_address_range_perms(loop1a)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1B] = "set_address_range_perms(loop1b)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1C] = "set_address_range_perms(loop1c)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A] = "set_address_range_perms(loop8a)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B] = "set_address_range_perms(loop8b)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K] = "set_address_range_perms(loop64K)",
+ [MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K_FREE_DIST_SM] =
+ "set_address_range_perms(loop64K-free-dist-sm)",
+ [MCPE_LOADV_128_OR_256_SLOW_LOOP] = "LOADV_128_or_256_slow(loop)",
+ [MCPE_LOADV_128_OR_256] = "LOADV_128_or_256",
+ [MCPE_LOADV_128_OR_256_SLOW1] = "LOADV_128_or_256-slow1",
+ [MCPE_LOADV_128_OR_256_SLOW2] = "LOADV_128_or_256-slow2",
+ [MCPE_LOADV64] = "LOADV64",
+ [MCPE_LOADV64_SLOW1] = "LOADV64-slow1",
+ [MCPE_LOADV64_SLOW2] = "LOADV64-slow2",
+ [MCPE_STOREV64] = "STOREV64",
+ [MCPE_STOREV64_SLOW1] = "STOREV64-slow1",
+ [MCPE_STOREV64_SLOW2] = "STOREV64-slow2",
+ [MCPE_STOREV64_SLOW3] = "STOREV64-slow3",
+ [MCPE_STOREV64_SLOW4] = "STOREV64-slow4",
+ [MCPE_LOADV32] = "LOADV32",
+ [MCPE_LOADV32_SLOW1] = "LOADV32-slow1",
+ [MCPE_LOADV32_SLOW2] = "LOADV32-slow2",
+ [MCPE_STOREV32] = "STOREV32",
+ [MCPE_STOREV32_SLOW1] = "STOREV32-slow1",
+ [MCPE_STOREV32_SLOW2] = "STOREV32-slow2",
+ [MCPE_STOREV32_SLOW3] = "STOREV32-slow3",
+ [MCPE_STOREV32_SLOW4] = "STOREV32-slow4",
+ [MCPE_LOADV16] = "LOADV16",
+ [MCPE_LOADV16_SLOW1] = "LOADV16-slow1",
+ [MCPE_LOADV16_SLOW2] = "LOADV16-slow2",
+ [MCPE_STOREV16] = "STOREV16",
+ [MCPE_STOREV16_SLOW1] = "STOREV16-slow1",
+ [MCPE_STOREV16_SLOW2] = "STOREV16-slow2",
+ [MCPE_STOREV16_SLOW3] = "STOREV16-slow3",
+ [MCPE_STOREV16_SLOW4] = "STOREV16-slow4",
+ [MCPE_LOADV8] = "LOADV8",
+ [MCPE_LOADV8_SLOW1] = "LOADV8-slow1",
+ [MCPE_LOADV8_SLOW2] = "LOADV8-slow2",
+ [MCPE_STOREV8] = "STOREV8",
+ [MCPE_STOREV8_SLOW1] = "STOREV8-slow1",
+ [MCPE_STOREV8_SLOW2] = "STOREV8-slow2",
+ [MCPE_STOREV8_SLOW3] = "STOREV8-slow3",
+ [MCPE_STOREV8_SLOW4] = "STOREV8-slow4",
+ [MCPE_NEW_MEM_STACK_4] = "new_mem_stack_4",
+ [MCPE_NEW_MEM_STACK_8] = "new_mem_stack_8",
+ [MCPE_NEW_MEM_STACK_12] = "new_mem_stack_12",
+ [MCPE_NEW_MEM_STACK_16] = "new_mem_stack_16",
+ [MCPE_NEW_MEM_STACK_32] = "new_mem_stack_32",
+ [MCPE_NEW_MEM_STACK_112] = "new_mem_stack_112",
+ [MCPE_NEW_MEM_STACK_128] = "new_mem_stack_128",
+ [MCPE_NEW_MEM_STACK_144] = "new_mem_stack_144",
+ [MCPE_NEW_MEM_STACK_160] = "new_mem_stack_160",
+ [MCPE_DIE_MEM_STACK_4] = "die_mem_stack_4",
+ [MCPE_DIE_MEM_STACK_8] = "die_mem_stack_8",
+ [MCPE_DIE_MEM_STACK_12] = "die_mem_stack_12",
+ [MCPE_DIE_MEM_STACK_16] = "die_mem_stack_16",
+ [MCPE_DIE_MEM_STACK_32] = "die_mem_stack_32",
+ [MCPE_DIE_MEM_STACK_112] = "die_mem_stack_112",
+ [MCPE_DIE_MEM_STACK_128] = "die_mem_stack_128",
+ [MCPE_DIE_MEM_STACK_144] = "die_mem_stack_144",
+ [MCPE_DIE_MEM_STACK_160] = "die_mem_stack_160",
+ [MCPE_NEW_MEM_STACK] = "new_mem_stack",
+ [MCPE_DIE_MEM_STACK] = "die_mem_stack",
+};
static void init_prof_mem ( void )
{
- Int i;
- for (i = 0; i < N_PROF_EVENTS; i++) {
+ Int i, name_count = 0;
+
+ for (i = 0; i < MCPE_LAST; i++) {
MC_(event_ctr)[i] = 0;
- MC_(event_ctr_name)[i] = NULL;
+ if (MC_(event_ctr_name)[i] != NULL)
+ ++name_count;
}
+
+ /* Make sure every profiling event has a name */
+ tl_assert(name_count == MCPE_LAST);
}
static void done_prof_mem ( void )
{
- Int i;
+ Int i, n;
Bool spaced = False;
- for (i = 0; i < N_PROF_EVENTS; i++) {
- if (!spaced && (i % 10) == 0) {
+ for (i = n = 0; i < MCPE_LAST; i++) {
+ if (!spaced && (n % 10) == 0) {
VG_(printf)("\n");
spaced = True;
}
if (MC_(event_ctr)[i] > 0) {
spaced = False;
- VG_(printf)( "prof mem event %3d: %9d %s\n",
+ ++n;
+ VG_(printf)( "prof mem event %3d: %9u %s\n",
i, MC_(event_ctr)[i],
- MC_(event_ctr_name)[i]
- ? MC_(event_ctr_name)[i] : "unnamed");
+ MC_(event_ctr_name)[i]);
}
}
}
|
|
From: Florian K. <fl...@ei...> - 2015-08-04 07:08:03
|
On 04.08.2015 03:37, Zhi-Gang Liu wrote:
> Sorry, it failed for the Makefile consistency check. I will fix it.
Yes, that has been fixed. However, I also said:
> On Mon, Aug 3, 2015 at 5:33 AM, Florian Krohm <fl...@ei...>
wrote:
>
>>
>> Also make sure that "make dist BUILD_ALL_DOCS=no" runs successfully
to completion.
This is broken:
$ make dist BUILD_ALL_DOCS=no
make dist-bzip2 dist-gzip am__post_remove_distdir='@:'
make[1]: Entering directory `/home/florian/valgrind/orig'
....
(cd mips64 && make top_distdir=../../../valgrind-3.11.0.SVN
distdir=../../../valgrind-3.11.0.SVN/none/tests/mips64 \
am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=:
distdir)
make[4]: Entering directory `/home/florian/valgrind/orig/none/tests/mips64'
make[4]: Leaving directory `/home/florian/valgrind/orig/none/tests/mips64'
(cd tilegx && make top_distdir=../../../valgrind-3.11.0.SVN
distdir=../../../valgrind-3.11.0.SVN/none/tests/tilegx \
am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=:
distdir)
make[4]: Entering directory `/home/florian/valgrind/orig/none/tests/tilegx'
make[4]: *** No rule to make target `insn_test_add_X0.c', needed by
`distdir'. Stop.
make[4]: Leaving directory `/home/florian/valgrind/orig/none/tests/tilegx'
make[3]: *** [distdir] Error 1
make[3]: Leaving directory `/home/florian/valgrind/orig/none/tests'
make[2]: *** [distdir] Error 1
make[2]: Leaving directory `/home/florian/valgrind/orig/none'
make[1]: *** [distdir] Error 1
make[1]: Leaving directory `/home/florian/valgrind/orig'
make: *** [dist] Error 2
|
|
From: Matthias S. <zz...@ge...> - 2015-08-04 06:35:55
|
Am 10.07.2015 um 11:56 schrieb Julian Seward:
>
> Greetings.
>
Hi!
> It'll soon be time for another X.Y.0 release. Personally, I'd prefer
> to call it 4.0.0 rather than 3.11.0, in keeping with recent
> rationalisation of the Linux kernel and GCC numbering schemes.
>
> I'd like to propose the following:
>
> 7 August 2015: feature freeze
> 1 September 2015: release
>
> This puts it at roughly a year since 3.10, and also ties in
> conveniently with the tentative Fedora 23 schedule.
>
> On the whole I think the tree is in a pretty good shape. Items I'd
> like to see completed:
>
> Make sure that gcc 5.1 and glibc 2.22 support is solid
> Fix as many bugs in docs/internals/3_10_BUGSTATUS.txt as possible
> Initial support for MacOSX 10.11
> Finish reviewing and merge the Solaris port, if feasible
>
> Comments on the timing? Other stuff people want get in?

My wishlist is:

Bugfixes:
* Fix missing compiler options:
  [PATCH 1/2] Fix compilation of libvex tests when additional compiler
  options are needed.
* Fix profiling numbers that trigger assertion fails:
  [PATCH 2/2] memcheck: Fix prof_event numbers
* Fix signedness of profile prints:
  Re: [Valgrind-developers] [PATCH 2/2] memcheck: Fix prof_event numbers

Features:
* Write callstack of fatal signal to xml output:
  https://bugs.kde.org/show_bug.cgi?id=191069
* Not implemented yet: Let exit from replaced functions not call exit,
  but either a client request or raise(SIGXXXX). What about this?
* Print more callstacks for mempool errors:
  https://bugs.kde.org/show_bug.cgi?id=322256

Regards
Matthias
|
From: <sv...@va...> - 2015-08-04 04:59:49
|
Author: zliu
Date: Tue Aug 4 05:59:41 2015
New Revision: 15481
Log:
Fix the "make regtest" error caused by the new tilegx instruction tests
Modified:
trunk/none/tests/tilegx/Makefile.am
Modified: trunk/none/tests/tilegx/Makefile.am
==============================================================================
--- trunk/none/tests/tilegx/Makefile.am (original)
+++ trunk/none/tests/tilegx/Makefile.am Tue Aug 4 05:59:41 2015
@@ -4,7 +4,10 @@
dist_noinst_SCRIPTS = \
filter_stderr
-EXTRA_DIST = \
+EXTRA_DIST =
+
+if VGCONF_ARCHS_INCLUDE_TILEGX
+EXTRA_DIST += \
insn_test_move_X0.stdout.exp insn_test_move_X0.stderr.exp \
insn_test_move_X0.vgtest \
insn_test_move_X1.stdout.exp insn_test_move_X1.stderr.exp \
@@ -1058,10 +1061,14 @@
insn_test_xori_X0.vgtest \
insn_test_xori_X1.stdout.exp insn_test_xori_X1.stderr.exp \
insn_test_xori_X1.vgtest
+endif
bin_PROGRAMS = gen_insn_test
-insn_tests = \
+insn_tests =
+
+if VGCONF_ARCHS_INCLUDE_TILEGX
+insn_tests += \
insn_test_move_X0 \
insn_test_move_X1 \
insn_test_move_Y0 \
@@ -1577,6 +1584,7 @@
insn_test_xor_Y1 \
insn_test_xori_X0 \
insn_test_xori_X1
+endif
check_PROGRAMS = \
allexec \
|
|
From: <sv...@va...> - 2015-08-03 21:21:49
|
Author: florian
Date: Mon Aug 3 22:21:42 2015
New Revision: 15480
Log:
Fix printf format inconsistencies as pointed out by gcc -Wformat-signedness.
Modified:
trunk/helgrind/hg_basics.c
trunk/helgrind/hg_basics.h
trunk/helgrind/hg_errors.c
trunk/helgrind/hg_main.c
trunk/helgrind/libhb_core.c
Modified: trunk/helgrind/hg_basics.c
==============================================================================
--- trunk/helgrind/hg_basics.c (original)
+++ trunk/helgrind/hg_basics.c Mon Aug 3 22:21:42 2015
@@ -77,7 +77,7 @@
UWord HG_(clo_conflict_cache_size) = 2000000;
-Word HG_(clo_sanity_flags) = 0;
+UWord HG_(clo_sanity_flags) = 0;
Bool HG_(clo_free_is_write) = False;
Modified: trunk/helgrind/hg_basics.h
==============================================================================
--- trunk/helgrind/hg_basics.h (original)
+++ trunk/helgrind/hg_basics.h Mon Aug 3 22:21:42 2015
@@ -101,7 +101,7 @@
/* Sanity check level. This is an or-ing of
SCE_{THREADS,LOCKS,BIGRANGE,ACCESS,LAOG}. */
-extern Word HG_(clo_sanity_flags);
+extern UWord HG_(clo_sanity_flags);
/* Treat heap frees as if the memory was written immediately prior to
the free. This shakes out races in which memory is referenced by
Modified: trunk/helgrind/hg_errors.c
==============================================================================
--- trunk/helgrind/hg_errors.c (original)
+++ trunk/helgrind/hg_errors.c Mon Aug 3 22:21:42 2015
@@ -1296,7 +1296,7 @@
if (threadp->coretid == VG_INVALID_THREADID)
VG_(printf)(" tid (exited)\n");
else
- VG_(printf)(" tid %d\n", threadp->coretid);
+ VG_(printf)(" tid %u\n", threadp->coretid);
{
Lock** locksHeldW_P;
locksHeldW_P = enumerate_WordSet_into_LockP_vector(
Modified: trunk/helgrind/hg_main.c
==============================================================================
--- trunk/helgrind/hg_main.c (original)
+++ trunk/helgrind/hg_main.c Mon Aug 3 22:21:42 2015
@@ -520,7 +520,7 @@
if (thr->coretid == VG_INVALID_THREADID)
VG_(printf)("tid (exited) ");
else
- VG_(printf)("tid %d ", thr->coretid);
+ VG_(printf)("tid %u ", thr->coretid);
}
}
@@ -1037,7 +1037,7 @@
static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
{
if (0 && len > 500)
- VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
+ VG_(printf)("make NoAccess_NoFX ( %#lx, %lu )\n", aIN, len );
// has no effect (NoFX)
libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}
@@ -1045,7 +1045,7 @@
static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
{
if (0 && len > 500)
- VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
+ VG_(printf)("make NoAccess_AHAE ( %#lx, %lu )\n", aIN, len );
// Actually Has An Effect (AHAE)
libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}
@@ -1053,7 +1053,7 @@
static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
if (0 && len > 500)
- VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
+ VG_(printf)("make Untracked ( %#lx, %lu )\n", aIN, len );
libhb_srange_untrack( thr->hbthr, aIN, len );
}
@@ -5021,7 +5021,7 @@
/* --- --- User-visible client requests --- --- */
case VG_USERREQ__HG_CLEAN_MEMORY:
- if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
+ if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%lu)\n",
args[1], args[2]);
/* Call die_mem to (expensively) tidy up properly, if there
are any held locks etc in the area. Calling evh__die_mem
@@ -5053,7 +5053,7 @@
}
case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
- if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
+ if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%lu)\n",
args[1], args[2]);
if (args[2] > 0) { /* length */
evh__untrack_mem(args[1], args[2]);
@@ -5061,7 +5061,7 @@
break;
case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
- if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
+ if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%lu)\n",
args[1], args[2]);
if (args[2] > 0) { /* length */
evh__new_mem(args[1], args[2]);
@@ -5069,7 +5069,7 @@
break;
case _VG_USERREQ__HG_GET_ABITS:
- if (0) VG_(printf)("HG_GET_ABITS(%#lx,%#lx,%ld)\n",
+ if (0) VG_(printf)("HG_GET_ABITS(%#lx,%#lx,%lu)\n",
args[1], args[2], args[3]);
UChar *zzabit = (UChar *) args[2];
if (zzabit == NULL
Modified: trunk/helgrind/libhb_core.c
==============================================================================
--- trunk/helgrind/libhb_core.c (original)
+++ trunk/helgrind/libhb_core.c Mon Aug 3 22:21:42 2015
@@ -1503,7 +1503,7 @@
sequentialise_CacheLine( csvals, &csvalsUsed,
N_LINE_ARANGE, cl );
tl_assert(csvalsUsed >= 1 && csvalsUsed <= N_LINE_ARANGE);
- if (0) VG_(printf)("%lu ", csvalsUsed);
+ if (0) VG_(printf)("%ld ", csvalsUsed);
lineZ->dict[0] = lineZ->dict[1]
= lineZ->dict[2] = lineZ->dict[3] = SVal_INVALID;
@@ -2625,7 +2625,7 @@
n = vts->usedTS;
for (i = 0; i < n; i++) {
const ScalarTS *st = &vts->ts[i];
- VG_(printf)(i < n-1 ? "%u:%llu " : "%u:%llu", st->thrid, (ULong)st->tym);
+ VG_(printf)(i < n-1 ? "%d:%llu " : "%d:%llu", st->thrid, (ULong)st->tym);
}
VG_(printf)("]");
}
@@ -5050,7 +5050,7 @@
(XACmpFn_t)cmp__ULong_n_EC__by_ULong
);
if (0) VG_(printf)("record_race_info %u %u %u confThr %p "
- "confTym %llu found %d (%lu,%lu)\n",
+ "confTym %llu found %d (%ld,%ld)\n",
Cfailed, Kfailed, Cw,
confThr, confTym, found, firstIx, lastIx);
/* We can't indefinitely collect stack traces at VTS
@@ -5779,7 +5779,7 @@
stats__cache_make_New_arange += (ULong)len;
if (0 && len > 500)
- VG_(printf)("make New ( %#lx, %ld )\n", a, len );
+ VG_(printf)("make New ( %#lx, %lu )\n", a, len );
if (0) {
static UWord n_New_in_cache = 0;
@@ -6445,8 +6445,8 @@
live++;
hgthread = hgthread->admin;
}
- VG_(printf)(" libhb: threads live: %d exit_and_joinedwith %d"
- " exit %d joinedwith %d\n",
+ VG_(printf)(" libhb: threads live: %u exit_and_joinedwith %u"
+ " exit %u joinedwith %u\n",
live, llexit_and_joinedwith_done,
llexit_done, joinedwith_done);
VG_(printf)(" libhb: %d verydead_threads, "
@@ -6505,7 +6505,7 @@
/ (Double)(non0chain ? non0chain : 1));
for (i = 0; i <= MAXCHAIN; i++) {
if (chains[i] != 0)
- VG_(printf)( "[%d%s]=%d ",
+ VG_(printf)( "[%u%s]=%u ",
i, i == MAXCHAIN ? "+" : "",
chains[i]);
}
@@ -6860,9 +6860,9 @@
PlenCONSUME(APC, ARE, 1, APClen);
if (0)
- VG_(printf) ("addr %p[%ld] ARE %p"
- " BPC %p[%ld] BFC %p[%ld] FSM %p[%ld]"
- " AFC %p[%ld] APC %p[%ld]\n",
+ VG_(printf) ("addr %p[%lu] ARE %p"
+ " BPC %p[%lu] BFC %p[%lu] FSM %p[%lu]"
+ " AFC %p[%lu] APC %p[%lu]\n",
(void*)addr, len, (void*)ARE,
(void*)BPC, BPClen, (void*)BFC, BFClen, (void*)FSM, FSMlen,
(void*)AFC, AFClen, (void*)APC, APClen);
|
|
From: <sv...@va...> - 2015-08-03 21:05:32
|
Author: florian
Date: Mon Aug 3 22:05:20 2015
New Revision: 15479
Log:
Fix printf format inconsistencies as pointed out by gcc -Wformat-signedness.
Modified:
trunk/exp-bbv/bbv_main.c
trunk/exp-dhat/dh_main.c
trunk/exp-sgcheck/pc_common.c
trunk/exp-sgcheck/sg_main.c
trunk/massif/ms_main.c
Modified: trunk/exp-bbv/bbv_main.c
==============================================================================
--- trunk/exp-bbv/bbv_main.c (original)
+++ trunk/exp-bbv/bbv_main.c Mon Aug 3 22:05:20 2015
@@ -116,8 +116,8 @@
/* and function name for each basic block */
VG_(OSetGen_ResetIter)(instr_info_table);
while ( (bb_elem = VG_(OSetGen_Next)(instr_info_table)) ) {
- VG_(fprintf)( fp, "F:%d:%x:%s\n", bb_elem->block_num,
- (Int)bb_elem->BB_addr, bb_elem->fn_name);
+ VG_(fprintf)( fp, "F:%d:%lx:%s\n", bb_elem->block_num,
+ bb_elem->BB_addr, bb_elem->fn_name);
}
VG_(fclose)(fp);
@@ -565,10 +565,10 @@
VG_(sprintf)(buf,"\n\n"
"# Thread %d\n"
"# Total intervals: %d (Interval Size %d)\n"
- "# Total instructions: %lld\n"
- "# Total reps: %lld\n"
- "# Unique reps: %lld\n"
- "# Total fldcw instructions: %lld\n\n",
+ "# Total instructions: %llu\n"
+ "# Total reps: %llu\n"
+ "# Unique reps: %llu\n"
+ "# Total fldcw instructions: %llu\n\n",
i,
(Int)(bbv_thread[i].total_instr/(ULong)interval_size),
interval_size,
Modified: trunk/exp-dhat/dh_main.c
==============================================================================
--- trunk/exp-dhat/dh_main.c (original)
+++ trunk/exp-dhat/dh_main.c Mon Aug 3 22:05:20 2015
@@ -494,7 +494,7 @@
intro_Block(bk);
- if (0) VG_(printf)("ALLOC %ld -> %p\n", req_szB, p);
+ if (0) VG_(printf)("ALLOC %lu -> %p\n", req_szB, p);
return p;
}
@@ -537,7 +537,7 @@
static
void* renew_block ( ThreadId tid, void* p_old, SizeT new_req_szB )
{
- if (0) VG_(printf)("REALL %p %ld\n", p_old, new_req_szB);
+ if (0) VG_(printf)("REALL %p %lu\n", p_old, new_req_szB);
void* p_new = NULL;
tl_assert(new_req_szB > 0); // map 0 to 1
Modified: trunk/exp-sgcheck/pc_common.c
==============================================================================
--- trunk/exp-sgcheck/pc_common.c (original)
+++ trunk/exp-sgcheck/pc_common.c Mon Aug 3 22:05:20 2015
@@ -564,8 +564,8 @@
what, s );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
- emit( " <auxwhat>Address %#lx is %ld bytes inside a "
- "%ld-byte block free'd</auxwhat>\n",
+ emit( " <auxwhat>Address %#lx is %lu bytes inside a "
+ "%lu-byte block free'd</auxwhat>\n",
lo, lo-Seg__addr(seglo), Seg__size(seglo) );
VG_(pp_ExeContext)(Seg__where(seglo));
@@ -575,8 +575,8 @@
what, s );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
- emit( " Address %#lx is %ld bytes inside a "
- "%ld-byte block free'd\n",
+ emit( " Address %#lx is %lu bytes inside a "
+ "%lu-byte block free'd\n",
lo, lo-Seg__addr(seglo), Seg__size(seglo) );
VG_(pp_ExeContext)(Seg__where(seglo));
@@ -595,8 +595,8 @@
emit( " <auxwhat>First byte is "
"not inside a known block</auxwhat>\n" );
} else {
- emit( " <auxwhat>First byte (%#lx) is %ld bytes inside a "
- "%ld-byte block alloc'd</auxwhat>\n",
+ emit( " <auxwhat>First byte (%#lx) is %lu bytes inside a "
+ "%lu-byte block alloc'd</auxwhat>\n",
lo, lo-Seg__addr(seglo), Seg__size(seglo) );
VG_(pp_ExeContext)(Seg__where(seglo));
}
@@ -605,8 +605,8 @@
emit( " <auxwhat>Last byte is "
"not inside a known block</auxwhat>\n" );
} else {
- emit( " <auxwhat>Last byte (%#lx) is %ld bytes inside a "
- "%ld-byte block alloc'd</auxwhat>\n",
+ emit( " <auxwhat>Last byte (%#lx) is %lu bytes inside a "
+ "%lu-byte block alloc'd</auxwhat>\n",
hi, hi-Seg__addr(seghi), Seg__size(seghi) );
VG_(pp_ExeContext)(Seg__where(seghi));
}
@@ -620,8 +620,8 @@
if (UNKNOWN == seglo) {
emit( " First byte is not inside a known block\n" );
} else {
- emit( " First byte (%#lx) is %ld bytes inside a "
- "%ld-byte block alloc'd\n",
+ emit( " First byte (%#lx) is %lu bytes inside a "
+ "%lu-byte block alloc'd\n",
lo, lo-Seg__addr(seglo), Seg__size(seglo) );
VG_(pp_ExeContext)(Seg__where(seglo));
}
@@ -629,8 +629,8 @@
if (UNKNOWN == seghi) {
emit( " Last byte is not inside a known block\n" );
} else {
- emit( " Last byte (%#lx) is %ld bytes inside a "
- "%ld-byte block alloc'd\n",
+ emit( " Last byte (%#lx) is %lu bytes inside a "
+ "%lu-byte block alloc'd\n",
hi, hi-Seg__addr(seghi), Seg__size(seghi) );
VG_(pp_ExeContext)(Seg__where(seghi));
}
Modified: trunk/exp-sgcheck/sg_main.c
==============================================================================
--- trunk/exp-sgcheck/sg_main.c (original)
+++ trunk/exp-sgcheck/sg_main.c Mon Aug 3 22:05:20 2015
@@ -631,7 +631,7 @@
static void GlobalTreeNode__pp ( GlobalTreeNode* nd ) {
tl_assert(nd->descr);
- VG_(printf)("GTNode [%#lx,+%ld) %s",
+ VG_(printf)("GTNode [%#lx,+%lu) %s",
nd->addr, nd->szB, nd->descr->name);
}
@@ -1648,7 +1648,7 @@
sKey.szB = szB;
gKey.addr = ea;
gKey.szB = szB;
- if (0) VG_(printf)("Tree sizes %ld %ld\n",
+ if (0) VG_(printf)("Tree sizes %lu %lu\n",
VG_(sizeFM)(siTrees[tid]), VG_(sizeFM)(giTree));
sOK = VG_(findBoundsFM)( siTrees[tid],
(UWord*)&sLB, NULL/*unused*/,
@@ -1896,7 +1896,7 @@
if (0 && (sb || gb))
VG_(message)(Vg_DebugMsg,
"exp-sgcheck: new max tree sizes: "
- "StackTree %ld, GlobalTree %ld\n",
+ "StackTree %lu, GlobalTree %lu\n",
stats__max_sitree_size, stats__max_gitree_size );
}
} else {
Modified: trunk/massif/ms_main.c
==============================================================================
--- trunk/massif/ms_main.c (original)
+++ trunk/massif/ms_main.c Mon Aug 3 22:05:20 2015
@@ -1115,7 +1115,7 @@
default:
tl_assert2(0, "VERB_snapshot: unknown snapshot kind: %d", snapshot->kind);
}
- VERB(verbosity, "%s S%s%3d (t:%lld, hp:%ld, ex:%ld, st:%ld)\n",
+ VERB(verbosity, "%s S%s%3d (t:%lld, hp:%lu, ex:%lu, st:%lu)\n",
prefix, suffix, i,
snapshot->time,
snapshot->heap_szB,
@@ -1722,7 +1722,8 @@
}
VERB(3, ">>> (%ld, %ld)\n",
- new_req_szB - old_req_szB, new_slop_szB - old_slop_szB);
+ (SSizeT)(new_req_szB - old_req_szB),
+ (SSizeT)(new_slop_szB - old_slop_szB));
}
return p_new;
@@ -1902,7 +1903,7 @@
static INLINE void new_mem_stack_2(SizeT len, const HChar* what)
{
if (have_started_executing_code) {
- VERB(3, "<<< new_mem_stack (%ld)\n", len);
+ VERB(3, "<<< new_mem_stack (%lu)\n", len);
n_stack_allocs++;
update_stack_stats(len);
maybe_take_snapshot(Normal, what);
@@ -1913,7 +1914,7 @@
static INLINE void die_mem_stack_2(SizeT len, const HChar* what)
{
if (have_started_executing_code) {
- VERB(3, "<<< die_mem_stack (%ld)\n", -len);
+ VERB(3, "<<< die_mem_stack (-%lu)\n", len);
n_stack_frees++;
maybe_take_snapshot(Peak, "stkPEAK");
update_stack_stats(-len);
@@ -2149,7 +2150,7 @@
}
// Do the non-ip_desc part first...
- FP("%sn%d: %lu ", depth_str, sxpt->Sig.n_children, sxpt->szB);
+ FP("%sn%u: %lu ", depth_str, sxpt->Sig.n_children, sxpt->szB);
// For ip_descs beginning with "0xABCD...:" addresses, we first
// measure the length of the "0xabcd: " address at the start of the
@@ -2407,7 +2408,7 @@
STATS("stack allocs: %u\n", n_stack_allocs);
STATS("stack frees: %u\n", n_stack_frees);
STATS("XPts: %u\n", n_xpts);
- STATS("top-XPts: %u (%d%%)\n",
+ STATS("top-XPts: %u (%u%%)\n",
alloc_xpt->n_children,
( n_xpts ? alloc_xpt->n_children * 100 / n_xpts : 0));
STATS("XPt init expansions: %u\n", n_xpt_init_expansions);
|
|
From: <sv...@va...> - 2015-08-03 20:03:48
|
Author: florian
Date: Mon Aug 3 21:03:41 2015
New Revision: 15478
Log:
Improve the script to also show failures in the post-regtest checks
in the regtest log.
Modified:
trunk/nightly/bin/nightly
Modified: trunk/nightly/bin/nightly
==============================================================================
--- trunk/nightly/bin/nightly (original)
+++ trunk/nightly/bin/nightly Mon Aug 3 21:03:41 2015
@@ -175,6 +175,9 @@
"Running regression tests " \
"cd valgrind-$logfile && ${ABT_RUN_REGTEST}"
+ # Stash away the return code of the regression run
+ regrun_rc=$?
+
# Grab some indicative text for the short log file -- if the regtests
# succeeded, show their results. If we didn't make it that far, show the
# last 20 lines.
@@ -183,6 +186,14 @@
echo "Regression test results follow" >> $logfile.short
echo >> $logfile.short
awk '/^== [0-9]+ tests/, /^$/ { print }' $logfile.verbose >> $logfile.short
+ # Check the return code of the regression run; we might have successfully
+ # run all tests but still failed in the post-regtest checks.
+ if [ $regrun_rc != "0" ]; then
+ echo >> $logfile.short
+ echo "Last 20 lines of verbose log follow" >> $logfile.short \
+ echo >> $logfile.short
+ tail -20 $logfile.verbose >> $logfile.short
+ fi
) || (
echo >> $logfile.short
echo "Last 20 lines of verbose log follow" >> $logfile.short \
|
|
From: <sv...@va...> - 2015-08-03 16:03:29
|
Author: florian
Date: Mon Aug 3 17:03:13 2015
New Revision: 3167
Log:
Fix printf format inconsistencies as pointed out by GCC's
-Wformat-signedness.
Modified:
trunk/priv/guest_amd64_toIR.c
trunk/priv/guest_arm64_toIR.c
trunk/priv/guest_arm_toIR.c
trunk/priv/guest_mips_toIR.c
trunk/priv/guest_ppc_toIR.c
trunk/priv/guest_tilegx_toIR.c
trunk/priv/guest_x86_toIR.c
trunk/priv/host_amd64_defs.c
trunk/priv/host_arm_defs.c
trunk/priv/host_mips_defs.c
trunk/priv/host_ppc_defs.c
trunk/priv/host_ppc_isel.c
trunk/priv/host_s390_defs.c
trunk/priv/host_s390_isel.c
trunk/priv/host_tilegx_defs.c
trunk/priv/host_tilegx_isel.c
trunk/priv/host_x86_defs.c
trunk/priv/ir_defs.c
Modified: trunk/priv/guest_amd64_toIR.c
==============================================================================
--- trunk/priv/guest_amd64_toIR.c (original)
+++ trunk/priv/guest_amd64_toIR.c Mon Aug 3 17:03:13 2015
@@ -5420,7 +5420,8 @@
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xD8\n");
goto decode_fail;
}
@@ -5439,7 +5440,7 @@
/* Dunno if this is right */
case 0xD0 ... 0xD7: /* FCOM %st(?),%st(0) */
r_dst = (UInt)modrm - 0xD0;
- DIP("fcom %%st(0),%%st(%d)\n", r_dst);
+ DIP("fcom %%st(0),%%st(%u)\n", r_dst);
/* This forces C1 to zero, which isn't right. */
put_C3210(
unop(Iop_32Uto64,
@@ -5454,7 +5455,7 @@
/* Dunno if this is right */
case 0xD8 ... 0xDF: /* FCOMP %st(?),%st(0) */
r_dst = (UInt)modrm - 0xD8;
- DIP("fcomp %%st(0),%%st(%d)\n", r_dst);
+ DIP("fcomp %%st(0),%%st(%u)\n", r_dst);
/* This forces C1 to zero, which isn't right. */
put_C3210(
unop(Iop_32Uto64,
@@ -5679,7 +5680,8 @@
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xD9\n");
goto decode_fail;
}
@@ -6078,7 +6080,8 @@
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDA\n");
goto decode_fail;
}
@@ -6242,7 +6245,8 @@
}
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDB\n");
goto decode_fail;
}
@@ -6425,7 +6429,8 @@
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDC\n");
goto decode_fail;
}
@@ -6660,7 +6665,8 @@
}
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDD\n");
goto decode_fail;
}
@@ -6793,7 +6799,8 @@
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDE\n");
goto decode_fail;
}
@@ -6909,7 +6916,8 @@
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDF\n");
goto decode_fail;
}
@@ -7125,7 +7133,7 @@
case 0xFB: op = Iop_Sub64; break;
default:
- vex_printf("\n0x%x\n", (Int)opc);
+ vex_printf("\n0x%x\n", (UInt)opc);
vpanic("dis_MMXop_regmem_to_reg");
}
@@ -9200,8 +9208,8 @@
assign( plain, binop(op, getXMMReg(gregOfRexRM(pfx,rm)),
getXMMReg(eregOfRexRM(pfx,rm))) );
delta += 2;
- DIP("%s $%d,%s,%s\n", opname,
- (Int)imm8,
+ DIP("%s $%u,%s,%s\n", opname,
+ imm8,
nameXMMReg(eregOfRexRM(pfx,rm)),
nameXMMReg(gregOfRexRM(pfx,rm)) );
} else {
@@ -9224,8 +9232,8 @@
)
);
delta += alen+1;
- DIP("%s $%d,%s,%s\n", opname,
- (Int)imm8,
+ DIP("%s $%u,%s,%s\n", opname,
+ imm8,
dis_buf,
nameXMMReg(gregOfRexRM(pfx,rm)) );
}
@@ -11396,8 +11404,8 @@
assign(sV, getXMMReg(rE));
imm8 = getUChar(delta+1) & 7;
delta += 1+1;
- DIP("%spextrw $%d,%s,%s\n", isAvx ? "v" : "",
- (Int)imm8, nameXMMReg(rE), nameIReg32(rG));
+ DIP("%spextrw $%u,%s,%s\n", isAvx ? "v" : "",
+ imm8, nameXMMReg(rE), nameIReg32(rG));
} else {
/* The memory case is disallowed, apparently. */
return deltaIN; /* FAIL */
@@ -13842,7 +13850,7 @@
assign(t4, getIReg16(eregOfRexRM(pfx,modrm)));
delta += 1+1;
lane = getUChar(delta-1);
- DIP("pinsrw $%d,%s,%s\n", (Int)lane,
+ DIP("pinsrw $%d,%s,%s\n", lane,
nameIReg16(eregOfRexRM(pfx,modrm)),
nameMMXReg(gregLO3ofRM(modrm)));
} else {
@@ -13850,7 +13858,7 @@
delta += 1+alen;
lane = getUChar(delta-1);
assign(t4, loadLE(Ity_I16, mkexpr(addr)));
- DIP("pinsrw $%d,%s,%s\n", (Int)lane,
+ DIP("pinsrw $%d,%s,%s\n", lane,
dis_buf,
nameMMXReg(gregLO3ofRM(modrm)));
}
@@ -13879,7 +13887,7 @@
delta += 1+1;
lane = getUChar(delta-1);
DIP("pinsrw $%d,%s,%s\n",
- (Int)lane, nameIReg16(rE), nameXMMReg(rG));
+ lane, nameIReg16(rE), nameXMMReg(rG));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf,
1/*byte after the amode*/ );
@@ -13887,7 +13895,7 @@
lane = getUChar(delta-1);
assign(t4, loadLE(Ity_I16, mkexpr(addr)));
DIP("pinsrw $%d,%s,%s\n",
- (Int)lane, dis_buf, nameXMMReg(rG));
+ lane, dis_buf, nameXMMReg(rG));
}
IRTemp src_vec = newTemp(Ity_V128);
assign(src_vec, getXMMReg(rG));
@@ -15895,7 +15903,7 @@
assign( sV, getXMMReg(eregOfRexRM(pfx,modrm)) );
d64 = (Long)getUChar(delta+1);
delta += 1+1;
- DIP("palignr $%d,%s,%s\n", (Int)d64,
+ DIP("palignr $%lld,%s,%s\n", d64,
nameXMMReg(eregOfRexRM(pfx,modrm)),
nameXMMReg(gregOfRexRM(pfx,modrm)));
} else {
@@ -15904,7 +15912,7 @@
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
d64 = (Long)getUChar(delta+alen);
delta += alen+1;
- DIP("palignr $%d,%s,%s\n", (Int)d64,
+ DIP("palignr $%lld,%s,%s\n", d64,
dis_buf,
nameXMMReg(gregOfRexRM(pfx,modrm)));
}
@@ -15927,7 +15935,7 @@
assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
d64 = (Long)getUChar(delta+1);
delta += 1+1;
- DIP("palignr $%d,%s,%s\n", (Int)d64,
+ DIP("palignr $%lld,%s,%s\n", d64,
nameMMXReg(eregLO3ofRM(modrm)),
nameMMXReg(gregLO3ofRM(modrm)));
} else {
@@ -15935,7 +15943,7 @@
assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
d64 = (Long)getUChar(delta+alen);
delta += alen+1;
- DIP("palignr $%d%s,%s\n", (Int)d64,
+ DIP("palignr $%lld%s,%s\n", d64,
dis_buf,
nameMMXReg(gregLO3ofRM(modrm)));
}
@@ -19846,7 +19854,8 @@
guest_RIP_bbstart+delta, d64 );
vassert(dres->whatNext == Dis_StopHere);
}
- DIP("j%s-8 0x%llx %s\n", name_AMD64Condcode(opc - 0x70), d64, comment);
+ DIP("j%s-8 0x%llx %s\n", name_AMD64Condcode(opc - 0x70), (ULong)d64,
+ comment);
return delta;
}
@@ -20258,7 +20267,7 @@
assign( addr, handleAddrOverrides(vbi, pfx, mkU64(d64)) );
putIRegRAX(sz, loadLE( ty, mkexpr(addr) ));
DIP("mov%c %s0x%llx, %s\n", nameISize(sz),
- segRegTxt(pfx), d64,
+ segRegTxt(pfx), (ULong)d64,
nameIRegRAX(sz));
return delta;
@@ -20276,7 +20285,7 @@
assign( addr, handleAddrOverrides(vbi, pfx, mkU64(d64)) );
storeLE( mkexpr(addr), getIRegRAX(sz) );
DIP("mov%c %s, %s0x%llx\n", nameISize(sz), nameIRegRAX(sz),
- segRegTxt(pfx), d64);
+ segRegTxt(pfx), (ULong)d64);
return delta;
case 0xA4:
@@ -20750,7 +20759,7 @@
}
stmt( IRStmt_Exit(cond, Ijk_Boring, IRConst_U64(d64), OFFB_RIP) );
- DIP("loop%s%s 0x%llx\n", xtra, haveASO(pfx) ? "l" : "", d64);
+ DIP("loop%s%s 0x%llx\n", xtra, haveASO(pfx) ? "l" : "", (ULong)d64);
return delta;
}
@@ -20768,7 +20777,7 @@
IRConst_U64(d64),
OFFB_RIP
));
- DIP("jecxz 0x%llx\n", d64);
+ DIP("jecxz 0x%llx\n", (ULong)d64);
} else {
/* 64-bit */
stmt( IRStmt_Exit( binop(Iop_CmpEQ64,
@@ -20778,7 +20787,7 @@
IRConst_U64(d64),
OFFB_RIP
));
- DIP("jrcxz 0x%llx\n", d64);
+ DIP("jrcxz 0x%llx\n", (ULong)d64);
}
return delta;
@@ -20899,7 +20908,7 @@
jmp_lit(dres, Ijk_Call, d64);
vassert(dres->whatNext == Dis_StopHere);
}
- DIP("call 0x%llx\n",d64);
+ DIP("call 0x%llx\n", (ULong)d64);
return delta;
case 0xE9: /* Jv (jump, 16/32 offset) */
@@ -20916,7 +20925,7 @@
jmp_lit(dres, Ijk_Boring, d64);
vassert(dres->whatNext == Dis_StopHere);
}
- DIP("jmp 0x%llx\n", d64);
+ DIP("jmp 0x%llx\n", (ULong)d64);
return delta;
case 0xEB: /* Jb (jump, byte offset) */
@@ -20933,7 +20942,7 @@
jmp_lit(dres, Ijk_Boring, d64);
vassert(dres->whatNext == Dis_StopHere);
}
- DIP("jmp-8 0x%llx\n", d64);
+ DIP("jmp-8 0x%llx\n", (ULong)d64);
return delta;
case 0xF5: /* CMC */
@@ -21408,7 +21417,8 @@
guest_RIP_bbstart+delta, d64 );
vassert(dres->whatNext == Dis_StopHere);
}
- DIP("j%s-32 0x%llx %s\n", name_AMD64Condcode(opc - 0x80), d64, comment);
+ DIP("j%s-32 0x%llx %s\n", name_AMD64Condcode(opc - 0x80), (ULong)d64,
+ comment);
return delta;
}
@@ -22915,8 +22925,8 @@
UInt rE = eregOfRexRM(pfx,rm);
assign(argR, getXMMReg(rE));
delta += 1+1;
- DIP("%s $%d,%s,%s,%s\n",
- opname, (Int)imm8,
+ DIP("%s $%u,%s,%s,%s\n",
+ opname, imm8,
nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
@@ -22928,8 +22938,8 @@
: sz == 8 ? unop( Iop_64UtoV128, loadLE(Ity_I64, mkexpr(addr)))
: /*sz==4*/ unop( Iop_32UtoV128, loadLE(Ity_I32, mkexpr(addr))));
delta += alen+1;
- DIP("%s $%d,%s,%s,%s\n",
- opname, (Int)imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+ DIP("%s $%u,%s,%s,%s\n",
+ opname, imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
}
assign(plain, preSwap ? binop(op, mkexpr(argR), mkexpr(argL))
@@ -23030,8 +23040,8 @@
UInt rE = eregOfRexRM(pfx,rm);
assign(argR, getYMMReg(rE));
delta += 1+1;
- DIP("%s $%d,%s,%s,%s\n",
- opname, (Int)imm8,
+ DIP("%s $%u,%s,%s,%s\n",
+ opname, imm8,
nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
@@ -23041,8 +23051,8 @@
if (!ok) return deltaIN; /* FAIL */
assign(argR, loadLE(Ity_V256, mkexpr(addr)) );
delta += alen+1;
- DIP("%s $%d,%s,%s,%s\n",
- opname, (Int)imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
+ DIP("%s $%u,%s,%s,%s\n",
+ opname, imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
}
breakupV256toV128s( preSwap ? argR : argL, &argLhi, &argLlo );
@@ -30511,14 +30521,14 @@
assign( sV, getXMMReg(rE) );
imm8 = getUChar(delta+1);
delta += 1+1;
- DIP("vpalignr $%d,%s,%s,%s\n", imm8, nameXMMReg(rE),
+ DIP("vpalignr $%u,%s,%s,%s\n", imm8, nameXMMReg(rE),
nameXMMReg(rV), nameXMMReg(rG));
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
imm8 = getUChar(delta+alen);
delta += alen+1;
- DIP("vpalignr $%d,%s,%s,%s\n", imm8, dis_buf,
+ DIP("vpalignr $%u,%s,%s,%s\n", imm8, dis_buf,
nameXMMReg(rV), nameXMMReg(rG));
}
@@ -30546,14 +30556,14 @@
assign( sV, getYMMReg(rE) );
imm8 = getUChar(delta+1);
delta += 1+1;
- DIP("vpalignr $%d,%s,%s,%s\n", imm8, nameYMMReg(rE),
+ DIP("vpalignr $%u,%s,%s,%s\n", imm8, nameYMMReg(rE),
nameYMMReg(rV), nameYMMReg(rG));
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
imm8 = getUChar(delta+alen);
delta += alen+1;
- DIP("vpalignr $%d,%s,%s,%s\n", imm8, dis_buf,
+ DIP("vpalignr $%u,%s,%s,%s\n", imm8, dis_buf,
nameYMMReg(rV), nameYMMReg(rG));
}
@@ -31867,14 +31877,14 @@
if (sigill_diag) {
vex_printf("vex amd64->IR: unhandled instruction bytes: "
"0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- (Int)getUChar(delta_start+0),
- (Int)getUChar(delta_start+1),
- (Int)getUChar(delta_start+2),
- (Int)getUChar(delta_start+3),
- (Int)getUChar(delta_start+4),
- (Int)getUChar(delta_start+5),
- (Int)getUChar(delta_start+6),
- (Int)getUChar(delta_start+7) );
+ getUChar(delta_start+0),
+ getUChar(delta_start+1),
+ getUChar(delta_start+2),
+ getUChar(delta_start+3),
+ getUChar(delta_start+4),
+ getUChar(delta_start+5),
+ getUChar(delta_start+6),
+ getUChar(delta_start+7) );
vex_printf("vex amd64->IR: REX=%d REX.W=%d REX.R=%d REX.X=%d REX.B=%d\n",
haveREX(pfx) ? 1 : 0, getRexW(pfx), getRexR(pfx),
getRexX(pfx), getRexB(pfx));
Modified: trunk/priv/guest_arm64_toIR.c
==============================================================================
--- trunk/priv/guest_arm64_toIR.c (original)
+++ trunk/priv/guest_arm64_toIR.c Mon Aug 3 17:03:13 2015
@@ -5141,7 +5141,7 @@
vassert(0);
}
putIReg64orSP(nn, mkexpr(tEA));
- DIP(atRN ? "ldrs%c %s, [%s], #%lld\n" : "ldrs%c %s, [%s, #%lld]!",
+ DIP(atRN ? "ldrs%c %s, [%s], #%llu\n" : "ldrs%c %s, [%s, #%llu]!",
ch, nameIRegOrZR(is64, tt), nameIReg64orSP(nn), simm9);
return True;
}
@@ -5216,7 +5216,7 @@
vassert(0);
}
DIP("ldurs%c %s, [%s, #%lld]",
- ch, nameIRegOrZR(is64, tt), nameIReg64orSP(nn), simm9);
+ ch, nameIRegOrZR(is64, tt), nameIReg64orSP(nn), (Long)simm9);
return True;
}
/* else fall through */
@@ -5575,7 +5575,7 @@
putIReg64orSP(nn, mkexpr(tEA));
DIP(atRN ? "%s %s, [%s], #%lld\n" : "%s %s, [%s, #%lld]!\n",
isLD ? "ldr" : "str",
- nameQRegLO(tt, ty), nameIReg64orSP(nn), simm9);
+ nameQRegLO(tt, ty), nameIReg64orSP(nn), (Long)simm9);
return True;
}
@@ -8275,7 +8275,7 @@
putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
const HChar* Ta = bitQ ==1 ? "16b" : "8b";
const HChar* nm = isTBX ? "tbx" : "tbl";
- DIP("%s %s.%s, {v%d.16b .. v%d.16b}, %s.%s\n",
+ DIP("%s %s.%s, {v%u.16b .. v%u.16b}, %s.%s\n",
nm, nameQReg128(dd), Ta, nn, (nn + len) % 32, nameQReg128(mm), Ta);
return True;
}
@@ -9495,7 +9495,7 @@
: (ks == 1 ? "sqdmlal" : "sqdmlsl");
const HChar arrNarrow = "bhsd"[size];
const HChar arrWide = "bhsd"[size+1];
- DIP("%s %c%d, %c%d, %c%d\n",
+ DIP("%s %c%u, %c%u, %c%u\n",
nm, arrWide, dd, arrNarrow, nn, arrNarrow, mm);
return True;
}
@@ -9705,7 +9705,7 @@
math_ZERO_ALL_EXCEPT_LOWEST_LANE(size, mkexpr(sat1n)));
const HChar arr = "bhsd"[size];
const HChar* nm = isR ? "sqrdmulh" : "sqdmulh";
- DIP("%s %c%d, %c%d, %c%d\n", nm, arr, dd, arr, nn, arr, mm);
+ DIP("%s %c%u, %c%u, %c%u\n", nm, arr, dd, arr, nn, arr, mm);
return True;
}
@@ -10267,7 +10267,7 @@
: (ks == 1 ? "sqdmlal" : "sqdmlsl");
const HChar arrNarrow = "bhsd"[size];
const HChar arrWide = "bhsd"[size+1];
- DIP("%s %c%d, %c%d, v%d.%c[%u]\n",
+ DIP("%s %c%u, %c%u, v%u.%c[%u]\n",
nm, arrWide, dd, arrNarrow, nn, dd, arrNarrow, ix);
return True;
}
@@ -10302,7 +10302,7 @@
updateQCFLAGwithDifferenceZHI(sat1q, sat1n, opZHI);
const HChar* nm = isR ? "sqrdmulh" : "sqdmulh";
HChar ch = size == X01 ? 'h' : 's';
- DIP("%s %c%d, %c%d, v%d.%c[%u]\n", nm, ch, dd, ch, nn, ch, dd, ix);
+ DIP("%s %c%u, %c%u, v%d.%c[%u]\n", nm, ch, dd, ch, nn, ch, (Int)dd, ix);
return True;
}
@@ -10698,7 +10698,7 @@
/* */
if (res) {
putQReg128(dd, res);
- DIP("%cshll%s %s.%s, %s.%s, #%d\n",
+ DIP("%cshll%s %s.%s, %s.%s, #%u\n",
isU ? 'u' : 's', isQ ? "2" : "",
nameQReg128(dd), ta, nameQReg128(nn), tb, sh);
return True;
@@ -12168,7 +12168,7 @@
putQReg128(dd, mkexpr(res));
const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
const HChar* arrWide = nameArr_Q_SZ(1, size+1);
- DIP("shll%s %s.%s, %s.%s, #%u\n", is2 ? "2" : "",
+ DIP("shll%s %s.%s, %s.%s, #%d\n", is2 ? "2" : "",
nameQReg128(dd), arrWide, nameQReg128(nn), arrNarrow, 8 << size);
return True;
}
Modified: trunk/priv/guest_arm_toIR.c
==============================================================================
--- trunk/priv/guest_arm_toIR.c (original)
+++ trunk/priv/guest_arm_toIR.c Mon Aug 3 17:03:13 2015
@@ -2870,7 +2870,7 @@
putDRegI64(dreg, triop(Iop_Slice64, /*hiI64*/getDRegI64(mreg),
/*loI64*/getDRegI64(nreg), mkU8(imm4)), condT);
}
- DIP("vext.8 %c%d, %c%d, %c%d, #%d\n", reg_t, dreg, reg_t, nreg,
+ DIP("vext.8 %c%u, %c%u, %c%u, #%u\n", reg_t, dreg, reg_t, nreg,
reg_t, mreg, imm4);
return True;
}
@@ -3030,7 +3030,7 @@
} else {
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vdup.%d %c%d, d%d[%d]\n", size, Q ? 'q' : 'd', dreg, mreg, index);
+ DIP("vdup.%u %c%u, d%u[%u]\n", size, Q ? 'q' : 'd', dreg, mreg, index);
return True;
}
@@ -3137,7 +3137,7 @@
binop(andOp, mkexpr(arg_m), imm_val),
binop(andOp, mkexpr(arg_n), imm_val)),
mkU8(1))));
- DIP("vhadd.%c%d %c%d, %c%d, %c%d\n",
+ DIP("vhadd.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size, regType,
dreg, regType, nreg, regType, mreg);
} else {
@@ -3196,7 +3196,7 @@
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
assign(tmp, binop(op2, mkexpr(arg_n), mkexpr(arg_m)));
setFlag_QC(mkexpr(res), mkexpr(tmp), Q, condT);
- DIP("vqadd.%c%d %c%d, %c%d, %c%d\n",
+ DIP("vqadd.%c%d %c%u %c%u, %c%u\n",
U ? 'u' : 's',
8 << size, reg_t, dreg, reg_t, nreg, reg_t, mreg);
}
@@ -3307,7 +3307,7 @@
mkU8(1))),
mkexpr(cc)));
}
- DIP("vrhadd.%c%d %c%d, %c%d, %c%d\n",
+ DIP("vrhadd.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's',
8 << size, reg_t, dreg, reg_t, nreg, reg_t, mreg);
} else {
@@ -3323,7 +3323,7 @@
assign(res, binop(Iop_And64, mkexpr(arg_n),
mkexpr(arg_m)));
}
- DIP("vand %c%d, %c%d, %c%d\n",
+ DIP("vand %c%u, %c%u, %c%u\n",
reg_t, dreg, reg_t, nreg, reg_t, mreg);
break;
}
@@ -3337,7 +3337,7 @@
assign(res, binop(Iop_And64, mkexpr(arg_n),
unop(Iop_Not64, mkexpr(arg_m))));
}
- DIP("vbic %c%d, %c%d, %c%d\n",
+ DIP("vbic %c%u, %c%u, %c%u\n",
reg_t, dreg, reg_t, nreg, reg_t, mreg);
break;
}
@@ -3352,13 +3352,13 @@
assign(res, binop(Iop_Or64, mkexpr(arg_n),
mkexpr(arg_m)));
}
- DIP("vorr %c%d, %c%d, %c%d\n",
+ DIP("vorr %c%u, %c%u, %c%u\n",
reg_t, dreg, reg_t, nreg, reg_t, mreg);
} else {
/* VMOV */
HChar reg_t = Q ? 'q' : 'd';
assign(res, mkexpr(arg_m));
- DIP("vmov %c%d, %c%d\n", reg_t, dreg, reg_t, mreg);
+ DIP("vmov %c%u, %c%u\n", reg_t, dreg, reg_t, mreg);
}
break;
case 3:{
@@ -3371,7 +3371,7 @@
assign(res, binop(Iop_Or64, mkexpr(arg_n),
unop(Iop_Not64, mkexpr(arg_m))));
}
- DIP("vorn %c%d, %c%d, %c%d\n",
+ DIP("vorn %c%u, %c%u, %c%u\n",
reg_t, dreg, reg_t, nreg, reg_t, mreg);
break;
}
@@ -3548,7 +3548,7 @@
unop(notOp, mkexpr(arg_n)),
mkexpr(arg_m)),
imm_val)));
- DIP("vhsub.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vhsub.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
@@ -3606,7 +3606,7 @@
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
assign(tmp, binop(op2, mkexpr(arg_n), mkexpr(arg_m)));
setFlag_QC(mkexpr(res), mkexpr(tmp), Q, condT);
- DIP("vqsub.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vqsub.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
@@ -3634,7 +3634,7 @@
if (B == 0) {
/* VCGT */
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
- DIP("vcgt.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vcgt.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
@@ -3647,7 +3647,7 @@
assign(res,
unop(Q ? Iop_NotV128 : Iop_Not64,
binop(op, mkexpr(arg_m), mkexpr(arg_n))));
- DIP("vcge.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vcge.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
@@ -3709,7 +3709,7 @@
else
assign(res, binop(op, mkexpr(arg_m), mkexpr(tmp)));
}
- DIP("vshl.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vshl.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
nreg);
@@ -3833,7 +3833,7 @@
binop(Q ? Iop_AndV128 : Iop_And64,
mkexpr(arg_m), mkexpr(mask)),
Q, condT);
- DIP("vqshl.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vqshl.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
nreg);
@@ -3973,7 +3973,7 @@
binop(op, mkexpr(arg_m), mkexpr(arg_n)),
mkexpr(round)));
}
- DIP("vrshl.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vrshl.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
nreg);
@@ -4130,7 +4130,7 @@
binop(Q ? Iop_AndV128 : Iop_And64,
mkexpr(arg_m), mkexpr(mask)),
Q, condT);
- DIP("vqrshl.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vqrshl.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
nreg);
@@ -4159,7 +4159,7 @@
}
}
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
- DIP("vmax.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vmax.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
@@ -4184,7 +4184,7 @@
}
}
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
- DIP("vmin.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vmin.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
@@ -4253,7 +4253,7 @@
mkexpr(arg_n)),
unop(Q ? Iop_NotV128 : Iop_Not64,
mkexpr(cond)))));
- DIP("vabd.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vabd.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
@@ -4332,7 +4332,7 @@
unop(Q ? Iop_NotV128 : Iop_Not64,
mkexpr(cond)))));
assign(res, binop(op_add, mkexpr(acc), mkexpr(tmp)));
- DIP("vaba.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vaba.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
@@ -4350,7 +4350,7 @@
case 3: op = Q ? Iop_Add64x2 : Iop_Add64; break;
default: vassert(0);
}
- DIP("vadd.i%u %c%u, %c%u, %c%u\n",
+ DIP("vadd.i%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
} else {
@@ -4362,7 +4362,7 @@
case 3: op = Q ? Iop_Sub64x2 : Iop_Sub64; break;
default: vassert(0);
}
- DIP("vsub.i%u %c%u, %c%u, %c%u\n",
+ DIP("vsub.i%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
}
@@ -4381,7 +4381,7 @@
assign(res, unop(op, binop(Q ? Iop_AndV128 : Iop_And64,
mkexpr(arg_n),
mkexpr(arg_m))));
- DIP("vtst.%u %c%u, %c%u, %c%u\n",
+ DIP("vtst.%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
} else {
@@ -4391,7 +4391,7 @@
binop(Q ? Iop_XorV128 : Iop_Xor64,
mkexpr(arg_n),
mkexpr(arg_m)))));
- DIP("vceq.i%u %c%u, %c%u, %c%u\n",
+ DIP("vceq.i%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
}
@@ -4444,7 +4444,7 @@
assign(res, binop(op2,
Q ? getQReg(dreg) : getDRegI64(dreg),
binop(op, mkexpr(arg_n), mkexpr(arg_m))));
- DIP("vml%c.i%u %c%u, %c%u, %c%u\n",
+ DIP("vml%c.i%d %c%u, %c%u, %c%u\n",
P ? 's' : 'a', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
@@ -4470,7 +4470,7 @@
}
}
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
- DIP("vmul.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vmul.%c%d %c%u, %c%u, %c%u\n",
P ? 'p' : 'i', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
@@ -4500,7 +4500,7 @@
}
}
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
- DIP("vp%s.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vp%s.%c%d %c%u, %c%u, %c%u\n",
P ? "min" : "max", U ? 'u' : 's',
8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg,
Q ? 'q' : 'd', mreg);
@@ -4539,7 +4539,7 @@
Q ? mkU128(imm) : mkU64(imm))),
Q ? mkU128(0) : mkU64(0),
Q, condT);
- DIP("vqdmulh.s%u %c%u, %c%u, %c%u\n",
+ DIP("vqdmulh.s%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
} else {
@@ -4573,7 +4573,7 @@
Q ? mkU128(imm) : mkU64(imm))),
Q ? mkU128(0) : mkU64(0),
Q, condT);
- DIP("vqrdmulh.s%u %c%u, %c%u, %c%u\n",
+ DIP("vqrdmulh.s%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
}
@@ -4874,7 +4874,7 @@
assign(arg_m, unop(cvt, getDRegI64(mreg)));
putQReg(dreg, binop(op, mkexpr(arg_n), mkexpr(arg_m)),
condT);
- DIP("v%s%c.%c%u q%u, %c%u, d%u\n", (A & 2) ? "sub" : "add",
+ DIP("v%s%c.%c%d q%u, %c%u, d%u\n", (A & 2) ? "sub" : "add",
(A & 1) ? 'w' : 'l', U ? 'u' : 's', 8 << size, dreg,
(A & 1) ? 'q' : 'd', nreg, mreg);
return True;
@@ -4926,7 +4926,7 @@
}
putDRegI64(dreg, unop(cvt, binop(sh, mkexpr(res), mkU8(8 << size))),
condT);
- DIP("v%saddhn.i%u d%u, q%u, q%u\n", U ? "r" : "", 16 << size, dreg,
+ DIP("v%saddhn.i%d d%u, q%u, q%u\n", U ? "r" : "", 16 << size, dreg,
nreg, mreg);
return True;
case 5:
@@ -4982,7 +4982,7 @@
unop(Iop_NotV128, mkexpr(cond)))),
getQReg(dreg)));
putQReg(dreg, mkexpr(res), condT);
- DIP("vabal.%c%u q%u, d%u, d%u\n", U ? 'u' : 's', 8 << size, dreg,
+ DIP("vabal.%c%d q%u, d%u, d%u\n", U ? 'u' : 's', 8 << size, dreg,
nreg, mreg);
return True;
case 6:
@@ -5036,7 +5036,7 @@
}
putDRegI64(dreg, unop(cvt, binop(sh, mkexpr(res), mkU8(8 << size))),
condT);
- DIP("v%ssubhn.i%u d%u, q%u, q%u\n", U ? "r" : "", 16 << size, dreg,
+ DIP("v%ssubhn.i%d d%u, q%u, q%u\n", U ? "r" : "", 16 << size, dreg,
nreg, mreg);
return True;
case 7:
@@ -5087,7 +5087,7 @@
binop(op, mkexpr(arg_m), mkexpr(arg_n)),
unop(Iop_NotV128, mkexpr(cond)))));
putQReg(dreg, mkexpr(res), condT);
- DIP("vabdl.%c%u q%u, d%u, d%u\n", U ? 'u' : 's', 8 << size, dreg,
+ DIP("vabdl.%c%d q%u, d%u, d%u\n", U ? 'u' : 's', 8 << size, dreg,
nreg, mreg);
return True;
case 8:
@@ -5118,7 +5118,7 @@
res = newTemp(Ity_V128);
assign(res, binop(op, getDRegI64(nreg),getDRegI64(mreg)));
putQReg(dreg, binop(op2, getQReg(dreg), mkexpr(res)), condT);
- DIP("vml%cl.%c%u q%u, d%u, d%u\n", P ? 's' : 'a', U ? 'u' : 's',
+ DIP("vml%cl.%c%d q%u, d%u, d%u\n", P ? 's' : 'a', U ? 'u' : 's',
8 << size, dreg, nreg, mreg);
return True;
case 9:
@@ -5165,7 +5165,7 @@
mkU64(0),
False, condT);
putQReg(dreg, binop(add, getQReg(dreg), mkexpr(res)), condT);
- DIP("vqdml%cl.s%u q%u, d%u, d%u\n", P ? 's' : 'a', 8 << size, dreg,
+ DIP("vqdml%cl.s%d q%u, d%u, d%u\n", P ? 's' : 'a', 8 << size, dreg,
nreg, mreg);
return True;
case 12:
@@ -5192,7 +5192,7 @@
}
putQReg(dreg, binop(op, getDRegI64(nreg),
getDRegI64(mreg)), condT);
- DIP("vmull.%c%u q%u, d%u, d%u\n", P ? 'p' : (U ? 'u' : 's'),
+ DIP("vmull.%c%d q%u, d%u, d%u\n", P ? 'p' : (U ? 'u' : 's'),
8 << size, dreg, nreg, mreg);
return True;
case 13:
@@ -5230,7 +5230,7 @@
binop(op2, getDRegI64(mreg), mkU64(imm))),
mkU64(0),
False, condT);
- DIP("vqdmull.s%u q%u, d%u, d%u\n", 8 << size, dreg, nreg, mreg);
+ DIP("vqdmull.s%d q%u, d%u, d%u\n", 8 << size, dreg, nreg, mreg);
return True;
default:
return False;
@@ -5355,7 +5355,7 @@
else
putDRegI64(dreg, binop(op2, getDRegI64(dreg), mkexpr(res)),
condT);
- DIP("vml%c.%c%u %c%u, %c%u, d%u[%u]\n", INSN(10,10) ? 's' : 'a',
+ DIP("vml%c.%c%d %c%u, %c%u, d%u[%u]\n", INSN(10,10) ? 's' : 'a',
INSN(8,8) ? 'f' : 'i', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, mreg, index);
return True;
@@ -5412,7 +5412,7 @@
op2 = INSN(10,10) ? sub : add;
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
putQReg(dreg, binop(op2, getQReg(dreg), mkexpr(res)), condT);
- DIP("vml%cl.%c%u q%u, d%u, d%u[%u]\n",
+ DIP("vml%cl.%c%d q%u, d%u, d%u[%u]\n",
INSN(10,10) ? 's' : 'a', U ? 'u' : 's',
8 << size, dreg, nreg, mreg, index);
return True;
@@ -5487,7 +5487,7 @@
setFlag_QC(mkexpr(tmp), binop(add, getQReg(dreg), mkexpr(res)),
True, condT);
putQReg(dreg, binop(add, getQReg(dreg), mkexpr(res)), condT);
- DIP("vqdml%cl.s%u q%u, d%u, d%u[%u]\n", P ? 's' : 'a', 8 << size,
+ DIP("vqdml%cl.s%d q%u, d%u, d%u[%u]\n", P ? 's' : 'a', 8 << size,
dreg, nreg, mreg, index);
return True;
}
@@ -5583,7 +5583,7 @@
putQReg(dreg, mkexpr(res), condT);
else
putDRegI64(dreg, mkexpr(res), condT);
- DIP("vmul.%c%u %c%u, %c%u, d%u[%u]\n", INSN(8,8) ? 'f' : 'i',
+ DIP("vmul.%c%d %c%u, %c%u, d%u[%u]\n", INSN(8,8) ? 'f' : 'i',
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', nreg, mreg, index);
return True;
@@ -5628,7 +5628,7 @@
}
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
putQReg(dreg, mkexpr(res), condT);
- DIP("vmull.%c%u q%u, d%u, d%u[%u]\n", U ? 'u' : 's', 8 << size, dreg,
+ DIP("vmull.%c%d q%u, d%u, d%u[%u]\n", U ? 'u' : 's', 8 << size, dreg,
nreg, mreg, index);
return True;
}
@@ -5691,7 +5691,7 @@
binop(op2, mkexpr(arg_m), mkU64(imm))),
mkU64(0),
False, condT);
- DIP("vqdmull.s%u q%u, d%u, d%u[%u]\n", 8 << size, dreg, nreg, mreg,
+ DIP("vqdmull.s%d q%u, d%u, d%u[%u]\n", 8 << size, dreg, nreg, mreg,
index);
return True;
}
@@ -5788,7 +5788,7 @@
putQReg(dreg, mkexpr(res), condT);
else
putDRegI64(dreg, mkexpr(res), condT);
- DIP("vqdmulh.s%u %c%u, %c%u, d%u[%u]\n",
+ DIP("vqdmulh.s%d %c%u, %c%u, d%u[%u]\n",
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', nreg, mreg, index);
return True;
@@ -5886,7 +5886,7 @@
putQReg(dreg, mkexpr(res), condT);
else
putDRegI64(dreg, mkexpr(res), condT);
- DIP("vqrdmulh.s%u %c%u, %c%u, d%u[%u]\n",
+ DIP("vqrdmulh.s%d %c%u, %c%u, d%u[%u]\n",
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', nreg, mreg, index);
return True;
@@ -6036,7 +6036,7 @@
putDRegI64(dreg, binop(add, mkexpr(res), getDRegI64(dreg)),
condT);
}
- DIP("vrsra.%c%u %c%u, %c%u, #%u\n",
+ DIP("vrsra.%c%d %c%u, %c%u, #%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
} else {
@@ -6045,7 +6045,7 @@
} else {
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vrshr.%c%u %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vrshr.%c%d %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
}
return True;
@@ -6113,7 +6113,7 @@
putDRegI64(dreg, binop(add, mkexpr(res), getDRegI64(dreg)),
condT);
}
- DIP("vsra.%c%u %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vsra.%c%d %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
} else {
if (Q) {
@@ -6121,7 +6121,7 @@
} else {
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vshr.%c%u %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vshr.%c%d %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
}
return True;
@@ -6170,7 +6170,7 @@
mkU8(shift_imm))));
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vsri.%u %c%u, %c%u, #%u\n",
+ DIP("vsri.%d %c%u, %c%u, #%u\n",
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg, shift_imm);
return True;
@@ -6219,7 +6219,7 @@
mkU8(shift_imm))));
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vsli.%u %c%u, %c%u, #%u\n",
+ DIP("vsli.%d %c%u, %c%u, #%u\n",
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg, shift_imm);
return True;
@@ -6245,7 +6245,7 @@
} else {
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vshl.i%u %c%u, %c%u, #%u\n",
+ DIP("vshl.i%d %c%u, %c%u, #%u\n",
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg, shift_imm);
return True;
@@ -6277,7 +6277,7 @@
default:
vassert(0);
}
- DIP("vqshl.u%u %c%u, %c%u, #%u\n",
+ DIP("vqshl.u%d %c%u, %c%u, #%u\n",
8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
} else {
@@ -6301,7 +6301,7 @@
default:
vassert(0);
}
- DIP("vqshlu.s%u %c%u, %c%u, #%u\n",
+ DIP("vqshlu.s%d %c%u, %c%u, #%u\n",
8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
}
@@ -6328,7 +6328,7 @@
default:
vassert(0);
}
- DIP("vqshl.s%u %c%u, %c%u, #%u\n",
+ DIP("vqshl.s%d %c%u, %c%u, #%u\n",
8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
}
@@ -6388,7 +6388,7 @@
mkexpr(reg_m),
mkU8(shift_imm))));
putDRegI64(dreg, mkexpr(res), condT);
- DIP("vshrn.i%u d%u, q%u, #%u\n", 8 << size, dreg, mreg,
+ DIP("vshrn.i%d d%u, q%u, #%u\n", 8 << size, dreg, mreg,
shift_imm);
return True;
} else {
@@ -6438,10 +6438,10 @@
imm_val))));
putDRegI64(dreg, mkexpr(res), condT);
if (shift_imm == 0) {
- DIP("vmov%u d%u, q%u, #%u\n", 8 << size, dreg, mreg,
+ DIP("vmov%d d%u, q%u, #%u\n", 8 << size, dreg, mreg,
shift_imm);
} else {
- DIP("vrshrn.i%u d%u, q%u, #%u\n", 8 << size, dreg, mreg,
+ DIP("vrshrn.i%d d%u, q%u, #%u\n", 8 << size, dreg, mreg,
shift_imm);
}
return True;
@@ -6476,7 +6476,7 @@
default:
vassert(0);
}
- DIP("vq%sshrn.%c%u d%u, q%u, #%u\n", B ? "r" : "",
+ DIP("vq%sshrn.%c%d d%u, q%u, #%u\n", B ? "r" : "",
U ? 'u' : 's', 8 << size, dreg, mreg, shift_imm);
} else {
vassert(U);
@@ -6499,7 +6499,7 @@
default:
vassert(0);
}
- DIP("vq%sshrun.s%u d%u, q%u, #%u\n", B ? "r" : "",
+ DIP("vq%sshrun.s%d d%u, q%u, #%u\n", B ? "r" : "",
8 << size, dreg, mreg, shift_imm);
}
if (B) {
@@ -6570,10 +6570,10 @@
assign(res, binop(op, unop(cvt, getDRegI64(mreg)), mkU8(shift_imm)));
putQReg(dreg, mkexpr(res), condT);
if (shift_imm == 0) {
- DIP("vmovl.%c%u q%u, d%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vmovl.%c%d q%u, d%u\n", U ? 'u' : 's', 8 << size,
dreg, mreg);
} else {
- DIP("vshll.%c%u q%u, d%u, #%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vshll.%c%d q%u, d%u, #%u\n", U ? 'u' : 's', 8 << size,
dreg, mreg, shift_imm);
}
return True;
@@ -6662,7 +6662,7 @@
vassert(0);
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vrev64.%u %c%u, %c%u\n", 8 << size,
+ DIP("vrev64.%d %c%u, %c%u\n", 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
@@ -6683,7 +6683,7 @@
vassert(0);
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vrev32.%u %c%u, %c%u\n", 8 << size,
+ DIP("vrev32.%d %c%u, %c%u\n", 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
@@ -6702,7 +6702,7 @@
vassert(0);
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vrev16.%u %c%u, %c%u\n", 8 << size,
+ DIP("vrev16.%d %c%u, %c%u\n", 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
@@ -6731,7 +6731,7 @@
}
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vpaddl.%c%u %c%u, %c%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vpaddl.%c%d %c%u, %c%u\n", U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
@@ -6749,7 +6749,7 @@
default: vassert(0);
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vcls.s%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+ DIP("vcls.s%d %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
}
@@ -6764,7 +6764,7 @@
default: vassert(0);
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vclz.i%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+ DIP("vclz.i%d %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
}
@@ -6836,7 +6836,7 @@
}
assign(res, binop(add_op, unop(op, mkexpr(arg_m)),
mkexpr(arg_d)));
- DIP("vpadal.%c%u %c%u, %c%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vpadal.%c%d %c%u, %c%u\n", U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
@@ -6898,7 +6898,7 @@
mkexpr(mask)),
neg2)));
setFlag_QC(mkexpr(res), mkexpr(tmp), Q, condT);
- DIP("vqabs.s%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+ DIP("vqabs.s%d %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
}
@@ -6932,7 +6932,7 @@
assign(res, binop(op, zero, mkexpr(arg_m)));
setFlag_QC(mkexpr(res), binop(op2, zero, mkexpr(arg_m)),
Q, condT);
- DIP("vqneg.s%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+ DIP("vqneg.s%d %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
}
@@ -6981,7 +6981,7 @@
}
}
assign(res, binop(op, mkexpr(arg_m), zero));
- DIP("vcgt.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+ DIP("vcgt.%c%d %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
@@ -7012,7 +7012,7 @@
assign(res, unop(Q ? Iop_NotV128 : Iop_Not64,
binop(op, zero, mkexpr(arg_m))));
}
- DIP("vcge.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+ DIP("vcge.%c%d %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
@@ -7043,7 +7043,7 @@
assign(res, unop(Q ? Iop_NotV128 : Iop_Not64,
unop(op, mkexpr(arg_m))));
}
- DIP("vceq.%c%u %c%u, %c%u, #0\n", F ? 'f' : 'i', 8 << size,
+ DIP("vceq.%c%d %c%u, %c%u, #0\n", F ? 'f' : 'i', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
@@ -7074,7 +7074,7 @@
assign(res, unop(Q ? Iop_NotV128 : Iop_Not64,
binop(op, mkexpr(arg_m), zero)));
}
- DIP("vcle.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+ DIP("vcle.%c%d %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
@@ -7104,7 +7104,7 @@
}
assign(res, binop(op, zero, mkexpr(arg_m)));
}
- DIP("vclt.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+ DIP("vclt.%c%d %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
@@ -7126,7 +7126,7 @@
assign(res, unop(Q ? Iop_Abs32Fx4 : Iop_Abs32Fx2,
mkexpr(arg_m)));
}
- DIP("vabs.%c%u %c%u, %c%u\n",
+ DIP("vabs.%c%d %c%u, %c%u\n",
F ? 'f' : 's', 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
@@ -7157,7 +7157,7 @@
}
assign(res, binop(op, zero, mkexpr(arg_m)));
}
- DIP("vneg.%c%u %c%u, %c%u\n",
+ DIP("vneg.%c%d %c%u, %c%u\n",
F ? 'f' : 's', 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
@@ -7255,7 +7255,7 @@
putDRegI64(dreg, mkexpr(new_d), condT);
putDRegI64(mreg, mkexpr(new_m), condT);
}
- DIP("vtrn.%u %c%u, %c%u\n",
+ DIP("vtrn.%d %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
return True;
} else if ((B >> 1) == 2) {
@@ -7306,7 +7306,7 @@
putDRegI64(dreg, mkexpr(new_d), condT);
putDRegI64(mreg, mkexpr(new_m), condT);
}
- DIP("vuzp.%u %c%u, %c%u\n",
+ DIP("vuzp.%d %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
return True;
} else if ((B >> 1) == 3) {
@@ -7357,7 +7357,7 @@
putDRegI64(dreg, mkexpr(new_d), condT);
putDRegI64(mreg, mkexpr(new_m), condT);
}
- DIP("vzip.%u %c%u, %c%u\n",
+ DIP("vzip.%d %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
return True;
} else if (B == 8) {
@@ -7372,7 +7372,7 @@
default: vassert(0);
}
putDRegI64(dreg, unop(op, getQReg(mreg)), condT);
- DIP("vmovn.i%u d%u, q%u\n", 16 << size, dreg, mreg);
+ DIP("vmovn.i%d d%u, q%u\n", 16 << size, dreg, mreg);
return True;
} else if (B == 9 || (B >> 1) == 5) {
/* VQMOVN, VQMOVUN */
@@ -7401,7 +7401,7 @@
case 3: return False;
default: vassert(0);
}
- DIP("vqmovun.s%u d%u, q%u\n", 16 << size, dreg, mreg);
+ DIP("vqmovun.s%d d%u, q%u\n", 16 << size, dreg, mreg);
break;
case 2:
switch (size) {
@@ -7411,7 +7411,7 @@
case 3: return False;
default: vassert(0);
}
- DIP("vqmovn.s%u d%u, q%u\n", 16 << size, dreg, mreg);
+ DIP("vqmovn.s%d d%u, q%u\n", 16 << size, dreg, mreg);
break;
case 3:
switch (size) {
@@ -7421,7 +7421,7 @@
case 3: return False;
default: vassert(0);
}
- DIP("vqmovn.u%u d%u, q%u\n", 16 << size, dreg, mreg);
+ DIP("vqmovn.u%d d%u, q%u\n", 16 << size, dreg, mreg);
break;
default:
vassert(0);
@@ -7454,7 +7454,7 @@
assign(res, binop(op, unop(cvt, getDRegI64(mreg)),
mkU8(shift_imm)));
putQReg(dreg, mkexpr(res), condT);
- DIP("vshll.i%u q%u, d%u, #%u\n", 8 << size, dreg, mreg, 8 << size);
+ DIP("vshll.i%d q%u, d%u, #%d\n", 8 << size, dreg, mreg, 8 << size);
return True;
} else if ((B >> 3) == 3 && (B & 3) == 0) {
/* VCVT (half<->single) */
@@ -8387,7 +8387,7 @@
mk_neon_elem_load_to_one_lane(rD, inc, i, N, size, addr);
else
mk_neon_elem_store_from_one_lane(rD, inc, i, N, size, addr);
- DIP("v%s%u.%u {", bL ? "ld" : "st", N + 1, 8 << size);
+ DIP("v%s%u.%d {", bL ? "ld" : "st", N + 1, 8 << size);
for (j = 0; j <= N; j++) {
if (j)
DIP(", ");
@@ -8482,7 +8482,7 @@
}
}
}
- DIP("vld%u.%u {", N + 1, 8 << size);
+ DIP("vld%u.%d {", N + 1, 8 << size);
for (r = 0; r < regs; r++) {
for (i = 0; i <= N; i++) {
if (i || r)
@@ -8783,7 +8783,7 @@
putIRegA(rN, e, IRTemp_INVALID, Ijk_Boring);
}
- DIP("v%s%u.%u {", bL ? "ld" : "st", N + 1, 8 << INSN(7,6));
+ DIP("v%s%u.%d {", bL ? "ld" : "st", N + 1, 8 << INSN(7,6));
if ((inc == 1 && regs * (N + 1) > 1)
|| (inc == 2 && regs > 1 && N > 0)) {
DIP("d%u-d%u", rD, rD + regs * (N + 1) - 1);
@@ -12674,9 +12674,9 @@
transfer last for a load and first for a store. Requires
reordering xOff/xReg. */
if (0) {
- vex_printf("\nREG_LIST_PRE: (rN=%d)\n", rN);
+ vex_printf("\nREG_LIST_PRE: (rN=%u)\n", rN);
for (i = 0; i < nX; i++)
- vex_printf("reg %d off %d\n", xReg[i], xOff[i]);
+ vex_printf("reg %u off %u\n", xReg[i], xOff[i]);
vex_printf("\n");
}
@@ -12715,7 +12715,7 @@
if (0) {
vex_printf("REG_LIST_POST:\n");
for (i = 0; i < nX; i++)
- vex_printf("reg %d off %d\n", xReg[i], xOff[i]);
+ vex_printf("reg %u off %u\n", xReg[i], xOff[i]);
vex_printf("\n");
}
}
@@ -13398,7 +13398,7 @@
default:
vassert(0);
}
- DIP("vdup.%u q%u, r%u\n", 32 / (1<<size), rD, rT);
+ DIP("vdup.%d q%u, r%u\n", 32 / (1<<size), rD, rT);
} else {
switch (size) {
case 0:
@@ -13415,7 +13415,7 @@
default:
vassert(0);
}
- DIP("vdup.%u d%u, r%u\n", 32 / (1<<size), rD, rT);
+ DIP("vdup.%d d%u, r%u\n", 32 / (1<<size), rD, rT);
}
goto decode_success_vfp;
}
@@ -16303,7 +16303,7 @@
vassert(0); // guarded by "if" above
}
putIRegA(rD, mkexpr(dstT), condT, Ijk_Boring);
- DIP("%s%s r%u, r%u, ROR #%u\n", nm, nCC(INSN_COND), rD, rM, rot);
+ DIP("%s%s r%u, r%u, ROR #%d\n", nm, nCC(INSN_COND), rD, rM, rot);
goto decode_success;
}
/* fall through */
@@ -17305,9 +17305,9 @@
if (sigill_diag) {
vex_printf("disInstr(arm): unhandled instruction: "
"0x%x\n", insn);
- vex_printf(" cond=%d(0x%x) 27:20=%u(0x%02x) "
+ vex_printf(" cond=%d(0x%x) 27:20=%d(0x%02x) "
"4:4=%d "
- "3:0=%u(0x%x)\n",
+ "3:0=%d(0x%x)\n",
(Int)INSN_COND, (UInt)INSN_COND,
(Int)INSN(27,20), (UInt)INSN(27,20),
(Int)INSN(4,4),
@@ -21549,7 +21549,7 @@
UInt bW = INSN0(5,5);
UInt imm2 = INSN1(5,4);
if (!isBadRegT(rM)) {
- DIP("pld%s [r%u, r%u, lsl %d]\n", bW ? "w" : "", rN, rM, imm2);
+ DIP("pld%s [r%u, r%u, lsl %u]\n", bW ? "w" : "", rN, rM, imm2);
goto decode_success;
}
/* fall through */
Modified: trunk/priv/guest_mips_toIR.c
==============================================================================
--- trunk/priv/guest_mips_toIR.c (original)
+++ trunk/priv/guest_mips_toIR.c Mon Aug 3 17:03:13 2015
@@ -1554,13 +1554,13 @@
case 0x3A:
if ((regRs & 0x01) == 0) {
/* Doubleword Shift Right Logical - DSRL; MIPS64 */
- DIP("dsrl r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("dsrl r%u, r%u, %lld", regRd, regRt, sImmsa);
assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa)));
putIReg(regRd, mkexpr(tmpRd));
} else if ((regRs & 0x01) == 1) {
/* Doubleword Rotate Right - DROTR; MIPS64r2 */
vassert(mode64);
- DIP("drotr r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("drotr r%u, r%u, %lld", regRd, regRt, sImmsa);
IRTemp tmpL = newTemp(ty);
IRTemp tmpR = newTemp(ty);
assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa)));
@@ -1575,12 +1575,12 @@
case 0x3E:
if ((regRs & 0x01) == 0) {
/* Doubleword Shift Right Logical Plus 32 - DSRL32; MIPS64 */
- DIP("dsrl32 r%u, r%u, %d", regRd, regRt, (Int)(sImmsa + 32));
+ DIP("dsrl32 r%u, r%u, %lld", regRd, regRt, sImmsa + 32);
assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
putIReg(regRd, mkexpr(tmpRd));
} else if ((regRs & 0x01) == 1) {
/* Doubleword Rotate Right Plus 32 - DROTR32; MIPS64r2 */
- DIP("drotr32 r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("drotr32 r%u, r%u, %lld", regRd, regRt, sImmsa);
vassert(mode64);
IRTemp tmpL = newTemp(ty);
IRTemp tmpR = newTemp(ty);
@@ -1634,14 +1634,14 @@
break;
case 0x38: /* Doubleword Shift Left Logical - DSLL; MIPS64 */
- DIP("dsll r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("dsll r%u, r%u, %lld", regRd, regRt, sImmsa);
vassert(mode64);
assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(uImmsa)));
putIReg(regRd, mkexpr(tmpRd));
break;
case 0x3C: /* Doubleword Shift Left Logical Plus 32 - DSLL32; MIPS64 */
- DIP("dsll32 r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("dsll32 r%u, r%u, %lld", regRd, regRt, sImmsa);
assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
...
[truncated message content] |
|
From: <sv...@va...> - 2015-08-03 10:48:26
|
Author: iraisr
Date: Mon Aug 3 11:48:16 2015
New Revision: 15477
Log:
Fix a typo in README.solaris.
Spotted by Florian Krohm <fl...@ei...>.
Modified:
trunk/README.solaris
Modified: trunk/README.solaris
==============================================================================
--- trunk/README.solaris (original)
+++ trunk/README.solaris Mon Aug 3 11:48:16 2015
@@ -74,11 +74,11 @@
itself it is not possible to reverse map the intended pathname.
Examples are fexecve(3C) and isaexec(3C).
- Program headers PT_SUNW_SYSSTAT and PT_SUNW_SYSSTAT_ZONE are not supported.
- That is, programs linked with mapfile directive RESERVE_SEGMENT and atribute
+ That is, programs linked with mapfile directive RESERVE_SEGMENT and attribute
TYPE equal to SYSSTAT or SYSSTAT_ZONE will cause Valgrind exit. It is not
possible for Valgrind to arrange mapping of a kernel shared page at the
address specified in the mapfile for the guest application. There is currently
- no such mechanism in Solaris.
+ no such mechanism in Solaris. Hacky workarounds are possible, though.
- When a thread has no stack then all system calls will result in Valgrind
crash, even though such system calls use just parameters passed in registers.
This should happen only in pathological situations when a thread is created
|
|
From: Florian K. <fl...@ei...> - 2015-08-03 09:33:49
|
You ought to run "make regtest" when making changes that have the potential to break stuff. There are zillions of messages like this one: none/tests/tilegx/Makefile.am:1: error: insn_test_ldnt4u_add_X1.stderr.exp is in EXTRA_DIST but doesn't exist none/tests/tilegx/Makefile.am:1: error: insn_test_ldnt4u_add_X1.vgtest is in EXTRA_DIST but doesn't exist none/tests/tilegx/Makefile.am:1: error: insn_test_ldnt_add_X1.stdout.exp is in EXTRA_DIST but doesn't exist Also make sure that "make dist BUILD_ALL_DOCS=no" runs successfully to completion. Florian On 01.08.2015 05:52, sv...@va... wrote: > Author: zliu > Date: Sat Aug 1 04:52:03 2015 > New Revision: 15466 > > Log: > Merge tilegx instruction test patch "valgrind-tilegx-instruction-test.patch" > in Bug 345921 - Add TileGX instruction set test in none/tests/tilegx > Submitted by Liming Sun, ls...@ez... > > > Added: > trunk/none/tests/tilegx/filter_stderr > trunk/none/tests/tilegx/gen_insn_test.c > trunk/none/tests/tilegx/gen_test.sh > Modified: > trunk/none/tests/tilegx/Makefile.am > > Modified: trunk/none/tests/tilegx/Makefile.am > ============================================================================== > --- trunk/none/tests/tilegx/Makefile.am (original) > +++ trunk/none/tests/tilegx/Makefile.am Sat Aug 1 04:52:03 2015 > @@ -1,13 +1,1614 @@ > > include $(top_srcdir)/Makefile.tool-tests.am > > -EXTRA_DIST = > +dist_noinst_SCRIPTS = \ > + filter_stderr > + > +EXTRA_DIST = \ > + insn_test_move_X0.stdout.exp insn_test_move_X0.stderr.exp \ > + insn_test_move_X0.vgtest \ > + insn_test_move_X1.stdout.exp insn_test_move_X1.stderr.exp \ > + insn_test_move_X1.vgtest \ > + insn_test_move_Y0.stdout.exp insn_test_move_Y0.stderr.exp \ > + insn_test_move_Y0.vgtest \ > + insn_test_move_Y1.stdout.exp insn_test_move_Y1.stderr.exp \ > + insn_test_move_Y1.vgtest \ > + insn_test_movei_X0.stdout.exp insn_test_movei_X0.stderr.exp \ > + insn_test_movei_X0.vgtest \ > + insn_test_movei_X1.stdout.exp insn_test_movei_X1.stderr.exp \ > + 
insn_test_movei_X1.vgtest \ > + insn_test_movei_Y0.stdout.exp insn_test_movei_Y0.stderr.exp \ > + insn_test_movei_Y0.vgtest \ > + insn_test_movei_Y1.stdout.exp insn_test_movei_Y1.stderr.exp \ > + insn_test_movei_Y1.vgtest \ > + insn_test_moveli_X0.stdout.exp insn_test_moveli_X0.stderr.exp \ > + insn_test_moveli_X0.vgtest \ > + insn_test_moveli_X1.stdout.exp insn_test_moveli_X1.stderr.exp \ > + insn_test_moveli_X1.vgtest \ > + insn_test_prefetch_X1.stdout.exp insn_test_prefetch_X1.stderr.exp \ > + insn_test_prefetch_X1.vgtest \ > + insn_test_prefetch_Y2.stdout.exp insn_test_prefetch_Y2.stderr.exp \ > + insn_test_prefetch_Y2.vgtest \ > + insn_test_prefetch_l1_X1.stdout.exp \ > + insn_test_prefetch_l1_X1.stderr.exp \ > + insn_test_prefetch_l1_X1.vgtest \ > + insn_test_prefetch_l1_Y2.stdout.exp \ > + insn_test_prefetch_l1_Y2.stderr.exp \ > + insn_test_prefetch_l1_Y2.vgtest \ > + insn_test_prefetch_l2_X1.stdout.exp \ > + insn_test_prefetch_l2_X1.stderr.exp \ > + insn_test_prefetch_l2_X1.vgtest \ > + insn_test_prefetch_l2_Y2.stdout.exp \ > + insn_test_prefetch_l2_Y2.stderr.exp \ > + insn_test_prefetch_l2_Y2.vgtest \ > + insn_test_prefetch_l3_X1.stdout.exp \ > + insn_test_prefetch_l3_X1.stderr.exp \ > + insn_test_prefetch_l3_X1.vgtest \ > + insn_test_prefetch_l3_Y2.stdout.exp \ > + insn_test_prefetch_l3_Y2.stderr.exp \ > + insn_test_prefetch_l3_Y2.vgtest \ > + insn_test_add_X0.stdout.exp insn_test_add_X0.stderr.exp \ > + insn_test_add_X0.vgtest \ > + insn_test_add_X1.stdout.exp insn_test_add_X1.stderr.exp \ > + insn_test_add_X1.vgtest \ > + insn_test_add_Y0.stdout.exp insn_test_add_Y0.stderr.exp \ > + insn_test_add_Y0.vgtest \ > + insn_test_add_Y1.stdout.exp insn_test_add_Y1.stderr.exp \ > + insn_test_add_Y1.vgtest \ > + insn_test_addi_X0.stdout.exp insn_test_addi_X0.stderr.exp \ > + insn_test_addi_X0.vgtest \ > + insn_test_addi_X1.stdout.exp insn_test_addi_X1.stderr.exp \ > + insn_test_addi_X1.vgtest \ > + insn_test_addi_Y0.stdout.exp insn_test_addi_Y0.stderr.exp \ > + 
insn_test_addi_Y0.vgtest \ > + insn_test_addi_Y1.stdout.exp insn_test_addi_Y1.stderr.exp \ > + insn_test_addi_Y1.vgtest \ > + insn_test_addli_X0.stdout.exp insn_test_addli_X0.stderr.exp \ > + insn_test_addli_X0.vgtest \ > + insn_test_addli_X1.stdout.exp insn_test_addli_X1.stderr.exp \ > + insn_test_addli_X1.vgtest \ > + insn_test_addx_X0.stdout.exp insn_test_addx_X0.stderr.exp \ > + insn_test_addx_X0.vgtest \ > + insn_test_addx_X1.stdout.exp insn_test_addx_X1.stderr.exp \ > + insn_test_addx_X1.vgtest \ > + insn_test_addx_Y0.stdout.exp insn_test_addx_Y0.stderr.exp \ > + insn_test_addx_Y0.vgtest \ > + insn_test_addx_Y1.stdout.exp insn_test_addx_Y1.stderr.exp \ > + insn_test_addx_Y1.vgtest \ > + insn_test_addxi_X0.stdout.exp insn_test_addxi_X0.stderr.exp \ > + insn_test_addxi_X0.vgtest \ > + insn_test_addxi_X1.stdout.exp insn_test_addxi_X1.stderr.exp \ > + insn_test_addxi_X1.vgtest \ > + insn_test_addxi_Y0.stdout.exp insn_test_addxi_Y0.stderr.exp \ > + insn_test_addxi_Y0.vgtest \ > + insn_test_addxi_Y1.stdout.exp insn_test_addxi_Y1.stderr.exp \ > + insn_test_addxi_Y1.vgtest \ > + insn_test_addxli_X0.stdout.exp insn_test_addxli_X0.stderr.exp \ > + insn_test_addxli_X0.vgtest \ > + insn_test_addxli_X1.stdout.exp insn_test_addxli_X1.stderr.exp \ > + insn_test_addxli_X1.vgtest \ > + insn_test_addxsc_X0.stdout.exp insn_test_addxsc_X0.stderr.exp \ > + insn_test_addxsc_X0.vgtest \ > + insn_test_addxsc_X1.stdout.exp insn_test_addxsc_X1.stderr.exp \ > + insn_test_addxsc_X1.vgtest \ > + insn_test_and_X0.stdout.exp insn_test_and_X0.stderr.exp \ > + insn_test_and_X0.vgtest \ > + insn_test_and_X1.stdout.exp insn_test_and_X1.stderr.exp \ > + insn_test_and_X1.vgtest \ > + insn_test_and_Y0.stdout.exp insn_test_and_Y0.stderr.exp \ > + insn_test_and_Y0.vgtest \ > + insn_test_and_Y1.stdout.exp insn_test_and_Y1.stderr.exp \ > + insn_test_and_Y1.vgtest \ > + insn_test_andi_X0.stdout.exp insn_test_andi_X0.stderr.exp \ > + insn_test_andi_X0.vgtest \ > + insn_test_andi_X1.stdout.exp 
insn_test_andi_X1.stderr.exp \ > + insn_test_andi_X1.vgtest \ > + insn_test_andi_Y0.stdout.exp insn_test_andi_Y0.stderr.exp \ > + insn_test_andi_Y0.vgtest \ > + insn_test_andi_Y1.stdout.exp insn_test_andi_Y1.stderr.exp \ > + insn_test_andi_Y1.vgtest \ > + insn_test_beqz_X1.stdout.exp insn_test_beqz_X1.stderr.exp \ > + insn_test_beqz_X1.vgtest \ > + insn_test_beqzt_X1.stdout.exp insn_test_beqzt_X1.stderr.exp \ > + insn_test_beqzt_X1.vgtest \ > + insn_test_bfexts_X0.stdout.exp insn_test_bfexts_X0.stderr.exp \ > + insn_test_bfexts_X0.vgtest \ > + insn_test_bfextu_X0.stdout.exp insn_test_bfextu_X0.stderr.exp \ > + insn_test_bfextu_X0.vgtest \ > + insn_test_bfins_X0.stdout.exp insn_test_bfins_X0.stderr.exp \ > + insn_test_bfins_X0.vgtest \ > + insn_test_bgez_X1.stdout.exp insn_test_bgez_X1.stderr.exp \ > + insn_test_bgez_X1.vgtest \ > + insn_test_bgezt_X1.stdout.exp insn_test_bgezt_X1.stderr.exp \ > + insn_test_bgezt_X1.vgtest \ > + insn_test_bgtz_X1.stdout.exp insn_test_bgtz_X1.stderr.exp \ > + insn_test_bgtz_X1.vgtest \ > + insn_test_bgtzt_X1.stdout.exp insn_test_bgtzt_X1.stderr.exp \ > + insn_test_bgtzt_X1.vgtest \ > + insn_test_blbc_X1.stdout.exp insn_test_blbc_X1.stderr.exp \ > + insn_test_blbc_X1.vgtest \ > + insn_test_blbct_X1.stdout.exp insn_test_blbct_X1.stderr.exp \ > + insn_test_blbct_X1.vgtest \ > + insn_test_blbs_X1.stdout.exp insn_test_blbs_X1.stderr.exp \ > + insn_test_blbs_X1.vgtest \ > + insn_test_blbst_X1.stdout.exp insn_test_blbst_X1.stderr.exp \ > + insn_test_blbst_X1.vgtest \ > + insn_test_blez_X1.stdout.exp insn_test_blez_X1.stderr.exp \ > + insn_test_blez_X1.vgtest \ > + insn_test_blezt_X1.stdout.exp insn_test_blezt_X1.stderr.exp \ > + insn_test_blezt_X1.vgtest \ > + insn_test_bltz_X1.stdout.exp insn_test_bltz_X1.stderr.exp \ > + insn_test_bltz_X1.vgtest \ > + insn_test_bltzt_X1.stdout.exp insn_test_bltzt_X1.stderr.exp \ > + insn_test_bltzt_X1.vgtest \ > + insn_test_bnez_X1.stdout.exp insn_test_bnez_X1.stderr.exp \ > + insn_test_bnez_X1.vgtest \ > 
+ insn_test_bnezt_X1.stdout.exp insn_test_bnezt_X1.stderr.exp \ > + insn_test_bnezt_X1.vgtest \ > + insn_test_clz_X0.stdout.exp insn_test_clz_X0.stderr.exp \ > + insn_test_clz_X0.vgtest \ > + insn_test_clz_Y0.stdout.exp insn_test_clz_Y0.stderr.exp \ > + insn_test_clz_Y0.vgtest \ > + insn_test_cmoveqz_X0.stdout.exp insn_test_cmoveqz_X0.stderr.exp \ > + insn_test_cmoveqz_X0.vgtest \ > + insn_test_cmoveqz_Y0.stdout.exp insn_test_cmoveqz_Y0.stderr.exp \ > + insn_test_cmoveqz_Y0.vgtest \ > + insn_test_cmovnez_X0.stdout.exp insn_test_cmovnez_X0.stderr.exp \ > + insn_test_cmovnez_X0.vgtest \ > + insn_test_cmovnez_Y0.stdout.exp insn_test_cmovnez_Y0.stderr.exp \ > + insn_test_cmovnez_Y0.vgtest \ > + insn_test_cmpeq_X0.stdout.exp insn_test_cmpeq_X0.stderr.exp \ > + insn_test_cmpeq_X0.vgtest \ > + insn_test_cmpeq_X1.stdout.exp insn_test_cmpeq_X1.stderr.exp \ > + insn_test_cmpeq_X1.vgtest \ > + insn_test_cmpeq_Y0.stdout.exp insn_test_cmpeq_Y0.stderr.exp \ > + insn_test_cmpeq_Y0.vgtest \ > + insn_test_cmpeq_Y1.stdout.exp insn_test_cmpeq_Y1.stderr.exp \ > + insn_test_cmpeq_Y1.vgtest \ > + insn_test_cmpeqi_X0.stdout.exp insn_test_cmpeqi_X0.stderr.exp \ > + insn_test_cmpeqi_X0.vgtest \ > + insn_test_cmpeqi_X1.stdout.exp insn_test_cmpeqi_X1.stderr.exp \ > + insn_test_cmpeqi_X1.vgtest \ > + insn_test_cmpeqi_Y0.stdout.exp insn_test_cmpeqi_Y0.stderr.exp \ > + insn_test_cmpeqi_Y0.vgtest \ > + insn_test_cmpeqi_Y1.stdout.exp insn_test_cmpeqi_Y1.stderr.exp \ > + insn_test_cmpeqi_Y1.vgtest \ > + insn_test_cmples_X0.stdout.exp insn_test_cmples_X0.stderr.exp \ > + insn_test_cmples_X0.vgtest \ > + insn_test_cmples_X1.stdout.exp insn_test_cmples_X1.stderr.exp \ > + insn_test_cmples_X1.vgtest \ > + insn_test_cmples_Y0.stdout.exp insn_test_cmples_Y0.stderr.exp \ > + insn_test_cmples_Y0.vgtest \ > + insn_test_cmples_Y1.stdout.exp insn_test_cmples_Y1.stderr.exp \ > + insn_test_cmples_Y1.vgtest \ > + insn_test_cmpleu_X0.stdout.exp insn_test_cmpleu_X0.stderr.exp \ > + insn_test_cmpleu_X0.vgtest \ > 
+ insn_test_cmpleu_X1.stdout.exp insn_test_cmpleu_X1.stderr.exp \ > + insn_test_cmpleu_X1.vgtest \ > + insn_test_cmpleu_Y0.stdout.exp insn_test_cmpleu_Y0.stderr.exp \ > + insn_test_cmpleu_Y0.vgtest \ > + insn_test_cmpleu_Y1.stdout.exp insn_test_cmpleu_Y1.stderr.exp \ > + insn_test_cmpleu_Y1.vgtest \ > + insn_test_cmplts_X0.stdout.exp insn_test_cmplts_X0.stderr.exp \ > + insn_test_cmplts_X0.vgtest \ > + insn_test_cmplts_X1.stdout.exp insn_test_cmplts_X1.stderr.exp \ > + insn_test_cmplts_X1.vgtest \ > + insn_test_cmplts_Y0.stdout.exp insn_test_cmplts_Y0.stderr.exp \ > + insn_test_cmplts_Y0.vgtest \ > + insn_test_cmplts_Y1.stdout.exp insn_test_cmplts_Y1.stderr.exp \ > + insn_test_cmplts_Y1.vgtest \ > + insn_test_cmpltsi_X0.stdout.exp insn_test_cmpltsi_X0.stderr.exp \ > + insn_test_cmpltsi_X0.vgtest \ > + insn_test_cmpltsi_X1.stdout.exp insn_test_cmpltsi_X1.stderr.exp \ > + insn_test_cmpltsi_X1.vgtest \ > + insn_test_cmpltsi_Y0.stdout.exp insn_test_cmpltsi_Y0.stderr.exp \ > + insn_test_cmpltsi_Y0.vgtest \ > + insn_test_cmpltsi_Y1.stdout.exp insn_test_cmpltsi_Y1.stderr.exp \ > + insn_test_cmpltsi_Y1.vgtest \ > + insn_test_cmpltu_X0.stdout.exp insn_test_cmpltu_X0.stderr.exp \ > + insn_test_cmpltu_X0.vgtest \ > + insn_test_cmpltu_X1.stdout.exp insn_test_cmpltu_X1.stderr.exp \ > + insn_test_cmpltu_X1.vgtest \ > + insn_test_cmpltu_Y0.stdout.exp insn_test_cmpltu_Y0.stderr.exp \ > + insn_test_cmpltu_Y0.vgtest \ > + insn_test_cmpltu_Y1.stdout.exp insn_test_cmpltu_Y1.stderr.exp \ > + insn_test_cmpltu_Y1.vgtest \ > + insn_test_cmpltui_X0.stdout.exp insn_test_cmpltui_X0.stderr.exp \ > + insn_test_cmpltui_X0.vgtest \ > + insn_test_cmpltui_X1.stdout.exp insn_test_cmpltui_X1.stderr.exp \ > + insn_test_cmpltui_X1.vgtest \ > + insn_test_cmpne_X0.stdout.exp insn_test_cmpne_X0.stderr.exp \ > + insn_test_cmpne_X0.vgtest \ > + insn_test_cmpne_X1.stdout.exp insn_test_cmpne_X1.stderr.exp \ > + insn_test_cmpne_X1.vgtest \ > + insn_test_cmpne_Y0.stdout.exp insn_test_cmpne_Y0.stderr.exp \ > + 
insn_test_cmpne_Y0.vgtest \ > + insn_test_cmpne_Y1.stdout.exp insn_test_cmpne_Y1.stderr.exp \ > + insn_test_cmpne_Y1.vgtest \ > + insn_test_cmul_X0.stdout.exp insn_test_cmul_X0.stderr.exp \ > + insn_test_cmul_X0.vgtest \ > + insn_test_cmula_X0.stdout.exp insn_test_cmula_X0.stderr.exp \ > + insn_test_cmula_X0.vgtest \ > + insn_test_cmulaf_X0.stdout.exp insn_test_cmulaf_X0.stderr.exp \ > + insn_test_cmulaf_X0.vgtest \ > + insn_test_cmulf_X0.stdout.exp insn_test_cmulf_X0.stderr.exp \ > + insn_test_cmulf_X0.vgtest \ > + insn_test_cmulfr_X0.stdout.exp insn_test_cmulfr_X0.stderr.exp \ > + insn_test_cmulfr_X0.vgtest \ > + insn_test_cmulh_X0.stdout.exp insn_test_cmulh_X0.stderr.exp \ > + insn_test_cmulh_X0.vgtest \ > + insn_test_cmulhr_X0.stdout.exp insn_test_cmulhr_X0.stderr.exp \ > + insn_test_cmulhr_X0.vgtest \ > + insn_test_crc32_32_X0.stdout.exp insn_test_crc32_32_X0.stderr.exp \ > + insn_test_crc32_32_X0.vgtest \ > + insn_test_crc32_8_X0.stdout.exp insn_test_crc32_8_X0.stderr.exp \ > + insn_test_crc32_8_X0.vgtest \ > + insn_test_ctz_X0.stdout.exp insn_test_ctz_X0.stderr.exp \ > + insn_test_ctz_X0.vgtest \ > + insn_test_ctz_Y0.stdout.exp insn_test_ctz_Y0.stderr.exp \ > + insn_test_ctz_Y0.vgtest \ > + insn_test_dblalign_X0.stdout.exp insn_test_dblalign_X0.stderr.exp \ > + insn_test_dblalign_X0.vgtest \ > + insn_test_dblalign2_X0.stdout.exp insn_test_dblalign2_X0.stderr.exp \ > + insn_test_dblalign2_X0.vgtest \ > + insn_test_dblalign2_X1.stdout.exp insn_test_dblalign2_X1.stderr.exp \ > + insn_test_dblalign2_X1.vgtest \ > + insn_test_dblalign4_X0.stdout.exp insn_test_dblalign4_X0.stderr.exp \ > + insn_test_dblalign4_X0.vgtest \ > + insn_test_dblalign4_X1.stdout.exp insn_test_dblalign4_X1.stderr.exp \ > + insn_test_dblalign4_X1.vgtest \ > + insn_test_dblalign6_X0.stdout.exp insn_test_dblalign6_X0.stderr.exp \ > + insn_test_dblalign6_X0.vgtest \ > + insn_test_dblalign6_X1.stdout.exp insn_test_dblalign6_X1.stderr.exp \ > + insn_test_dblalign6_X1.vgtest \ > + 
insn_test_dtlbpr_X1.stdout.exp insn_test_dtlbpr_X1.stderr.exp \ > + insn_test_dtlbpr_X1.vgtest \ > + insn_test_fdouble_add_flags_X0.stdout.exp \ > + insn_test_fdouble_add_flags_X0.stderr.exp \ > + insn_test_fdouble_add_flags_X0.vgtest \ > + insn_test_fdouble_addsub_X0.stdout.exp \ > + insn_test_fdouble_addsub_X0.stderr.exp \ > + insn_test_fdouble_addsub_X0.vgtest \ > + insn_test_fdouble_mul_flags_X0.stdout.exp \ > + insn_test_fdouble_mul_flags_X0.stderr.exp \ > + insn_test_fdouble_mul_flags_X0.vgtest \ > + insn_test_fdouble_pack1_X0.stdout.exp \ > + insn_test_fdouble_pack1_X0.stderr.exp \ > + insn_test_fdouble_pack1_X0.vgtest \ > + insn_test_fdouble_pack2_X0.stdout.exp \ > + insn_test_fdouble_pack2_X0.stderr.exp \ > + insn_test_fdouble_pack2_X0.vgtest \ > + insn_test_fdouble_sub_flags_X0.stdout.exp \ > + insn_test_fdouble_sub_flags_X0.stderr.exp \ > + insn_test_fdouble_sub_flags_X0.vgtest \ > + insn_test_fdouble_unpack_max_X0.stdout.exp \ > + insn_test_fdouble_unpack_max_X0.stderr.exp \ > + insn_test_fdouble_unpack_max_X0.vgtest \ > + insn_test_fdouble_unpack_min_X0.stdout.exp \ > + insn_test_fdouble_unpack_min_X0.stderr.exp \ > + insn_test_fdouble_unpack_min_X0.vgtest \ > + insn_test_flushwb_X1.stdout.exp insn_test_flushwb_X1.stderr.exp \ > + insn_test_flushwb_X1.vgtest \ > + insn_test_fnop_X0.stdout.exp insn_test_fnop_X0.stderr.exp \ > + insn_test_fnop_X0.vgtest \ > + insn_test_fnop_X1.stdout.exp insn_test_fnop_X1.stderr.exp \ > + insn_test_fnop_X1.vgtest \ > + insn_test_fnop_Y0.stdout.exp insn_test_fnop_Y0.stderr.exp \ > + insn_test_fnop_Y0.vgtest \ > + insn_test_fnop_Y1.stdout.exp insn_test_fnop_Y1.stderr.exp \ > + insn_test_fnop_Y1.vgtest \ > + insn_test_fsingle_add1_X0.stdout.exp \ > + insn_test_fsingle_add1_X0.stderr.exp \ > + insn_test_fsingle_add1_X0.vgtest \ > + insn_test_fsingle_addsub2_X0.stdout.exp \ > + insn_test_fsingle_addsub2_X0.stderr.exp \ > + insn_test_fsingle_addsub2_X0.vgtest \ > + insn_test_fsingle_mul1_X0.stdout.exp \ > + 
insn_test_fsingle_mul1_X0.stderr.exp \ > + insn_test_fsingle_mul1_X0.vgtest \ > + insn_test_fsingle_mul2_X0.stdout.exp \ > + insn_test_fsingle_mul2_X0.stderr.exp \ > + insn_test_fsingle_mul2_X0.vgtest \ > + insn_test_fsingle_pack1_X0.stdout.exp \ > + insn_test_fsingle_pack1_X0.stderr.exp \ > + insn_test_fsingle_pack1_X0.vgtest \ > + insn_test_fsingle_pack1_Y0.stdout.exp \ > + insn_test_fsingle_pack1_Y0.stderr.exp \ > + insn_test_fsingle_pack1_Y0.vgtest \ > + insn_test_fsingle_pack2_X0.stdout.exp \ > + insn_test_fsingle_pack2_X0.stderr.exp \ > + insn_test_fsingle_pack2_X0.vgtest \ > + insn_test_fsingle_sub1_X0.stdout.exp \ > + insn_test_fsingle_sub1_X0.stderr.exp \ > + insn_test_fsingle_sub1_X0.vgtest \ > + insn_test_icoh_X1.stdout.exp insn_test_icoh_X1.stderr.exp \ > + insn_test_icoh_X1.vgtest \ > + insn_test_j_X1.stdout.exp insn_test_j_X1.stderr.exp \ > + insn_test_j_X1.vgtest \ > + insn_test_jal_X1.stdout.exp insn_test_jal_X1.stderr.exp \ > + insn_test_jal_X1.vgtest \ > + insn_test_jalr_X1.stdout.exp insn_test_jalr_X1.stderr.exp \ > + insn_test_jalr_X1.vgtest \ > + insn_test_jalr_Y1.stdout.exp insn_test_jalr_Y1.stderr.exp \ > + insn_test_jalr_Y1.vgtest \ > + insn_test_jalrp_X1.stdout.exp insn_test_jalrp_X1.stderr.exp \ > + insn_test_jalrp_X1.vgtest \ > + insn_test_jalrp_Y1.stdout.exp insn_test_jalrp_Y1.stderr.exp \ > + insn_test_jalrp_Y1.vgtest \ > + insn_test_jr_X1.stdout.exp insn_test_jr_X1.stderr.exp \ > + insn_test_jr_X1.vgtest \ > + insn_test_jr_Y1.stdout.exp insn_test_jr_Y1.stderr.exp \ > + insn_test_jr_Y1.vgtest \ > + insn_test_jrp_X1.stdout.exp insn_test_jrp_X1.stderr.exp \ > + insn_test_jrp_X1.vgtest \ > + insn_test_jrp_Y1.stdout.exp insn_test_jrp_Y1.stderr.exp \ > + insn_test_jrp_Y1.vgtest \ > + insn_test_ld_X1.stdout.exp insn_test_ld_X1.stderr.exp \ > + insn_test_ld_X1.vgtest \ > + insn_test_ld_Y2.stdout.exp insn_test_ld_Y2.stderr.exp \ > + insn_test_ld_Y2.vgtest \ > + insn_test_ld1s_X1.stdout.exp insn_test_ld1s_X1.stderr.exp \ > + 
insn_test_ld1s_X1.vgtest \ > + insn_test_ld1s_Y2.stdout.exp insn_test_ld1s_Y2.stderr.exp \ > + insn_test_ld1s_Y2.vgtest \ > + insn_test_ld1s_add_X1.stdout.exp insn_test_ld1s_add_X1.stderr.exp \ > + insn_test_ld1s_add_X1.vgtest \ > + insn_test_ld1u_X1.stdout.exp insn_test_ld1u_X1.stderr.exp \ > + insn_test_ld1u_X1.vgtest \ > + insn_test_ld1u_Y2.stdout.exp insn_test_ld1u_Y2.stderr.exp \ > + insn_test_ld1u_Y2.vgtest \ > + insn_test_ld1u_add_X1.stdout.exp insn_test_ld1u_add_X1.stderr.exp \ > + insn_test_ld1u_add_X1.vgtest \ > + insn_test_ld2s_X1.stdout.exp insn_test_ld2s_X1.stderr.exp \ > + insn_test_ld2s_X1.vgtest \ > + insn_test_ld2s_Y2.stdout.exp insn_test_ld2s_Y2.stderr.exp \ > + insn_test_ld2s_Y2.vgtest \ > + insn_test_ld2u_X1.stdout.exp insn_test_ld2u_X1.stderr.exp \ > + insn_test_ld2u_X1.vgtest \ > + insn_test_ld2u_Y2.stdout.exp insn_test_ld2u_Y2.stderr.exp \ > + insn_test_ld2u_Y2.vgtest \ > + insn_test_ld4s_X1.stdout.exp insn_test_ld4s_X1.stderr.exp \ > + insn_test_ld4s_X1.vgtest \ > + insn_test_ld4s_add_X1.stdout.exp insn_test_ld4s_add_X1.stderr.exp \ > + insn_test_ld4s_add_X1.vgtest \ > + insn_test_ld4u_X1.stdout.exp insn_test_ld4u_X1.stderr.exp \ > + insn_test_ld4u_X1.vgtest \ > + insn_test_ld4u_Y2.stdout.exp insn_test_ld4u_Y2.stderr.exp \ > + insn_test_ld4u_Y2.vgtest \ > + insn_test_ld4u_add_X1.stdout.exp insn_test_ld4u_add_X1.stderr.exp \ > + insn_test_ld4u_add_X1.vgtest \ > + insn_test_ld_add_X1.stdout.exp insn_test_ld_add_X1.stderr.exp \ > + insn_test_ld_add_X1.vgtest \ > + insn_test_ldna_X1.stdout.exp insn_test_ldna_X1.stderr.exp \ > + insn_test_ldna_X1.vgtest \ > + insn_test_ldna_add_X1.stdout.exp insn_test_ldna_add_X1.stderr.exp \ > + insn_test_ldna_add_X1.vgtest \ > + insn_test_ldnt_X1.stdout.exp insn_test_ldnt_X1.stderr.exp \ > + insn_test_ldnt_X1.vgtest \ > + insn_test_ldnt1s_X1.stdout.exp insn_test_ldnt1s_X1.stderr.exp \ > + insn_test_ldnt1s_X1.vgtest \ > + insn_test_ldnt1s_add_X1.stdout.exp insn_test_ldnt1s_add_X1.stderr.exp \ > + 
insn_test_ldnt1s_add_X1.vgtest \ > + insn_test_ldnt1u_X1.stdout.exp insn_test_ldnt1u_X1.stderr.exp \ > + insn_test_ldnt1u_X1.vgtest \ > + insn_test_ldnt1u_add_X1.stdout.exp insn_test_ldnt1u_add_X1.stderr.exp \ > + insn_test_ldnt1u_add_X1.vgtest \ > + insn_test_ldnt2s_X1.stdout.exp insn_test_ldnt2s_X1.stderr.exp \ > + insn_test_ldnt2s_X1.vgtest \ > + insn_test_ldnt2s_add_X1.stdout.exp insn_test_ldnt2s_add_X1.stderr.exp \ > + insn_test_ldnt2s_add_X1.vgtest \ > + insn_test_ldnt2u_add_X1.stdout.exp insn_test_ldnt2u_add_X1.stderr.exp \ > + insn_test_ldnt2u_add_X1.vgtest \ > + insn_test_ldnt4s_X1.stdout.exp insn_test_ldnt4s_X1.stderr.exp \ > + insn_test_ldnt4s_X1.vgtest \ > + insn_test_ldnt4s_add_X1.stdout.exp insn_test_ldnt4s_add_X1.stderr.exp \ > + insn_test_ldnt4s_add_X1.vgtest \ > + insn_test_ldnt4u_X1.stdout.exp insn_test_ldnt4u_X1.stderr.exp \ > + insn_test_ldnt4u_X1.vgtest \ > + insn_test_ldnt4u_add_X1.stdout.exp insn_test_ldnt4u_add_X1.stderr.exp \ > + insn_test_ldnt4u_add_X1.vgtest \ > + insn_test_ldnt_add_X1.stdout.exp insn_test_ldnt_add_X1.stderr.exp \ > + insn_test_ldnt_add_X1.vgtest \ > + insn_test_lnk_X1.stdout.exp insn_test_lnk_X1.stderr.exp \ > + insn_test_lnk_X1.vgtest \ > + insn_test_lnk_Y1.stdout.exp insn_test_lnk_Y1.stderr.exp \ > + insn_test_lnk_Y1.vgtest \ > + insn_test_mf_X1.stdout.exp insn_test_mf_X1.stderr.exp \ > + insn_test_mf_X1.vgtest \ > + insn_test_mm_X0.stdout.exp insn_test_mm_X0.stderr.exp \ > + insn_test_mm_X0.vgtest \ > + insn_test_mnz_X0.stdout.exp insn_test_mnz_X0.stderr.exp \ > + insn_test_mnz_X0.vgtest \ > + insn_test_mnz_X1.stdout.exp insn_test_mnz_X1.stderr.exp \ > + insn_test_mnz_X1.vgtest \ > + insn_test_mnz_Y0.stdout.exp insn_test_mnz_Y0.stderr.exp \ > + insn_test_mnz_Y0.vgtest \ > + insn_test_mnz_Y1.stdout.exp insn_test_mnz_Y1.stderr.exp \ > + insn_test_mnz_Y1.vgtest \ > + insn_test_mul_hs_hs_X0.stdout.exp insn_test_mul_hs_hs_X0.stderr.exp \ > + insn_test_mul_hs_hs_X0.vgtest \ > + insn_test_mul_hs_hs_Y0.stdout.exp 
insn_test_mul_hs_hs_Y0.stderr.exp \ > + insn_test_mul_hs_hs_Y0.vgtest \ > + insn_test_mul_hs_hu_X0.stdout.exp insn_test_mul_hs_hu_X0.stderr.exp \ > + insn_test_mul_hs_hu_X0.vgtest \ > + insn_test_mul_hs_ls_X0.stdout.exp insn_test_mul_hs_ls_X0.stderr.exp \ > + insn_test_mul_hs_ls_X0.vgtest \ > + insn_test_mul_hs_lu_X0.stdout.exp insn_test_mul_hs_lu_X0.stderr.exp \ > + insn_test_mul_hs_lu_X0.vgtest \ > + insn_test_mul_hu_hu_X0.stdout.exp insn_test_mul_hu_hu_X0.stderr.exp \ > + insn_test_mul_hu_hu_X0.vgtest \ > + insn_test_mul_hu_hu_Y0.stdout.exp insn_test_mul_hu_hu_Y0.stderr.exp \ > + insn_test_mul_hu_hu_Y0.vgtest \ > + insn_test_mul_hu_lu_X0.stdout.exp insn_test_mul_hu_lu_X0.stderr.exp \ > + insn_test_mul_hu_lu_X0.vgtest \ > + insn_test_mul_ls_ls_X0.stdout.exp insn_test_mul_ls_ls_X0.stderr.exp \ > + insn_test_mul_ls_ls_X0.vgtest \ > + insn_test_mul_ls_ls_Y0.stdout.exp insn_test_mul_ls_ls_Y0.stderr.exp \ > + insn_test_mul_ls_ls_Y0.vgtest \ > + insn_test_mul_ls_lu_X0.stdout.exp insn_test_mul_ls_lu_X0.stderr.exp \ > + insn_test_mul_ls_lu_X0.vgtest \ > + insn_test_mul_lu_lu_X0.stdout.exp insn_test_mul_lu_lu_X0.stderr.exp \ > + insn_test_mul_lu_lu_X0.vgtest \ > + insn_test_mul_lu_lu_Y0.stdout.exp insn_test_mul_lu_lu_Y0.stderr.exp \ > + insn_test_mul_lu_lu_Y0.vgtest \ > + insn_test_mula_hs_hs_X0.stdout.exp insn_test_mula_hs_hs_X0.stderr.exp \ > + insn_test_mula_hs_hs_X0.vgtest \ > + insn_test_mula_hs_hs_Y0.stdout.exp insn_test_mula_hs_hs_Y0.stderr.exp \ > + insn_test_mula_hs_hs_Y0.vgtest \ > + insn_test_mula_hs_hu_X0.stdout.exp insn_test_mula_hs_hu_X0.stderr.exp \ > + insn_test_mula_hs_hu_X0.vgtest \ > + insn_test_mula_hs_ls_X0.stdout.exp insn_test_mula_hs_ls_X0.stderr.exp \ > + insn_test_mula_hs_ls_X0.vgtest \ > + insn_test_mula_hs_lu_X0.stdout.exp insn_test_mula_hs_lu_X0.stderr.exp \ > + insn_test_mula_hs_lu_X0.vgtest \ > + insn_test_mula_hu_hu_X0.stdout.exp insn_test_mula_hu_hu_X0.stderr.exp \ > + insn_test_mula_hu_hu_X0.vgtest \ > + insn_test_mula_hu_hu_Y0.stdout.exp 
insn_test_mula_hu_hu_Y0.stderr.exp \ > + insn_test_mula_hu_hu_Y0.vgtest \ > + insn_test_mula_hu_ls_X0.stdout.exp insn_test_mula_hu_ls_X0.stderr.exp \ > + insn_test_mula_hu_ls_X0.vgtest \ > + insn_test_mula_hu_lu_X0.stdout.exp insn_test_mula_hu_lu_X0.stderr.exp \ > + insn_test_mula_hu_lu_X0.vgtest \ > + insn_test_mula_ls_ls_X0.stdout.exp insn_test_mula_ls_ls_X0.stderr.exp \ > + insn_test_mula_ls_ls_X0.vgtest \ > + insn_test_mula_ls_ls_Y0.stdout.exp insn_test_mula_ls_ls_Y0.stderr.exp \ > + insn_test_mula_ls_ls_Y0.vgtest \ > + insn_test_mula_ls_lu_X0.stdout.exp insn_test_mula_ls_lu_X0.stderr.exp \ > + insn_test_mula_ls_lu_X0.vgtest \ > + insn_test_mula_lu_lu_X0.stdout.exp insn_test_mula_lu_lu_X0.stderr.exp \ > + insn_test_mula_lu_lu_X0.vgtest \ > + insn_test_mula_lu_lu_Y0.stdout.exp insn_test_mula_lu_lu_Y0.stderr.exp \ > + insn_test_mula_lu_lu_Y0.vgtest \ > + insn_test_mulax_X0.stdout.exp insn_test_mulax_X0.stderr.exp \ > + insn_test_mulax_X0.vgtest \ > + insn_test_mulax_Y0.stdout.exp insn_test_mulax_Y0.stderr.exp \ > + insn_test_mulax_Y0.vgtest \ > + insn_test_mulx_X0.stdout.exp insn_test_mulx_X0.stderr.exp \ > + insn_test_mulx_X0.vgtest \ > + insn_test_mulx_Y0.stdout.exp insn_test_mulx_Y0.stderr.exp \ > + insn_test_mulx_Y0.vgtest \ > + insn_test_mz_X0.stdout.exp insn_test_mz_X0.stderr.exp \ > + insn_test_mz_X0.vgtest \ > + insn_test_mz_X1.stdout.exp insn_test_mz_X1.stderr.exp \ > + insn_test_mz_X1.vgtest \ > + insn_test_mz_Y0.stdout.exp insn_test_mz_Y0.stderr.exp \ > + insn_test_mz_Y0.vgtest \ > + insn_test_mz_Y1.stdout.exp insn_test_mz_Y1.stderr.exp \ > + insn_test_mz_Y1.vgtest \ > + insn_test_nop_X0.stdout.exp insn_test_nop_X0.stderr.exp \ > + insn_test_nop_X0.vgtest \ > + insn_test_nop_X1.stdout.exp insn_test_nop_X1.stderr.exp \ > + insn_test_nop_X1.vgtest \ > + insn_test_nop_Y0.stdout.exp insn_test_nop_Y0.stderr.exp \ > + insn_test_nop_Y0.vgtest \ > + insn_test_nop_Y1.stdout.exp insn_test_nop_Y1.stderr.exp \ > + insn_test_nop_Y1.vgtest \ > + 
insn_test_nor_X0.stdout.exp insn_test_nor_X0.stderr.exp \ > + insn_test_nor_X0.vgtest \ > + insn_test_nor_X1.stdout.exp insn_test_nor_X1.stderr.exp \ > + insn_test_nor_X1.vgtest \ > + insn_test_nor_Y0.stdout.exp insn_test_nor_Y0.stderr.exp \ > + insn_test_nor_Y0.vgtest \ > + insn_test_nor_Y1.stdout.exp insn_test_nor_Y1.stderr.exp \ > + insn_test_nor_Y1.vgtest \ > + insn_test_or_X0.stdout.exp insn_test_or_X0.stderr.exp \ > + insn_test_or_X0.vgtest \ > + insn_test_or_X1.stdout.exp insn_test_or_X1.stderr.exp \ > + insn_test_or_X1.vgtest \ > + insn_test_or_Y0.stdout.exp insn_test_or_Y0.stderr.exp \ > + insn_test_or_Y0.vgtest \ > + insn_test_or_Y1.stdout.exp insn_test_or_Y1.stderr.exp \ > + insn_test_or_Y1.vgtest \ > + insn_test_ori_X0.stdout.exp insn_test_ori_X0.stderr.exp \ > + insn_test_ori_X0.vgtest \ > + insn_test_ori_X1.stdout.exp insn_test_ori_X1.stderr.exp \ > + insn_test_ori_X1.vgtest \ > + insn_test_pcnt_X0.stdout.exp insn_test_pcnt_X0.stderr.exp \ > + insn_test_pcnt_X0.vgtest \ > + insn_test_pcnt_Y0.stdout.exp insn_test_pcnt_Y0.stderr.exp \ > + insn_test_pcnt_Y0.vgtest \ > + insn_test_revbits_X0.stdout.exp insn_test_revbits_X0.stderr.exp \ > + insn_test_revbits_X0.vgtest \ > + insn_test_revbits_Y0.stdout.exp insn_test_revbits_Y0.stderr.exp \ > + insn_test_revbits_Y0.vgtest \ > + insn_test_revbytes_X0.stdout.exp insn_test_revbytes_X0.stderr.exp \ > + insn_test_revbytes_X0.vgtest \ > + insn_test_revbytes_Y0.stdout.exp insn_test_revbytes_Y0.stderr.exp \ > + insn_test_revbytes_Y0.vgtest \ > + insn_test_rotl_X0.stdout.exp insn_test_rotl_X0.stderr.exp \ > + insn_test_rotl_X0.vgtest \ > + insn_test_rotl_X1.stdout.exp insn_test_rotl_X1.stderr.exp \ > + insn_test_rotl_X1.vgtest \ > + insn_test_rotl_Y0.stdout.exp insn_test_rotl_Y0.stderr.exp \ > + insn_test_rotl_Y0.vgtest \ > + insn_test_rotl_Y1.stdout.exp insn_test_rotl_Y1.stderr.exp \ > + insn_test_rotl_Y1.vgtest \ > + insn_test_rotli_X0.stdout.exp insn_test_rotli_X0.stderr.exp \ > + insn_test_rotli_X0.vgtest \ > + 
insn_test_rotli_X1.stdout.exp insn_test_rotli_X1.stderr.exp \ > + insn_test_rotli_X1.vgtest \ > + insn_test_rotli_Y0.stdout.exp insn_test_rotli_Y0.stderr.exp \ > + insn_test_rotli_Y0.vgtest \ > + insn_test_rotli_Y1.stdout.exp insn_test_rotli_Y1.stderr.exp \ > + insn_test_rotli_Y1.vgtest \ > + insn_test_shl_X0.stdout.exp insn_test_shl_X0.stderr.exp \ > + insn_test_shl_X0.vgtest \ > + insn_test_shl_X1.stdout.exp insn_test_shl_X1.stderr.exp \ > + insn_test_shl_X1.vgtest \ > + insn_test_shl_Y0.stdout.exp insn_test_shl_Y0.stderr.exp \ > + insn_test_shl_Y0.vgtest \ > + insn_test_shl_Y1.stdout.exp insn_test_shl_Y1.stderr.exp \ > + insn_test_shl_Y1.vgtest \ > + insn_test_shl16insli_X0.stdout.exp insn_test_shl16insli_X0.stderr.exp \ > + insn_test_shl16insli_X0.vgtest \ > + insn_test_shl16insli_X1.stdout.exp insn_test_shl16insli_X1.stderr.exp \ > + insn_test_shl16insli_X1.vgtest \ > + insn_test_shl1add_X0.stdout.exp insn_test_shl1add_X0.stderr.exp \ > + insn_test_shl1add_X0.vgtest \ > + insn_test_shl1add_X1.stdout.exp insn_test_shl1add_X1.stderr.exp \ > + insn_test_shl1add_X1.vgtest \ > + insn_test_shl1add_Y0.stdout.exp insn_test_shl1add_Y0.stderr.exp \ > + insn_test_shl1add_Y0.vgtest \ > + insn_test_shl1add_Y1.stdout.exp insn_test_shl1add_Y1.stderr.exp \ > + insn_test_shl1add_Y1.vgtest \ > + insn_test_shl1addx_X0.stdout.exp insn_test_shl1addx_X0.stderr.exp \ > + insn_test_shl1addx_X0.vgtest \ > + insn_test_shl1addx_X1.stdout.exp insn_test_shl1addx_X1.stderr.exp \ > + insn_test_shl1addx_X1.vgtest \ > + insn_test_shl1addx_Y0.stdout.exp insn_test_shl1addx_Y0.stderr.exp \ > + insn_test_shl1addx_Y0.vgtest \ > + insn_test_shl1addx_Y1.stdout.exp insn_test_shl1addx_Y1.stderr.exp \ > + insn_test_shl1addx_Y1.vgtest \ > + insn_test_shl2add_X0.stdout.exp insn_test_shl2add_X0.stderr.exp \ > + insn_test_shl2add_X0.vgtest \ > + insn_test_shl2add_X1.stdout.exp insn_test_shl2add_X1.stderr.exp \ > + insn_test_shl2add_X1.vgtest \ > + insn_test_shl2add_Y0.stdout.exp 
insn_test_shl2add_Y0.stderr.exp \ > + insn_test_shl2add_Y0.vgtest \ > + insn_test_shl2add_Y1.stdout.exp insn_test_shl2add_Y1.stderr.exp \ > + insn_test_shl2add_Y1.vgtest \ > + insn_test_shl2addx_X0.stdout.exp insn_test_shl2addx_X0.stderr.exp \ > + insn_test_shl2addx_X0.vgtest \ > + insn_test_shl2addx_X1.stdout.exp insn_test_shl2addx_X1.stderr.exp \ > + insn_test_shl2addx_X1.vgtest \ > + insn_test_shl2addx_Y0.stdout.exp insn_test_shl2addx_Y0.stderr.exp \ > + insn_test_shl2addx_Y0.vgtest \ > + insn_test_shl2addx_Y1.stdout.exp insn_test_shl2addx_Y1.stderr.exp \ > + insn_test_shl2addx_Y1.vgtest \ > + insn_test_shl3add_X0.stdout.exp insn_test_shl3add_X0.stderr.exp \ > + insn_test_shl3add_X0.vgtest \ > + insn_test_shl3add_X1.stdout.exp insn_test_shl3add_X1.stderr.exp \ > + insn_test_shl3add_X1.vgtest \ > + insn_test_shl3add_Y0.stdout.exp insn_test_shl3add_Y0.stderr.exp \ > + insn_test_shl3add_Y0.vgtest \ > + insn_test_shl3add_Y1.stdout.exp insn_test_shl3add_Y1.stderr.exp \ > + insn_test_shl3add_Y1.vgtest \ > + insn_test_shl3addx_X0.stdout.exp insn_test_shl3addx_X0.stderr.exp \ > + insn_test_shl3addx_X0.vgtest \ > + insn_test_shl3addx_X1.stdout.exp insn_test_shl3addx_X1.stderr.exp \ > + insn_test_shl3addx_X1.vgtest \ > + insn_test_shl3addx_Y0.stdout.exp insn_test_shl3addx_Y0.stderr.exp \ > + insn_test_shl3addx_Y0.vgtest \ > + insn_test_shl3addx_Y1.stdout.exp insn_test_shl3addx_Y1.stderr.exp \ > + insn_test_shl3addx_Y1.vgtest \ > + insn_test_shli_X0.stdout.exp insn_test_shli_X0.stderr.exp \ > + insn_test_shli_X0.vgtest \ > + insn_test_shli_X1.stdout.exp insn_test_shli_X1.stderr.exp \ > + insn_test_shli_X1.vgtest \ > + insn_test_shli_Y0.stdout.exp insn_test_shli_Y0.stderr.exp \ > + insn_test_shli_Y0.vgtest \ > + insn_test_shli_Y1.stdout.exp insn_test_shli_Y1.stderr.exp \ > + insn_test_shli_Y1.vgtest \ > + insn_test_shlx_X0.stdout.exp insn_test_shlx_X0.stderr.exp \ > + insn_test_shlx_X0.vgtest \ > + insn_test_shlx_X1.stdout.exp insn_test_shlx_X1.stderr.exp \ > + 
insn_test_shlx_X1.vgtest \ > + insn_test_shlxi_X0.stdout.exp insn_test_shlxi_X0.stderr.exp \ > + insn_test_shlxi_X0.vgtest \ > + insn_test_shlxi_X1.stdout.exp insn_test_shlxi_X1.stderr.exp \ > + insn_test_shlxi_X1.vgtest \ > + insn_test_shrs_X0.stdout.exp insn_test_shrs_X0.stderr.exp \ > + insn_test_shrs_X0.vgtest \ > + insn_test_shrs_X1.stdout.exp insn_test_shrs_X1.stderr.exp \ > + insn_test_shrs_X1.vgtest \ > + insn_test_shrs_Y0.stdout.exp insn_test_shrs_Y0.stderr.exp \ > + insn_test_shrs_Y0.vgtest \ > + insn_test_shrs_Y1.stdout.exp insn_test_shrs_Y1.stderr.exp \ > + insn_test_shrs_Y1.vgtest \ > + insn_test_shrsi_X0.stdout.exp insn_test_shrsi_X0.stderr.exp \ > + insn_test_shrsi_X0.vgtest \ > + insn_test_shrsi_X1.stdout.exp insn_test_shrsi_X1.stderr.exp \ > + insn_test_shrsi_X1.vgtest \ > + insn_test_shrsi_Y0.stdout.exp insn_test_shrsi_Y0.stderr.exp \ > + insn_test_shrsi_Y0.vgtest \ > + insn_test_shrsi_Y1.stdout.exp insn_test_shrsi_Y1.stderr.exp \ > + insn_test_shrsi_Y1.vgtest \ > + insn_test_shru_X0.stdout.exp insn_test_shru_X0.stderr.exp \ > + insn_test_shru_X0.vgtest \ > + insn_test_shru_X1.stdout.exp insn_test_shru_X1.stderr.exp \ > + insn_test_shru_X1.vgtest \ > + insn_test_shru_Y0.stdout.exp insn_test_shru_Y0.stderr.exp \ > + insn_test_shru_Y0.vgtest \ > + insn_test_shru_Y1.stdout.exp insn_test_shru_Y1.stderr.exp \ > + insn_test_shru_Y1.vgtest \ > + insn_test_shrui_X0.stdout.exp insn_test_shrui_X0.stderr.exp \ > + insn_test_shrui_X0.vgtest \ > + insn_test_shrui_X1.stdout.exp insn_test_shrui_X1.stderr.exp \ > + insn_test_shrui_X1.vgtest \ > + insn_test_shrui_Y0.stdout.exp insn_test_shrui_Y0.stderr.exp \ > + insn_test_shrui_Y0.vgtest \ > + insn_test_shrui_Y1.stdout.exp insn_test_shrui_Y1.stderr.exp \ > + insn_test_shrui_Y1.vgtest \ > + insn_test_shrux_X0.stdout.exp insn_test_shrux_X0.stderr.exp \ > + insn_test_shrux_X0.vgtest \ > + insn_test_shrux_X1.stdout.exp insn_test_shrux_X1.stderr.exp \ > + insn_test_shrux_X1.vgtest \ > + 
insn_test_shufflebytes_X0.stdout.exp \ > + insn_test_shufflebytes_X0.stderr.exp \ > + insn_test_shufflebytes_X0.vgtest \ > + insn_test_st_X1.stdout.exp insn_test_st_X1.stderr.exp \ > + insn_test_st_X1.vgtest \ > + insn_test_st_Y2.stdout.exp insn_test_st_Y2.stderr.exp \ > + insn_test_st_Y2.vgtest \ > + insn_test_st1_X1.stdout.exp insn_test_st1_X1.stderr.exp \ > + insn_test_st1_X1.vgtest \ > + insn_test_st1_Y2.stdout.exp insn_test_st1_Y2.stderr.exp \ > + insn_test_st1_Y2.vgtest \ > + insn_test_st1_add_X1.stdout.exp insn_test_st1_add_X1.stderr.exp \ > + insn_test_st1_add_X1.vgtest \ > + insn_test_st2_X1.stdout.exp insn_test_st2_X1.stderr.exp \ > + insn_test_st2_X1.vgtest \ > + insn_test_st2_Y2.stdout.exp insn_test_st2_Y2.stderr.exp \ > + insn_test_st2_Y2.vgtest \ > + insn_test_st2_add_X1.stdout.exp insn_test_st2_add_X1.stderr.exp \ > + insn_test_st2_add_X1.vgtest \ > + insn_test_st4_X1.stdout.exp insn_test_st4_X1.stderr.exp \ > + insn_test_st4_X1.vgtest \ > + insn_test_st4_Y2.stdout.exp insn_test_st4_Y2.stderr.exp \ > + insn_test_st4_Y2.vgtest \ > + insn_test_st4_add_X1.stdout.exp insn_test_st4_add_X1.stderr.exp \ > + insn_test_st4_add_X1.vgtest \ > + insn_test_st_add_X1.stdout.exp insn_test_st_add_X1.stderr.exp \ > + insn_test_st_add_X1.vgtest \ > + insn_test_stnt_X1.stdout.exp insn_test_stnt_X1.stderr.exp \ > + insn_test_stnt_X1.vgtest \ > + insn_test_stnt1_X1.stdout.exp insn_test_stnt1_X1.stderr.exp \ > + insn_test_stnt1_X1.vgtest \ > + insn_test_stnt2_X1.stdout.exp insn_test_stnt2_X1.stderr.exp \ > + insn_test_stnt2_X1.vgtest \ > + insn_test_stnt2_add_X1.stdout.exp insn_test_stnt2_add_X1.stderr.exp \ > + insn_test_stnt2_add_X1.vgtest \ > + insn_test_stnt4_X1.stdout.exp insn_test_stnt4_X1.stderr.exp \ > + insn_test_stnt4_X1.vgtest \ > + insn_test_stnt4_add_X1.stdout.exp insn_test_stnt4_add_X1.stderr.exp \ > + insn_test_stnt4_add_X1.vgtest \ > + insn_test_stnt_add_X1.stdout.exp insn_test_stnt_add_X1.stderr.exp \ > + insn_test_stnt_add_X1.vgtest \ > + 
insn_test_sub_X0.stdout.exp insn_test_sub_X0.stderr.exp \ > + insn_test_sub_X0.vgtest \ > + insn_test_sub_X1.stdout.exp insn_test_sub_X1.stderr.exp \ > + insn_test_sub_X1.vgtest \ > + insn_test_sub_Y0.stdout.exp insn_test_sub_Y0.stderr.exp \ > + insn_test_sub_Y0.vgtest \ > + insn_test_sub_Y1.stdout.exp insn_test_sub_Y1.stderr.exp \ > + insn_test_sub_Y1.vgtest \ > + insn_test_subx_X0.stdout.exp insn_test_subx_X0.stderr.exp \ > + insn_test_subx_X0.vgtest \ > + insn_test_subx_X1.stdout.exp insn_test_subx_X1.stderr.exp \ > + insn_test_subx_X1.vgtest \ > + insn_test_subx_Y0.stdout.exp insn_test_subx_Y0.stderr.exp \ > + insn_test_subx_Y0.vgtest \ > + insn_test_subx_Y1.stdout.exp insn_test_subx_Y1.stderr.exp \ > + insn_test_subx_Y1.vgtest \ > + insn_test_tblidxb0_X0.stdout.exp insn_test_tblidxb0_X0.stderr.exp \ > + insn_test_tblidxb0_X0.vgtest \ > + insn_test_tblidxb0_Y0.stdout.exp insn_test_tblidxb0_Y0.stderr.exp \ > + insn_test_tblidxb0_Y0.vgtest \ > + insn_test_tblidxb1_X0.stdout.exp insn_test_tblidxb1_X0.stderr.exp \ > + insn_test_tblidxb1_X0.vgtest \ > + insn_test_tblidxb1_Y0.stdout.exp insn_test_tblidxb1_Y0.stderr.exp \ > + insn_test_tblidxb1_Y0.vgtest \ > + insn_test_tblidxb2_X0.stdout.exp insn_test_tblidxb2_X0.stderr.exp \ > + insn_test_tblidxb2_X0.vgtest \ > + insn_test_tblidxb2_Y0.stdout.exp insn_test_tblidxb2_Y0.stderr.exp \ > + insn_test_tblidxb2_Y0.vgtest \ > + insn_test_tblidxb3_X0.stdout.exp insn_test_tblidxb3_X0.stderr.exp \ > + insn_test_tblidxb3_X0.vgtest \ > + insn_test_tblidxb3_Y0.stdout.exp insn_test_tblidxb3_Y0.stderr.exp \ > + insn_test_tblidxb3_Y0.vgtest \ > + insn_test_v1add_X0.stdout.exp insn_test_v1add_X0.stderr.exp \ > + insn_test_v1add_X0.vgtest \ > + insn_test_v1add_X1.stdout.exp insn_test_v1add_X1.stderr.exp \ > + insn_test_v1add_X1.vgtest \ > + insn_test_v1adduc_X0.stdout.exp insn_test_v1adduc_X0.stderr.exp \ > + insn_test_v1adduc_X0.vgtest \ > + insn_test_v1adduc_X1.stdout.exp insn_test_v1adduc_X1.stderr.exp \ > + 
insn_test_v1adduc_X1.vgtest \ > + insn_test_v1adiffu_X0.stdout.exp insn_test_v1adiffu_X0.stderr.exp \ > + insn_test_v1adiffu_X0.vgtest \ > + insn_test_v1avgu_X0.stdout.exp insn_test_v1avgu_X0.stderr.exp \ > + insn_test_v1avgu_X0.vgtest \ > + insn_test_v1cmpeq_X0.stdout.exp insn_test_v1cmpeq_X0.stderr.exp \ > + insn_test_v1cmpeq_X0.vgtest \ > + insn_test_v1cmpeq_X1.stdout.exp insn_test_v1cmpeq_X1.stderr.exp \ > + insn_test_v1cmpeq_X1.vgtest \ > + insn_test_v1cmpeqi_X0.stdout.exp insn_test_v1cmpeqi_X0.stderr.exp \ > + insn_test_v1cmpeqi_X0.vgtest \ > + insn_test_v1cmpeqi_X1.stdout.exp insn_test_v1cmpeqi_X1.stderr.exp \ > + insn_test_v1cmpeqi_X1.vgtest \ > + insn_test_v1cmples_X0.stdout.exp insn_test_v1cmples_X0.stderr.exp \ > + insn_test_v1cmples_X0.vgtest \ > + insn_test_v1cmples_X1.stdout.exp insn_test_v1cmples_X1.stderr.exp \ > + insn_test_v1cmples_X1.vgtest \ > + insn_test_v1cmpleu_X0.stdout.exp insn_test_v1cmpleu_X0.stderr.exp \ > + insn_test_v1cmpleu_X0.vgtest \ > + insn_test_v1cmpleu_X1.stdout.exp insn_test_v1cmpleu_X1.stderr.exp \ > + insn_test_v1cmpleu_X1.vgtest \ > + insn_test_v1cmplts_X0.stdout.exp insn_test_v1cmplts_X0.stderr.exp \ > + insn_test_v1cmplts_X0.vgtest \ > + insn_test_v1cmplts_X1.stdout.exp insn_test_v1cmplts_X1.stderr.exp \ > + insn_test_v1cmplts_X1.vgtest \ > + insn_test_v1cmpltu_X0.stdout.exp insn_test_v1cmpltu_X0.stderr.exp \ > + insn_test_v1cmpltu_X0.vgtest \ > + insn_test_v1cmpltu_X1.stdout.exp insn_test_v1cmpltu_X1.stderr.exp \ > + insn_test_v1cmpltu_X1.vgtest \ > + insn_test_v1cmpne_X0.stdout.exp insn_test_v1cmpne_X0.stderr.exp \ > + insn_test_v1cmpne_X0.vgtest \ > + insn_test_v1cmpne_X1.stdout.exp insn_test_v1cmpne_X1.stderr.exp \ > + insn_test_v1cmpne_X1.vgtest \ > + insn_test_v1ddotpu_X0.stdout.exp insn_test_v1ddotpu_X0.stderr.exp \ > + insn_test_v1ddotpu_X0.vgtest \ > + insn_test_v1ddotpua_X0.stdout.exp insn_test_v1ddotpua_X0.stderr.exp \ > + insn_test_v1ddotpua_X0.vgtest \ > + insn_test_v1ddotpus_X0.stdout.exp 
insn_test_v1ddotpus_X0.stderr.exp \ > + insn_test_v1ddotpus_X0.vgtest \ > + insn_test_v1ddotpusa_X0.stdout.exp insn_test_v1ddotpusa_X0.stderr.exp \ > + insn_test_v1ddotpusa_X0.vgtest \ > + insn_test_v1dotp_X0.stdout.exp insn_test_v1dotp_X0.stderr.exp \ > + insn_test_v1dotp_X0.vgtest \ > + insn_test_v1dotpa_X0.stdout.exp insn_test_v1dotpa_X0.stderr.exp \ > + insn_test_v1dotpa_X0.vgtest \ > + insn_test_v1dotpu_X0.stdout.exp insn_test_v1dotpu_X0.stderr.exp \ > + insn_test_v1dotpu_X0.vgtest \ > + insn_test_v1dotpua_X0.stdout.exp insn_test_v1dotpua_X0.stderr.exp \ > + insn_test_v1dotpua_X0.vgtest \ > + insn_test_v1dotpus_X0.stdout.exp insn_test_v1dotpus_X0.stderr.exp \ > + insn_test_v1dotpus_X0.vgtest \ > + insn_test_v1dotpusa_X0.stdout.exp insn_test_v1dotpusa_X0.stderr.exp \ > + insn_test_v1dotpusa_X0.vgtest \ > + insn_test_v1int_h_X0.stdout.exp insn_test_v1int_h_X0.stderr.exp \ > + insn_test_v1int_h_X0.vgtest \ > + insn_test_v1int_h_X1.stdout.exp insn_test_v1int_h_X1.stderr.exp \ > + insn_test_v1int_h_X1.vgtest \ > + insn_test_v1int_l_X0.stdout.exp insn_test_v1int_l_X0.stderr.exp \ > + insn_test_v1int_l_X0.vgtest \ > + insn_test_v1int_l_X1.stdout.exp insn_test_v1int_l_X1.stderr.exp \ > + insn_test_v1int_l_X1.vgtest \ > + insn_test_v1maxu_X0.stdout.exp insn_test_v1maxu_X0.stderr.exp \ > + insn_test_v1maxu_X0.vgtest \ > + insn_test_v1maxu_X1.stdout.exp insn_test_v1maxu_X1.stderr.exp \ > + insn_test_v1maxu_X1.vgtest \ > + insn_test_v1minu_X0.stdout.exp insn_test_v1minu_X0.stderr.exp \ > + insn_test_v1minu_X0.vgtest \ > + insn_test_v1minu_X1.stdout.exp insn_test_v1minu_X1.stderr.exp \ > + insn_test_v1minu_X1.vgtest \ > + insn_test_v1mnz_X0.stdout.exp insn_test_v1mnz_X0.stderr.exp \ > + insn_test_v1mnz_X0.vgtest \ > + insn_test_v1mnz_X1.stdout.exp insn_test_v1mnz_X1.stderr.exp \ > + insn_test_v1mnz_X1.vgtest \ > + insn_test_v1multu_X0.stdout.exp insn_test_v1multu_X0.stderr.exp \ > + insn_test_v1multu_X0.vgtest \ > + insn_test_v1mulu_X0.stdout.exp 
insn_test_v1mulu_X0.stderr.exp \ > + insn_test_v1mulu_X0.vgtest \ > + insn_test_v1mulus_X0.stdout.exp insn_test_v1mulus_X0.stderr.exp \ > + insn_test_v1mulus_X0.vgtest \ > + insn_test_v1mz_X0.stdout.exp insn_test_v1mz_X0.stderr.exp \ > + insn_test_v1mz_X0.vgtest \ > + insn_test_v1mz_X1.stdout.exp insn_test_v1mz_X1.stderr.exp \ > + insn_test_v1mz_X1.vgtest \ > + insn_test_v1sadau_X0.stdout.exp insn_test_v1sadau_X0.stderr.exp \ > + insn_test_v1sadau_X0.vgtest \ > + insn_test_v1sadu_X0.stdout.exp insn_test_v1sadu_X0.stderr.exp \ > + insn_test_v1sadu_X0.vgtest \ > + insn_test_v1shl_X0.stdout.exp insn_test_v1shl_X0.stderr.exp \ > + insn_test_v1shl_X0.vgtest \ > + insn_test_v1shl_X1.stdout.exp insn_test_v1shl_X1.stderr.exp \ > + insn_test_v1shl_X1.vgtest \ > + insn_test_v1shli_X0.stdout.exp insn_test_v1shli_X0.stderr.exp \ > + insn_test_v1shli_X0.vgtest \ > + insn_test_v1shli_X1.stdout.exp insn_test_v1shli_X1.stderr.exp \ > + insn_test_v1shli_X1.vgtest \ > + insn_test_v1shrs_X0.stdout.exp insn_test_v1shrs_X0.stderr.exp \ > + insn_test_v1shrs_X0.vgtest \ > + insn_test_v1shrs_X1.stdout.exp insn_test_v1shrs_X1.stderr.exp \ > + insn_test_v1shrs_X1.vgtest \ > + insn_test_v1shrsi_X0.stdout.exp insn_test_v1shrsi_X0.stderr.exp \ > + insn_test_v1shrsi_X0.vgtest \ > + insn_test_v1shrsi_X1.stdout.exp insn_test_v1shrsi_X1.stderr.exp \ > + insn_test_v1shrsi_X1.vgtest \ > + insn_test_v1shru_X0.stdout.exp insn_test_v1shru_X0.stderr.exp \ > + insn_test_v1shru_X0.vgtest \ > + insn_test_v1shru_X1.stdout.exp insn_test_v1shru_X1.stderr.exp \ > + insn_test_v1shru_X1.vgtest \ > + insn_test_v1shrui_X0.stdout.exp insn_test_v1shrui_X0.stderr.exp \ > + insn_test_v1shrui_X0.vgtest \ > + insn_test_v1shrui_X1.stdout.exp insn_test_v1shrui_X1.stderr.exp \ > + insn_test_v1shrui_X1.vgtest \ > + insn_test_v1sub_X0.stdout.exp insn_test_v1sub_X0.stderr.exp \ > + insn_test_v1sub_X0.vgtest \ > + insn_test_v1sub_X1.stdout.exp insn_test_v1sub_X1.stderr.exp \ > + insn_test_v1sub_X1.vgtest \ > + 
insn_test_v1subuc_X0.stdout.exp insn_test_v1subuc_X0.stderr.exp \ > + insn_test_v1subuc_X0.vgtest \ > + insn_test_v1subuc_X1.stdout.exp insn_test_v1subuc_X1.stderr.exp \ > + insn_test_v1subuc_X1.vgtest \ > + insn_test_v2add_X0.stdout.exp insn_test_v2add_X0.stderr.exp \ > + insn_test_v2add_X0.vgtest \ > + insn_test_v2add_X1.stdout.exp insn_test_v2add_X1.stderr.exp \ > + insn_test_v2add_X1.vgtest \ > + insn_test_v2addsc_X0.stdout.exp insn_test_v2addsc_X0.stderr.exp \ > + insn_test_v2addsc_X0.vgtest \ > + insn_test_v2addsc_X1.stdout.exp insn_test_v2addsc_X1.stderr.exp \ > + insn_test_v2addsc_X1.vgtest \ > + insn_test_v2adiffs_X0.stdout.exp insn_test_v2adiffs_X0.stderr.exp \ > + insn_test_v2adiffs_X0.vgtest \ > + insn_test_v2avgs_X0.stdout.exp insn_test_v2avgs_X0.stderr.exp \ > + insn_test_v2avgs_X0.vgtest \ > + insn_test_v2cmpeq_X0.stdout.exp insn_test_v2cmpeq_X0.stderr.exp \ > + insn_test_v2cmpeq_X0.vgtest \ > + insn_test_v2cmpeq_X1.stdout.exp insn_test_v2cmpeq_X1.stderr.exp \ > + insn_test_v2cmpeq_X1.vgtest \ > + insn_test_v2cmpeqi_X0.stdout.exp insn_test_v2cmpeqi_X0.stderr.exp \ > + insn_test_v2cmpeqi_X0.vgtest \ > + insn_test_v2cmpeqi_X1.stdout.exp insn_test_v2cmpeqi_X1.stderr.exp \ > + insn_test_v2cmpeqi_X1.vgtest \ > + insn_test_v2cmples_X0.stdout.exp insn_test_v2cmples_X0.stderr.exp \ > + insn_test_v2cmples_X0.vgtest \ > + insn_test_v2cmples_X1.stdout.exp insn_test_v2cmples_X1.stderr.exp \ > + insn_test_v2cmples_X1.vgtest \ > + insn_test_v2cmpleu_X0.stdout.exp insn_test_v2cmpleu_X0.stderr.exp \ > + insn_test_v2cmpleu_X0.vgtest \ > + insn_test_v2cmpleu_X1.stdout.exp insn_test_v2cmpleu_X1.stderr.exp \ > + insn_test_v2cmpleu_X1.vgtest \ > + insn_test_v2cmplts_X0.stdout.exp insn_test_v2cmplts_X0.stderr.exp \ > + insn_test_v2cmplts_X0.vgtest \ > + insn_test_v2cmplts_X1.stdout.exp insn_test_v2cmplts_X1.stderr.exp \ > + insn_test_v2cmplts_X1.vgtest \ > + insn_test_v2cmpltsi_X0.stdout.exp insn_test_v2cmpltsi_X0.stderr.exp \ > + insn_test_v2cmpltsi_X0.vgtest \ > + 
insn_test_v2cmpltsi_X1.stdout.exp insn_test_v2cmpltsi_X1.stderr.exp \ > + insn_test_v2cmpltsi_X1.vgtest \ > + insn_test_v2cmpltu_X0.stdout.exp insn_test_v2cmpltu_X0.stderr.exp \ > + insn_test_v2cmpltu_X0.vgtest \ > + insn_test_v2cmpltu_X1.stdout.exp insn_test_v2cmpltu_X1.stderr.exp \ > + insn_test_v2cmpltu_X1.vgtest \ > + insn_test_v2cmpltui_X0.stdout.exp insn_test_v2cmpltui_X0.stderr.exp \ > + insn_test_v2cmpltui_X0.vgtest \ > + insn_test_v2cmpltui_X1.stdout.exp insn_test_v2cmpltui_X1.stderr.exp \ > + insn_test_v2cmpltui_X1.vgtest \ > + insn_test_v2cmpne_X0.stdout.exp insn_test_v2cmpne_X0.stderr.exp \ > + insn_test_v2cmpne_X0.vgtest \ > + insn_test_v2cmpne_X1.stdout.exp insn_test_v2cmpne_X1.stderr.exp \ > + insn_test_v2cmpne_X1.vgtest \ > + insn_test_v2dotp_X0.stdout.exp insn_test_v2dotp_X0.stderr.exp \ > + insn_test_v2dotp_X0.vgtest \ > + insn_test_v2dotpa_X0.stdout.exp insn_test_v2dotpa_X0.stderr.exp \ > + insn_test_v2dotpa_X0.vgtest \ > + insn_test_v2int_h_X0.stdout.exp insn_test_v2int_h_X0.stderr.exp \ > + insn_test_v2int_h_X0.vgtest \ > + insn_test_v2int_h_X1.stdout.exp insn_test_v2int_h_X1.stderr.exp \ > + insn_test_v2int_h_X1.vgtest \ > + insn_test_v2int_l_X0.stdout.exp insn_test_v2int_l_X0.stderr.exp \ > + insn_test_v2int_l_X0.vgtest \ > + insn_test_v2int_l_X1.stdout.exp insn_test_v2int_l_X1.stderr.exp \ > + insn_test_v2int_l_X1.vgtest \ > + insn_test_v2maxs_X0.stdout.exp insn_test_v2maxs_X0.stderr.exp \ > + insn_test_v2maxs_X0.vgtest \ > + insn_test_v2maxs_X1.stdout.exp insn_test_v2maxs_X1.stderr.exp \ > + insn_test_v2maxs_X1.vgtest \ > + insn_test_v2mins_X0.stdout.exp insn_test_v2mins_X0.stderr.exp \ > + insn_test_v2mins_X0.vgtest \ > + insn_test_v2mins_X1.stdout.exp insn_test_v2mins_X1.stderr.exp \ > + insn_test_v2mins_X1.vgtest \ > + insn_test_v2mnz_X0.stdout.exp insn_test_v2mnz_X0.stderr.exp \ > + insn_test_v2mnz_X0.vgtest \ > + insn_test_v2mnz_X1.stdout.exp insn_test_v2mnz_X1.stderr.exp \ > + insn_test_v2mnz_X1.vgtest \ > + 
insn_test_v2mulfsc_X0.stdout.exp insn_test_v2mulfsc_X0.stderr.exp \ > + insn_test_v2mulfsc_X0.vgtest \ > + insn_test_v2muls_X0.stdout.exp insn_test_v2muls_X0.stderr.exp \ > + insn_test_v2muls_X0.vgtest \ > + insn_test_v2mults_X0.stdout.exp insn_test_v2mults_X0.stderr.exp \ > + insn_test_v2mults_X0.vgtest \ > + insn_test_v2mz_X0.stdout.exp insn_test_v2mz_X0.stderr.exp \ > + insn_test_v2mz_X0.vgtest \ > + insn_test_v2mz_X1.stdout.exp insn_test_v2mz_X1.stderr.exp \ > + insn_test_v2mz_X1.vgtest \ > + insn_test_v2packh_X0.stdout.exp insn_test_v2packh_X0.stderr.exp \ > + insn_test_v2packh_X0.vgtest \ > + insn_test_v2packh_X1.stdout.exp insn_test_v2packh_X1.stderr.exp \ > + insn_test_v2packh_X1.vgtest \ > + insn_test_v2packl_X0.stdout.exp insn_test_v2packl_X0.stderr.exp \ > + insn_test_v2packl_X0.vgtest \ > + insn_test_v2packl_X1.stdout.exp insn_test_v2packl_X1.stderr.exp \ > + insn_test_v2packl_X1.vgtest \ > + insn_test_v2packuc_X0.stdout.exp insn_test_v2packuc_X0.stderr.exp \ > + insn_test_v2packuc_X0.vgtest \ > + insn_test_v2packuc_X1.stdout.exp insn_test_v2packuc_X1.stderr.exp \ > + insn_test_v2packuc_X1.vgtest \ > + insn_test_v2sadas_X0.stdout.exp insn_test_v2sadas_X0.stderr.exp \ > + insn_test_v2sadas_X0.vgtest \ > + insn_test_v2sadau_X0.stdout.exp insn_test_v2sadau_X0.stderr.exp \ > + insn_test_v2sadau_X0.vgtest \ > + insn_test_v2sads_X0.stdout.exp insn_test_v2sads_X0.stderr.exp \ > + insn_test_v2sads_X0.vgtest \ > + insn_test_v2sadu_X0.stdout.exp insn_test_v2sadu_X0.stderr.exp \ > + insn_test_v2sadu_X0.vgtest \ > + insn_test_v2shl_X0.stdout.exp insn_test_v2shl_X0.stderr.exp \ > + insn_test_v2shl_X0.vgtest \ > + insn_test_v2shl_X1.stdout.exp insn_test_v2shl_X1.stderr.exp \ > + insn_test_v2shl_X1.vgtest \ > + insn_test_v2shli_X0.stdout.exp insn_test_v2shli_X0.stderr.exp \ > + insn_test_v2shli_X0.vgtest \ > + insn_test_v2shli_X1.stdout.exp insn_test_v2shli_X1.stderr.exp \ > + insn_test_v2shli_X1.vgtest \ > + insn_test_v2shlsc_X0.stdout.exp 
insn_test_v2shlsc_X0.stderr.exp \ > + insn_test_v2shlsc_X0.vgtest \ > + insn_test_v2shlsc_X1.stdout.exp insn_test_v2shlsc_X1.stderr.exp \ > + insn_test_v2shlsc_X1.vgtest \ > + insn_test_v2shrs_X0.stdout.exp insn_test_v2shrs_X0.stderr.exp \ > + insn_test_v2shrs_X0.vgtest \ > + insn_test_v2shrs_X1.stdout.exp insn_test_v2shrs_X1.stderr.exp \ > + insn_test_v2shrs_X1.vgtest \ > + insn_test_v2shrsi_X0.stdout.exp insn_test_v2shrsi_X0.stderr.exp \ > + insn_test_v2shrsi_X0.vgtest \ > + insn_test_v2shrsi_X1.stdout.exp insn_test_v2shrsi_X1.stderr.exp \ > + insn_test_v2shrsi_X1.vgtest \ > + insn_test_v2shru_X0.stdout.exp insn_test_v2shru_X0.stderr.exp \ > + insn_test_v2shru_X0.vgtest \ > + insn_test_v2shru_X1.stdout.exp insn_test_v2shru_X1.stderr.exp \ > + insn_test_v2shru_X1.vgtest \ > + insn_test_v2shrui_X0.stdout.exp insn_test_v2shrui_X0.stderr.exp \ > + insn_test_v2shrui_X0.vgtest \ > + insn_test_v2shrui_X1.stdout.exp insn_test_v2shrui_X1.stderr.exp \ > + insn_test_v2shrui_X1.vgtest \ > + insn_test_v2sub_X0.stdout.exp insn_test_v2sub_X0.stderr.exp \ > + insn_test_v2sub_X0.vgtest \ > + insn_test_v2sub_X1.stdout.exp insn_test_v2sub_X1.stderr.exp \ > + insn_test_v2sub_X1.vgtest \ > + insn_test_v2subsc_X0.stdout.exp insn_test_v2subsc_X0.stderr.exp \ > + insn_test_v2subsc_X0.vgtest \ > + insn_test_v2subsc_X1.stdout.exp insn_test_v2subsc_X1.stderr.exp \ > + insn_test_v2subsc_X1.vgtest \ > + insn_test_v4add_X0.stdout.exp insn_test_v4add_X0.stderr.exp \ > + insn_test_v4add_X0.vgtest \ > + insn_test_v4add_X1.stdout.exp insn_test_v4add_X1.stderr.exp \ > + insn_test_v4add_X1.vgtest \ > + insn_test_v4addsc_X0.stdout.exp insn_test_v4addsc_X0.stderr.exp \ > + insn_test_v4addsc_X0.vgtest \ > + insn_test_v4addsc_X1.stdout.exp insn_test_v4addsc_X1.stderr.exp \ > + insn_test_v4addsc_X1.vgtest \ > + insn_test_v4int_h_X0.stdout.exp insn_test_v4int_h_X0.stderr.exp \ > + insn_test_v4int_h_X0.vgtest \ > + insn_test_v4int_h_X1.stdout.exp insn_test_v4int_h_X1.stderr.exp \ > + 
insn_test_v4int_h_X1.vgtest \ > + insn_test_v4int_l_X0.stdout.exp insn_test_v4int_l_X0.stderr.exp \ > + insn_test_v4int_l_X0.vgtest \ > + insn_test_v4int_l_X1.stdout.exp insn_test_v4int_l_X1.stderr.exp \ > + insn_test_v4int_l_X1.vgtest \ > + insn_test_v4packsc_X0.stdout.exp insn_test_v4packsc_X0.stderr.exp \ > + insn_test_v4packsc_X0.vgtest \ > + insn_test_v4packsc_X1.stdout.exp insn_test_v4packsc_X1.stderr.exp \ > + insn_test_v4packsc_X1.vgtest \ > + insn_test_v4shl_X0.stdout.exp insn_test_v4shl_X0.stderr.exp \ > + insn_test_v4shl_X0.vgtest \ > + insn_test_v4shl_X1.stdout.exp insn_test_v4shl_X1.stderr.exp \ > + insn_test_v4shl_X1.vgtest \ > + insn_test_v4shlsc_X0.stdout.exp insn_test_v4shlsc_X0.stderr.exp \ > + insn_test_v4shlsc_X0.vgtest \ > + insn_test_v4shlsc_X1.stdout.exp insn_test_v4shlsc_X1.stderr.exp \ > + insn_test_v4shlsc_X1.vgtest \ > + insn_test_v4shrs_X0.stdout.exp insn_test_v4shrs_X0.stderr.exp \ > + insn_test_v4shrs_X0.vgtest \ > + insn_test_v4shrs_X1.stdout.exp insn_test_v4shrs_X1.stderr.exp \ > + insn_test_v4shrs_X1.vgtest \ > + insn_test_v4shru_X0.stdout.exp insn_test_v4shru_X0.stderr.exp \ > + insn_test_v4shru_X0.vgtest \ > + insn_test_v4shru_X1.stdout.exp insn_test_v4shru_X1.stderr.exp \ > + insn_test_v4shru_X1.vgtest \ > + insn_test_v4sub_X0.stdout.exp insn_test_v4sub_X0.stderr.exp \ > + insn_test_v4sub_X0.vgtest \ > + insn_test_v4sub_X1.stdout.exp insn_test_v4sub_X1.stderr.exp \ > + insn_test_v4sub_X1.vgtest \ > + insn_test_v4subsc_X0.stdout.exp insn_test_v4subsc_X0.stderr.exp \ > + insn_test_v4subsc_X0.vgtest \ > + insn_test_v4subsc_X1.stdout.exp insn_test_v4subsc_X1.stderr.exp \ > + insn_test_v4subsc_X1.vgtest \ > + insn_test_wh64_X1.stdout.exp insn_test_wh64_X1.stderr.exp \ > + insn_test_wh64_X1.vgtest \ > + insn_test_xor_X0.stdout.exp insn_test_xor_X0.stderr.exp \ > + insn_test_xor_X0.vgtest \ > + insn_test_xor_X1.stdout.exp insn_test_xor_X1.stderr.exp \ > + insn_test_xor_X1.vgtest \ > + insn_test_xor_Y0.stdout.exp 
insn_test_xor_Y0.stderr.exp \ > + insn_test_xor_Y0.vgtest \ > + insn_test_xor_Y1.stdout.exp insn_test_xor_Y1.stderr.exp \ > + insn_test_xor_Y1.vgtest \ > + insn_test_xori_X0.stdout.exp insn_test_xori_X0.stderr.exp \ > + insn_test_xori_X0.vgtest \ > + insn_test_xori_X1.stdout.exp insn_test_xori_X1.stderr.exp \ > + insn_test_xori_X1.vgtest > + > +bin_PROGRAMS = gen_insn_test > + > +insn_tests = \ > + insn_test_move_X0 \ > + insn_test_move_X1 \ > + insn_test_move_Y0 \ > + insn_test_move_Y1 \ > + insn_test_movei_X0 \ > + insn_test_movei_X1 \ > + insn_test_movei_Y0 \ > + insn_test_movei_Y1 \ > + insn_test_moveli_X0 \ > + insn_test_moveli_X1 \ > + insn_test_prefetch_X1 \ > + insn_test_prefetch_Y2 \ > + insn_test_prefetch_l1_X1 \ > + insn_test_prefetch_l1_Y2 \ > + insn_test_prefetch_l2_X1 \ > + insn_test_prefetch_l2_Y2 \ > + insn_test_prefetch_l3_X1 \ > + insn_test_prefetch_l3_Y2 \ > + insn_test_add_X0 \ > + insn_test_add_X1 \ > + insn_test_add_Y0 \ > + insn_test_add_Y1 \ > + insn_test_addi_X0 \ > + insn_test_addi_X1 \ > + insn_test_addi_Y0 \ > + insn_test_addi_Y1 \ > + insn_test_addli_X0 \ > + insn_test_addli_X1 \ > + insn_test_addx_X0 \ > + insn_test_addx_X1 \ > + insn_test_addx_Y0 \ > + insn_test_addx_Y1 \ > + insn_test_addxi_X0 \ > + insn_test_addxi_X1 \ > + insn_test_addxi_Y0 \ > + insn_test_addxi_Y1 \ > + insn_test_addxli_X0 \ > + insn_test_addxli_X1 \ > + insn_test_addxsc_X0 \ > + insn_test_addxsc_X1 \ > + insn_test_and_X0 \ > + insn_test_and_X1 \ > + insn_test_and_Y0 \ > + insn_test_and_Y1 \ > + insn_test_andi_X0 \ > + insn_test_andi_X1 \ > + insn_test_andi_Y0 \ > + insn_test_andi_Y1 \ > + insn_test_beqz_X1 \ > + insn_test_beqzt_X1 \ > + insn_test_bfexts_X0 \ > + insn_test_bfextu_X0 \ > + insn_test_bfins_X0 \ > + insn_test_bgez_X1 \ > + insn_test_bgezt_X1 \ > + insn_test_bgtz_X1 \ > + insn_test_bgtzt_X1 \ > + insn_test_blbc_X1 \ > + insn_test_blbct_X1 \ > + insn_test_blbs_X1 \ > + insn_test_blbst_X1 \ > + insn_test_blez_X1 \ > + insn_test_blezt_X1 \ > + 
insn_test_bltz_X1 \ > + insn_test_bltzt_X1 \ > + insn_test_bnez_X1 \ > + insn_test_bnezt_X1 \ > + insn_test_clz_X0 \ > + insn_test_clz_Y0 \ > + insn_test_cmoveqz_X0 \ > + insn_test_cmoveqz_Y0 \ > + insn_test_cmovnez_X0 \ > + insn_test_cmovnez_Y0 \ > + insn_test_cmpeq_X0 \ > + insn_test_cmpeq_X1 \ > + insn_test_cmpeq_Y0 \ > + insn_test_cmpeq_Y1 \ > + insn_test_cmpeqi_X0 \ > + insn_test_cmpeqi_X1 \ > + insn_test_cmpeqi_Y0 \ > + insn_test_cmpeqi_Y1 \ > + insn_test_cmples_X0 \ > + insn_test_cmples_X1 \ > + insn_test_cmples_Y0 \ > + insn_test_cmples_Y1 \ > + insn_test_cmpleu_X0 \ > + insn_test_cmpleu_X1 \ > + insn_test_cmpleu_Y0 \ > + insn_test_cmpleu_Y1 \ > + insn_test_cmplts_X0 \ > + insn_test_cmplts_X1 \ > + insn_test_cmplts_Y0 \ > + insn_test_cmplts_Y1 \ > + insn_test_cmpltsi_X0 \ > + insn_test_cmpltsi_X1 \ > + insn_test_cmpltsi_Y0 \ > + insn_test_cmpltsi_Y1 \ > + insn_test_cmpltu_X0 \ > + insn_test_cmpltu_X1 \ > + insn_test_cmpltu_Y0 \ > + insn_test_cmpltu_Y1 \ > + insn_test_cmpltui_X0 \ > + insn_test_cmpltui_X1 \ > + insn_test_cmpne_X0 \ > + insn_test_cmpne_X1 \ > + insn_test_cmpne_Y0 \ > + insn_test_cmpne_Y1 \ > + insn_test_cmul_X0 \ > + insn_test_cmula_X0 \ > + insn_test_cmulaf_X0 \ > + insn_test_cmulf_X0 \ > + insn_test_cmulfr_X0 \ > + insn_test_cmulh_X0 \ > + insn_test_cmulhr_X0 \ > + insn_test_crc32_32_X0 \ > + insn_test_crc32_8_X0 \ > + insn_test_ctz_X0 \ > + insn_test_ctz_Y0 \ > + insn_test_dblalign_X0 \ > + insn_test_dblalign2_X0 \ > + insn_test_dblalign2_X1 \ > + insn_test_dblalign4_X0 \ > + insn_test_dblalign4_X1 \ > + insn_test_dblalign6_X0 \ > + insn_test_dblalign6_X1 \ > + insn_test_dtlbpr_X1 \ > + insn_test_fdouble_add_flags_X0 \ > + insn_test_fdouble_addsub_X0 \ > + insn_test_fdouble_mul_flags_X0 \ > + insn_test_fdouble_pack1_X0 \ > + insn_test_fdouble_pack2_X0 \ > + insn_test_fdouble_sub_flags_X0 \ > + insn_test_fdouble_unpack_max_X0 \ > + insn_test_fdouble_unpack_min_X0 \ > + insn_test_flushwb_X1 \ > + insn_test_fnop_X0 \ > + insn_test_fnop_X1 \ > 
+ insn_test_fnop_Y0 \ > + insn_test_fnop_Y1 \ > + insn_test_fsingle_add1_X0 \ > + insn_test_fsingle_addsub2_X0 \ > + insn_test_fsingle_mul1_X0 \ > + insn_test_fsingle_mul2_X0 \ > + insn_test_fsingle_pack1_X0 \ > + insn_test_fsingle_pack1_Y0 \ > + insn_test_fsingle_pack2_X0 \ > + insn_test_fsingle_sub1_X0 \ > + insn_test_icoh_X1 \ > + insn_test_j_X1 \ > + insn_test_jal_X1 \ > + insn_test_jalr_X1 \ > + insn_test_jalr_Y1 \ > + insn_test_jalrp_X1 \ > + insn_test_jalrp_Y1 \ > + insn_test_jr_X1 \ > + insn_test_jr_Y1 \ > + insn_test_jrp_X1 \ > + insn_test_jrp_Y1 \ > + insn_test_ld_X1 \ > + insn_test_ld_Y2 \ > + insn_test_ld1s_X1 \ > + insn_test_ld1s_Y2 \ > + insn_test_ld1s_add_X1 \ > + insn_test_ld1u_X1 \ > + insn_test_ld1u_Y2 \ > + insn_test_ld1u_add_X1 \ > + insn_test_ld2s_X1 \ > + insn_test_ld2s_Y2 \ > + insn_test_ld2u_X1 \ > + insn_test_ld2u_Y2 \ > + insn_test_ld4s_X1 \ > + insn_test_ld4s_add_X1 \ > + insn_test_ld4u_X1 \ > + insn_test_ld4u_Y2 \ > + insn_test_ld4u_add_X1 \ > + insn_test_ld_add_X1 \ > + insn_test_ldna_X1 \ > + insn_test_ldna_add_X1 \ > + insn_test_ldnt_X1 \ > + insn_test_ldnt1s_X1 \ > + insn_test_ldnt1s_add_X1 \ > + insn_test_ldnt1u_X1 \ > + insn_test_ldnt1u_add_X1 \ > + insn_test_ldnt2s_X1 \ > + insn_test_ldnt2s_add_X1 \ > + insn_test_ldnt2u_add_X1 \ > + insn_test_ldnt4s_X1 \ > + insn_test_ldnt4s_add_X1 \ > + insn_test_ldnt4u_X1 \ > + insn_test_ldnt4u_add_X1 \ > + insn_test_ldnt_add_X1 \ > + insn_test_lnk_X1 \ > + insn_test_lnk_Y1 \ > + insn_test_mf_X1 \ > + insn_test_mm_X0 \ > + insn_test_mnz_X0 \ > + insn_test_mnz_X1 \ > + insn_test_mnz_Y0 \ > + insn_test_mnz_Y1 \ > + insn_test_mul_hs_hs_X0 \ > + insn_test_mul_hs_hs_Y0 \ > + insn_test_mul_hs_hu_X0 \ > + insn_test_mul_hs_ls_X0 \ > + insn_test_mul_hs_lu_X0 \ > + insn_test_mul_hu_hu_X0 \ > + insn_test_mul_hu_hu_Y0 \ > + insn_test_mul_hu_lu_X0 \ > + insn_test_mul_ls_ls_X0 \ > + insn_test_mul_ls_ls_Y0 \ > + insn_test_mul_ls_lu_X0 \ > + insn_test_mul_lu_lu_X0 \ > + insn_test_mul_lu_lu_Y0 \ > + 
insn_test_mula_hs_hs_X0 \ > + insn_test_mula_hs_hs_Y0 \ > + insn_test_mula_hs_hu_X0 \ > + insn_test_mula_hs_ls_X0 \ > + insn_test_mula_hs_lu_X0 \ > + insn_test_mula_hu_hu_X0 \ > + insn_test_mula_hu_hu_Y0 \ > + insn_test_mula_hu_ls_X0 \ > + insn_test_mula_hu_lu_X0 \ > + insn_test_mula_ls_ls_X0 \ > + insn_test_mula_ls_ls_Y0 \ > + insn_test_mula_ls_lu_X0 ... [truncated message content] |
|
From: <sv...@va...> - 2015-08-03 06:17:20
|
Author: rhyskidd
Date: Mon Aug 3 07:17:13 2015
New Revision: 15476
Log:
Resolve suppressions that changed in OS X 10.11 (DP5). n-i-bz
Modified:
trunk/darwin15.supp
Modified: trunk/darwin15.supp
==============================================================================
--- trunk/darwin15.supp (original)
+++ trunk/darwin15.supp Mon Aug 3 07:17:13 2015
@@ -35,15 +35,14 @@
...
}
-#{
-# OSX1011:4-Leak
-# Memcheck:Leak
-# match-leak-kinds: reachable
-# fun:malloc_zone_?alloc
-# ...
-# fun:dyld_register_image_state_change_handler
-# ...
-#}
+{
+ OSX1011:4-Leak
+ Memcheck:Leak
+ fun:malloc_zone_?alloc
+ ...
+ fun:dyld_register_image_state_change_handler
+ ...
+}
{
OSX1011:5-Leak
@@ -62,7 +61,7 @@
fun:malloc_zone_?alloc
...
fun:map_images_nolock
- fun:map_hax_images
+ fun:map_2_images
...
}
@@ -73,7 +72,7 @@
fun:malloc_zone_?alloc
...
fun:map_images_nolock
- fun:map_hax_images
+ fun:map_2_images
...
}
@@ -500,13 +499,15 @@
obj:/usr/lib/libz.*dylib
}
-#{
-# OSX1011:32bit:_libxpc_initializer
-# Memcheck:Cond
-# fun:_libxpc_initializer
-# fun:libSystem_initializer
-# fun:*ImageLoaderMachO*doModInitFunctions*
-#}
+{
+ OSX1011:32bit:_libxpc_initializer
+ Memcheck:Cond
+ obj:/usr/lib/system/libsystem_c.dylib
+ obj:/usr/lib/system/libsystem_c.dylib
+ fun:_libxpc_initializer
+ obj:/usr/lib/libSystem.B.dylib
+ fun:*ImageLoaderMachO*doModInitFunctions*
+}
{
OSX1011:dyld-1
|
|
From: <sv...@va...> - 2015-08-03 02:14:26
|
Author: rhyskidd
Date: Mon Aug 3 03:14:17 2015
New Revision: 15475
Log:
OS X: Suppress newly introduced system library errors in OS X 10.11 (DP5). n-i-bz
Modified:
trunk/darwin15.supp
Modified: trunk/darwin15.supp
==============================================================================
--- trunk/darwin15.supp (original)
+++ trunk/darwin15.supp Mon Aug 3 03:14:17 2015
@@ -13,14 +13,14 @@
fun:_read_images
}
-#{
-# OSX1011:2-Leak
-# Memcheck:Leak
-# match-leak-kinds: definite
-# fun:?alloc
-# ...
-# fun:_ZN4dyld24initializeMainExecutableEv
-#}
+{
+ OSX1011:2-Leak
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc_zone_?alloc
+ ...
+ fun:_read_images
+}
{
OSX1011:3-Leak
@@ -202,6 +202,16 @@
...
}
+{
+ OSX1011:21-Leak
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc_zone_memalign
+ ...
+ fun:_ZN4dyld24initializeMainExecutableEv
+ ...
+}
+
############################################
## Non-leak errors
|