You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
1
(17) |
2
(15) |
3
(36) |
4
(24) |
5
(36) |
|
6
(18) |
7
(16) |
8
(18) |
9
(19) |
10
(18) |
11
(37) |
12
(18) |
|
13
(13) |
14
(21) |
15
(27) |
16
(10) |
17
(16) |
18
(25) |
19
(21) |
|
20
(11) |
21
(14) |
22
(6) |
23
(15) |
24
(27) |
25
(3) |
26
(9) |
|
27
(16) |
28
(24) |
29
(21) |
30
(43) |
31
(42) |
|
|
|
From: Jeremy F. <je...@go...> - 2005-03-03 21:38:28
|
CVS commit by fitzhardinge:
Generate a SIGILL for the complex forms of the ENTER instruction, rather than
getting an assertion failure.
M +4 -0 vg_to_ucode.c 1.157
--- valgrind/coregrind/vg_to_ucode.c #1.156:1.157
@@ -5336,4 +5336,8 @@ static Addr disInstr ( UCodeBlock* cb, A
abyte = getUChar(eip); eip++;
+ if (sz != 4 || abyte != 0) {
+ VG_(message)(Vg_UserMsg, "Can't handle complex forms of ENTER");
+ goto decode_failure;
+ }
vg_assert(sz == 4);
vg_assert(abyte == 0);
|
|
From: Jeremy F. <je...@go...> - 2005-03-03 21:37:31
|
CVS commit by fitzhardinge:
Add a --leak-check=summary command line option. Full display of leaked memory
is enabled with --leak-check=full (though =yes still works). I haven't changed
the default setting yet.
M +3 -3 addrcheck/ac_main.c 1.79
M +0 -1 addrcheck/tests/leak-0.stderr.exp 1.3
M +0 -1 addrcheck/tests/leak-regroot.stderr.exp 1.3
M +140 -91 memcheck/mac_leakcheck.c 1.22
M +24 -16 memcheck/mac_needs.c 1.36
M +13 -4 memcheck/mac_shared.h 1.37
M +3 -3 memcheck/mc_main.c 1.67
M +9 -0 memcheck/memcheck.h 1.22
M +16 -7 memcheck/docs/mc_main.html 1.15
M +0 -1 memcheck/tests/leak-0.stderr.exp 1.3
M +0 -1 memcheck/tests/leak-regroot.stderr.exp 1.3
M +0 -2 memcheck/tests/pointer-trace.stderr.exp 1.5
--- valgrind/addrcheck/ac_main.c #1.78:1.79
@@ -1153,7 +1153,7 @@ Bool ac_is_valid_address ( Addr a )
run the generic leak detector with suitable parameters for this
tool. */
-static void ac_detect_memory_leaks ( void )
+static void ac_detect_memory_leaks ( LeakCheckMode mode )
{
- MAC_(do_detect_memory_leaks) ( ac_is_valid_64k_chunk, ac_is_valid_address );
+ MAC_(do_detect_memory_leaks) ( mode, ac_is_valid_64k_chunk, ac_is_valid_address );
}
@@ -1219,5 +1219,5 @@ Bool SK_(handle_client_request) ( Thread
switch (arg[0]) {
case VG_USERREQ__DO_LEAK_CHECK:
- ac_detect_memory_leaks();
+ ac_detect_memory_leaks(arg[1] ? LC_Summary : LC_Full);
*ret = 0; /* return value is meaningless */
break;
--- valgrind/memcheck/mac_shared.h #1.36:1.37
@@ -267,5 +267,13 @@ extern Int MAC_(clo_freelist_vol);
/* Do leak check at exit? default: NO */
-extern Bool MAC_(clo_leak_check);
+typedef
+ enum {
+ LC_Off,
+ LC_Summary,
+ LC_Full,
+ }
+ LeakCheckMode;
+
+extern LeakCheckMode MAC_(clo_leak_check);
/* How closely should we compare ExeContexts in leak records? default: 2 */
@@ -350,5 +358,5 @@ extern MAC_Chunk* MAC_(first_matching_fr
extern void MAC_(common_pre_clo_init) ( void );
-extern void MAC_(common_fini) ( void (*leak_check)(void) );
+extern void MAC_(common_fini) ( void (*leak_check)(LeakCheckMode mode) );
extern Bool MAC_(handle_common_client_requests) ( ThreadId tid,
@@ -362,6 +370,7 @@ extern void MAC_(pp_LeakError)(void* vl,
extern void MAC_(do_detect_memory_leaks) (
- Bool is_valid_64k_chunk ( UInt ),
- Bool is_valid_address ( Addr )
+ LeakCheckMode mode,
+ Bool (*is_valid_64k_chunk) ( UInt ),
+ Bool (*is_valid_address) ( Addr )
);
--- valgrind/memcheck/mac_needs.c #1.35:1.36
@@ -50,5 +50,5 @@
Bool MAC_(clo_partial_loads_ok) = True;
Int MAC_(clo_freelist_vol) = 1000000;
-Bool MAC_(clo_leak_check) = False;
+LeakCheckMode MAC_(clo_leak_check) = LC_Off;
VgRes MAC_(clo_leak_resolution) = Vg_LowRes;
Bool MAC_(clo_show_reachable) = False;
@@ -57,6 +57,5 @@ Bool MAC_(clo_workaround_gcc296_bugs) =
Bool MAC_(process_common_cmd_line_option)(Char* arg)
{
- VG_BOOL_CLO("--leak-check", MAC_(clo_leak_check))
- else VG_BOOL_CLO("--partial-loads-ok", MAC_(clo_partial_loads_ok))
+ VG_BOOL_CLO("--partial-loads-ok", MAC_(clo_partial_loads_ok))
else VG_BOOL_CLO("--show-reachable", MAC_(clo_show_reachable))
else VG_BOOL_CLO("--workaround-gcc296-bugs",MAC_(clo_workaround_gcc296_bugs))
@@ -64,4 +63,12 @@ Bool MAC_(process_common_cmd_line_option
else VG_BNUM_CLO("--freelist-vol", MAC_(clo_freelist_vol), 0, 1000000000)
+ else if (VG_CLO_STREQ(arg, "--leak-check=no"))
+ MAC_(clo_leak_check) = LC_Off;
+ else if (VG_CLO_STREQ(arg, "--leak-check=summary"))
+ MAC_(clo_leak_check) = LC_Summary;
+ else if (VG_CLO_STREQ(arg, "--leak-check=yes") ||
+ VG_CLO_STREQ(arg, "--leak-check=full"))
+ MAC_(clo_leak_check) = LC_Full;
+
else if (VG_CLO_STREQ(arg, "--leak-resolution=low"))
MAC_(clo_leak_resolution) = Vg_LowRes;
@@ -82,5 +89,5 @@ void MAC_(print_common_usage)(void)
" --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
" --freelist-vol=<number> volume of freed blocks queue [1000000]\n"
-" --leak-check=no|yes search for memory leaks at exit? [no]\n"
+" --leak-check=no|summary|full search for memory leaks at exit? [no]\n"
" --leak-resolution=low|med|high how much bt merging in leak check [low]\n"
" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
@@ -831,10 +838,10 @@ void MAC_(common_pre_clo_init)(void)
}
-void MAC_(common_fini)(void (*leak_check)(void))
+void MAC_(common_fini)(void (*leak_check)(LeakCheckMode mode))
{
MAC_(print_malloc_stats)();
if (VG_(clo_verbosity) == 1) {
- if (!MAC_(clo_leak_check))
+ if (MAC_(clo_leak_check) == LC_Off)
VG_(message)(Vg_UserMsg,
"For a detailed leak analysis, rerun with: --leak-check=yes");
@@ -843,5 +850,6 @@ void MAC_(common_fini)(void (*leak_check
"For counts of detected errors, rerun with: -v");
}
- if (MAC_(clo_leak_check)) leak_check();
+ if (MAC_(clo_leak_check) != LC_Off)
+ (*leak_check)(MAC_(clo_leak_check));
done_prof_mem();
--- valgrind/memcheck/mac_leakcheck.c #1.21:1.22
@@ -395,90 +395,17 @@ static void lc_do_leakcheck(Int clique)
}
-/* Top level entry point to leak detector. Call here, passing in
- suitable address-validating functions (see comment at top of
- vg_scan_all_valid_memory above). All this is to avoid duplication
- of the leak-detection code for Memcheck and Addrcheck.
- Also pass in a tool-specific function to extract the .where field
- for allocated blocks, an indication of the resolution wanted for
- distinguishing different allocation points, and whether or not
- reachable blocks should be shown.
-*/
-void MAC_(do_detect_memory_leaks) (
- Bool is_valid_64k_chunk ( UInt ),
- Bool is_valid_address ( Addr )
-)
+static Int blocks_leaked;
+static Int blocks_indirect;
+static Int blocks_dubious;
+static Int blocks_reachable;
+static Int blocks_suppressed;
+
+static void full_report()
{
Int i;
- Int blocks_leaked;
- Int blocks_indirect;
- Int blocks_dubious;
- Int blocks_reachable;
- Int blocks_suppressed;
Int n_lossrecords;
- Bool is_suppressed;
-
LossRecord* errlist;
LossRecord* p;
-
- /* VG_(HT_to_array) allocates storage for shadows */
- lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
- &lc_n_shadows );
-
- /* Sort the array. */
- VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);
-
- /* Sanity check; assert that the blocks are now in order */
- for (i = 0; i < lc_n_shadows-1; i++) {
- sk_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
- }
-
- /* Sanity check -- make sure they don't overlap */
- for (i = 0; i < lc_n_shadows-1; i++) {
- sk_assert( lc_shadows[i]->data + lc_shadows[i]->size
- < lc_shadows[i+1]->data );
- }
-
- if (lc_n_shadows == 0) {
- sk_assert(lc_shadows == NULL);
- if (VG_(clo_verbosity) >= 1) {
- VG_(message)(Vg_UserMsg,
- "No malloc'd blocks -- no leaks are possible.");
- }
- return;
- }
-
- if (VG_(clo_verbosity) > 0)
- VG_(message)(Vg_UserMsg,
- "searching for pointers to %d not-freed blocks.",
- lc_n_shadows );
-
- lc_min_mallocd_addr = lc_shadows[0]->data;
- lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
- + lc_shadows[lc_n_shadows-1]->size;
-
- lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
- for (i = 0; i < lc_n_shadows; i++) {
- lc_markstack[i].next = -1;
- lc_markstack[i].state = Unreached;
- lc_markstack[i].indirect = 0;
- }
- lc_markstack_top = -1;
-
- lc_is_valid_chunk = is_valid_64k_chunk;
- lc_is_valid_address = is_valid_address;
-
- lc_scanned = 0;
-
- /* Do the scan of memory, pushing any pointers onto the mark stack */
- VG_(find_root_memory)(lc_scan_memory);
-
- /* Push registers onto mark stack */
- VG_(mark_from_registers)(lc_markstack_push);
-
- /* Keep walking the heap until everything is found */
- lc_do_leakcheck(-1);
-
- if (VG_(clo_verbosity) > 0)
- VG_(message)(Vg_UserMsg, "checked %d bytes.", lc_scanned);
+ Bool is_suppressed;
/* Go through and group lost structures into cliques. For each
@@ -544,10 +471,4 @@ void MAC_(do_detect_memory_leaks) (
/* Print out the commoned-up blocks and collect summary stats. */
- blocks_leaked = MAC_(bytes_leaked) = 0;
- blocks_indirect = MAC_(bytes_indirect) = 0;
- blocks_dubious = MAC_(bytes_dubious) = 0;
- blocks_reachable = MAC_(bytes_reachable) = 0;
- blocks_suppressed = MAC_(bytes_suppressed) = 0;
-
for (i = 0; i < n_lossrecords; i++) {
Bool print_record;
@@ -601,4 +522,128 @@ void MAC_(do_detect_memory_leaks) (
p_min->num_blocks = 0;
}
+}
+
+/* Compute a quick summary of the leak check. */
+static void make_summary()
+{
+ Int i;
+
+ for(i = 0; i < lc_n_shadows; i++) {
+ SizeT size = lc_shadows[i]->size;
+
+ switch(lc_markstack[i].state) {
+ case Unreached:
+ blocks_leaked++;
+ MAC_(bytes_leaked) += size;
+ break;
+
+ case Proper:
+ blocks_reachable++;
+ MAC_(bytes_reachable) += size;
+ break;
+
+ case Interior:
+ blocks_dubious++;
+ MAC_(bytes_dubious) += size;
+ break;
+
+ case IndirectLeak: /* shouldn't happen */
+ blocks_indirect++;
+ MAC_(bytes_indirect) += size;
+ break;
+ }
+ }
+}
+
+/* Top level entry point to leak detector. Call here, passing in
+ suitable address-validating functions (see comment at top of
+ vg_scan_all_valid_memory above). All this is to avoid duplication
+ of the leak-detection code for Memcheck and Addrcheck.
+ Also pass in a tool-specific function to extract the .where field
+ for allocated blocks, an indication of the resolution wanted for
+ distinguishing different allocation points, and whether or not
+ reachable blocks should be shown.
+*/
+void MAC_(do_detect_memory_leaks) (
+ LeakCheckMode mode,
+ Bool (*is_valid_64k_chunk) ( UInt ),
+ Bool (*is_valid_address) ( Addr )
+)
+{
+ Int i;
+
+ sk_assert(mode != LC_Off);
+
+ /* VG_(HT_to_array) allocates storage for shadows */
+ lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
+ &lc_n_shadows );
+
+ /* Sort the array. */
+ VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);
+
+ /* Sanity check; assert that the blocks are now in order */
+ for (i = 0; i < lc_n_shadows-1; i++) {
+ sk_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
+ }
+
+ /* Sanity check -- make sure they don't overlap */
+ for (i = 0; i < lc_n_shadows-1; i++) {
+ sk_assert( lc_shadows[i]->data + lc_shadows[i]->size
+ < lc_shadows[i+1]->data );
+ }
+
+ if (lc_n_shadows == 0) {
+ sk_assert(lc_shadows == NULL);
+ if (VG_(clo_verbosity) >= 1) {
+ VG_(message)(Vg_UserMsg,
+ "No malloc'd blocks -- no leaks are possible.");
+ }
+ return;
+ }
+
+ if (VG_(clo_verbosity) > 0)
+ VG_(message)(Vg_UserMsg,
+ "searching for pointers to %d not-freed blocks.",
+ lc_n_shadows );
+
+ lc_min_mallocd_addr = lc_shadows[0]->data;
+ lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
+ + lc_shadows[lc_n_shadows-1]->size;
+
+ lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
+ for (i = 0; i < lc_n_shadows; i++) {
+ lc_markstack[i].next = -1;
+ lc_markstack[i].state = Unreached;
+ lc_markstack[i].indirect = 0;
+ }
+ lc_markstack_top = -1;
+
+ lc_is_valid_chunk = is_valid_64k_chunk;
+ lc_is_valid_address = is_valid_address;
+
+ lc_scanned = 0;
+
+ /* Do the scan of memory, pushing any pointers onto the mark stack */
+ VG_(find_root_memory)(lc_scan_memory);
+
+ /* Push registers onto mark stack */
+ VG_(mark_from_registers)(lc_markstack_push);
+
+ /* Keep walking the heap until everything is found */
+ lc_do_leakcheck(-1);
+
+ if (VG_(clo_verbosity) > 0)
+ VG_(message)(Vg_UserMsg, "checked %d bytes.", lc_scanned);
+
+ blocks_leaked = MAC_(bytes_leaked) = 0;
+ blocks_indirect = MAC_(bytes_indirect) = 0;
+ blocks_dubious = MAC_(bytes_dubious) = 0;
+ blocks_reachable = MAC_(bytes_reachable) = 0;
+ blocks_suppressed = MAC_(bytes_suppressed) = 0;
+
+ if (mode == LC_Full)
+ full_report();
+ else
+ make_summary();
if (VG_(clo_verbosity) > 0) {
@@ -607,4 +652,5 @@ void MAC_(do_detect_memory_leaks) (
VG_(message)(Vg_UserMsg, " definitely lost: %d bytes in %d blocks.",
MAC_(bytes_leaked), blocks_leaked );
+ if (blocks_indirect > 0)
VG_(message)(Vg_UserMsg, " indirectly lost: %d bytes in %d blocks.",
MAC_(bytes_indirect), blocks_indirect );
@@ -615,5 +661,8 @@ void MAC_(do_detect_memory_leaks) (
VG_(message)(Vg_UserMsg, " suppressed: %d bytes in %d blocks.",
MAC_(bytes_suppressed), blocks_suppressed );
- if (!MAC_(clo_show_reachable)) {
+ if (mode == LC_Summary)
+ VG_(message)(Vg_UserMsg,
+ "Use --leak-check=full to see details of leaked memory.");
+ else if (!MAC_(clo_show_reachable)) {
VG_(message)(Vg_UserMsg,
"Reachable blocks (those to which a pointer was found) are not shown.");
--- valgrind/memcheck/mc_main.c #1.66:1.67
@@ -1554,7 +1554,7 @@ Bool mc_is_valid_address ( Addr a )
run the generic leak detector with suitable parameters for this
tool. */
-static void mc_detect_memory_leaks ( void )
+static void mc_detect_memory_leaks ( LeakCheckMode mode )
{
- MAC_(do_detect_memory_leaks) ( mc_is_valid_64k_chunk, mc_is_valid_address );
+ MAC_(do_detect_memory_leaks) ( mode, mc_is_valid_64k_chunk, mc_is_valid_address );
}
@@ -1849,5 +1849,5 @@ Bool SK_(handle_client_request) ( Thread
case VG_USERREQ__DO_LEAK_CHECK:
- mc_detect_memory_leaks();
+ mc_detect_memory_leaks(arg[1] ? LC_Summary : LC_Full);
*ret = 0; /* return value is meaningless */
break;
--- valgrind/memcheck/memcheck.h #1.21:1.22
@@ -199,4 +199,13 @@ typedef
}
+/* Just display summaries of leaked memory, rather than all the
+ details */
+#define VALGRIND_DO_QUICK_LEAK_CHECK \
+ {unsigned int _qzz_res; \
+ VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
+ VG_USERREQ__DO_LEAK_CHECK, \
+ 1, 0, 0, 0); \
+ }
+
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
--- valgrind/addrcheck/tests/leak-regroot.stderr.exp #1.2:1.3
@@ -5,5 +5,4 @@
LEAK SUMMARY:
definitely lost: 0 bytes in 0 blocks.
- indirectly lost: 0 bytes in 0 blocks.
possibly lost: 0 bytes in 0 blocks.
still reachable: 10 bytes in 1 blocks.
--- valgrind/addrcheck/tests/leak-0.stderr.exp #1.2:1.3
@@ -5,5 +5,4 @@
LEAK SUMMARY:
definitely lost: 0 bytes in 0 blocks.
- indirectly lost: 0 bytes in 0 blocks.
possibly lost: 0 bytes in 0 blocks.
still reachable: 0 bytes in 1 blocks.
--- valgrind/memcheck/tests/pointer-trace.stderr.exp #1.4:1.5
@@ -5,5 +5,4 @@
LEAK SUMMARY:
definitely lost: 0 bytes in 0 blocks.
- indirectly lost: 0 bytes in 0 blocks.
possibly lost: 0 bytes in 0 blocks.
still reachable: 1048576 bytes in 1 blocks.
@@ -25,5 +24,4 @@
LEAK SUMMARY:
definitely lost: 1048576 bytes in 1 blocks.
- indirectly lost: 0 bytes in 0 blocks.
possibly lost: 0 bytes in 0 blocks.
still reachable: 0 bytes in 0 blocks.
--- valgrind/memcheck/tests/leak-regroot.stderr.exp #1.2:1.3
@@ -5,5 +5,4 @@
LEAK SUMMARY:
definitely lost: 0 bytes in 0 blocks.
- indirectly lost: 0 bytes in 0 blocks.
possibly lost: 0 bytes in 0 blocks.
still reachable: 10 bytes in 1 blocks.
--- valgrind/memcheck/tests/leak-0.stderr.exp #1.2:1.3
@@ -5,5 +5,4 @@
LEAK SUMMARY:
definitely lost: 0 bytes in 0 blocks.
- indirectly lost: 0 bytes in 0 blocks.
possibly lost: 0 bytes in 0 blocks.
still reachable: 0 bytes in 1 blocks.
--- valgrind/memcheck/docs/mc_main.html #1.14:1.15
@@ -36,10 +36,14 @@
<ul>
<li><code>--leak-check=no</code> [default]<br>
- <code>--leak-check=yes</code>
+ <code>--leak-check=summary</code><br>
+ <code>--leak-check=full</code>
<p>When enabled, search for memory leaks when the client program
finishes. A memory leak means a malloc'd block, which has not
yet been free'd, but to which no pointer can be found. Such a
block can never be free'd by the program, since no pointer to it
- exists. </li><br><p>
+ exists.
+ <p><code>summary</code> just shows summary totals of leaked and
+ unleaked memory. <code>full</code> gives a detailed display of
+ leaked memory.</li><br><p>
<li><code>--show-reachable=no</code> [default]<br>
@@ -847,13 +851,18 @@
an error message if not. Returns no value.
<p>
-<li><code>VALGRIND_DO_LEAK_CHECK</code>: run the memory leak detector
- right now. Returns no value. I guess this could be used to
- incrementally check for leaks between arbitrary places in the
- program's execution. Warning: not properly tested!
+
+<li><code>VALGRIND_DO_LEAK_CHECK</code> and
+ <code>VALGRIND_DO_QUICK_LEAK_CHECK</code>:
+ run the memory leak detector right now. Returns no value.
+ I guess this could be used to incrementally check for leaks
+ between arbitrary places in the program's execution.
+ <code>VALGRIND_DO_QUICK_LEAK_CHECK</code>
+ just displays summaries of leaked memory, rather than the full
+ details.
<p>
<li><code>VALGRIND_COUNT_LEAKS</code>: fills in the four arguments with
the number of bytes of memory found by the previous leak check to
be leaked, dubious, reachable and suppressed. Again, useful in
- test harness code, after calling <code>VALGRIND_DO_LEAK_CHECK</code>.
+ test harness code, after calling <code>VALGRIND_DO_[QUICK_]LEAK_CHECK</code>.
<p>
<li><code>VALGRIND_GET_VBITS</code> and
|
|
From: Jeremy F. <je...@go...> - 2005-03-03 21:36:17
|
CVS commit by fitzhardinge:
Put thread modelling on the back-burner. We need to get 2.4.0 out the door.
M +0 -1 Makefile.am 1.74
M +2 -0 coregrind/vg_main.c 1.259
M +14 -0 coregrind/vg_pthreadmodel.c 1.4
M +2 -0 coregrind/vg_threadmodel.c 1.6
M +0 -1 none/tests/cmdline2.stdout.exp 1.14
--- valgrind/coregrind/vg_threadmodel.c #1.5:1.6
@@ -714,4 +714,5 @@ static struct mutex *mutex_check_initial
}
+#if 0
static Bool mx_is_locked(Addr mutexp)
{
@@ -720,4 +721,5 @@ static Bool mx_is_locked(Addr mutexp)
return mx && (mx->state == MX_Locked);
}
+#endif
/* Mutex at mutexp is initialized. This must be done before any
--- valgrind/coregrind/vg_main.c #1.258:1.259
@@ -1521,5 +1521,7 @@ void usage ( Bool debug_help )
" --trace-sched=no|yes show thread scheduler details? [no]\n"
" --wait-for-gdb=yes|no pause on startup to wait for gdb attach\n"
+#if 0
" --model-pthreads=yes|no model the pthreads library [no]\n"
+#endif
" --command-line-only=no|yes only use command line options [no]\n"
"\n"
--- valgrind/coregrind/vg_pthreadmodel.c #1.3:1.4
@@ -56,4 +56,6 @@
#include "core.h"
+#if 0
+
#define __USE_GNU
#define __USE_UNIX98
@@ -573,2 +575,14 @@ void VG_(pthread_init)()
VG_(tm_thread_create)(VG_INVALID_THREADID, VG_(master_tid), True);
}
+
+#else /* !0 */
+/* Stubs for now */
+
+void VG_(pthread_init)()
+{
+}
+
+void VG_(pthread_startfunc_wrapper)(Addr wrapper)
+{
+}
+#endif /* 0 */
--- valgrind/none/tests/cmdline2.stdout.exp #1.13:1.14
@@ -47,5 +47,4 @@
--trace-sched=no|yes show thread scheduler details? [no]
--wait-for-gdb=yes|no pause on startup to wait for gdb attach
- --model-pthreads=yes|no model the pthreads library [no]
--command-line-only=no|yes only use command line options [no]
--- valgrind/Makefile.am #1.73:1.74
@@ -11,5 +11,4 @@
cachegrind \
corecheck \
- helgrind \
massif \
lackey \
|
|
From: Jeremy F. <je...@go...> - 2005-03-03 21:20:59
|
CVS commit by fitzhardinge:
Get baseline memory usage to compensate for libc versions that call malloc()
before main().
M +12 -0 leakotron.c 1.2
--- valgrind/memcheck/tests/leakotron.c #1.1:1.2
@@ -95,6 +95,13 @@ int main()
{
int i;
+ int base_definite, base_dubious, base_reachable, base_suppressed;
int definite, dubious, reachable, suppressed, total;
+ /* get a baseline in case the runtime allocated some memory */
+ VALGRIND_DO_LEAK_CHECK;
+ base_definite = base_dubious = base_reachable = base_suppressed = 0;
+ VALGRIND_COUNT_LEAKS(base_definite, base_dubious,
+ base_reachable, base_suppressed);
+
for(i = 0; i < ITER; i++) {
mk();
@@ -124,4 +131,9 @@ int main()
VALGRIND_COUNT_LEAKS(definite, dubious, reachable, suppressed);
+ definite -= base_definite;
+ dubious -= base_dubious;
+ reachable -= base_reachable;
+ suppressed -= base_suppressed;
+
total = definite+dubious+reachable+suppressed;
|
|
From: Jeremy F. <je...@go...> - 2005-03-03 20:58:25
|
Julian Seward wrote:
>Fair enough. Well, we can have precise exns off by default but have
>a flag --precise-mem-exns=yes for those that need it.
>
>
How about a client request too, so that programs can request it if they
need it?
>So execontext becomes a union type, starting out as an array of
>addresses but being converted into a human readable source location.
>
>Hmm. Maybe. Depends what operations we need to do on them
>after the conversion step.
>
>
The conversion would need to be per-frame rather than per ExeContext.
If you have an ExeContext of someone calling a .so, and that .so calling
something else, and then you unload the .so, you would only need to
convert the .so's frames. It is possible the ExeContext could still be
used for matching leak check records (which may only use the top 2-4
frames).
J
|
|
From: Tom H. <th...@cy...> - 2005-03-03 19:09:52
|
CVS commit by thughes:
Store thread error details in the core pool instead of the tool pool.
M +2 -2 vg_threadmodel.c 1.5
--- valgrind/coregrind/vg_threadmodel.c #1.4:1.5
@@ -1161,5 +1161,5 @@ UInt VG_(tm_error_update_extra)(Error *e
case ThreadErr: {
struct thread_error_data *errdata = VG_(get_error_extra)(err);
- struct thread *new_th = VG_(malloc)(sizeof(struct thread));
+ struct thread *new_th = VG_(arena_malloc)(VG_AR_CORE, sizeof(struct thread));
VG_(memcpy)(new_th, errdata->th, sizeof(struct thread));
@@ -1172,5 +1172,5 @@ UInt VG_(tm_error_update_extra)(Error *e
case MutexErr: {
struct mutex_error_data *errdata = VG_(get_error_extra)(err);
- struct mutex *new_mx = VG_(malloc)(sizeof(struct mutex));
+ struct mutex *new_mx = VG_(arena_malloc)(VG_AR_CORE, sizeof(struct mutex));
VG_(memcpy)(new_mx, errdata->mx, sizeof(struct mutex));
|
|
From: Tom H. <th...@cy...> - 2005-03-03 19:09:04
|
CVS commit by thughes:
Actually make the call to VG_(tm_error_update_extra) when we decide to
save a thread error - without that call the error details aren't copied
and may be invalid when we decide to print them.
M +16 -16 vg_errcontext.c 1.70
--- valgrind/coregrind/vg_errcontext.c #1.69:1.70
@@ -517,11 +517,12 @@ void VG_(maybe_record_error) ( ThreadId
/* update `extra' */
- if (VG_(needs).skin_errors) {
switch (ekind) {
case ThreadErr:
case MutexErr:
+ vg_assert(VG_(needs).core_errors);
extra_size = VG_(tm_error_update_extra)(p);
break;
default:
+ vg_assert(VG_(needs).skin_errors);
extra_size = SK_(update_extra)(p);
break;
@@ -534,5 +535,4 @@ void VG_(maybe_record_error) ( ThreadId
p->extra = new_extra;
}
- }
p->next = vg_errors;
|
|
From: Nicholas N. <nj...@cs...> - 2005-03-03 17:38:54
|
On Thu, 3 Mar 2005, Bryan O'Sullivan wrote: >> Arguably the bigger problem is when Joe Programmer allocates a 2MB array >> on the stack and Memcheck gives him a zillion invalid read/write errors >> because it thinks he switched stacks. > > This would affect most Fortran programs, for example, since many Fortran > implementations allocate arrays on the stack. I'm not sure what you're saying -- I described the current situation. Are you saying my suggestion of tweaking the heuristic is bad? N |
|
From: Bryan O'S. <bo...@se...> - 2005-03-03 16:56:55
|
On Thu, 2005-03-03 at 10:12 -0600, Nicholas Nethercote wrote: > Arguably the bigger problem is when Joe Programmer allocates a 2MB array > on the stack and Memcheck gives him a zillion invalid read/write errors > because it thinks he switched stacks. This would affect most Fortran programs, for example, since many Fortran implementations allocate arrays on the stack. <b |
|
From: Nicholas N. <nj...@cs...> - 2005-03-03 16:13:06
|
On Wed, 2 Mar 2005, Robert Walsh wrote: >>> * 81361 Can't distinguish large stack allocations from stack-swit... >> >> Do we care about this? Is writing-your-own-thread-package >> regarded as a sensible thing to do? > > Oh yeah. For example, we play all sorts of tricks in our OpenMP > implementation to get higher performance, including mixing pthreads and > a home-grown light-weight threads package. We'd be happy to augment > everything and anything so that our customers can Valgrind their OpenMP > applications without spurious errors. Arguably the bigger problem is when Joe Programmer allocates a 2MB array on the stack and Memcheck gives him a zillion invalid read/write errors because it thinks he switched stacks. I think we should slant things in favour of him, rather than the person using stack-switching -- they presumably know what they're doing, so making them use a client request seems not unreasonable. So I'd suggest implementing the client request as Jeremy says, and then tweaking the heuristic so that the %esp-delta has to be substantially bigger (say, 8MB) before Memcheck assumes it's a stack-switch. And add a FAQ about it. N |
|
From: Julian S. <js...@ac...> - 2005-03-03 12:38:24
|
> >Vex can (at a price) provide precise mem exceptions, but not any > >kind of FP exception support. My question is, is there any sizeable > >user group writing programs that actually need precise exceptions? > > Yep, they're not uncommon. All the virtual machines use page protection > tricks, as do garbage collectors. Fair enough. Well, we can have precise exns off by default but have a flag --precise-mem-exns=yes for those that need it. > What would happen if you deferred the addr->symbol resolution until the > unload actually happens? When the .so is unloaded, you know that those > ExeContexts are essentially static (ie, you don't need to compare > against them, because nothing should match). And if the .so isn't > unloaded, then there's no need to do anything. So execontext becomes a union type, starting out as an array of addresses but being converted into a human readable source location. Hmm. Maybe. Depends what operations we need to do on them after the conversion step. J |
|
From: Jeremy F. <je...@go...> - 2005-03-03 07:45:16
|
CVS commit by fitzhardinge: Re-add filter_sink, with luck the +x will stick. A filter_sink 1.3 |
|
From: Jeremy F. <je...@go...> - 2005-03-03 07:43:57
|
CVS commit by fitzhardinge: Remove filter_sync to re-add with +x R filter_sink 1.1 |
|
From: Jeremy F. <je...@go...> - 2005-03-03 07:34:45
|
Julian Seward wrote:
>Vex can (at a price) provide precise mem exceptions, but not any
>kind of FP exception support. My question is, is there any sizeable
>user group writing programs that actually need precise exceptions?
>
>
Yep, they're not uncommon. All the virtual machines use page protection
tricks, as do garbage collectors.
>Yes .. recording them as file/fn/line locations might work, but it
>seems expensive in that basically every stack snapshot has to be
>converted right away into file/fn/line info. And a lot of
>such snapshots get made (once per malloc for example). Also, the
>error-commoning mechanism works by comparing stack snapshots, and
>that can really get hammered. There just doesn't seem to be any
>easy solution. Perhaps the best one is the idiot-solution which
>is essentially to ignore requests to munmap executable areas
>so their symbol tables never go away. Of course that has its
>own dangers.
>
>
What would happen if you deferred the addr->symbol resolution until the
unload actually happens? When the .so is unloaded, you know that those
ExeContexts are essentially static (ie, you don't need to compare
against them, because nothing should match). And if the .so isn't
unloaded, then there's no need to do anything.
J
|
|
From: Tom H. <to...@co...> - 2005-03-03 06:31:05
|
Nightly build on dunsmere ( Fedora Core 3 ) started at 2005-03-03 03:20:03 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow clientperm: valgrind -q ./clientperm custom_alloc: valgrind -q ./custom_alloc describe-block: valgrind ./describe-block sh: line 1: 16097 Segmentation fault VALGRINDLIB=/tmp/valgrind.5693/valgrind/.in_place /tmp/valgrind.5693/valgrind/./coregrind/valgrind --command-line-only=yes --tool=memcheck ./describe-block >describe-block.stdout.out 2>describe-block.stderr.out doublefree: valgrind -q ./doublefree error_counts: valgrind --log-fd=-1 ./error_counts errs1: valgrind -q ./errs1 execve: valgrind -q ./execve execve2: valgrind -q --trace-children=yes ./execve2 exitprog: valgrind -q ./exitprog fprw: valgrind -q ./fprw fwrite: valgrind -q ./fwrite inits: valgrind -q ./inits inline: valgrind -q ./inline leak-0: valgrind ./leak-0 leak-cycle: valgrind --leak-resolution=high ./leak-cycle leak-regroot: valgrind ./leak-regroot leak-tree: valgrind --leak-resolution=high ./leak-tree vg_regtest: `./../../tests/filter_sink' not executable (.) make: *** [regtest] Error 9 |
|
From: Robert W. <rj...@du...> - 2005-03-03 06:14:37
|
> > * 81361 Can't distinguish large stack allocations from stack-swit... >=20 > Do we care about this? Is writing-your-own-thread-package > regarded as a sensible thing to do? Oh yeah. For example, we play all sorts of tricks in our OpenMP implementation to get higher performance, including mixing pthreads and a home-grown light-weight threads package. We'd be happy to augment everything and anything so that our customers can Valgrind their OpenMP applications without spurious errors. > In any case there's not > much we can do about this without the client telling us when=20 > stack switches are happening. Yup - that's fine by us. Regards, Robert. --=20 Robert Walsh Amalgamated Durables, Inc. - "We don't make the things you buy." Email: rj...@du... |
|
From: Tom H. <th...@cy...> - 2005-03-03 05:29:16
|
Nightly build on standard ( Red Hat 7.2 ) started at 2005-03-03 03:00:03 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow clientperm: valgrind -q ./clientperm custom_alloc: valgrind -q ./custom_alloc describe-block: valgrind ./describe-block doublefree: valgrind -q ./doublefree error_counts: valgrind --log-fd=-1 ./error_counts errs1: valgrind -q ./errs1 execve: valgrind -q ./execve execve2: valgrind -q --trace-children=yes ./execve2 exitprog: valgrind -q ./exitprog fprw: valgrind -q ./fprw fwrite: valgrind -q ./fwrite inits: valgrind -q ./inits inline: valgrind -q ./inline leak-0: valgrind ./leak-0 leak-cycle: valgrind --leak-resolution=high ./leak-cycle leak-regroot: valgrind ./leak-regroot leak-tree: valgrind --leak-resolution=high ./leak-tree vg_regtest: `./../../tests/filter_sink' not executable (.) *** leak-tree failed (stderr) *** make: *** [regtest] Error 1 |
|
From: Jeremy F. <je...@go...> - 2005-03-03 05:06:10
|
CVS commit by fitzhardinge:
Thread modelling update. This version fixes up a couple of problems
with error reporting, turns off all the debug output by default, makes an
initial stab at condition variables (though not enough to work yet), and
fails gracefully if function wrapping hasn't managed to capture all the
necessary functions.
M +9 -19 core.h 1.95
M +3 -1 vg_intercept.c 1.32
M +2 -0 vg_main.c 1.258
M +148 -39 vg_pthreadmodel.c 1.3
M +11 -4 vg_redir.c 1.8
M +32 -10 vg_symtab2.c 1.105
M +207 -59 vg_threadmodel.c 1.4
--- valgrind/coregrind/core.h #1.94:1.95
@@ -549,21 +549,4 @@ struct vg_mallocfunc_info {
extern Bool VG_(sk_malloc_called_by_scheduler);
-/* ---------------------------------------------------------------------
- Exports of vg_threadmodel.c
- ------------------------------------------------------------------ */
-
-extern void VG_(tm_threadcreate)(ThreadId creator, ThreadId tid, Bool detached);
-extern void VG_(tm_threadexit) (ThreadId tid);
-extern void VG_(tm_threadjoin) (ThreadId joiner, ThreadId joinee);
-extern void VG_(tm_switchto) (ThreadId tid);
-
-extern void VG_(tm_mutex_init) (ThreadId tid, Addr mutexp);
-extern void VG_(tm_mutex_destroy)(ThreadId tid, Addr mutexp);
-extern void VG_(tm_mutex_trylock)(ThreadId tid, Addr mutexp);
-extern void VG_(tm_mutex_giveup) (ThreadId tid, Addr mutexp);
-extern void VG_(tm_mutex_acquire)(ThreadId tid, Addr mutexp);
-extern void VG_(tm_mutex_unlock) (ThreadId tid, Addr mutexp);
-
-
/* ---------------------------------------------------------------------
@@ -1108,7 +1091,8 @@ extern Bool VG_(is_wrapper_return)(Addr
/* Primary interface for adding wrappers for client-side functions. */
-extern void VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
+extern CodeRedirect *VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
const FuncWrapper *wrapper);
+extern Bool VG_(is_resolved)(const CodeRedirect *redir);
/* ---------------------------------------------------------------------
@@ -1879,4 +1863,10 @@ extern void VG_(tm_error_print) (Error *
extern void VG_(tm_init) ();
+extern void VG_(tm_cond_init) (ThreadId tid, Addr condp);
+extern void VG_(tm_cond_destroy) (ThreadId tid, Addr condp);
+extern void VG_(tm_cond_wait) (ThreadId tid, Addr condp, Addr mutexp);
+extern void VG_(tm_cond_wakeup) (ThreadId tid, Addr condp, Addr mutexp);
+extern void VG_(tm_cond_signal) (ThreadId tid, Addr condp);
+
/* ----- pthreads ----- */
extern void VG_(pthread_init) ();
--- valgrind/coregrind/vg_threadmodel.c #1.3:1.4
@@ -67,4 +67,7 @@ struct mutex;
struct condvar;
+static const Bool debug_thread = False;
+static const Bool debug_mutex = False;
+
/* --------------------------------------------------
Thread lifetime
@@ -194,5 +197,6 @@ static void thread_setstate(struct threa
th->state = state;
-
+ if (debug_thread)
+ VG_(printf)("setting thread(%d) -> %s\n", th->tid, pp_threadstate(th));
thread_validate(th);
}
@@ -272,5 +276,5 @@ static void thread_report(ThreadId tid,
errdata.action = action;
- VG_(maybe_record_error)(tid, ThreadErr, 0, errstr, &errdata);
+ VG_(maybe_record_error)(VG_(get_running_tid)(), ThreadErr, 0, errstr, &errdata);
}
@@ -281,11 +285,13 @@ static void pp_thread_error(Error *err)
Char *errstr = VG_(get_error_string)(err);
- VG_(message)(Vg_UserMsg, "Found %s thread in state %s while %s\n",
+ VG_(message)(Vg_UserMsg, "Found %s thread in state %s while %s",
errstr, pp_threadstate(th), errdata->action);
VG_(pp_ExeContext)(VG_(get_error_where)(err));
- VG_(message)(Vg_UserMsg, "Thread was %s",
- th->state == TS_Dead ? "destroyed" : "created");
+ if (th) {
+ VG_(message)(Vg_UserMsg, " Thread %d was %s",
+ th->tid, th->state == TS_Dead ? "destroyed" : "created");
VG_(pp_ExeContext)(th->ec_created);
+ }
}
@@ -295,4 +301,5 @@ void VG_(tm_thread_create)(ThreadId crea
struct thread *th = thread_get(tid);
+ if (debug_thread)
VG_(printf)("thread %d creates %d %s\n", creator, tid, detached ? "detached" : "");
if (th != NULL) {
@@ -424,5 +431,5 @@ void VG_(tm_thread_join)(ThreadId joiner
/* now the joiner... */
if (joiner == NULL)
- thread_report(joinerid, THE_NotExist, "joining as joiner");
+ thread_report(joineeid, THE_NotExist, "joining as joiner");
else {
switch(joiner->state) {
@@ -433,5 +440,5 @@ void VG_(tm_thread_join)(ThreadId joiner
case TS_Zombie: /* back from the dead */
case TS_Dead:
- thread_report(joinerid, THE_NotAlive, "joining as joiner");
+ thread_report(joineeid, THE_NotAlive, "joining as joiner");
break;
@@ -439,11 +446,11 @@ void VG_(tm_thread_join)(ThreadId joiner
case TS_CVBlocked:
case TS_JoinBlocked:
- thread_report(joinerid, THE_Blocked, "joining as joiner");
+ thread_report(joineeid, THE_Blocked, "joining as joiner");
break;
}
if (joinee->detached)
- thread_report(joinerid, THE_Detached, "joining as joiner");
-
+ thread_report(joineeid, THE_Detached, "joining as joiner");
+ else {
/* block if the joinee hasn't exited yet */
if (joinee) {
@@ -453,5 +460,5 @@ void VG_(tm_thread_join)(ThreadId joiner
default:
- if (joinee->detached || joinee->state == TS_Zombie)
+ if (joinee->state == TS_Zombie)
do_thread_dead(joinee);
else
@@ -460,4 +467,5 @@ void VG_(tm_thread_join)(ThreadId joiner
}
}
+ }
}
@@ -570,5 +578,5 @@ struct mutex_error_data
};
-static struct mutex *mx_get(Addr mutexp);
+static struct mutex *mutex_get(Addr mutexp);
static const Char *pp_mutexstate(const struct mutex *mx)
@@ -615,4 +623,5 @@ static void mutex_setstate(ThreadId tid,
mx->state = st;
+ if (debug_mutex)
VG_(printf)("setting mutex(%p) -> %s\n", mx->mutex, pp_mutexstate(mx));
}
@@ -621,5 +630,5 @@ static void mutex_report(ThreadId tid, A
{
Char *errstr="?";
- struct mutex *mx = mx_get(mutexp);
+ struct mutex *mx = mutex_get(mutexp);
struct mutex_error_data errdata;
@@ -647,5 +656,5 @@ static void pp_mutex_error(Error *err)
Char *errstr = VG_(get_error_string)(err);
- VG_(message)(Vg_UserMsg, "Found %s mutex %p while %s\n",
+ VG_(message)(Vg_UserMsg, "Found %s mutex %p while %s",
errstr, mx ? mx->mutex : 0, errdata->action);
VG_(pp_ExeContext)(VG_(get_error_where)(err));
@@ -656,18 +665,18 @@ static void pp_mutex_error(Error *err)
break;
case MX_Locked:
- VG_(message)(Vg_UserMsg, "Mutex was locked by thread %d", mx->owner);
+ VG_(message)(Vg_UserMsg, " Mutex was locked by thread %d", mx->owner);
VG_(pp_ExeContext)(mx->ec_locked);
break;
case MX_Unlocking:
- VG_(message)(Vg_UserMsg, "Mutex being unlocked");
+ VG_(message)(Vg_UserMsg, " Mutex being unlocked");
VG_(pp_ExeContext)(mx->ec_locked);
break;
case MX_Free:
- VG_(message)(Vg_UserMsg, "Mutex was unlocked");
+ VG_(message)(Vg_UserMsg, " Mutex was unlocked");
VG_(pp_ExeContext)(mx->ec_locked);
break;
}
- VG_(message)(Vg_UserMsg, "Mutex was %s",
+ VG_(message)(Vg_UserMsg, " Mutex was %s",
mx->state == MX_Dead ? "destroyed" : "created");
VG_(pp_ExeContext)(mx->ec_create);
@@ -676,5 +685,5 @@ static void pp_mutex_error(Error *err)
static SkipList sk_mutex = SKIPLIST_INIT(struct mutex, mutex, VG_(cmp_Addr), NULL, VG_AR_CORE);
-static struct mutex *mx_get(Addr mutexp)
+static struct mutex *mutex_get(Addr mutexp)
{
return VG_(SkipList_Find_Exact)(&sk_mutex, &mutexp);
@@ -683,11 +692,13 @@ static struct mutex *mx_get(Addr mutexp)
static Bool mx_is_initialized(Addr mutexp)
{
- const struct mutex *mx = mx_get(mutexp);
+ const struct mutex *mx = mutex_get(mutexp);
return mx && mx->state != MX_Dead;
}
-static void mx_check_initialized(ThreadId tid, Addr mutexp, const Char *action)
+static struct mutex *mutex_check_initialized(ThreadId tid, Addr mutexp, const Char *action)
{
+ struct mutex *mx;
+
vg_assert(tid != VG_INVALID_THREADID);
@@ -696,9 +707,14 @@ static void mx_check_initialized(ThreadI
VG_(tm_mutex_init)(tid, mutexp);
}
+
+ mx = mutex_get(mutexp);
+ vg_assert(mx != NULL);
+
+ return mx;
}
static Bool mx_is_locked(Addr mutexp)
{
- const struct mutex *mx = mx_get(mutexp);
+ const struct mutex *mx = mutex_get(mutexp);
return mx && (mx->state == MX_Locked);
@@ -711,5 +727,5 @@ static Bool mx_is_locked(Addr mutexp)
void VG_(tm_mutex_init)(ThreadId tid, Addr mutexp)
{
- struct mutex *mx = mx_get(mutexp);
+ struct mutex *mx = mutex_get(mutexp);
if (mx == NULL) {
@@ -737,5 +753,5 @@ Bool VG_(tm_mutex_exists)(Addr mutexp)
void VG_(tm_mutex_destroy)(ThreadId tid, Addr mutexp)
{
- struct mutex *mx = mx_get(mutexp);
+ struct mutex *mx = mutex_get(mutexp);
if (mx == NULL)
@@ -774,7 +790,5 @@ void VG_(tm_mutex_trylock)(ThreadId tid,
struct mutex *mx;
- mx_check_initialized(tid, mutexp, "trylocking");
-
- mx = mx_get(mutexp);
+ mx = mutex_check_initialized(tid, mutexp, "trylocking");
thread_block_mutex(tid, mx);
@@ -793,6 +807,5 @@ void VG_(tm_mutex_giveup)(ThreadId tid,
struct mutex *mx;
- mx_check_initialized(tid, mutexp, "giving up");
- mx = mx_get(mutexp);
+ mx = mutex_check_initialized(tid, mutexp, "giving up");
thread_unblock_mutex(tid, mx, "giving up on mutex");
@@ -807,6 +820,5 @@ void VG_(tm_mutex_acquire)(ThreadId tid,
struct mutex *mx;
- mx_check_initialized(tid, mutexp, "acquiring");
- mx = mx_get(mutexp);
+ mx = mutex_check_initialized(tid, mutexp, "acquiring");
switch(mx->state) {
@@ -822,4 +834,5 @@ void VG_(tm_mutex_acquire)(ThreadId tid,
case MX_Locked:
+ if (debug_mutex)
VG_(printf)("mutex=%p mx->state=%s\n", mutexp, pp_mutexstate(mx));
VG_TRACK( post_mutex_unlock, mx->owner, (void *)mutexp );
@@ -847,7 +860,5 @@ void VG_(tm_mutex_tryunlock)(ThreadId ti
struct mutex *mx;
- mx_check_initialized(tid, mutexp, "try-unlocking");
- mx = mx_get(mutexp);
- vg_assert(mx != NULL);
+ mx = mutex_check_initialized(tid, mutexp, "try-unlocking");
th = thread_get(tid);
@@ -895,5 +906,4 @@ void VG_(tm_mutex_tryunlock)(ThreadId ti
mutex_setstate(tid, mx, MX_Unlocking);
- VG_TRACK( post_mutex_unlock, tid, (void *)mutexp );
}
@@ -909,5 +919,5 @@ void VG_(tm_mutex_unlock)(ThreadId tid,
struct thread *th;
- mx_check_initialized(tid, mutexp, "unlocking mutex");
+ mx = mutex_check_initialized(tid, mutexp, "unlocking mutex");
th = thread_get(tid);
@@ -935,7 +945,4 @@ void VG_(tm_mutex_unlock)(ThreadId tid,
}
- mx = mx_get(mutexp);
- vg_assert(mx != NULL);
-
switch(mx->state) {
case MX_Locked:
@@ -951,6 +958,6 @@ void VG_(tm_mutex_unlock)(ThreadId tid,
case MX_Unlocking:
/* OK - we need to complete the unlock */
- mutex_setstate(tid, mx, MX_Free);
VG_TRACK( post_mutex_unlock, tid, (void *)mutexp );
+ mutex_setstate(tid, mx, MX_Free);
break;
@@ -965,9 +972,90 @@ void VG_(tm_mutex_unlock)(ThreadId tid,
-------------------------------------------------- */
+struct condvar_waiter
+{
+ ThreadId waiter;
+
+ struct condvar *condvar;
+ struct mutex *mutex;
+
+ struct condvar_waiter *next;
+};
+
+struct condvar
+{
+ Addr condvar;
+
+ enum condvar_state {
+ CV_Dead,
+ CV_Alive,
+ } state;
+
+ struct condvar_waiter *waiters; // XXX skiplist?
+
+ ExeContext *ec_created; // where created
+ ExeContext *ec_signalled; // where last signalled
+};
+
+enum condvar_err {
+ CVE_NotExist,
+ CVE_NotInit,
+ CVE_ReInit,
+ CVE_Busy,
+ CVE_Blocked,
+};
+
+static SkipList sk_condvar = SKIPLIST_INIT(struct condvar, condvar, VG_(cmp_Addr),
+ NULL, VG_AR_CORE);
+
+static struct condvar *condvar_get(Addr condp)
+{
+ return VG_(SkipList_Find_Exact)(&sk_condvar, &condp);
+}
+
+static Bool condvar_is_initialized(Addr condp)
+{
+ const struct condvar *cv = condvar_get(condp);
+
+ return cv && cv->state != CV_Dead;
+}
+
+static void condvar_report(ThreadId tid, Addr condp, enum condvar_err err, const Char *action)
+{
+}
+
+static struct condvar *condvar_check_initialized(ThreadId tid, Addr condp, const Char *action)
+{
+ struct condvar *cv;
+ vg_assert(tid != VG_INVALID_THREADID);
+
+ if (!condvar_is_initialized(condp)) {
+ condvar_report(tid, condp, CVE_NotInit, action);
+ VG_(tm_cond_init)(tid, condp);
+ }
+
+ cv = condvar_get(condp);
+ vg_assert(cv != NULL);
+
+ return cv;
+}
+
/* Initialize a condition variable. Fails if:
- condp has already been initialized
*/
-void VG_(tm_cond_init)(void *condp)
+void VG_(tm_cond_init)(ThreadId tid, Addr condp)
{
+ struct condvar *cv = condvar_get(condp);
+
+ if (cv == NULL) {
+ cv = VG_(SkipNode_Alloc)(&sk_condvar);
+ cv->condvar = condp;
+ cv->waiters = NULL;
+ VG_(SkipList_Insert)(&sk_condvar, cv);
+ } else if (cv->state != CV_Dead) {
+ condvar_report(tid, condp, CVE_ReInit, "initializing");
+ /* ? what about existing waiters? */
+ }
+
+ cv->state = CV_Alive;
}
@@ -976,6 +1064,27 @@ void VG_(tm_cond_init)(void *condp)
- condp is currently being waited on
*/
-void VG_(tm_cond_destroy)(void *condp)
+void VG_(tm_cond_destroy)(ThreadId tid, Addr condp)
{
+ struct condvar *cv = condvar_get(condp);
+
+ if (cv == NULL)
+ condvar_report(tid, condp, CVE_NotExist, "destroying");
+ else {
+ if (cv->state != CV_Alive)
+ condvar_report(tid, condp, CVE_NotInit, "destroying");
+ if (cv->waiters != NULL)
+ condvar_report(tid, condp, CVE_Busy, "destroying");
+ cv->state = CV_Dead;
+ }
+}
+
+static struct condvar_waiter *get_waiter(const struct condvar *cv, ThreadId tid)
+{
+ struct condvar_waiter *w;
+
+ for(w = cv->waiters; w; w = w->next)
+ if (w->waiter == tid)
+ return w;
+ return NULL;
}
@@ -984,7 +1093,46 @@ void VG_(tm_cond_destroy)(void *condp)
- thread doesn't hold mutexp
- thread is blocked on some other object
+ - thread is already blocked on mutex
*/
-void VG_(tm_cond_wait)(ThreadId tid, void *condp, void *mutexp)
+void VG_(tm_cond_wait)(ThreadId tid, Addr condp, Addr mutexp)
{
+ struct thread *th = thread_get(tid);
+ struct mutex *mx;
+ struct condvar *cv;
+ struct condvar_waiter *waiter;
+
+ /* Condvar must exist */
+ cv = condvar_check_initialized(tid, condp, "waiting");
+
+ /* Mutex must exist */
+ mx = mutex_check_initialized(tid, mutexp, "waiting on condvar");
+
+ /* Thread must own mutex */
+ if (mx->state != MX_Locked) {
+ mutex_report(tid, mutexp, MXE_NotLocked, "waiting on condvar");
+ VG_(tm_mutex_trylock)(tid, mutexp);
+ VG_(tm_mutex_acquire)(tid, mutexp);
+ } else if (mx->owner != tid) {
+ mutex_report(tid, mutexp, MXE_NotOwner, "waiting on condvar");
+ mx->owner = tid;
+ }
+
+ /* Thread must not be already waiting for condvar */
+ waiter = get_waiter(cv, tid);
+ if (waiter != NULL)
+ condvar_report(tid, condp, CVE_Blocked, "waiting");
+ else {
+ waiter = VG_(arena_malloc)(VG_AR_CORE, sizeof(*waiter));
+ waiter->condvar = cv;
+ waiter->mutex = mx;
+ waiter->next = cv->waiters;
+ cv->waiters = waiter;
+ }
+
+ /* Thread is now blocking on condvar */
+ do_thread_block_condvar(th, cv);
+
+ /* (half) release mutex */
+ VG_(tm_mutex_tryunlock)(tid, mutexp);
}
@@ -993,5 +1141,5 @@ void VG_(tm_cond_wait)(ThreadId tid, voi
- thread is not waiting on condp
*/
-void VG_(tm_cond_wakeup)(ThreadId tid, void *condp)
+void VG_(tm_cond_wakeup)(ThreadId tid, Addr condp, Addr mutexp)
{
}
@@ -1000,5 +1148,5 @@ void VG_(tm_cond_wakeup)(ThreadId tid, v
- condp has not been initialized
*/
-void VG_(tm_cond_signal)(ThreadId tid, void *condp)
+void VG_(tm_cond_signal)(ThreadId tid, Addr condp)
{
}
--- valgrind/coregrind/vg_pthreadmodel.c #1.2:1.3
@@ -8,6 +8,6 @@
emulator for monitoring program execution on x86-Unixes.
- Copyright (C) 2000-2004 Julian Seward
- js...@ac...
+ Copyright (C) 2005 Jeremy Fitzhardinge
+ je...@go...
This program is free software; you can redistribute it and/or
@@ -57,6 +57,11 @@
#define __USE_GNU
+#define __USE_UNIX98
#include <pthread.h>
+static const Bool debug = False;
+
+static Bool check_wrappings(void);
+
#define ENTER(x) \
do { \
@@ -68,5 +73,5 @@
static const Char *pp_retval(enum return_type rt, Word retval)
{
- static Char buf[20];
+ static Char buf[50];
switch(rt) {
@@ -127,9 +132,10 @@ static ThreadId get_pthread_mapping(pthr
/* Create a mapping between a ThreadId and a pthread_t */
-void pthread_id_mapping(ThreadId tid, Addr idp, UInt idsz)
+static void pthread_id_mapping(ThreadId tid, Addr idp, UInt idsz)
{
pthread_t id = *(pthread_t *)idp;
struct pthread_map *m = VG_(SkipList_Find_Exact)(&sk_pthread_map, &id);
+ if (debug)
VG_(printf)("Thread %d maps to %p\n", tid, id);
@@ -150,4 +156,5 @@ static void check_thread_exists(ThreadId
{
if (!VG_(tm_thread_exists)(tid)) {
+ if (debug)
VG_(printf)("creating thread %d\n", tid);
VG_(tm_thread_create)(VG_INVALID_THREADID, tid, False);
@@ -174,4 +181,9 @@ static void *before_pthread_create(va_li
void *arg = va_arg(va, void *);
struct pthread_create_nonce *n;
+ struct vg_pthread_newthread_data *data;
+ ThreadState *tst;
+
+ if (!check_wrappings())
+ return NULL;
ENTER(pthread_create);
@@ -179,20 +191,25 @@ static void *before_pthread_create(va_li
/* Data is in the client heap and is freed by the client in the
startfunc_wrapper. */
- if (startfunc_wrapper != 0) {
- struct vg_pthread_newthread_data *data;
- ThreadState *tst = VG_(get_ThreadState)(VG_(get_running_tid)());
+ vg_assert(startfunc_wrapper != 0);
+
+ tst = VG_(get_ThreadState)(VG_(get_running_tid)());
VG_(sk_malloc_called_by_scheduler) = True;
data = SK_(malloc)(sizeof(*data));
VG_(sk_malloc_called_by_scheduler) = False;
+
+ VG_TRACK(pre_mem_write, Vg_CorePThread, tst->tid, "new thread data",
+ (Addr)data, sizeof(*data));
data->startfunc = start;
data->arg = arg;
+ VG_TRACK(post_mem_write, (Addr)data, sizeof(*data));
/* Substitute arguments
- XXX hack: need an API to do this.
- */
+ XXX hack: need an API to do this. */
((Word *)tst->arch.m_esp)[3] = startfunc_wrapper;
((Word *)tst->arch.m_esp)[4] = (Word)data;
- }
+
+ if (debug)
+ VG_(printf)("starting thread at wrapper %p\n", startfunc_wrapper);
n = VG_(arena_malloc)(VG_AR_CORE, sizeof(*n));
@@ -208,4 +225,7 @@ static void after_pthread_create(void *n
ThreadId tid = VG_(get_running_tid)();
+ if (n == NULL)
+ return;
+
if (rt == RT_RETURN && retval == 0) {
if (!VG_(tm_thread_exists)(tid))
@@ -227,8 +247,13 @@ static void *before_pthread_join(va_list
{
pthread_t pt_joinee = va_arg(va, pthread_t);
- ThreadId joinee = get_pthread_mapping(pt_joinee);
+ ThreadId joinee;
+
+ if (!check_wrappings())
+ return NULL;
ENTER(pthread_join);
+ joinee = get_pthread_mapping(pt_joinee);
+
VG_(tm_thread_join)(VG_(get_running_tid)(), joinee);
@@ -239,4 +264,7 @@ static void after_pthread_join(void *v,
{
/* nothing to be done? */
+ if (!check_wrappings())
+ return;
+
LEAVE(pthread_join, rt, retval);
}
@@ -249,9 +277,14 @@ static void *before_pthread_detach(va_li
{
pthread_t id = va_arg(va, pthread_t);
- struct pthread_detach_data *data = VG_(arena_malloc)(VG_AR_CORE, sizeof(*data));
- data->id = id;
+ struct pthread_detach_data *data;
+
+ if (!check_wrappings())
+ return NULL;
ENTER(pthread_detach);
+ data = VG_(arena_malloc)(VG_AR_CORE, sizeof(*data));
+ data->id = id;
+
return data;
}
@@ -260,5 +293,10 @@ static void after_pthread_detach(void *n
{
struct pthread_detach_data *data = (struct pthread_detach_data *)nonce;
- ThreadId tid = get_pthread_mapping(data->id);
+ ThreadId tid;
+
+ if (data == NULL)
+ return;
+
+ tid = get_pthread_mapping(data->id);
VG_(arena_free)(VG_AR_CORE, data);
@@ -277,4 +315,7 @@ static void *before_pthread_self(va_list
to the return value. On Linux/glibc, it's a simple scalar, so it is
returned normally. */
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_self);
@@ -287,4 +328,7 @@ static void after_pthread_self(void *non
pthread_t ret = (pthread_t)retval;
+ if (!check_wrappings())
+ return;
+
pthread_id_mapping(VG_(get_running_tid)(), (Addr)&ret, sizeof(ret));
@@ -323,4 +367,7 @@ static void *before_pthread_mutex_init(v
const pthread_mutexattr_t *attr = va_arg(va, const pthread_mutexattr_t *);
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_mutex_init);
@@ -334,4 +381,7 @@ static void *before_pthread_mutex_init(v
static void after_pthread_mutex_init(void *nonce, enum return_type rt, Word retval)
{
+ if (!check_wrappings())
+ return;
+
if (rt == RT_RETURN && retval == 0)
VG_(tm_mutex_init)(VG_(get_running_tid)(), (Addr)nonce);
@@ -344,4 +394,7 @@ static void *before_pthread_mutex_destro
pthread_mutex_t *mx = va_arg(va, pthread_mutex_t *);
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_mutex_destroy);
@@ -353,4 +406,7 @@ static void *before_pthread_mutex_destro
static void after_pthread_mutex_destroy(void *nonce, enum return_type rt, Word retval)
{
+ if (!check_wrappings())
+ return;
+
LEAVE(pthread_mutex_destroy, rt, retval);
}
@@ -360,6 +416,10 @@ static void *before_pthread_mutex_lock(v
pthread_mutex_t *mx = va_arg(va, pthread_mutex_t *);
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_mutex_lock);
+ if (debug)
VG_(printf)("%d locking %p\n", VG_(get_running_tid)(), mx);
check_thread_exists(VG_(get_running_tid)());
@@ -372,7 +432,11 @@ static void *before_pthread_mutex_lock(v
static void after_pthread_mutex_lock(void *nonce, enum return_type rt, Word retval)
{
+ if (!check_wrappings())
+ return;
+
if (rt == RT_RETURN && retval == 0)
VG_(tm_mutex_acquire)(VG_(get_running_tid)(), (Addr)nonce);
else {
+ if (debug)
VG_(printf)("after mutex_lock failed: rt=%d ret=%d\n", rt, retval);
VG_(tm_mutex_giveup)(VG_(get_running_tid)(), (Addr)nonce);
@@ -386,6 +450,10 @@ static void *before_pthread_mutex_tryloc
pthread_mutex_t *mx = va_arg(va, pthread_mutex_t *);
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_mutex_trylock);
+ if (debug)
VG_(printf)("%d trylocking %p\n", VG_(get_running_tid)(), mx);
check_thread_exists(VG_(get_running_tid)());
@@ -398,7 +466,11 @@ static void *before_pthread_mutex_tryloc
static void after_pthread_mutex_trylock(void *nonce, enum return_type rt, Word retval)
{
+ if (nonce == NULL)
+ return;
+
if (rt == RT_RETURN && retval == 0)
VG_(tm_mutex_acquire)(VG_(get_running_tid)(), (Addr)nonce);
else {
+ if (debug)
VG_(printf)("after mutex_trylock failed: rt=%d ret=%d\n", rt, retval);
VG_(tm_mutex_giveup)(VG_(get_running_tid)(), (Addr)nonce);
@@ -412,5 +484,9 @@ static void *before_pthread_mutex_unlock
pthread_mutex_t *mx = va_arg(va, pthread_mutex_t *);
+ if (!check_wrappings())
+ return NULL;
+
ENTER(pthread_mutex_unlock);
+
VG_(tm_mutex_tryunlock)(VG_(get_running_tid)(), (Addr)mx);
@@ -420,4 +496,7 @@ static void *before_pthread_mutex_unlock
static void after_pthread_mutex_unlock(void *nonce, enum return_type rt, Word retval)
{
+ if (nonce == NULL)
+ return;
+
if (rt == RT_RETURN && retval == 0)
VG_(tm_mutex_unlock)(VG_(get_running_tid)(), (Addr)nonce); /* complete unlock */
@@ -432,4 +511,5 @@ static struct pt_wraps {
const Char *name;
FuncWrapper wrapper;
+ const CodeRedirect *redir;
} wraps[] = {
#define WRAP(func, extra) { #func extra, { before_##func, after_##func } }
@@ -448,4 +528,33 @@ static struct pt_wraps {
};
+/* Check to see if all the wrappers are resolved */
+static Bool check_wrappings()
+{
+ Int i;
+ static Bool ok = True;
+ static Bool checked = False;
+
+ if (checked)
+ return ok;
+
+ for(i = 0; i < sizeof(wraps)/sizeof(*wraps); i++) {
+ if (!VG_(is_resolved)(wraps[i].redir)) {
+ VG_(message)(Vg_DebugMsg, "Pthread wrapper for \"%s\" is not resolved",
+ wraps[i].name);
+ ok = False;
+ }
+ }
+
+ if (startfunc_wrapper == 0) {
+ VG_(message)(Vg_DebugMsg, "Pthread wrapper for thread start function is not resolved");
+ ok = False;
+ }
+
+ if (!ok)
+ VG_(message)(Vg_DebugMsg, "Missing intercepts; model disabled");
+
+ checked = True;
+ return ok;
+}
/*
@@ -457,7 +566,7 @@ void VG_(pthread_init)()
for(i = 0; i < sizeof(wraps)/sizeof(*wraps); i++) {
- VG_(printf)("adding pthread wrapper for %s\n", wraps[i].name);
- //VG_(add_wrapper)("soname:libpthread.so.0", wraps[i].name, &wraps[i].wrapper);
- VG_(add_wrapper)("soname:libpthread.so.0", wraps[i].name, &wraps[i].wrapper);
+ //VG_(printf)("adding pthread wrapper for %s\n", wraps[i].name);
+ wraps[i].redir = VG_(add_wrapper)("soname:libpthread.so.0",
+ wraps[i].name, &wraps[i].wrapper);
}
VG_(tm_init)();
--- valgrind/coregrind/vg_intercept.c #1.31:1.32
@@ -75,5 +75,5 @@ void *VG_WRAPPER(pthread_startfunc_wrapp
static pthread_t (*pthread_selfp)(void);
- //VALGRIND_PRINTF("intercepted thread start: real start is %p(%p)\n", func, arg);
+ //VALGRIND_PRINTF("intercepted thread start: real start is %p(%p)", func, arg);
/* Do this rather than a direct call so we don't make an explicit
@@ -85,4 +85,6 @@ void *VG_WRAPPER(pthread_startfunc_wrapp
if (pthread_selfp != NULL)
(*pthread_selfp)(); /* just calling this is enough */
+ else
+ VALGRIND_PRINTF("pthread_self pointer is NULL!");
/* Free the data the before_pthread_create wrapper left for us. */
--- valgrind/coregrind/vg_redir.c #1.7:1.8
@@ -128,4 +128,9 @@ static inline Bool to_resolved(const Cod
}
+Bool VG_(is_resolved)(const CodeRedirect *redir)
+{
+ return from_resolved(redir) && to_resolved(redir);
+}
+
/* Resolve a redir using si if possible, and add it to the resolved
list */
@@ -141,5 +146,5 @@ Bool VG_(resolve_redir)(CodeRedirect *re
return False;
- resolved = from_resolved(redir) && to_resolved(redir);
+ resolved = VG_(is_resolved)(redir);
if (0 && VG_(clo_trace_redir))
@@ -335,5 +340,5 @@ void VG_(add_redirect_addr)(const Char *
}
-void VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
+CodeRedirect *VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
const FuncWrapper *wrapper)
{
@@ -363,4 +368,6 @@ void VG_(add_wrapper)(const Char *from_l
unresolved_redir = redir;
}
+
+ return redir;
}
--- valgrind/coregrind/vg_symtab2.c #1.104:1.105
@@ -409,20 +409,42 @@ static Int compare_RiSym(void *va, void
/* Two symbols have the same address. Which name do we prefer?
- The shortest. Always. Hm, well, prefer the ones with '@' symbol versioning in them.
- If they're the same length, then alphabetical.
+ The general rule is to prefer the shorter symbol name. If the
+ symbol contains a '@', which means its versioned, then the length
+ up to the '@' is used for length comparison purposes (so
+ "foo@GLIBC_2.4.2" is considered shorter than "foobar"), but if two
+ symbols have the same length, the one with the version string is
+ preferred. If all else fails, use alphabetical ordering.
*/
static RiSym *prefersym(RiSym *a, RiSym *b)
{
- Int lena, lenb;
- Bool va = VG_(strchr)(a->name, '@') != NULL;
- Bool vb = VG_(strchr)(b->name, '@') != NULL;
+ Int lena, lenb; /* full length */
+ Int vlena, vlenb; /* length without version */
+ const Char *vpa, *vpb;
- lena = VG_(strlen)(a->name);
- lenb = VG_(strlen)(b->name);
- if (va || lena < lenb)
+ vlena = lena = VG_(strlen)(a->name);
+ vlenb = lenb = VG_(strlen)(b->name);
+
+ vpa = VG_(strchr)(a->name, '@');
+ vpb = VG_(strchr)(b->name, '@');
+
+ if (vpa)
+ vlena = vpa - a->name;
+ if (vpb)
+ vlenb = vpb - b->name;
+
+ /* Select the shortest unversioned name */
+ if (vlena < vlenb)
return a;
- else if (vb || lenb < lena)
+ else if (vlenb < vlena)
+ return b;
+
+ /* Equal lengths; select the versioned name */
+ if (vpa && !vpb)
+ return a;
+ if (vpb && !vpa)
return b;
+ /* Either both versioned or neither is versioned; select them
+ alphabetically */
if (VG_(strcmp)(a->name, b->name) < 0)
return a;
--- valgrind/coregrind/vg_main.c #1.257:1.258
@@ -2669,4 +2669,6 @@ void VG_(shutdown_actions)(ThreadId tid)
VGA_(reap_threads)(tid);
+ VG_(clo_model_pthreads) = False;
+
// Clean the client up before the final report
VGA_(final_tidyup)(tid);
|
|
From: Nicholas N. <nj...@cs...> - 2005-03-03 05:05:04
|
On Wed, 2 Mar 2005, Nicholas Nethercote wrote: >>> Can someone remind me why we use mangled C++ names in suppressions? I >>> just tried switching to using demangled names and it worked fine, and >>> the suppressions looked much better. >> >> Do we have a mangler in there? Or does it run the demangler and then >> compare against the un-mangled name? >> >> Uh, maybe we've just been dense; I can't see why this wouldn't work. > > It's a two line change in vg_errcontext.c -- change the two calls to > VG_(get_fnname_nodemangle)() to VG_(get_fnname)(). To answer your question, we would run the demangler more often than now. But I don't think it's a problem. N |
|
From: Jeremy F. <je...@go...> - 2005-03-03 04:18:25
|
CVS commit by fitzhardinge: Missing leakotron stderr exp file. A leakotron.stderr.exp 1.1 |
|
From: Tom H. <th...@cy...> - 2005-03-03 04:04:17
|
Nightly build on alvis ( Red Hat 7.3 ) started at 2005-03-03 03:05:03 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow doublefree: valgrind -q ./doublefree error_counts: valgrind --log-fd=-1 ./error_counts errs1: valgrind -q ./errs1 execve: valgrind -q ./execve execve2: valgrind -q --trace-children=yes ./execve2 exitprog: valgrind -q ./exitprog fprw: valgrind -q ./fprw fwrite: valgrind -q ./fwrite inits: valgrind -q ./inits inline: valgrind -q ./inline leak-0: valgrind ./leak-0 *** leak-0 failed (stderr) *** leak-cycle: valgrind --leak-resolution=high ./leak-cycle *** leak-cycle failed (stderr) *** leak-regroot: valgrind ./leak-regroot *** leak-regroot failed (stderr) *** leak-tree: valgrind --leak-resolution=high ./leak-tree vg_regtest: `./../../tests/filter_sink' not executable (.) *** leak-tree failed (stderr) *** make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2005-03-03 04:03:22
|
Nightly build on ginetta ( Red Hat 8.0 ) started at 2005-03-03 03:10:02 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow buflen_check: valgrind -q ./buflen_check clientperm: valgrind -q ./clientperm custom_alloc: valgrind -q ./custom_alloc describe-block: valgrind ./describe-block doublefree: valgrind -q ./doublefree error_counts: valgrind --log-fd=-1 ./error_counts errs1: valgrind -q ./errs1 execve: valgrind -q ./execve execve2: valgrind -q --trace-children=yes ./execve2 exitprog: valgrind -q ./exitprog fprw: valgrind -q ./fprw fwrite: valgrind -q ./fwrite inits: valgrind -q ./inits inline: valgrind -q ./inline leak-0: valgrind ./leak-0 leak-cycle: valgrind --leak-resolution=high ./leak-cycle leak-regroot: valgrind ./leak-regroot leak-tree: valgrind --leak-resolution=high ./leak-tree vg_regtest: `./../../tests/filter_sink' not executable (.) make: *** [regtest] Error 25 |
|
From: Tom H. <th...@cy...> - 2005-03-03 04:00:35
|
Nightly build on audi ( Red Hat 9 ) started at 2005-03-03 03:15:02 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow buflen_check: valgrind -q ./buflen_check clientperm: valgrind -q ./clientperm custom_alloc: valgrind -q ./custom_alloc describe-block: valgrind ./describe-block doublefree: valgrind -q ./doublefree error_counts: valgrind --log-fd=-1 ./error_counts errs1: valgrind -q ./errs1 execve: valgrind -q ./execve execve2: valgrind -q --trace-children=yes ./execve2 exitprog: valgrind -q ./exitprog fprw: valgrind -q ./fprw fwrite: valgrind -q ./fwrite inits: valgrind -q ./inits inline: valgrind -q ./inline leak-0: valgrind ./leak-0 leak-cycle: valgrind --leak-resolution=high ./leak-cycle leak-regroot: valgrind ./leak-regroot leak-tree: valgrind --leak-resolution=high ./leak-tree vg_regtest: `./../../tests/filter_sink' not executable (.) make: *** [regtest] Error 25 |
|
From: Julian S. <js...@ac...> - 2005-03-03 03:57:34
|
Very useful summary. > * 69511 Valgrind can call wrong function As Jeremy says, this is probably fixable by adding CRC checks for selected translations obtained from near the stack pointer or from other known-volatile code areas. > * 69530 we need to implement precise exception handling > * 69531 Some tools need a mechanism to save machine state before ... Vex can (at a price) provide precise mem exceptions, but not any kind of FP exception support. My question is, is there any sizeable user group writing programs that actually need precise exceptions? > * 81361 Can't distinguish large stack allocations from stack-swit... Do we care about this? Is writing-your-own-thread-package regarded as a sensible thing to do? In any case there's not much we can do about this without the client telling us when stack switches are happening. > * 82301 FV memory layout too rigid Will be fixed in the 3.0 series. > * 92071 Reading debugging info uses too much memory Hmm. Dunno. Possibly is time for a cleanup of it. It's a good candidate as a cleanly-defined subsystem which we can extract from the coregrind/ swamp. > * 93818 couldn't allocate address space for shadow memory > * 98278 Infinite recursion possible when allocating memory Also will be fixed in 3.0 -- both sound like low-level mem management problems. > - The debug info getting lost is a problem for the leak checker. I think > the right way to fix this is to record code locations as either source > locations (eg. file/fn/line) if possible, or as object code locations > (eg. file/offset). Recording them as locations in memory is no good, since > they can change over time. But I recall some argument about this in the > past. Yes .. recording them as file/fn/line locations might work, but it seems expensive in that basically every stack snapshot has to be converted right away into file/fn/line info. And a lot of such snapshots get made (once per malloc for example). 
Also, the error-commoning mechanism works by comparing stack snapshots, and that can really get hammered. There just doesn't seem to be any easy solution. Perhaps the best one is the idiot-solution which is essentially to ignore requests to munmap executable areas so their symbol tables never go away. Of course that has its own dangers. > - Massif comes up again. I think people tend to project their wishes for a > memory-measurement tool onto it, even when what they want isn't very > close to what Massif currently does. I guess it might help to find some Real Live Massif Users and see what they think. I agree that a lot of wishlist stuff for Massif seems to derive from armchair users of it. J |
|
From: <js...@ac...> - 2005-03-03 03:55:51
|
Nightly build on phoenix ( SuSE 9.1 ) started at 2005-03-03 03:50:00 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow buflen_check: valgrind -q ./buflen_check clientperm: valgrind -q ./clientperm custom_alloc: valgrind -q ./custom_alloc describe-block: valgrind ./describe-block doublefree: valgrind -q ./doublefree error_counts: valgrind --log-fd=-1 ./error_counts errs1: valgrind -q ./errs1 execve: valgrind -q ./execve execve2: valgrind -q --trace-children=yes ./execve2 exitprog: valgrind -q ./exitprog fprw: valgrind -q ./fprw fwrite: valgrind -q ./fwrite inits: valgrind -q ./inits inline: valgrind -q ./inline leak-0: valgrind ./leak-0 leak-cycle: valgrind --leak-resolution=high ./leak-cycle leak-regroot: valgrind ./leak-regroot leak-tree: valgrind --leak-resolution=high ./leak-tree vg_regtest: `./../../tests/filter_sink' not executable (.) make: *** [regtest] Error 9 |