You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
1
(22) |
2
(19) |
3
(8) |
4
(34) |
5
(14) |
6
(14) |
|
7
(12) |
8
(15) |
9
(15) |
10
(10) |
11
(10) |
12
(28) |
13
(11) |
|
14
(22) |
15
(29) |
16
(20) |
17
(15) |
18
(39) |
19
(11) |
20
(12) |
|
21
(8) |
22
(9) |
23
(8) |
24
(10) |
25
(9) |
26
(7) |
27
(7) |
|
28
(6) |
29
(6) |
30
(11) |
|
|
|
|
|
From: Nicholas N. <nj...@ca...> - 2004-11-02 12:36:18
|
CVS commit by nethercote:
64-bit cleanness: Converted malloc() et al to use SizeT rather than Int.
This required some tricks with casting to maintain Memcheck's silly (ie.
negative) arg checking. The allocator was also changed accordingly. It
should now be able to allocate more than 4GB blocks on 64-bit platforms.
M +8 -8 coregrind/core.h 1.47
M +6 -6 coregrind/toolfuncs.def 1.5
M +1 -1 coregrind/vg_default.c 1.25
M +135 -131 coregrind/vg_malloc2.c 1.34
M +8 -8 helgrind/hg_main.c 1.86
M +3 -0 include/basic_types.h 1.2
M +8 -8 include/tool.h.base 1.10
M +10 -10 massif/ms_main.c 1.17
M +43 -31 memcheck/mac_malloc_wrappers.c 1.15
M +2 -2 memcheck/mac_shared.h 1.24
--- valgrind/coregrind/core.h #1.46:1.47
@@ -443,14 +443,14 @@ typedef Int ArenaId;
#define VG_SLOPPY_MALLOC_SZB 4
-extern void* VG_(arena_malloc) ( ArenaId arena, Int nbytes );
+extern void* VG_(arena_malloc) ( ArenaId arena, SizeT nbytes );
extern void VG_(arena_free) ( ArenaId arena, void* ptr );
-extern void* VG_(arena_calloc) ( ArenaId arena, Int alignment,
- Int nmemb, Int nbytes );
-extern void* VG_(arena_realloc) ( ArenaId arena, void* ptr, Int alignment,
- Int size );
-extern void* VG_(arena_malloc_aligned) ( ArenaId aid, Int req_alignB,
- Int req_pszB );
+extern void* VG_(arena_calloc) ( ArenaId arena, SizeT alignment,
+ SizeT nmemb, SizeT nbytes );
+extern void* VG_(arena_realloc) ( ArenaId arena, void* ptr, SizeT alignment,
+ SizeT size );
+extern void* VG_(arena_malloc_aligned) ( ArenaId aid, SizeT req_alignB,
+ SizeT req_pszB );
-extern Int VG_(arena_payload_szB) ( ArenaId aid, void* payload );
+extern SizeT VG_(arena_payload_szB) ( ArenaId aid, void* payload );
extern void VG_(sanity_check_malloc_all) ( void );
--- valgrind/coregrind/toolfuncs.def #1.4:1.5
@@ -291,11 +291,11 @@
## malloc and friends
:malloc
-void*, malloc, Int n
-void*, __builtin_new, Int n
-void*, __builtin_vec_new, Int n
-void*, memalign, Int align, Int n
-void*, calloc, Int nmemb, Int n
+void*, malloc, SizeT n
+void*, __builtin_new, SizeT n
+void*, __builtin_vec_new, SizeT n
+void*, memalign, SizeT align, SizeT n
+void*, calloc, SizeT nmemb, SizeT n
void, free, void* p
void, __builtin_delete, void* p
void, __builtin_vec_delete, void* p
-void*, realloc, void* p, Int size
+void*, realloc, void* p, SizeT size
--- valgrind/coregrind/vg_default.c #1.24:1.25
@@ -80,5 +80,5 @@ Bool VG_(sk_malloc_called_by_scheduler)
SK_(free)(). */
__attribute__ ((weak))
-void* SK_(malloc)( Int size )
+void* SK_(malloc)( SizeT size )
{
if (VG_(sk_malloc_called_by_scheduler))
--- valgrind/coregrind/vg_malloc2.c #1.33:1.34
@@ -42,7 +42,6 @@
#define VG_N_MALLOC_LISTS 16 // do not change this
-// On 64-bit systems size_t is 64-bits, so bigger than this is possible.
-// We can worry about that when it happens...
-#define MAX_PSZB 0x7ffffff0
+// The amount you can ask for is limited only by sizeof(SizeT)...
+#define MAX_PSZB (~((SizeT)0x0))
typedef UChar UByte;
@@ -50,5 +49,5 @@ typedef UChar UByte;
/* Block layout:
- this block total szB (sizeof(Int) bytes)
+ this block total szB (sizeof(SizeT) bytes)
freelist previous ptr (sizeof(void*) bytes)
red zone bytes (depends on .rz_szB field of Arena)
@@ -56,15 +55,16 @@ typedef UChar UByte;
red zone bytes (depends on .rz_szB field of Arena)
freelist next ptr (sizeof(void*) bytes)
- this block total szB (sizeof(Int) bytes)
+ this block total szB (sizeof(SizeT) bytes)
Total size in bytes (bszB) and payload size in bytes (pszB)
are related by:
- bszB == pszB + 2*sizeof(Int) + 2*sizeof(void*) + 2*a->rz_szB
+ bszB == pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_szB
- Furthermore, both size fields in the block are negative if it is
- not in use, and positive if it is in use. A block size of zero
- is not possible, because a block always has at least two Ints and two
- pointers of overhead.
+ Furthermore, both size fields in the block have their least-significant
+ bit set if the block is not in use, and unset if it is in use.
+ (The bottom 3 or so bits are always free for this because of alignment.)
+ A block size of zero is not possible, because a block always has at
+ least two SizeTs and two pointers of overhead.
Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned. This is
@@ -101,7 +101,8 @@ typedef
struct _Superblock {
struct _Superblock* next;
- Int n_payload_bytes;
+ SizeT n_payload_bytes;
UByte padding[ VG_MIN_MALLOC_SZB -
- ((sizeof(void*) + sizeof(Int)) % VG_MIN_MALLOC_SZB) ];
+ ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
+ VG_MIN_MALLOC_SZB) ];
UByte payload_bytes[0];
}
@@ -114,12 +115,12 @@ typedef
Char* name;
Bool clientmem; // Allocates in the client address space?
- Int rz_szB; // Red zone size in bytes
- Int min_sblock_szB; // Minimum superblock size in bytes
+ UInt rz_szB; // Red zone size in bytes
+ SizeT min_sblock_szB; // Minimum superblock size in bytes
Block* freelist[VG_N_MALLOC_LISTS];
Superblock* sblocks;
// Stats only.
- UInt bytes_on_loan;
- UInt bytes_mmaped;
- UInt bytes_on_loan_max;
+ SizeT bytes_on_loan;
+ SizeT bytes_mmaped;
+ SizeT bytes_on_loan_max;
}
Arena;
@@ -130,16 +131,18 @@ typedef
/*------------------------------------------------------------*/
+#define SIZE_T_0x1 ((SizeT)0x1)
+
// Mark a bszB as in-use, and not in-use.
static __inline__
-Int mk_inuse_bszB ( Int bszB )
+SizeT mk_inuse_bszB ( SizeT bszB )
{
vg_assert(bszB != 0);
- return (bszB < 0) ? -bszB : bszB;
+ return bszB & (~SIZE_T_0x1);
}
static __inline__
-Int mk_free_bszB ( Int bszB )
+SizeT mk_free_bszB ( SizeT bszB )
{
vg_assert(bszB != 0);
- return (bszB < 0) ? bszB : -bszB;
+ return bszB | SIZE_T_0x1;
}
@@ -147,16 +150,16 @@ Int mk_free_bszB ( Int bszB )
// the size.
static __inline__
-Int mk_plain_bszB ( Int bszB )
+SizeT mk_plain_bszB ( SizeT bszB )
{
vg_assert(bszB != 0);
- return (bszB < 0) ? -bszB : bszB;
+ return bszB & (~SIZE_T_0x1);
}
// Does this bszB have the in-use attribute?
static __inline__
-Bool is_inuse_bszB ( Int bszB )
+Bool is_inuse_bszB ( SizeT bszB )
{
vg_assert(bszB != 0);
- return (bszB < 0) ? False : True;
+ return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}
@@ -164,12 +167,12 @@ Bool is_inuse_bszB ( Int bszB )
// Set and get the lower size field of a block.
static __inline__
-void set_bszB_lo ( Block* b, Int bszB )
+void set_bszB_lo ( Block* b, SizeT bszB )
{
- *(Int*)&b[0] = bszB;
+ *(SizeT*)&b[0] = bszB;
}
static __inline__
-Int get_bszB_lo ( Block* b )
+SizeT get_bszB_lo ( Block* b )
{
- return *(Int*)&b[0];
+ return *(SizeT*)&b[0];
}
@@ -184,19 +187,37 @@ UByte* last_byte ( Block* b )
// Set and get the upper size field of a block.
static __inline__
-void set_bszB_hi ( Block* b, Int bszB )
+void set_bszB_hi ( Block* b, SizeT bszB )
{
UByte* b2 = (UByte*)b;
UByte* lb = last_byte(b);
vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
- *(Int*)&lb[-sizeof(Int) + 1] = bszB;
+ *(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB;
}
static __inline__
-Int get_bszB_hi ( Block* b )
+SizeT get_bszB_hi ( Block* b )
{
UByte* lb = last_byte(b);
- return *(Int*)&lb[-sizeof(Int) + 1];
+ return *(SizeT*)&lb[-sizeof(SizeT) + 1];
}
+// Return the lower, upper and total overhead in bytes for a block.
+// These are determined purely by which arena the block lives in.
+static __inline__
+UInt overhead_szB_lo ( Arena* a )
+{
+ return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
+}
+static __inline__
+UInt overhead_szB_hi ( Arena* a )
+{
+ return a->rz_szB + sizeof(void*) + sizeof(SizeT);
+}
+static __inline__
+UInt overhead_szB ( Arena* a )
+{
+ return overhead_szB_lo(a) + overhead_szB_hi(a);
+}
+
// Given the addr of a block, return the addr of its payload.
static __inline__
@@ -204,5 +225,5 @@ UByte* get_block_payload ( Arena* a, Blo
{
UByte* b2 = (UByte*)b;
- return & b2[sizeof(Int) + sizeof(void*) + a->rz_szB];
+ return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
@@ -210,5 +231,5 @@ static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
- return (Block*)&payload[-sizeof(Int) - sizeof(void*) - a->rz_szB];
+ return (Block*)&payload[ -overhead_szB_lo(a) ];
}
@@ -219,5 +240,5 @@ void set_prev_b ( Block* b, Block* prev_
{
UByte* b2 = (UByte*)b;
- *(Block**)&b2[sizeof(Int)] = prev_p;
+ *(Block**)&b2[sizeof(SizeT)] = prev_p;
}
static __inline__
@@ -225,5 +246,5 @@ void set_next_b ( Block* b, Block* next_
{
UByte* lb = last_byte(b);
- *(Block**)&lb[-sizeof(Int) - sizeof(void*) + 1] = next_p;
+ *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
}
static __inline__
@@ -231,5 +252,5 @@ Block* get_prev_b ( Block* b )
{
UByte* b2 = (UByte*)b;
- return *(Block**)&b2[sizeof(Int)];
+ return *(Block**)&b2[sizeof(SizeT)];
}
static __inline__
@@ -237,5 +258,5 @@ Block* get_next_b ( Block* b )
{
UByte* lb = last_byte(b);
- return *(Block**)&lb[-sizeof(Int) - sizeof(void*) + 1];
+ return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
}
@@ -246,5 +267,5 @@ Block* get_predecessor_block ( Block* b
{
UByte* b2 = (UByte*)b;
- Int bszB = mk_plain_bszB( (*(Int*)&b2[-sizeof(Int)]) );
+ SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
return (Block*)&b2[-bszB];
}
@@ -252,51 +273,33 @@ Block* get_predecessor_block ( Block* b
// Read and write the lower and upper red-zone bytes of a block.
static __inline__
-void set_rz_lo_byte ( Arena* a, Block* b, Int rz_byteno, UByte v )
+void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
UByte* b2 = (UByte*)b;
- b2[sizeof(Int) + sizeof(void*) + rz_byteno] = v;
+ b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] = v;
}
static __inline__
-void set_rz_hi_byte ( Arena* a, Block* b, Int rz_byteno, UByte v )
+void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
UByte* lb = last_byte(b);
- lb[-sizeof(Int) - sizeof(void*) - rz_byteno] = v;
+ lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] = v;
}
static __inline__
-UByte get_rz_lo_byte ( Arena* a, Block* b, Int rz_byteno )
+UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
{
UByte* b2 = (UByte*)b;
- return b2[sizeof(Int) + sizeof(void*) + rz_byteno];
+ return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
}
static __inline__
-UByte get_rz_hi_byte ( Arena* a, Block* b, Int rz_byteno )
+UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
{
UByte* lb = last_byte(b);
- return lb[-sizeof(Int) - sizeof(void*) - rz_byteno];
+ return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
}
-/* Return the lower, upper and total overhead in bytes for a block.
- These are determined purely by which arena the block lives in. */
-static __inline__
-Int overhead_szB_lo ( Arena* a )
-{
- return sizeof(Int) + sizeof(void*) + a->rz_szB;
-}
-static __inline__
-Int overhead_szB_hi ( Arena* a )
-{
- return sizeof(void*) + sizeof(Int) + a->rz_szB;
-}
-static __inline__
-Int overhead_szB ( Arena* a )
-{
- return overhead_szB_lo(a) + overhead_szB_hi(a);
-}
-
// Return the minimum bszB for a block in this arena. Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
-Int min_useful_bszB ( Arena* a )
+UInt min_useful_bszB ( Arena* a )
{
return overhead_szB(a);
@@ -305,15 +308,13 @@ Int min_useful_bszB ( Arena* a )
// Convert payload size <--> block size (both in bytes).
static __inline__
-Int pszB_to_bszB ( Arena* a, Int pszB )
+SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
- vg_assert(pszB >= 0);
return pszB + overhead_szB(a);
}
static __inline__
-Int bszB_to_pszB ( Arena* a, Int bszB )
+SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
- Int pszB = bszB - overhead_szB(a);
- vg_assert(pszB >= 0);
- return pszB;
+ vg_assert(bszB >= overhead_szB(a));
+ return bszB - overhead_szB(a);
}
@@ -339,10 +340,10 @@ static Arena* arenaId_to_ArenaP ( ArenaI
// made bigger to ensure that VG_MIN_MALLOC_ALIGNMENT is observed.
static
-void arena_init ( ArenaId aid, Char* name, Int rz_szB, Int min_sblock_szB )
+void arena_init ( ArenaId aid, Char* name, UInt rz_szB, SizeT min_sblock_szB )
{
- Int i;
+ SizeT i;
Arena* a = arenaId_to_ArenaP(aid);
- vg_assert(rz_szB >= 0);
+ vg_assert(rz_szB < 128); // ensure reasonable size
vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
a->name = name;
@@ -367,5 +368,5 @@ void arena_init ( ArenaId aid, Char* nam
void VG_(print_all_arena_stats) ( void )
{
- Int i;
+ UInt i;
for (i = 0; i < VG_N_ARENAS; i++) {
Arena* a = arenaId_to_ArenaP(i);
@@ -384,5 +385,5 @@ static
void ensure_mm_init ( void )
{
- static Int client_rz_szB;
+ static UInt client_rz_szB;
static Bool init_done = False;
@@ -397,5 +398,4 @@ void ensure_mm_init ( void )
/* No particular reason for this figure, it's just smallish */
sk_assert(VG_(vg_malloc_redzone_szB) < 128);
- sk_assert(VG_(vg_malloc_redzone_szB) >= 0);
client_rz_szB = VG_(vg_malloc_redzone_szB);
@@ -435,5 +435,5 @@ void ensure_mm_init ( void )
// Align ptr p upwards to an align-sized boundary.
static
-void* align_upwards ( void* p, Int align )
+void* align_upwards ( void* p, SizeT align )
{
Addr a = (Addr)p;
@@ -445,5 +445,5 @@ void* align_upwards ( void* p, Int align
// or returns 0 (for client memory).
static
-Superblock* newSuperblock ( Arena* a, Int cszB )
+Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
// The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
@@ -520,5 +520,5 @@ Bool VG_(clo_trace_malloc) = False;
/* Minimum alignment in functions that don't specify alignment explicitly.
default: 0, i.e. use VG_MIN_MALLOC_SZB. */
-Int VG_(clo_alignment) = VG_MIN_MALLOC_SZB;
+UInt VG_(clo_alignment) = VG_MIN_MALLOC_SZB;
@@ -526,5 +526,5 @@ Bool VG_(replacement_malloc_process_cmd_
{
if (VG_CLO_STREQN(12, arg, "--alignment=")) {
- VG_(clo_alignment) = (Int)VG_(atoll)(&arg[12]);
+ VG_(clo_alignment) = (UInt)VG_(atoll)(&arg[12]);
if (VG_(clo_alignment) < VG_MIN_MALLOC_SZB
@@ -573,7 +573,6 @@ void VG_(replacement_malloc_print_debug_
// Convert a payload size in bytes to a freelist number.
static
-Int pszB_to_listNo ( Int pszB )
+UInt pszB_to_listNo ( SizeT pszB )
{
- vg_assert(pszB >= 0);
vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
pszB /= VG_MIN_MALLOC_SZB;
@@ -598,8 +597,8 @@ Int pszB_to_listNo ( Int pszB )
// What is the minimum payload size for a given list?
static
-Int listNo_to_pszB_min ( Int listNo )
+SizeT listNo_to_pszB_min ( UInt listNo )
{
- Int pszB = 0;
- vg_assert(listNo >= 0 && listNo <= VG_N_MALLOC_LISTS);
+ SizeT pszB = 0;
+ vg_assert(listNo <= VG_N_MALLOC_LISTS);
while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
return pszB;
@@ -608,7 +607,7 @@ Int listNo_to_pszB_min ( Int listNo )
// What is the maximum payload size for a given list?
static
-Int listNo_to_pszB_max ( Int listNo )
+SizeT listNo_to_pszB_max ( UInt listNo )
{
- vg_assert(listNo >= 0 && listNo <= VG_N_MALLOC_LISTS);
+ vg_assert(listNo <= VG_N_MALLOC_LISTS);
if (listNo == VG_N_MALLOC_LISTS-1) {
return MAX_PSZB;
@@ -624,10 +623,10 @@ Int listNo_to_pszB_max ( Int listNo )
blocks rather than cruise through the address space. */
static
-void swizzle ( Arena* a, Int lno )
+void swizzle ( Arena* a, UInt lno )
{
Block* p_best;
Block* pp;
Block* pn;
- Int i;
+ UInt i;
p_best = a->freelist[lno];
@@ -657,10 +656,10 @@ void swizzle ( Arena* a, Int lno )
#define VG_REDZONE_HI_MASK 0x7c
-// Do some crude sanity checks on a chunk.
+// Do some crude sanity checks on a Block.
static
Bool blockSane ( Arena* a, Block* b )
{
# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
- Int i;
+ UInt i;
if (get_bszB_lo(b) != get_bszB_hi(b))
{BLEAT("sizes");return False;}
@@ -683,5 +682,6 @@ static
void ppSuperblocks ( Arena* a )
{
- Int i, b_bszB, blockno;
+ UInt i, blockno;
+ SizeT b_bszB;
Block* b;
Superblock* sb = a->sblocks;
@@ -708,11 +708,12 @@ void ppSuperblocks ( Arena* a )
static void sanity_check_malloc_arena ( ArenaId aid )
{
- Int i, superblockctr, b_bszB, b_pszB, blockctr_sb, blockctr_li;
- Int blockctr_sb_free, listno, list_min_pszB, list_max_pszB;
+ UInt i, superblockctr, blockctr_sb, blockctr_li;
+ UInt blockctr_sb_free, listno;
+ SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
Superblock* sb;
Bool thisFree, lastWasFree;
Block* b;
Block* b_prev;
- UInt arena_bytes_on_loan;
+ SizeT arena_bytes_on_loan;
Arena* a;
@@ -822,5 +823,5 @@ static void sanity_check_malloc_arena (
void VG_(sanity_check_malloc_all) ( void )
{
- Int i;
+ UInt i;
for (i = 0; i < VG_N_ARENAS; i++)
sanity_check_malloc_arena ( i );
@@ -836,5 +837,5 @@ Bool VG_(is_empty_arena) ( ArenaId aid )
Superblock* sb;
Block* b;
- Int b_bszB;
+ SizeT b_bszB;
ensure_mm_init();
@@ -862,8 +863,7 @@ Bool VG_(is_empty_arena) ( ArenaId aid )
static
-void mkFreeBlock ( Arena* a, Block* b, Int bszB, Int b_lno )
+void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
- Int pszB = bszB_to_pszB(a, bszB);
- vg_assert(pszB >= 0);
+ SizeT pszB = bszB_to_pszB(a, bszB);
vg_assert(b_lno == pszB_to_listNo(pszB));
// Set the size fields and indicate not-in-use.
@@ -892,7 +892,7 @@ void mkFreeBlock ( Arena* a, Block* b, I
// appropriately.
static
-void mkInuseBlock ( Arena* a, Block* b, UInt bszB )
+void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
- Int i;
+ UInt i;
vg_assert(bszB >= min_useful_bszB(a));
set_bszB_lo(b, mk_inuse_bszB(bszB));
@@ -913,7 +913,7 @@ void mkInuseBlock ( Arena* a, Block* b,
// Remove a block from a given list. Does no sanity checking.
static
-void unlinkBlock ( Arena* a, Block* b, Int listno )
+void unlinkBlock ( Arena* a, Block* b, UInt listno )
{
- vg_assert(listno >= 0 && listno < VG_N_MALLOC_LISTS);
+ vg_assert(listno < VG_N_MALLOC_LISTS);
if (get_prev_b(b) == b) {
// Only one element in the list; treat it specially.
@@ -939,13 +939,14 @@ void unlinkBlock ( Arena* a, Block* b, I
// Align the request size.
static __inline__
-Int align_req_pszB ( Int req_pszB )
+SizeT align_req_pszB ( SizeT req_pszB )
{
- Int n = VG_MIN_MALLOC_SZB-1;
+ SizeT n = VG_MIN_MALLOC_SZB-1;
return ((req_pszB + n) & (~n));
}
-void* VG_(arena_malloc) ( ArenaId aid, Int req_pszB )
+void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
{
- Int req_bszB, frag_bszB, b_bszB, lno;
+ SizeT req_bszB, frag_bszB, b_bszB;
+ UInt lno;
Superblock* new_sb;
Block* b = NULL;
@@ -958,5 +959,5 @@ void* VG_(arena_malloc) ( ArenaId aid, I
a = arenaId_to_ArenaP(aid);
- vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);
+ vg_assert(req_pszB < MAX_PSZB);
req_pszB = align_req_pszB(req_pszB);
req_bszB = pszB_to_bszB(a, req_pszB);
@@ -993,5 +994,5 @@ void* VG_(arena_malloc) ( ArenaId aid, I
// Ok, we can allocate from b, which lives in list lno.
vg_assert(b != NULL);
- vg_assert(lno >= 0 && lno < VG_N_MALLOC_LISTS);
+ vg_assert(lno < VG_N_MALLOC_LISTS);
vg_assert(a->freelist[lno] != NULL);
b_bszB = mk_plain_bszB(get_bszB_lo(b));
@@ -1040,5 +1041,6 @@ void VG_(arena_free) ( ArenaId aid, void
Block* other;
Block* b;
- Int b_bszB, b_pszB, other_bszB, b_listno;
+ SizeT b_bszB, b_pszB, other_bszB;
+ UInt b_listno;
Arena* a;
@@ -1159,10 +1161,10 @@ void VG_(arena_free) ( ArenaId aid, void
*/
-void* VG_(arena_malloc_aligned) ( ArenaId aid, Int req_alignB, Int req_pszB )
+void* VG_(arena_malloc_aligned) ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
{
- Int base_pszB_req, base_pszB_act, frag_bszB;
+ SizeT base_pszB_req, base_pszB_act, frag_bszB;
Block *base_b, *align_b;
UByte *base_p, *align_p;
- UInt saved_bytes_on_loan;
+ SizeT saved_bytes_on_loan;
Arena* a;
@@ -1172,5 +1174,5 @@ void* VG_(arena_malloc_aligned) ( ArenaI
a = arenaId_to_ArenaP(aid);
- vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);
+ vg_assert(req_pszB < MAX_PSZB);
// Check that the requested alignment seems reasonable; that is, is
@@ -1253,5 +1255,5 @@ void* VG_(arena_malloc_aligned) ( ArenaI
-Int VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
+SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
{
Arena* a = arenaId_to_ArenaP(aid);
@@ -1265,7 +1267,8 @@ Int VG_(arena_payload_szB) ( ArenaId aid
/*------------------------------------------------------------*/
-void* VG_(arena_calloc) ( ArenaId aid, Int alignB, Int nmemb, Int nbytes )
+void* VG_(arena_calloc) ( ArenaId aid, SizeT alignB, SizeT nmemb, SizeT nbytes )
{
- Int i, size;
+ UInt i;
+ SizeT size;
UChar* p;
@@ -1273,5 +1276,5 @@ void* VG_(arena_calloc) ( ArenaId aid, I
size = nmemb * nbytes;
- vg_assert(size >= 0);
+ vg_assert(size >= nmemb && size >= nbytes); // check against overflow
if (alignB == VG_MIN_MALLOC_SZB)
@@ -1289,8 +1292,9 @@ void* VG_(arena_calloc) ( ArenaId aid, I
void* VG_(arena_realloc) ( ArenaId aid, void* ptr,
- Int req_alignB, Int req_pszB )
+ SizeT req_alignB, SizeT req_pszB )
{
Arena* a;
- Int old_bszB, old_pszB, i;
+ SizeT old_bszB, old_pszB;
+ UInt i;
UChar *p_old, *p_new;
Block* b;
@@ -1301,5 +1305,5 @@ void* VG_(arena_realloc) ( ArenaId aid,
a = arenaId_to_ArenaP(aid);
- vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);
+ vg_assert(req_pszB < MAX_PSZB);
b = get_payload_block(a, ptr);
@@ -1339,5 +1343,5 @@ void* VG_(arena_realloc) ( ArenaId aid,
// All just wrappers to avoid exposing arenas to tools.
-void* VG_(malloc) ( Int nbytes )
+void* VG_(malloc) ( SizeT nbytes )
{
return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
@@ -1349,15 +1353,15 @@ void VG_(free) ( void* ptr )
}
-void* VG_(calloc) ( Int nmemb, Int nbytes )
+void* VG_(calloc) ( SizeT nmemb, SizeT nbytes )
{
return VG_(arena_calloc) ( VG_AR_TOOL, VG_MIN_MALLOC_SZB, nmemb, nbytes );
}
-void* VG_(realloc) ( void* ptr, Int size )
+void* VG_(realloc) ( void* ptr, SizeT size )
{
return VG_(arena_realloc) ( VG_AR_TOOL, ptr, VG_MIN_MALLOC_SZB, size );
}
-void* VG_(malloc_aligned) ( Int req_alignB, Int req_pszB )
+void* VG_(malloc_aligned) ( SizeT req_alignB, SizeT req_pszB )
{
return VG_(arena_malloc_aligned) ( VG_AR_TOOL, req_alignB, req_pszB );
@@ -1365,5 +1369,5 @@ void* VG_(malloc_aligned) ( Int req_alig
-void* VG_(cli_malloc) ( UInt align, Int nbytes )
+void* VG_(cli_malloc) ( SizeT align, SizeT nbytes )
{
// 'align' should be valid by now. VG_(arena_malloc_aligned)() will
@@ -1381,5 +1385,5 @@ void VG_(cli_free) ( void* p )
-Bool VG_(addr_is_in_block)( Addr a, Addr start, UInt size )
+Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size )
{
return (start - VG_(vg_malloc_redzone_szB) <= a
--- valgrind/helgrind/hg_main.c #1.85:1.86
@@ -1811,5 +1811,5 @@ UInt VG_(vg_malloc_redzone_szB) = 8;
protections correctly. */
-static void add_HG_Chunk ( ThreadId tid, Addr p, UInt size )
+static void add_HG_Chunk ( ThreadId tid, Addr p, SizeT size )
{
HG_Chunk* hc;
@@ -1826,5 +1826,5 @@ static void add_HG_Chunk ( ThreadId tid,
/* Allocate memory and note change in memory available */
static __inline__
-void* alloc_and_new_mem ( Int size, UInt alignment, Bool is_zeroed )
+void* alloc_and_new_mem ( SizeT size, SizeT alignment, Bool is_zeroed )
{
Addr p;
@@ -1843,25 +1843,25 @@ void* alloc_and_new_mem ( Int size, UInt
}
-void* SK_(malloc) ( Int n )
+void* SK_(malloc) ( SizeT n )
{
return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
}
-void* SK_(__builtin_new) ( Int n )
+void* SK_(__builtin_new) ( SizeT n )
{
return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
}
-void* SK_(__builtin_vec_new) ( Int n )
+void* SK_(__builtin_vec_new) ( SizeT n )
{
return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
}
-void* SK_(memalign) ( Int align, Int n )
+void* SK_(memalign) ( SizeT align, SizeT n )
{
return alloc_and_new_mem ( n, align, /*is_zeroed*/False );
}
-void* SK_(calloc) ( Int nmemb, Int size )
+void* SK_(calloc) ( SizeT nmemb, SizeT size )
{
return alloc_and_new_mem ( nmemb*size, VG_(clo_alignment),
@@ -1945,5 +1945,5 @@ void SK_(__builtin_vec_delete) ( void* p
}
-void* SK_(realloc) ( void* p, Int new_size )
+void* SK_(realloc) ( void* p, SizeT new_size )
{
HG_Chunk *hc;
--- valgrind/include/basic_types.h #1.1:1.2
@@ -55,4 +55,7 @@ typedef signed long long Long;
typedef UWord Addr; // 32 64
+typedef UWord SizeT; // 32 64
+typedef Word SSizeT; // 32 64
+
typedef UChar Bool; // 8 8
#define False ((Bool)0)
--- valgrind/include/tool.h.base #1.9:1.10
@@ -352,9 +352,9 @@
/* stdlib.h */
-extern void* VG_(malloc) ( Int nbytes );
+extern void* VG_(malloc) ( SizeT nbytes );
extern void VG_(free) ( void* p );
-extern void* VG_(calloc) ( Int n, Int nbytes );
-extern void* VG_(realloc) ( void* p, Int size );
-extern void* VG_(malloc_aligned) ( Int align_bytes, Int nbytes );
+extern void* VG_(calloc) ( SizeT n, SizeT nbytes );
+extern void* VG_(realloc) ( void* p, SizeT size );
+extern void* VG_(malloc_aligned) ( SizeT align_bytes, SizeT nbytes );
extern void VG_(print_malloc_stats) ( void );
@@ -1736,5 +1736,5 @@
be overridden by tool -- but must be done so *statically*, eg:
- Int VG_(vg_malloc_redzone_szB) = 4;
+ UInt VG_(vg_malloc_redzone_szB) = 4;
It can't be done from a function like SK_(pre_clo_init)(). So it can't,
@@ -1743,9 +1743,9 @@
/* Can be called from SK_(malloc) et al to do the actual alloc/freeing. */
-extern void* VG_(cli_malloc) ( UInt align, Int nbytes );
+extern void* VG_(cli_malloc) ( SizeT align, SizeT nbytes );
extern void VG_(cli_free) ( void* p );
/* Check if an address is within a range, allowing for redzones at edges */
-extern Bool VG_(addr_is_in_block)( Addr a, Addr start, UInt size );
+extern Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size );
/* ------------------------------------------------------------------ */
@@ -1760,5 +1760,5 @@
/* Minimum alignment in functions that don't specify alignment explicitly.
default: 0, i.e. use default of the machine (== 4) */
-extern Int VG_(clo_alignment);
+extern UInt VG_(clo_alignment);
extern Bool VG_(replacement_malloc_process_cmd_line_option) ( Char* arg );
--- valgrind/massif/ms_main.c #1.16:1.17
@@ -179,5 +179,5 @@ typedef
struct _HP_Chunk* next;
Addr data; // Ptr to actual block
- UInt size; // Size requested
+ SizeT size; // Size requested
XPt* where; // Where allocated; bottom-XPt
}
@@ -345,5 +345,5 @@ static XPt* alloc_xpt;
// Cheap allocation for blocks that never need to be freed. Saves about 10%
// for Konqueror startup with --depth=40.
-static void* perm_malloc(UInt n_bytes)
+static void* perm_malloc(SizeT n_bytes)
{
static Addr hp = 0; // current heap pointer
@@ -665,5 +665,5 @@ static void hp_census(void);
static
-void* new_block ( void* p, Int size, UInt align, Bool is_zeroed )
+void* new_block ( void* p, SizeT size, SizeT align, Bool is_zeroed )
{
HP_Chunk* hc;
@@ -739,25 +739,25 @@ void die_block ( void* p, Bool custom_fr
-void* SK_(malloc) ( Int n )
+void* SK_(malloc) ( SizeT n )
{
return new_block( NULL, n, VG_(clo_alignment), /*is_zeroed*/False );
}
-void* SK_(__builtin_new) ( Int n )
+void* SK_(__builtin_new) ( SizeT n )
{
return new_block( NULL, n, VG_(clo_alignment), /*is_zeroed*/False );
}
-void* SK_(__builtin_vec_new) ( Int n )
+void* SK_(__builtin_vec_new) ( SizeT n )
{
return new_block( NULL, n, VG_(clo_alignment), /*is_zeroed*/False );
}
-void* SK_(calloc) ( Int m, Int size )
+void* SK_(calloc) ( SizeT m, SizeT size )
{
return new_block( NULL, m*size, VG_(clo_alignment), /*is_zeroed*/True );
}
-void *SK_(memalign)( Int align, Int n )
+void *SK_(memalign)( SizeT align, SizeT n )
{
return new_block( NULL, n, align, False );
@@ -779,5 +779,5 @@ void SK_(__builtin_vec_delete) ( void* p
}
-void* SK_(realloc) ( void* p_old, Int new_size )
+void* SK_(realloc) ( void* p_old, SizeT new_size )
{
HP_Chunk* hc;
@@ -785,5 +785,5 @@ void* SK_(realloc) ( void* p_old, Int ne
Int i;
void* p_new;
- UInt old_size;
+ SizeT old_size;
XPt *old_where, *new_where;
--- valgrind/memcheck/mac_malloc_wrappers.c #1.14:1.15
@@ -38,7 +38,7 @@
/* Stats ... */
-static UInt cmalloc_n_mallocs = 0;
-static UInt cmalloc_n_frees = 0;
-static UInt cmalloc_bs_mallocd = 0;
+static SizeT cmalloc_n_mallocs = 0;
+static SizeT cmalloc_n_frees = 0;
+static SizeT cmalloc_bs_mallocd = 0;
/* We want a 16B redzone on heap blocks for Addrcheck and Memcheck */
@@ -133,5 +133,5 @@ MAC_Chunk* MAC_(first_matching_freed_MAC
/* Allocate its shadow chunk, put it on the appropriate list. */
static
-void add_MAC_Chunk ( Addr p, UInt size, MAC_AllocKind kind, VgHashTable table)
+void add_MAC_Chunk ( Addr p, SizeT size, MAC_AllocKind kind, VgHashTable table)
{
MAC_Chunk* mc;
@@ -159,7 +159,29 @@ void add_MAC_Chunk ( Addr p, UInt size,
/*------------------------------------------------------------*/
+static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
+{
+ // Cast to a signed type to catch any unexpectedly negative args. We're
+ // assuming here that the size asked for is not greater than 2^31 bytes
+ // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
+ if ((SSizeT)sizeB < 0) {
+ VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to %s()", sizeB, fn );
+ return True;
+ }
+ return False;
+}
+
+static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
+{
+ if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
+ VG_(message)(Vg_UserMsg, "Warning: silly args (%d,%d) to calloc()",
+ n, sizeB);
+ return True;
+ }
+ return False;
+}
+
/* Allocate memory and note change in memory available */
__inline__
-void* MAC_(new_block) ( Addr p, UInt size, UInt align, UInt rzB,
+void* MAC_(new_block) ( Addr p, SizeT size, SizeT align, UInt rzB,
Bool is_zeroed, MAC_AllocKind kind, VgHashTable table)
{
@@ -192,8 +214,7 @@ void* MAC_(new_block) ( Addr p, UInt siz
}
-void* SK_(malloc) ( Int n )
+void* SK_(malloc) ( SizeT n )
{
- if (n < 0) {
- VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to malloc()", n );
+ if (complain_about_silly_args(n, "malloc")) {
return NULL;
} else {
@@ -204,8 +225,7 @@ void* SK_(malloc) ( Int n )
}
-void* SK_(__builtin_new) ( Int n )
+void* SK_(__builtin_new) ( SizeT n )
{
- if (n < 0) {
- VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to __builtin_new()", n);
+ if (complain_about_silly_args(n, "__builtin_new")) {
return NULL;
} else {
@@ -216,9 +236,7 @@ void* SK_(__builtin_new) ( Int n )
}
-void* SK_(__builtin_vec_new) ( Int n )
+void* SK_(__builtin_vec_new) ( SizeT n )
{
- if (n < 0) {
- VG_(message)(Vg_UserMsg,
- "Warning: silly arg (%d) to __builtin_vec_new()", n );
+ if (complain_about_silly_args(n, "__builtin_vec_new")) {
return NULL;
} else {
@@ -229,8 +247,7 @@ void* SK_(__builtin_vec_new) ( Int n )
}
-void* SK_(memalign) ( Int align, Int n )
+void* SK_(memalign) ( SizeT align, SizeT n )
{
- if (n < 0) {
- VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to memalign()", n);
+ if (complain_about_silly_args(n, "memalign")) {
return NULL;
} else {
@@ -241,9 +258,7 @@ void* SK_(memalign) ( Int align, Int n )
}
-void* SK_(calloc) ( Int nmemb, Int size1 )
+void* SK_(calloc) ( SizeT nmemb, SizeT size1 )
{
- if (nmemb < 0 || size1 < 0) {
- VG_(message)(Vg_UserMsg, "Warning: silly args (%d,%d) to calloc()",
- nmemb, size1 );
+ if (complain_about_silly_args2(nmemb, size1)) {
return NULL;
} else {
@@ -256,5 +271,5 @@ void* SK_(calloc) ( Int nmemb, Int size1
static
void die_and_free_mem ( MAC_Chunk* mc,
- MAC_Chunk** prev_chunks_next_ptr, UInt rzB )
+ MAC_Chunk** prev_chunks_next_ptr, SizeT rzB )
{
/* Note: ban redzones again -- just in case user de-banned them
@@ -322,5 +337,5 @@ void SK_(__builtin_vec_delete) ( void* p
}
-void* SK_(realloc) ( void* p, Int new_size )
+void* SK_(realloc) ( void* p, SizeT new_size )
{
MAC_Chunk *mc;
@@ -335,9 +350,6 @@ void* SK_(realloc) ( void* p, Int new_si
cmalloc_bs_mallocd += new_size;
- if (new_size < 0) {
- VG_(message)(Vg_UserMsg,
- "Warning: silly arg (%d) to realloc()", new_size );
+ if (complain_about_silly_args(new_size, "realloc"))
return NULL;
- }
/* First try and find the block. */
@@ -466,5 +478,5 @@ void MAC_(destroy_mempool)(Addr pool)
}
-void MAC_(mempool_alloc)(Addr pool, Addr addr, UInt size)
+void MAC_(mempool_alloc)(Addr pool, Addr addr, SizeT size)
{
MAC_Mempool* mp;
@@ -516,5 +528,5 @@ typedef
struct {
UInt nblocks;
- UInt nbytes;
+ SizeT nbytes;
}
MallocStats;
--- valgrind/memcheck/mac_shared.h #1.23:1.24
@@ -305,5 +305,5 @@ extern void MAC_(clear_MAC_Error)
extern Bool MAC_(shared_recognised_suppression) ( Char* name, Supp* su );
-extern void* MAC_(new_block) ( Addr p, UInt size, UInt align, UInt rzB,
+extern void* MAC_(new_block) ( Addr p, SizeT size, SizeT align, UInt rzB,
Bool is_zeroed, MAC_AllocKind kind,
VgHashTable table);
@@ -312,5 +312,5 @@ extern void MAC_(handle_free) ( Addr p,
extern void MAC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed);
extern void MAC_(destroy_mempool)(Addr pool);
-extern void MAC_(mempool_alloc)(Addr pool, Addr addr, UInt size);
+extern void MAC_(mempool_alloc)(Addr pool, Addr addr, SizeT size);
extern void MAC_(mempool_free)(Addr pool, Addr addr);
|
|
From: Tom H. <th...@cy...> - 2004-11-02 10:57:00
|
In message <Pin...@he...>
Nicholas Nethercote <nj...@ca...> wrote:
> On Tue, 2 Nov 2004, Tom Hughes wrote:
>
>> Make sure we are using perl 5.6.0 as earlier versions won't work.
>>
>> +use 5.006;
>
> Is that right? The numbers don't match... does Perl use a wacky
> numbering scheme?
Yes ;-)
Prior to version 5.6 the numbers were written as 5.005_03, which is
exactly equivalent to 5.00503 (the _ is syntactic sugar): major
version 5, minor version 5, patch level 3.
From 5.6 onward they started using 5.6.1 as the style for version
numbers and added a language construct that allows v5.6.1 to be
written, which is equivalent to 5.006001.
Of course if you want an old version to parse it right then using the
original form is best.
Tom
--
Tom Hughes (th...@cy...)
Software Engineer, Cyberscience Corporation
http://www.cyberscience.com/
|
|
From: Nicholas N. <nj...@ca...> - 2004-11-02 10:02:41
|
On Tue, 2 Nov 2004, Tom Hughes wrote: > Make sure we are using perl 5.6.0 as earlier versions won't work. > > +use 5.006; Is that right? The numbers don't match... does Perl use a wacky numbering scheme? |
|
From: Tom H. <th...@cy...> - 2004-11-02 09:49:21
|
CVS commit by thughes: Make sure we are using perl 5.6.0 as earlier versions won't work. M +1 -0 gen_insn_test.pl 1.2 --- valgrind/none/tests/x86/gen_insn_test.pl #1.1:1.2 @@ -1,4 +1,5 @@ #!/usr/bin/perl +use 5.006; use strict; use warnings; |
|
From: Tom H. <th...@cy...> - 2004-11-02 09:49:21
|
CVS commit by thughes: Make sure we are using perl 5.6.0 as earlier versions won't work. MERGE TO STABLE M +1 -0 gen_insn_test.pl 1.8.2.1 --- valgrind/none/tests/gen_insn_test.pl #1.8:1.8.2.1 @@ -1,4 +1,5 @@ #!/usr/bin/perl +use 5.006; use strict; use warnings; |
|
From: Tom H. <th...@cy...> - 2004-11-02 09:45:31
|
CVS commit by thughes:
Use -w instead of "use warnings" to enable warnings so that older
versions of perl can handle the script.
M +1 -2 gen_intercepts.pl 1.2
M +1 -2 gen_toolint.pl 1.6
--- valgrind/coregrind/gen_intercepts.pl #1.1:1.2
@@ -1,3 +1,3 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
# This file is part of Valgrind, an extensible x86 protected-mode
@@ -25,5 +25,4 @@
use strict;
-use warnings;
while(<>) {
--- valgrind/coregrind/gen_toolint.pl #1.5:1.6
@@ -1,3 +1,3 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
# This file is part of Valgrind, an extensible x86 protected-mode
@@ -25,5 +25,4 @@
use strict;
-use warnings;
my $output = shift @ARGV;
|
|
From: Tom H. <th...@cy...> - 2004-11-02 09:45:22
|
CVS commit by thughes:
Use -w instead of "use warnings" to enable warnings so that older
versions of perl can handle the script.
MERGE TO STABLE
M +1 -2 gen_intercepts.pl 1.1.2.1
M +1 -2 gen_toolint.pl 1.3.2.1
--- valgrind/coregrind/gen_intercepts.pl #1.1:1.1.2.1
@@ -1,3 +1,3 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
# This file is part of Valgrind, an extensible x86 protected-mode
@@ -25,5 +25,4 @@
use strict;
-use warnings;
while(<>) {
--- valgrind/coregrind/gen_toolint.pl #1.3:1.3.2.1
@@ -1,3 +1,3 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
# This file is part of Valgrind, an extensible x86 protected-mode
@@ -25,5 +25,4 @@
use strict;
-use warnings;
my $output = shift @ARGV;
|
|
From: Nicholas N. <nj...@ca...> - 2004-11-02 09:28:25
|
On Mon, 1 Nov 2004, Maurice van der Pot wrote: > Yesterday I submitted a wishlist item to bugzilla that can be found here: > > http://bugs.kde.org/show_bug.cgi?id=92456 > > The idea is to not only report the use of undefined values, but also > where the undefined values were copied from. The comments on the bug > report describe it in more detail. > > > What I am interested in is how feasible people think the approach from > comment #2 onwards is. The feasibility depends on things like how much > copying of undefined values is done and for how long the copies persist. > Again the report describes it more clearly. > > I would like to do some testing to gather numbers on this, but maybe > there are people on this list that already have insights into this > or even know of some things that have been overlooked. Note that some further discussion of this has taken place at bugs.kde.org/show_bug.cgi?id=92456, in case anyone wants to follow it. N |
|
From: Nicholas N. <nj...@ca...> - 2004-11-02 09:13:15
|
CVS commit by nethercote: clarify debugging instructions M +7 -1 README_DEVELOPERS 1.4 --- valgrind/README_DEVELOPERS #1.3:1.4 @@ -35,5 +35,7 @@ Debugging Valgrind with GDB ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To debug Valgrind itself with GDB, start Valgrind like this: +To debug stage 1 just run it under GDB in the normal way. + +To debug Valgrind proper (stage 2) with GDB, start Valgrind like this: valgrind --tool=none --wait-for-gdb=yes <prog> @@ -48,2 +50,6 @@ jump *$eip +Code that runs in the target program such as the pthread replacement +code or the malloc replacement code would have to be debugged as part +of the target program, probably by attaching a debugger after it has +started. We are not sure if this would work, however. |
|
From: <js...@ac...> - 2004-11-02 03:56:23
|
Nightly build on phoenix ( SuSE 9.1 ) started at 2004-11-02 03:50:01 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow insn_basic: valgrind ./insn_basic insn_cmov: valgrind ./insn_cmov insn_fpu: valgrind ./insn_fpu insn_mmx: valgrind ./insn_mmx insn_mmxext: (cpu_test failed, skipping) insn_sse: valgrind ./insn_sse insn_sse2: (cpu_test failed, skipping) int: valgrind ./int pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert seg_override: valgrind ./seg_override -- Finished tests in none/tests/x86 ------------------------------------ yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 177 tests, 2 stderr failures, 0 stdout failures ================= corecheck/tests/fdleak_fcntl (stderr) memcheck/tests/writev (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <to...@co...> - 2004-11-02 03:26:30
|
Nightly build on dunsmere ( Fedora Core 2 ) started at 2004-11-02 03:20:03 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow insn_sse2: (cpu_test failed, skipping) int: valgrind ./int pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert seg_override: valgrind ./seg_override -- Finished tests in none/tests/x86 ------------------------------------ yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 182 tests, 8 stderr failures, 0 stdout failures ================= corecheck/tests/fdleak_cmsg (stderr) corecheck/tests/fdleak_fcntl (stderr) corecheck/tests/fdleak_ipv4 (stderr) corecheck/tests/fdleak_socketpair (stderr) memcheck/tests/buflen_check (stderr) memcheck/tests/execve (stderr) memcheck/tests/execve2 (stderr) memcheck/tests/writev (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2004-11-02 03:20:33
|
Nightly build on audi ( Red Hat 9 ) started at 2004-11-02 03:15:02 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow insn_sse2: (cpu_test failed, skipping) int: valgrind ./int pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert seg_override: valgrind ./seg_override -- Finished tests in none/tests/x86 ------------------------------------ yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 182 tests, 8 stderr failures, 0 stdout failures ================= corecheck/tests/fdleak_cmsg (stderr) corecheck/tests/fdleak_fcntl (stderr) corecheck/tests/fdleak_ipv4 (stderr) corecheck/tests/fdleak_socketpair (stderr) memcheck/tests/buflen_check (stderr) memcheck/tests/execve (stderr) memcheck/tests/execve2 (stderr) memcheck/tests/writev (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2004-11-02 03:13:33
|
Nightly build on ginetta ( Red Hat 8.0 ) started at 2004-11-02 03:10:02 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow cpuid: valgrind ./cpuid dastest: valgrind ./dastest fpu_lazy_eflags: valgrind ./fpu_lazy_eflags insn_basic: valgrind ./insn_basic insn_cmov: valgrind ./insn_cmov insn_fpu: valgrind ./insn_fpu insn_mmx: valgrind ./insn_mmx insn_mmxext: valgrind ./insn_mmxext insn_sse: valgrind ./insn_sse insn_sse2: (cpu_test failed, skipping) int: valgrind ./int pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert seg_override: valgrind ./seg_override -- Finished tests in none/tests/x86 ------------------------------------ yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 182 tests, 0 stderr failures, 0 stdout failures ================= |
|
From: Tom H. <th...@cy...> - 2004-11-02 03:08:18
|
Nightly build on alvis ( Red Hat 7.3 ) started at 2004-11-02 03:05:02 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow fpu_lazy_eflags: valgrind ./fpu_lazy_eflags insn_basic: valgrind ./insn_basic insn_cmov: valgrind ./insn_cmov insn_fpu: valgrind ./insn_fpu insn_mmx: valgrind ./insn_mmx insn_mmxext: valgrind ./insn_mmxext insn_sse: valgrind ./insn_sse insn_sse2: (cpu_test failed, skipping) int: valgrind ./int pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert seg_override: valgrind ./seg_override -- Finished tests in none/tests/x86 ------------------------------------ yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 182 tests, 1 stderr failure, 0 stdout failures ================= memcheck/tests/vgtest_ume (stderr) make: *** [regtest] Error 1 |
|
From: Tom H. <th...@cy...> - 2004-11-02 03:03:52
|
Nightly build on standard ( Red Hat 7.2 ) started at 2004-11-02 03:00:01 GMT Checking out source tree ... done Configuring ... done Building ... done Running regression tests ... done Last 20 lines of log.verbose follow fpu_lazy_eflags: valgrind ./fpu_lazy_eflags insn_basic: valgrind ./insn_basic insn_cmov: valgrind ./insn_cmov insn_fpu: valgrind ./insn_fpu insn_mmx: valgrind ./insn_mmx insn_mmxext: valgrind ./insn_mmxext insn_sse: valgrind ./insn_sse insn_sse2: (cpu_test failed, skipping) int: valgrind ./int pushpopseg: valgrind ./pushpopseg rcl_assert: valgrind ./rcl_assert seg_override: valgrind ./seg_override -- Finished tests in none/tests/x86 ------------------------------------ yield: valgrind ./yield -- Finished tests in none/tests ---------------------------------------- == 182 tests, 1 stderr failure, 0 stdout failures ================= memcheck/tests/vgtest_ume (stderr) make: *** [regtest] Error 1 |
|
From: Nicholas N. <nj...@ca...> - 2004-11-01 19:37:30
|
On Mon, 1 Nov 2004, Robert Walsh wrote: > Looks like someone made a change and forgot to check in a file called > "basic_types.c" - my builds are failing right now. Whoops, my bad; the file is actually called basic_types.h... now fixed. N |
|
From: Nicholas N. <nj...@ca...> - 2004-11-01 19:36:51
|
CVS commit by nethercote:
unbreak compilation...
M +1 -1 Makefile.am 1.12
--- valgrind/include/Makefile.am #1.11:1.12
@@ -10,5 +10,5 @@
incinc_HEADERS = \
- basic_types.c \
+ basic_types.h \
tool.h \
tool_asm.h \
|
|
From: Robert W. <rj...@du...> - 2004-11-01 19:04:48
|
Hi guys, Looks like someone made a change and forgot to check in a file called "basic_types.c" - my builds are failing right now. Regards, Robert. --=20 Robert Walsh Amalgamated Durables, Inc. - "We don't make the things you buy." Email: rj...@du... |
|
From: Nicholas N. <nj...@ca...> - 2004-11-01 18:46:21
|
CVS commit by nethercote: wibble M +2 -2 .cvsignore 1.4 --- valgrind/addrcheck/tests/.cvsignore #1.3:1.4 @@ -1,6 +1,6 @@ Makefile.in Makefile -*.stdout.diff -*.stderr.diff +*.stdout.diff* +*.stderr.diff* *.stdout.out *.stderr.out |
|
From: Nicholas N. <nj...@ca...> - 2004-11-01 18:42:27
|
CVS commit by nethercote:
Make ESZ less public.
M +8 -0 ume.c 1.38
M +0 -8 ume.h 1.16
--- valgrind/coregrind/ume.c #1.37:1.38
@@ -47,4 +47,12 @@
#include "ume.h"
+#if ELFSZ == 64
+#define ESZ(x) Elf64_##x
+#elif ELFSZ == 32
+#define ESZ(x) Elf32_##x
+#else
+#error ELFSZ needs to ==32 or ==64
+#endif
+
struct elfinfo
{
--- valgrind/coregrind/ume.h #1.15:1.16
@@ -47,12 +47,4 @@ void foreach_map(int (*fn)(char *start,
void* extra);
-#if ELFSZ == 64
-#define ESZ(x) Elf64_##x
-#elif ELFSZ == 32
-#define ESZ(x) Elf32_##x
-#else
-#error ELFSZ needs to ==32 or ==64
-#endif
-
// Jump to a new 'ip' with the stack 'sp'.
void jmp_with_stack(Addr ip, Addr sp) __attribute__((noreturn));
|
|
From: Nicholas N. <nj...@ca...> - 2004-11-01 18:38:11
|
CVS commit by nethercote:
Don't need to use addr_t anymore, use Addr instead, for consistency.
M +2 -2 coregrind/stage1.c 1.30
M +1 -1 coregrind/ume.c 1.37
M +10 -13 coregrind/ume.h 1.15
M +11 -11 coregrind/vg_main.c 1.226
M +1 -1 coregrind/x86/jmp_with_stack.c 1.2
M +1 -1 memcheck/tests/vgtest_ume.c 1.6
--- valgrind/coregrind/stage1.c #1.29:1.30
@@ -298,5 +298,5 @@ static void main2(void)
}
- jmp_with_stack(info.init_eip, (addr_t)esp);
+ jmp_with_stack(info.init_eip, (Addr)esp);
}
@@ -320,5 +320,5 @@ int main(int argc, char** argv)
/* move onto another stack so we can play with the main one */
- jmp_with_stack((addr_t)main2, (addr_t)stack + sizeof(stack));
+ jmp_with_stack((Addr)main2, (Addr)stack + sizeof(stack));
}
--- valgrind/coregrind/ume.c #1.36:1.37
@@ -415,5 +415,5 @@ static int load_ELF(char *hdr, int len,
info->exe_end = maxaddr + ebase;
- info->init_eip = (addr_t)entry;
+ info->init_eip = (Addr)entry;
free(e);
--- valgrind/coregrind/ume.h #1.14:1.15
@@ -55,9 +55,6 @@ void foreach_map(int (*fn)(char *start,
#endif
-/* Integer type the same size as a pointer */
-typedef ESZ(Addr) addr_t;
-
// Jump to a new 'ip' with the stack 'sp'.
-void jmp_with_stack(addr_t ip, addr_t sp) __attribute__((noreturn));
+void jmp_with_stack(Addr ip, Addr sp) __attribute__((noreturn));
/*------------------------------------------------------------*/
@@ -69,16 +66,16 @@ void jmp_with_stack(addr_t ip, addr_t sp
struct exeinfo
{
- addr_t map_base; // IN: if non-zero, base address of mappings
+ Addr map_base; // IN: if non-zero, base address of mappings
char** argv; // IN: the original argv
- addr_t exe_base; // INOUT: lowest (allowed) address of exe
- addr_t exe_end; // INOUT: highest (allowed) address
+ Addr exe_base; // INOUT: lowest (allowed) address of exe
+ Addr exe_end; // INOUT: highest (allowed) address
- addr_t phdr; // OUT: address phdr was mapped at
+ Addr phdr; // OUT: address phdr was mapped at
int phnum; // OUT: number of phdrs
- addr_t interp_base; // OUT: where interpreter (ld.so) was mapped
- addr_t entry; // OUT: entrypoint in main executable
- addr_t init_eip; // OUT: initial eip
- addr_t brkbase; // OUT: base address of brk segment
+ Addr interp_base; // OUT: where interpreter (ld.so) was mapped
+ Addr entry; // OUT: entrypoint in main executable
+ Addr init_eip; // OUT: initial eip
+ Addr brkbase; // OUT: base address of brk segment
// These are the extra args added by #! scripts
--- valgrind/coregrind/vg_main.c #1.225:1.226
@@ -402,5 +402,5 @@ static void layout_remaining_space(Addr
Int ires;
void* vres;
- addr_t client_size, shadow_size;
+ Addr client_size, shadow_size;
// VG_(valgrind_base) should have been set by scan_auxv, but if not,
@@ -420,5 +420,5 @@ static void layout_remaining_space(Addr
/* where !FIXED mmap goes */
VG_(client_mapbase) = VG_(client_base) +
- PGROUNDDN((addr_t)(client_size * CLIENT_HEAP_PROPORTION));
+ PGROUNDDN((Addr)(client_size * CLIENT_HEAP_PROPORTION));
VG_(shadow_base) = VG_(client_end) + REDZONE_SIZE;
@@ -927,5 +927,5 @@ static Addr setup_client_stack(void* ini
char *strtab; /* string table */
char *stringbase;
- addr_t *ptr;
+ Addr *ptr;
struct ume_auxv *auxv;
const struct ume_auxv *orig_auxv;
@@ -936,5 +936,5 @@ static Addr setup_client_stack(void* ini
int envc; /* total number of env vars */
unsigned stacksize; /* total client stack size */
- addr_t cl_esp; /* client stack base (initial esp) */
+ Addr cl_esp; /* client stack base (initial esp) */
/* use our own auxv as a prototype */
@@ -1022,5 +1022,5 @@ static Addr setup_client_stack(void* ini
/* ==================== copy client stack ==================== */
- ptr = (addr_t *)cl_esp;
+ ptr = (Addr*)cl_esp;
/* --- argc --- */
@@ -1029,13 +1029,13 @@ static Addr setup_client_stack(void* ini
/* --- argv --- */
if (info->interp_name) {
- *ptr++ = (addr_t)copy_str(&strtab, info->interp_name);
+ *ptr++ = (Addr)copy_str(&strtab, info->interp_name);
free(info->interp_name);
}
if (info->interp_args) {
- *ptr++ = (addr_t)copy_str(&strtab, info->interp_args);
+ *ptr++ = (Addr)copy_str(&strtab, info->interp_args);
free(info->interp_args);
}
for (cpp = orig_argv; *cpp; ptr++, cpp++) {
- *ptr = (addr_t)copy_str(&strtab, *cpp);
+ *ptr = (Addr)copy_str(&strtab, *cpp);
}
*ptr++ = 0;
@@ -1044,5 +1044,5 @@ static Addr setup_client_stack(void* ini
VG_(client_envp) = (Char **)ptr;
for (cpp = orig_envp; cpp && *cpp; ptr++, cpp++)
- *ptr = (addr_t)copy_str(&strtab, *cpp);
+ *ptr = (Addr)copy_str(&strtab, *cpp);
*ptr++ = 0;
--- valgrind/coregrind/x86/jmp_with_stack.c #1.1:1.2
@@ -32,5 +32,5 @@
after exec; it therefore also clears all the other registers.
*/
-void jmp_with_stack(addr_t eip, addr_t esp)
+void jmp_with_stack(Addr eip, Addr esp)
{
asm volatile ("movl %1, %%esp;" /* set esp */
--- valgrind/memcheck/tests/vgtest_ume.c #1.5:1.6
@@ -135,5 +135,5 @@ static void test__do_exec(void)
// fprintf(stderr, "ume_go: %p %p\n", (void*)info.init_eip, (void*)esp);
- jmp_with_stack(info.init_eip, (addr_t)esp);
+ jmp_with_stack(info.init_eip, (Addr)esp);
assert(0); // UNREACHABLE
|
|
From: Maurice v. d. P. <gri...@ge...> - 2004-11-01 18:30:01
|
Hi, Yesterday I submitted a wishlist item to bugzilla that can be found here: http://bugs.kde.org/show_bug.cgi?id=3D92456 The idea is to not only report the use of undefined values, but also=20 where the undefined values were copied from. The comments on the bug=20 report describe it in more detail. What I am interested in is how feasible people think the approach from=20 comment #2 onwards is. The feasibility depends on things like how much=20 copying of undefined values is done and for how long the copies persist. Again the report describes it more clearly. I would like to do some testing to gather numbers on this, but maybe there are people on this list that already have insights into this or even know of some things that have been overlooked. Regards, Maurice. --=20 Maurice van der Pot Gentoo Linux Developer gri...@ge... http://www.gentoo.org Creator of BiteMe! gri...@kf... http://www.kfk4ever.com |
|
From: Nicholas N. <nj...@ca...> - 2004-11-01 18:22:10
|
CVS commit by nethercote:
- Make find_auxv() word-size independent.
- Introduced a new file, basic_types.h, for the basic types (eg. Int, Word).
A include/basic_types.h 1.1 [GPL (v2+)]
M +1 -0 valgrind.spec.in 1.18
M +10 -10 coregrind/ume.c 1.36
M +3 -1 coregrind/ume.h 1.14
M +1 -1 coregrind/vg_main.c 1.225
M +1 -0 include/Makefile.am 1.11
M +1 -40 include/tool.h.base 1.9
M +2 -1 memcheck/tests/vgtest_ume.c 1.5 [POSSIBLY UNSAFE: printf]
--- valgrind/valgrind.spec.in #1.17:1.18
@@ -37,4 +37,5 @@
/usr/include/valgrind/memcheck.h
/usr/include/valgrind/helgrind.h
+/usr/include/valgrind/basic_types.h
/usr/include/valgrind/tool.h
/usr/include/valgrind/tool_asm.h
--- valgrind/coregrind/ume.c #1.35:1.36
@@ -116,17 +116,17 @@ void foreach_map(int (*fn)(char *start,
/*------------------------------------------------------------*/
-struct ume_auxv *find_auxv(int *esp)
+struct ume_auxv *find_auxv(UWord* sp)
{
- esp++; /* skip argc */
+ sp++; // skip argc (Nb: is word-sized, not int-sized!)
- while(*esp != 0) /* skip argv */
- esp++;
- esp++;
+ while (*sp != 0) // skip argv
+ sp++;
+ sp++;
- while(*esp != 0) /* skip env */
- esp++;
- esp++;
+ while (*sp != 0) // skip env
+ sp++;
+ sp++;
- return (struct ume_auxv *)esp;
+ return (struct ume_auxv *)sp;
}
--- valgrind/coregrind/ume.h #1.13:1.14
@@ -36,4 +36,6 @@
#include <sys/types.h>
+#include "basic_types.h"
+
/*------------------------------------------------------------*/
/*--- General stuff ---*/
@@ -105,5 +107,5 @@ struct ume_auxv
};
-struct ume_auxv *find_auxv(int *orig_esp);
+struct ume_auxv *find_auxv(UWord* orig_esp);
/* Our private auxv entries */
--- valgrind/coregrind/vg_main.c #1.224:1.225
@@ -363,5 +363,5 @@ static void newpid(ThreadId unused)
int scan_auxv(void* init_sp)
{
- const struct ume_auxv *auxv = find_auxv((int *)init_sp);
+ const struct ume_auxv *auxv = find_auxv((UWord*)init_sp);
int padfile = -1, found = 0;
--- valgrind/include/Makefile.am #1.10:1.11
@@ -10,4 +10,5 @@
incinc_HEADERS = \
+ basic_types.c \
tool.h \
tool_asm.h \
--- valgrind/include/tool.h.base #1.8:1.9
@@ -35,48 +35,9 @@
#include <setjmp.h> /* for jmp_buf */
+#include "basic_types.h"
#include "tool_asm.h" // asm stuff
-
-/*====================================================================*/
-/*=== Basic types ===*/
-/*====================================================================*/
-
-// By choosing the right types, we can get these right for 32-bit and 64-bit
-// platforms without having to do any conditional compilation or anything.
-//
-// Size in bits on: 32-bit archs 64-bit archs
-// ------------ ------------
-typedef unsigned char UChar; // 8 8
-typedef unsigned short UShort; // 16 16
-typedef unsigned int UInt; // 32 32
-typedef unsigned long UWord; // 32 64
-typedef unsigned long long ULong; // 64 64
-
-typedef signed char Char; // 8 8
-typedef signed short Short; // 16 16
-typedef signed int Int; // 32 32
-typedef signed long Word; // 32 64
-typedef signed long long Long; // 64 64
-
-typedef UWord Addr; // 32 64
-
-typedef UChar Bool; // 8 8
-#define False ((Bool)0)
-#define True ((Bool)1)
-
-/* ---------------------------------------------------------------------
- Now the basic types are set up, we can haul in the kernel-interface
- definitions and tool_arch.h
- ------------------------------------------------------------------ */
-
#include "tool_arch.h" // arch-specific tool stuff
#include "vki.h"
-/* ---------------------------------------------------------------------
- Where to send bug reports to.
- ------------------------------------------------------------------ */
-
-#define VG_BUGS_TO "valgrind.kde.org"
-
-
/*====================================================================*/
/*=== Build options and table sizes. ===*/
--- valgrind/memcheck/tests/vgtest_ume.c #1.4:1.5
@@ -52,5 +52,5 @@ static void test__find_auxv(void)
fprintf(stderr, "Calling find_auxv()\n");
- auxv = find_auxv((int*)init_sp);
+ auxv = find_auxv((UWord*)init_sp);
// Check the auxv value looks sane
@@ -66,4 +66,5 @@ static void test__find_auxv(void)
default:
+ fprintf(stderr, "auxv->a_type = %d\n", auxv->a_type);
assert(0);
}
|
|
From: Tom H. <th...@cy...> - 2004-11-01 17:37:11
|
CVS commit by thughes:
Make sure source-location mapping entries of size zero are converted
to size one even if verbose mode is off.
MERGE TO STABLE
M +6 -4 vg_symtab2.c 1.85.2.2
--- valgrind/coregrind/vg_symtab2.c #1.85.2.1:1.85.2.2
@@ -216,8 +216,10 @@ void VG_(addLineInfo) ( SegInfo* si,
* multiple instructions can map to the one line), but avoid
* catching any other instructions bogusly. */
- if (this > next && VG_(clo_verbosity) > 2) {
+ if (this > next) {
+ if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_DebugMsg,
"warning: line info addresses out of order "
"at entry %d: 0x%x 0x%x", entry, this, next);
+ }
size = 1;
}
|
|
From: Tom H. <th...@cy...> - 2004-11-01 17:36:22
|
CVS commit by thughes:
Make sure source-location mapping entries of size zero are converted
to size one even if verbose mode is off.
BUG: 92528
M +6 -4 vg_symtab2.c 1.93
--- valgrind/coregrind/vg_symtab2.c #1.92:1.93
@@ -216,8 +216,10 @@ void VG_(addLineInfo) ( SegInfo* si,
* multiple instructions can map to the one line), but avoid
* catching any other instructions bogusly. */
- if (this > next && VG_(clo_verbosity) > 2) {
+ if (this > next) {
+ if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_DebugMsg,
"warning: line info addresses out of order "
"at entry %d: 0x%x 0x%x", entry, this, next);
+ }
size = 1;
}
|