|
From: <sv...@va...> - 2005-07-19 21:12:16
|
Author: njn
Date: 2005-07-19 22:11:54 +0100 (Tue, 19 Jul 2005)
New Revision: 4195
Log:
Get Addrcheck closer to compiling. It's still a long way from working,
though.
Modified:
trunk/addrcheck/ac_main.c
Modified: trunk/addrcheck/ac_main.c
===================================================================
--- trunk/addrcheck/ac_main.c 2005-07-19 20:54:08 UTC (rev 4194)
+++ trunk/addrcheck/ac_main.c 2005-07-19 21:11:54 UTC (rev 4195)
@@ -30,6 +30,15 @@
The GNU General Public License is contained in the file COPYING.
*/
 
+#include "pub_tool_basics.h"
+#include "pub_tool_hashtable.h" // For mac_shared.h
+#include "pub_tool_libcbase.h"
+#include "pub_tool_libcassert.h"
+#include "pub_tool_libcprint.h"
+#include "pub_tool_profile.h" // For mac_shared.h
+#include "pub_tool_tooliface.h"
+#include "pub_tool_threadstate.h"
+
#include "mac_shared.h"
#include "memcheck.h"
 
@@ -195,7 +204,7 @@
const AcSecMap *prototype)
{
AcSecMap* map;
- PROF_EVENT(10);
+ PROF_EVENT(10, "");
 
map = (AcSecMap *)VG_(shadow_alloc)(sizeof(AcSecMap));
VG_(memcpy)(map, prototype, sizeof(*map));
@@ -211,7 +220,7 @@
{
AcSecMap* sm = primary_map[PM_IDX(a)];
UInt sm_off = SM_OFF(a);
- PROF_EVENT(20);
+ PROF_EVENT(20, "");
# if 0
if (IS_DISTINGUISHED_SM(sm))
VG_(message)(Vg_DebugMsg, 
@@ -225,7 +234,7 @@
{
AcSecMap* sm;
UInt sm_off;
- PROF_EVENT(22);
+ PROF_EVENT(22, "");
ENSURE_MAPPABLE(a, "set_abit");
sm = primary_map[PM_IDX(a)];
sm_off = SM_OFF(a);
@@ -243,7 +252,7 @@
AcSecMap* sm;
UInt sm_off;
UChar abits8;
- PROF_EVENT(24);
+ PROF_EVENT(24, "");
# ifdef VG_DEBUG_MEMORY
tl_assert(VG_IS_4_ALIGNED(a));
# endif
@@ -268,7 +277,7 @@
UInt sm_off;
AcSecMap* sm;
 
- PROF_EVENT(30);
+ PROF_EVENT(30, "");
 
if (len == 0)
return;
@@ -299,7 +308,7 @@
# ifdef VG_DEBUG_MEMORY
/* Do it ... */
while (True) {
- PROF_EVENT(31);
+ PROF_EVENT(31, "");
if (len == 0) break;
set_abit ( a, example_a_bit );
set_vbyte ( a, vbyte );
@@ -310,7 +319,7 @@
# else
/* Slowly do parts preceding 8-byte alignment. */
while (True) {
- PROF_EVENT(31);
+ PROF_EVENT(31, "");
if (len == 0) break;
if ((a % 8) == 0) break;
set_abit ( a, example_a_bit );
@@ -326,7 +335,7 @@
 
/* Once aligned, go fast up to primary boundary. */
for (; (a & SECONDARY_MASK) && len >= 8; a += 8, len -= 8) {
- PROF_EVENT(32);
+ PROF_EVENT(32, "");
 
/* If the primary is already pointing to a distinguished map
with the same properties as we're trying to set, then leave
@@ -356,7 +365,7 @@
 
/* Now finished the remains. */
for (; len >= 8; a += 8, len -= 8) {
- PROF_EVENT(32);
+ PROF_EVENT(32, "");
 
/* If the primary is already pointing to a distinguished map
with the same properties as we're trying to set, then leave
@@ -372,7 +381,7 @@
 
/* Finish the upper fragment. */
while (True) {
- PROF_EVENT(33);
+ PROF_EVENT(33, "");
if (len == 0) break;
set_abit ( a, example_a_bit );
a++;
@@ -387,14 +396,14 @@
 
static void ac_make_noaccess ( Addr a, SizeT len )
{
- PROF_EVENT(35);
+ PROF_EVENT(35, "");
DEBUG("ac_make_noaccess(%p, %x)\n", a, len);
set_address_range_perms ( a, len, VGM_BIT_INVALID );
}
 
static void ac_make_accessible ( Addr a, SizeT len )
{
- PROF_EVENT(38);
+ PROF_EVENT(38, "");
DEBUG("ac_make_accessible(%p, %x)\n", a, len);
set_address_range_perms ( a, len, VGM_BIT_VALID );
}
@@ -483,10 +492,10 @@
 
DEBUG("ac_copy_address_range_state\n");
 
- PROF_EVENT(40);
+ PROF_EVENT(40, "");
for (i = 0; i < len; i++) {
UChar abit = get_abit ( src+i );
- PROF_EVENT(41);
+ PROF_EVENT(41, "");
set_abit ( dst+i, abit );
}
}
@@ -501,9 +510,9 @@
{
UInt i;
UChar abit;
- PROF_EVENT(48);
+ PROF_EVENT(48, "");
for (i = 0; i < len; i++) {
- PROF_EVENT(49);
+ PROF_EVENT(49, "");
abit = get_abit(a);
if (abit == VGM_BIT_INVALID) {
if (bad_addr != NULL) *bad_addr = a;
@@ -520,9 +529,9 @@
{
UInt i;
UChar abit;
- PROF_EVENT(48);
+ PROF_EVENT(48, "");
for (i = 0; i < len; i++) {
- PROF_EVENT(49);
+ PROF_EVENT(49, "");
abit = get_abit(a);
if (abit == VGM_BIT_VALID) {
if (bad_addr != NULL) *bad_addr = a;
@@ -541,10 +550,10 @@
Bool ac_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
UChar abit;
- PROF_EVENT(46);
+ PROF_EVENT(46, "");
DEBUG("ac_check_readable_asciiz\n");
while (True) {
- PROF_EVENT(47);
+ PROF_EVENT(47, "");
abit = get_abit(a);
if (abit != VGM_BIT_VALID) {
if (bad_addr != NULL) *bad_addr = a;
@@ -696,7 +705,7 @@
UChar abits = sm->abits[a_off];
abits >>= (a & 4);
abits &= 15;
- PROF_EVENT(66);
+ PROF_EVENT(66, "");
if (abits == VGM_NIBBLE_VALID) {
/* Handle common case quickly: a is suitably aligned, is mapped,
and is addressible. So just return. */
@@ -716,7 +725,7 @@
UInt sec_no = rotateRight16(a) & 0x1FFFF;
AcSecMap* sm = primary_map[sec_no];
UInt a_off = (SM_OFF(a)) >> 3;
- PROF_EVENT(67);
+ PROF_EVENT(67, "");
if (sm->abits[a_off] == VGM_BYTE_VALID) {
/* Handle common case quickly. */
return;
@@ -735,7 +744,7 @@
UInt sec_no = shiftRight16(a);
AcSecMap* sm = primary_map[sec_no];
UInt a_off = (SM_OFF(a)) >> 3;
- PROF_EVENT(68);
+ PROF_EVENT(68, "");
if (sm->abits[a_off] == VGM_BYTE_VALID) {
/* Handle common case quickly. */
return;
@@ -789,7 +798,7 @@
{
Bool a0ok, a1ok, a2ok, a3ok;
 
- PROF_EVENT(76);
+ PROF_EVENT(76, "");
 
/* First establish independently the addressibility of the 4 bytes
involved. */
@@ -834,7 +843,7 @@
{
/* Check the address for validity. */
Bool aerr = False;
- PROF_EVENT(77);
+ PROF_EVENT(77, "");
 
if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
@@ -849,7 +858,7 @@
{
/* Check the address for validity. */
Bool aerr = False;
- PROF_EVENT(78);
+ PROF_EVENT(78, "");
 
if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
 
@@ -879,7 +888,7 @@
UInt sm_off, a_off;
Addr addr4;
 
- PROF_EVENT(90);
+ PROF_EVENT(90, "");
 
# ifdef VG_DEBUG_MEMORY
ac_fpu_ACCESS_check_SLOWLY ( addr, size, isWrite );
@@ -887,7 +896,7 @@
 
if (size == 4) {
if (!VG_IS_4_ALIGNED(addr)) goto slow4;
- PROF_EVENT(91);
+ PROF_EVENT(91, "");
/* Properly aligned. */
sm = primary_map[PM_IDX(addr)];
sm_off = SM_OFF(addr);
@@ -902,7 +911,7 @@
 
if (size == 8) {
if (!VG_IS_4_ALIGNED(addr)) goto slow8;
- PROF_EVENT(92);
+ PROF_EVENT(92, "");
/* Properly aligned. Do it in two halves. */
addr4 = addr + 4;
/* First half. */
@@ -927,13 +936,13 @@
/* Can't be bothered to huff'n'puff to make these (allegedly) rare
cases go quickly. */
if (size == 2) {
- PROF_EVENT(93);
+ PROF_EVENT(93, "");
ac_fpu_ACCESS_check_SLOWLY ( addr, 2, isWrite );
return;
}
 
if (size == 16 || size == 10 || size == 28 || size == 108 || size == 512) {
- PROF_EVENT(94);
+ PROF_EVENT(94, "");
ac_fpu_ACCESS_check_SLOWLY ( addr, size, isWrite );
return;
}
@@ -964,9 +973,9 @@
{
Int i;
Bool aerr = False;
- PROF_EVENT(100);
+ PROF_EVENT(100, "");
for (i = 0; i < size; i++) {
- PROF_EVENT(101);
+ PROF_EVENT(101, "");
if (get_abit(addr+i) != VGM_BIT_VALID)
aerr = True;
}
|