|
From: <sv...@va...> - 2006-03-28 12:35:26
|
Author: njn
Date: 2006-03-28 13:35:08 +0100 (Tue, 28 Mar 2006)
New Revision: 5796
Log:
Change some uses of bytes to bits for consistency.
Modified:
trunk/memcheck/mc_main.c
Modified: trunk/memcheck/mc_main.c
===================================================================
--- trunk/memcheck/mc_main.c 2006-03-28 01:02:38 UTC (rev 5795)
+++ trunk/memcheck/mc_main.c 2006-03-28 12:35:08 UTC (rev 5796)
@@ -860,13 +860,14 @@
#ifndef PERF_FAST_LOADV
INLINE
#endif
-ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
+ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
{
/* Make up a 64-bit result V word, which contains the loaded data for
valid addresses and Defined for invalid addresses. Iterate over
the bytes in the word, from the most significant down to the
least. */
ULong vbits64 = V_BITS64_UNDEFINED;
+ SizeT szB = nBits / 8;
SSizeT i = szB-1; // Must be signed
SizeT n_addrs_bad = 0;
Addr ai;
@@ -875,11 +876,11 @@
Bool ok;

PROF_EVENT(30, "mc_LOADVn_slow");
- tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
+ tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

for (i = szB-1; i >= 0; i--) {
PROF_EVENT(31, "mc_LOADVn_slow(loop)");
- ai = a+byte_offset_w(szB,bigendian,i);
+ ai = a + byte_offset_w(szB, bigendian, i);
ok = get_vbits8(ai, &vbits8);
if (!ok) n_addrs_bad++;
vbits64 <<= 8;
@@ -915,22 +916,23 @@
#ifndef PERF_FAST_STOREV
INLINE
#endif
-void mc_STOREVn_slow ( Addr a, SizeT szB, ULong vbytes, Bool bigendian )
+void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
{
+ SizeT szB = nBits / 8;
SizeT i, n_addrs_bad = 0;
UChar vbits8;
Addr ai;
Bool ok;

PROF_EVENT(35, "mc_STOREVn_slow");
- tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
+ tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

/* Dump vbytes in memory, iterating from least to most significant
byte. At the same time establish addressibility of the
location. */
for (i = 0; i < szB; i++) {
PROF_EVENT(36, "mc_STOREVn_slow(loop)");
- ai = a+byte_offset_w(szB,bigendian,i);
+ ai = a + byte_offset_w(szB, bigendian, i);
vbits8 = vbytes & 0xff;
ok = set_vbits8(ai, vbits8);
if (!ok) n_addrs_bad++;
if (!ok) n_addrs_bad++;
@@ -3037,9 +3039,9 @@
*/
=20
/* If any part of '_a' indicated by the mask is 1, either
- '_a' is not naturally '_sz'-aligned, or it exceeds the range
+ '_a' is not naturally '_sz/8'-aligned, or it exceeds the range
covered by the primary map. */
-#define UNALIGNED_OR_HIGH(_a,_sz) ((_a) & MASK((_sz)))
+#define UNALIGNED_OR_HIGH(_a,_sz) ((_a) & MASK((_sz>>3)))
#define MASK(_sz) ( ~((0x10000-(_sz)) | ((N_PRIMARY_MAP-1) << 16)) )
=20
=20
@@ -3054,11 +3056,11 @@
PROF_EVENT(200, "mc_LOADV64");
=20
#ifndef PERF_FAST_LOADV
- return mc_LOADVn_slow( a, 8, isBigEndian );
+ return mc_LOADVn_slow( a, 64, isBigEndian );
#else
- if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
+ if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
PROF_EVENT(201, "mc_LOADV64-slow1");
- return (ULong)mc_LOADVn_slow( a, 8, isBigEndian );
+ return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
}
=20
sm = get_secmap_readable_low(a);
@@ -3075,7 +3077,7 @@
} else {
/* Slow case: the 8 bytes are not all-readable or all-writable. */
PROF_EVENT(202, "mc_LOADV64-slow2");
- return mc_LOADVn_slow( a, 8, isBigEndian );
+ return mc_LOADVn_slow( a, 64, isBigEndian );
}
#endif
}
@@ -3101,11 +3103,11 @@
#ifndef PERF_FAST_STOREV
// XXX: this slow case seems to be marginally faster than the fast case!
// Investigate further.
- mc_STOREVn_slow( a, 8, vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 64, vbytes, isBigEndian );
#else
- if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
+ if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
PROF_EVENT(211, "mc_STOREV64-slow1");
- mc_STOREVn_slow( a, 8, vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 64, vbytes, isBigEndian );
return;
}
=20
@@ -3127,12 +3129,12 @@
} else {
/* Slow but general case -- writing partially defined bytes. */
PROF_EVENT(212, "mc_STOREV64-slow2");
- mc_STOREVn_slow( a, 8, vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 64, vbytes, isBigEndian );
}
} else {
/* Slow but general case. */
PROF_EVENT(213, "mc_STOREV64-slow3");
- mc_STOREVn_slow( a, 8, vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 64, vbytes, isBigEndian );
}
#endif
}
@@ -3158,11 +3160,11 @@
PROF_EVENT(220, "mc_LOADV32");
=20
#ifndef PERF_FAST_LOADV
- return (UWord)mc_LOADVn_slow( a, 4, isBigEndian );
+ return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
#else
- if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,4) )) {
+ if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
PROF_EVENT(221, "mc_LOADV32-slow1");
- return (UWord)mc_LOADVn_slow( a, 4, isBigEndian );
+ return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
}
=20
sm = get_secmap_readable_low(a);
@@ -3181,7 +3183,7 @@
} else {
/* Slow case: the 4 bytes are not all-readable or all-writable. */
PROF_EVENT(222, "mc_LOADV32-slow2");
- return (UWord)mc_LOADVn_slow( a, 4, isBigEndian );
+ return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
}
#endif
}
@@ -3205,11 +3207,11 @@
PROF_EVENT(230, "mc_STOREV32");
=20
#ifndef PERF_FAST_STOREV
- mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 32, (ULong)vbytes, isBigEndian );
#else
- if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,4) )) {
+ if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
PROF_EVENT(231, "mc_STOREV32-slow1");
- mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 32, (ULong)vbytes, isBigEndian );
return;
}
=20
@@ -3230,7 +3232,7 @@
} else {
// not readable/writable, or distinguished and changing state
PROF_EVENT(232, "mc_STOREV32-slow2");
- mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 32, (ULong)vbytes, isBigEndian );
}
} else if (V_BITS32_UNDEFINED == vbytes) {
if (vabits8 == (UInt)VA_BITS8_WRITABLE) {
@@ -3240,12 +3242,12 @@
} else {
// not readable/writable, or distinguished and changing state
PROF_EVENT(233, "mc_STOREV32-slow3");
- mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 32, (ULong)vbytes, isBigEndian );
}
} else {
// Partially defined word
PROF_EVENT(234, "mc_STOREV32-slow4");
- mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 32, (ULong)vbytes, isBigEndian );
}
//---------------------------------------------------------------------------
#else
@@ -3263,12 +3265,12 @@
} else {
/* Slow but general case -- writing partially defined bytes. */
PROF_EVENT(232, "mc_STOREV32-slow2");
- mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 32, (ULong)vbytes, isBigEndian );
}
} else {
/* Slow but general case. */
PROF_EVENT(233, "mc_STOREV32-slow3");
- mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 32, (ULong)vbytes, isBigEndian );
}
#endif
//---------------------------------------------------------------------------
@@ -3296,11 +3298,11 @@
PROF_EVENT(240, "mc_LOADV16");
=20
#ifndef PERF_FAST_LOADV
- return (UWord)mc_LOADVn_slow( a, 2, isBigEndian );
+ return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
#else
- if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,2) )) {
+ if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
PROF_EVENT(241, "mc_LOADV16-slow1");
- return (UWord)mc_LOADVn_slow( a, 2, isBigEndian );
+ return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
}
=20
sm = get_secmap_readable_low(a);
@@ -3321,7 +3323,7 @@
else {
/* Slow case: the two bytes are not all-readable or all-writable. */
PROF_EVENT(242, "mc_LOADV16-slow2");
- return (UWord)mc_LOADVn_slow( a, 2, isBigEndian );
+ return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
}
}
#endif
@@ -3346,11 +3348,11 @@
PROF_EVENT(250, "mc_STOREV16");
=20
#ifndef PERF_FAST_STOREV
- mc_STOREVn_slow( a, 2, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 16, (ULong)vbytes, isBigEndian );
#else
- if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,2) )) {
+ if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
PROF_EVENT(251, "mc_STOREV16-slow1");
- mc_STOREVn_slow( a, 2, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 16, (ULong)vbytes, isBigEndian );
return;
}
=20
@@ -3373,12 +3375,12 @@
} else {
/* Slow but general case -- writing partially defined bytes. */
PROF_EVENT(252, "mc_STOREV16-slow2");
- mc_STOREVn_slow( a, 2, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 16, (ULong)vbytes, isBigEndian );
}
} else {
/* Slow but general case. */
PROF_EVENT(253, "mc_STOREV16-slow3");
- mc_STOREVn_slow( a, 2, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 16, (ULong)vbytes, isBigEndian );
}
#endif
}
@@ -3405,11 +3407,11 @@
PROF_EVENT(260, "mc_LOADV8");
=20
#ifndef PERF_FAST_LOADV
- return (UWord)mc_LOADVn_slow( a, 1, False/*irrelevant*/ );
+ return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
#else
- if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,1) )) {
+ if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
PROF_EVENT(261, "mc_LOADV8-slow1");
- return (UWord)mc_LOADVn_slow( a, 1, False/*irrelevant*/ );
+ return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
}
=20
sm = get_secmap_readable_low(a);
@@ -3430,7 +3432,7 @@
else {
/* Slow case: the byte is not all-readable or all-writable. */
PROF_EVENT(262, "mc_LOADV8-slow2");
- return (UWord)mc_LOADVn_slow( a, 1, False/*irrelevant*/ );
+ return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
}
}
#endif
@@ -3446,11 +3448,11 @@
PROF_EVENT(270, "mc_STOREV8");
=20
#ifndef PERF_FAST_STOREV
- mc_STOREVn_slow( a, 1, (ULong)vbyte, False/*irrelevant*/ );
+ mc_STOREVn_slow( a, 8, (ULong)vbyte, False/*irrelevant*/ );
#else
- if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,1) )) {
+ if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
PROF_EVENT(271, "mc_STOREV8-slow1");
- mc_STOREVn_slow( a, 1, (ULong)vbyte, False/*irrelevant*/ );
+ mc_STOREVn_slow( a, 8, (ULong)vbyte, False/*irrelevant*/ );
return;
}
=20
@@ -3477,12 +3479,12 @@
} else {
/* Slow but general case -- writing partially defined bytes. */
PROF_EVENT(272, "mc_STOREV8-slow2");
- mc_STOREVn_slow( a, 1, (ULong)vbyte, False/*irrelevant*/ );
+ mc_STOREVn_slow( a, 8, (ULong)vbyte, False/*irrelevant*/ );
}
} else {
/* Slow but general case. */
PROF_EVENT(273, "mc_STOREV8-slow3");
- mc_STOREVn_slow( a, 1, (ULong)vbyte, False/*irrelevant*/ );
+ mc_STOREVn_slow( a, 8, (ULong)vbyte, False/*irrelevant*/ );
}
#endif
}
|