|
From: <sv...@va...> - 2005-12-15 23:07:50
|
Author: njn
Date: 2005-12-15 23:07:45 +0000 (Thu, 15 Dec 2005)
New Revision: 5356
Log:
Remove some unnecessary variables.
Modified:
branches/COMPVBITS/memcheck/mc_main.c
Modified: branches/COMPVBITS/memcheck/mc_main.c
===================================================================
--- branches/COMPVBITS/memcheck/mc_main.c 2005-12-15 22:57:35 UTC (rev 5355)
+++ branches/COMPVBITS/memcheck/mc_main.c 2005-12-15 23:07:45 UTC (rev 5356)
@@ -804,13 +804,12 @@
primary map.
*/

-static SecMap** find_secmap_binder_for_addr ( Addr aA )
+static SecMap** find_secmap_binder_for_addr ( Addr a )
{
- if (aA > MAX_PRIMARY_ADDRESS) {
- AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
+ if (a > MAX_PRIMARY_ADDRESS) {
+ AuxMapEnt* am = find_or_alloc_in_auxmap(a);
return &am->sm;
} else {
- UWord a = (UWord)aA;
UWord sec_no = (UWord)(a >> 16);
# if VG_DEBUG_MEMORY >= 1
tl_assert(sec_no < N_PRIMARY_MAP);
@@ -856,7 +855,7 @@
UWord vabits8 = vabits64 & 0x3;
SizeT i;
for (i = 0; i < lenT; i++) {
- set_vabits8(aA + i, vabits8);
+ set_vabits8(a + i, vabits8);
}
return;
}
@@ -2642,21 +2641,19 @@
/* ------------------------ Size = 8 ------------------------ */

static inline __attribute__((always_inline))
-ULong mc_LOADV8 ( Addr aA, Bool isBigEndian )
+ULong mc_LOADV8 ( Addr a, Bool isBigEndian )
{
- UWord a, sm_off64, vabits64;
+ UWord sm_off64, vabits64;
SecMap* sm;

PROF_EVENT(200, "mc_LOADV8");

if (VG_DEBUG_MEMORY >= 2)
- return mc_LOADVn_slow( aA, 8, isBigEndian );
+ return mc_LOADVn_slow( a, 8, isBigEndian );

- a = (UWord)aA;
-
if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
PROF_EVENT(201, "mc_LOADV8-slow1");
- return (UWord)mc_LOADVn_slow( aA, 8, isBigEndian );
+ return (UWord)mc_LOADVn_slow( a, 8, isBigEndian );
}

sm = get_secmap_readable_low(a);
@@ -2688,9 +2685,9 @@


static inline __attribute__((always_inline))
-void mc_STOREV8 ( Addr aA, ULong vbytes, Bool isBigEndian )
+void mc_STOREV8 ( Addr a, ULong vbytes, Bool isBigEndian )
{
- UWord a, sm_off64, vabits64;
+ UWord sm_off64, vabits64;
SecMap* sm;

PROF_EVENT(210, "mc_STOREV8");

@@ -2698,15 +2695,13 @@
// XXX: this slow case seems to be marginally faster than the fast case!
// Investigate further.
if (VG_DEBUG_MEMORY >= 2) {
- mc_STOREVn_slow( aA, 8, vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 8, vbytes, isBigEndian );
return;
}

- a = (UWord)aA;
-
if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
PROF_EVENT(211, "mc_STOREV8-slow1");
- mc_STOREVn_slow( aA, 8, vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 8, vbytes, isBigEndian );
return;
}

@@ -2729,12 +2724,12 @@
} else {
/* Slow but general case -- writing partially defined bytes. */
PROF_EVENT(212, "mc_STOREV8-slow2");
- mc_STOREVn_slow( aA, 8, vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 8, vbytes, isBigEndian );
}
} else {
/* Slow but general case. */
PROF_EVENT(213, "mc_STOREV8-slow3");
- mc_STOREVn_slow( aA, 8, vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 8, vbytes, isBigEndian );
}
}

@@ -2802,23 +2797,21 @@


static inline __attribute__((always_inline))
-void mc_STOREV4 ( Addr aA, UWord vbytes, Bool isBigEndian )
+void mc_STOREV4 ( Addr a, UWord vbytes, Bool isBigEndian )
{
- UWord a, sm_off, vabits32;
+ UWord sm_off, vabits32;
SecMap* sm;

PROF_EVENT(230, "mc_STOREV4");

if (VG_DEBUG_MEMORY >= 2) {
- mc_STOREVn_slow( aA, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
return;
}

- a = (UWord)aA;
-
if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,4) )) {
PROF_EVENT(231, "mc_STOREV4-slow1");
- mc_STOREVn_slow( aA, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
return;
}

@@ -2839,7 +2832,7 @@
} else {
// not readable/writable, or distinguished and changing state
PROF_EVENT(232, "mc_STOREV4-slow2");
- mc_STOREVn_slow( aA, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
}
} else if (V_BITS32_INVALID == vbytes) {
if (vabits32 == (UInt)VA_BITS32_WRITABLE) {
@@ -2849,12 +2842,12 @@
} else {
// not readable/writable, or distinguished and changing state
PROF_EVENT(233, "mc_STOREV4-slow3");
- mc_STOREVn_slow( aA, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
}
} else {
// Partially defined word
PROF_EVENT(234, "mc_STOREV4-slow4");
- mc_STOREVn_slow( aA, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
}
//---------------------------------------------------------------------------
#else
@@ -2873,12 +2866,12 @@
} else {
/* Slow but general case -- writing partially defined bytes. */
PROF_EVENT(232, "mc_STOREV4-slow2");
- mc_STOREVn_slow( aA, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
}
} else {
/* Slow but general case. */
PROF_EVENT(233, "mc_STOREV4-slow3");
- mc_STOREVn_slow( aA, 4, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 4, (ULong)vbytes, isBigEndian );
}
#endif
//---------------------------------------------------------------------------
@@ -2899,21 +2892,19 @@
/* ------------------------ Size = 2 ------------------------ */

static inline __attribute__((always_inline))
-UWord mc_LOADV2 ( Addr aA, Bool isBigEndian )
+UWord mc_LOADV2 ( Addr a, Bool isBigEndian )
{
- UWord a, sm_off, vabits32;
+ UWord sm_off, vabits32;
SecMap* sm;

PROF_EVENT(240, "mc_LOADV2");

if (VG_DEBUG_MEMORY >= 2)
- return (UWord)mc_LOADVn_slow( aA, 2, isBigEndian );
+ return (UWord)mc_LOADVn_slow( a, 2, isBigEndian );

- a = (UWord)aA;
-
if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,2) )) {
PROF_EVENT(241, "mc_LOADV2-slow1");
- return (UWord)mc_LOADVn_slow( aA, 2, isBigEndian );
+ return (UWord)mc_LOADVn_slow( a, 2, isBigEndian );
}

sm = get_secmap_readable_low(a);
@@ -2930,7 +2921,7 @@
// XXX: could extract the vabits16 and check it first... (see
// LOADV1)... depends how common this case is.
PROF_EVENT(242, "mc_LOADV2-slow2");
- return (UWord)mc_LOADVn_slow( aA, 2, isBigEndian );
+ return (UWord)mc_LOADVn_slow( a, 2, isBigEndian );
}
}

@@ -2947,23 +2938,21 @@


static inline __attribute__((always_inline))
-void mc_STOREV2 ( Addr aA, UWord vbytes, Bool isBigEndian )
+void mc_STOREV2 ( Addr a, UWord vbytes, Bool isBigEndian )
{
- UWord a, sm_off, vabits32;
+ UWord sm_off, vabits32;
SecMap* sm;

PROF_EVENT(250, "mc_STOREV2");

if (VG_DEBUG_MEMORY >= 2) {
- mc_STOREVn_slow( aA, 2, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 2, (ULong)vbytes, isBigEndian );
return;
}

- a = (UWord)aA;
-
if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,2) )) {
PROF_EVENT(251, "mc_STOREV2-slow1");
- mc_STOREVn_slow( aA, 2, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 2, (ULong)vbytes, isBigEndian );
return;
}

@@ -2979,22 +2968,20 @@
// Convert full V-bits in register to compact 2-bit form.
// XXX: is it best to check for VALID before INVALID?
if (V_BITS16_VALID == vbytes) {
- //mc_STOREVn_slow( aA, 2, (ULong)vbytes, isBigEndian );
insert_vabits16_into_vabits32( a, VA_BITS16_READABLE,
&(sm->vabits32[sm_off]) );
} else if (V_BITS16_INVALID == vbytes) {
- //mc_STOREVn_slow( aA, 2, (ULong)vbytes, isBigEndian );
insert_vabits16_into_vabits32( a, VA_BITS16_WRITABLE,
&(sm->vabits32[sm_off]) );
} else {
/* Slow but general case -- writing partially defined bytes. */
PROF_EVENT(252, "mc_STOREV2-slow2");
- mc_STOREVn_slow( aA, 2, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 2, (ULong)vbytes, isBigEndian );
}
} else {
/* Slow but general case. */
PROF_EVENT(253, "mc_STOREV2-slow3");
- mc_STOREVn_slow( aA, 2, (ULong)vbytes, isBigEndian );
+ mc_STOREVn_slow( a, 2, (ULong)vbytes, isBigEndian );
}
}

@@ -3014,22 +3001,20 @@
/* Note: endianness is irrelevant for size == 1 */

VG_REGPARM(1)
-UWord MC_(helperc_LOADV1) ( Addr aA )
+UWord MC_(helperc_LOADV1) ( Addr a )
{
- UWord a, sm_off, vabits32;
+ UWord sm_off, vabits32;
SecMap* sm;

PROF_EVENT(260, "helperc_LOADV1");

# if VG_DEBUG_MEMORY >= 2
- return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
+ return (UWord)mc_LOADVn_slow( a, 1, False/*irrelevant*/ );
# endif

- a = (UWord)aA;
-
if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,1) )) {
PROF_EVENT(261, "helperc_LOADV1-slow1");
- return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
+ return (UWord)mc_LOADVn_slow( a, 1, False/*irrelevant*/ );
}

sm = get_secmap_readable_low(a);
@@ -3050,30 +3035,28 @@
else {
/* Slow but general case. */
PROF_EVENT(262, "helperc_LOADV1-slow2");
- return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
+ return (UWord)mc_LOADVn_slow( a, 1, False/*irrelevant*/ );
}
}
}


VG_REGPARM(2)
-void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
+void MC_(helperc_STOREV1) ( Addr a, UWord vbyte )
{
- UWord a, sm_off, vabits32;
+ UWord sm_off, vabits32;
SecMap* sm;

PROF_EVENT(270, "helperc_STOREV1");

# if VG_DEBUG_MEMORY >= 2
- mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
+ mc_STOREVn_slow( a, 1, (ULong)vbyte, False/*irrelevant*/ );
return;
# endif

- a = (UWord)aA;
-
if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,1) )) {
PROF_EVENT(271, "helperc_STOREV1-slow1");
- mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
+ mc_STOREVn_slow( a, 1, (ULong)vbyte, False/*irrelevant*/ );
return;
}

@@ -4124,6 +4107,8 @@

tl_assert( mc_expensive_sanity_check() );

+ // {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
+ tl_assert(sizeof(UWord) == sizeof(Addr));
}

VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
|