|
From: <sv...@va...> - 2005-12-19 23:44:07
|
Author: njn
Date: 2005-12-19 23:43:58 +0000 (Mon, 19 Dec 2005)
New Revision: 5392
Log:
Minor clean-ups.
Modified:
branches/COMPVBITS/memcheck/mc_main.c
Modified: branches/COMPVBITS/memcheck/mc_main.c
===================================================================
--- branches/COMPVBITS/memcheck/mc_main.c	2005-12-19 23:28:23 UTC (rev 5391)
+++ branches/COMPVBITS/memcheck/mc_main.c	2005-12-19 23:43:58 UTC (rev 5392)
@@ -53,8 +53,6 @@
#include "mc_include.h"
#include "memcheck.h" /* for client requests */

-// XXX: introduce PM_OFF macro
-
#define EXPECTED_TAKEN(cond) __builtin_expect((cond),1)
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)

@@ -1119,7 +1117,6 @@
}


-// XXX: can surely merge this somehow with make_aligned_word32_writable
static __inline__
void make_aligned_word32_noaccess ( Addr a )
{
@@ -2654,6 +2651,8 @@
sm_off64 = SM_OFF_64(a);
vabits64 = ((UShort*)(sm->vabits32))[sm_off64];

+ // Handle common case quickly: a is suitably aligned, is mapped, and
+ // addressible.
// Convert V bits from compact memory form to expanded register form.
if (EXPECTED_TAKEN(vabits64 == VA_BITS64_READABLE)) {
return V_BITS64_VALID;
@@ -2710,7 +2709,6 @@
/* Handle common case quickly: a is suitably aligned, */
/* is mapped, and is addressible. */
// Convert full V-bits in register to compact 2-bit form.
- // XXX: is it best to check for VALID before INVALID?
if (V_BITS64_VALID == vbytes) {
((UShort*)(sm->vabits32))[sm_off64] = (UShort)VA_BITS64_READABLE;
} else if (V_BITS64_INVALID == vbytes) {
@@ -2761,10 +2759,9 @@
sm_off = SM_OFF(a);
vabits32 = sm->vabits32[sm_off];

- // XXX: copy this comment to all the LOADV* functions.
- // Handle common case quickly: a is suitably aligned, is mapped, and is
- // addressible.
- // Convert V bits from compact memory form to expanded register form
+ // Handle common case quickly: a is suitably aligned, is mapped, and the
+ // entire word32 it lives in is addressible.
+ // Convert V bits from compact memory form to expanded register form.
// For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
// Almost certainly not necessary, but be paranoid.
if (EXPECTED_TAKEN(vabits32 == VA_BITS32_READABLE)) {
@@ -2852,7 +2849,6 @@
/* Handle common case quickly: a is suitably aligned, */
/* is mapped, and is addressible. */
// Convert full V-bits in register to compact 2-bit form.
- // XXX: is it best to check for VALID before INVALID?
if (V_BITS32_VALID == vbytes) {
sm->vabits32[sm_off] = VA_BITS32_READABLE;
} else if (V_BITS32_INVALID == vbytes) {
@@ -2904,17 +2900,23 @@
sm = get_secmap_readable_low(a);
sm_off = SM_OFF(a);
vabits32 = sm->vabits32[sm_off];
+ // Handle common case quickly: a is suitably aligned, is mapped, and is
+ // addressible.
// Convert V bits from compact memory form to expanded register form
- // XXX: checking READABLE before WRITABLE a good idea?
- // XXX: set the high 16/48 bits of retval to 1?
- if (EXPECTED_TAKEN(vabits32 == VA_BITS32_READABLE)) {
- return V_BITS16_VALID;
- } else if (EXPECTED_TAKEN(vabits32 == VA_BITS32_WRITABLE)) {
- return V_BITS16_INVALID;
- } else {
- /* Slow case: the 4 (yes, 4) bytes are not all-readable or all-writable. */
- PROF_EVENT(242, "mc_LOADV2-slow2");
- return (UWord)mc_LOADVn_slow( a, 2, isBigEndian );
+ // XXX: set the high 16/48 bits of retval to 1 for 64-bit paranoia?
+ if (vabits32 == VA_BITS32_READABLE) { return V_BITS16_VALID; }
+ else if (vabits32 == VA_BITS32_WRITABLE) { return V_BITS16_INVALID; }
+ else {
+ // The 4 (yes, 4) bytes are not all-readable or all-writable, check
+ // the two sub-bytes.
+ UChar vabits16 = extract_vabits16_from_vabits32(a, vabits32);
+ if (vabits16 == VA_BITS16_READABLE) { return V_BITS16_VALID; }
+ else if (vabits16 == VA_BITS16_WRITABLE) { return V_BITS16_INVALID; }
+ else {
+ /* Slow case: the two bytes are not all-readable or all-writable. */
+ PROF_EVENT(242, "mc_LOADV2-slow2");
+ return (UWord)mc_LOADVn_slow( a, 2, isBigEndian );
+ }
}
}

@@ -2959,7 +2961,6 @@
/* Handle common case quickly: a is suitably aligned, */
/* is mapped, and is addressible. */
// Convert full V-bits in register to compact 2-bit form.
- // XXX: is it best to check for VALID before INVALID?
if (V_BITS16_VALID == vbytes) {
insert_vabits16_into_vabits32( a, VA_BITS16_READABLE,
&(sm->vabits32[sm_off]) );
@@ -2999,14 +3000,14 @@
UWord sm_off, vabits32;
SecMap* sm;

- PROF_EVENT(260, "helperc_LOADV1");
+ PROF_EVENT(260, "mc_LOADV1");

# if VG_DEBUG_MEMORY >= 2
return (UWord)mc_LOADVn_slow( a, 1, False/*irrelevant*/ );
# endif

if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,1) )) {
- PROF_EVENT(261, "helperc_LOADV1-slow1");
+ PROF_EVENT(261, "mc_LOADV1-slow1");
return (UWord)mc_LOADVn_slow( a, 1, False/*irrelevant*/ );
}

@@ -3014,14 +3015,12 @@
sm_off = SM_OFF(a);
vabits32 = sm->vabits32[sm_off];
// Convert V bits from compact memory form to expanded register form
- /* Handle common case quickly: a is mapped, and the entire
- word32 it lives in is addressible. */
- // XXX: set the high 24/56 bits of retval to 1?
+ // Handle common case quickly: a is mapped, and the entire
+ // word32 it lives in is addressible.
+ // XXX: set the high 24/56 bits of retval to 1 for 64-bit paranoia?
if (vabits32 == VA_BITS32_READABLE) { return V_BITS8_VALID; }
else if (vabits32 == VA_BITS32_WRITABLE) { return V_BITS8_INVALID; }
else {
- // XXX: Could just do the slow but general case if this is uncommon,
- // but removing it slowed perf/bz2 down some...
// The 4 (yes, 4) bytes are not all-readable or all-writable, check
// the single byte.
UChar vabits8 = extract_vabits8_from_vabits32(a, vabits32);
@@ -3029,7 +3028,7 @@
else if (vabits8 == VA_BITS8_WRITABLE) { return V_BITS8_INVALID; }
else {
/* Slow case: the byte is not all-readable or all-writable. */
- PROF_EVENT(262, "helperc_LOADV1-slow2");
+ PROF_EVENT(262, "mc_LOADV1-slow2");
return (UWord)mc_LOADVn_slow( a, 1, False/*irrelevant*/ );
}
}
@@ -3042,7 +3041,7 @@
UWord sm_off, vabits32;
SecMap* sm;

- PROF_EVENT(270, "helperc_STOREV1");
+ PROF_EVENT(270, "mc_STOREV1");

# if VG_DEBUG_MEMORY >= 2
mc_STOREVn_slow( a, 1, (ULong)vbyte, False/*irrelevant*/ );
@@ -3050,7 +3049,7 @@
# endif

if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,1) )) {
- PROF_EVENT(271, "helperc_STOREV1-slow1");
+ PROF_EVENT(271, "mc_STOREV1-slow1");
mc_STOREVn_slow( a, 1, (ULong)vbyte, False/*irrelevant*/ );
return;
}
@@ -3065,7 +3064,6 @@
/* Handle common case quickly: a is mapped, the entire word32 it
lives in is addressible. */
// Convert full V-bits in register to compact 2-bit form.
- // XXX: is it best to check for VALID before INVALID?
if (V_BITS8_VALID == vbyte) {
insert_vabits8_into_vabits32( a, VA_BITS8_READABLE,
&(sm->vabits32[sm_off]) );
@@ -3074,12 +3072,12 @@
&(sm->vabits32[sm_off]) );
} else {
/* Slow but general case -- writing partially defined bytes. */
- PROF_EVENT(272, "helperc_STOREV1-slow2");
+ PROF_EVENT(272, "mc_STOREV1-slow2");
mc_STOREVn_slow( a, 1, (ULong)vbyte, False/*irrelevant*/ );
}
} else {
/* Slow but general case. */
- PROF_EVENT(273, "helperc_STOREV1-slow3");
+ PROF_EVENT(273, "mc_STOREV1-slow3");
mc_STOREVn_slow( a, 1, (ULong)vbyte, False/*irrelevant*/ );
}
}
|