|
From: <sv...@va...> - 2006-01-11 21:56:24
|
Author: njn
Date: 2006-01-11 21:56:19 +0000 (Wed, 11 Jan 2006)
New Revision: 5516
Log:
Reinstated the SET_VBITS/GET_VBITS client requests.
Modified:
branches/COMPVBITS/memcheck/docs/mc-manual.xml
branches/COMPVBITS/memcheck/mc_main.c
branches/COMPVBITS/memcheck/memcheck.h
Modified: branches/COMPVBITS/memcheck/docs/mc-manual.xml
===================================================================
--- branches/COMPVBITS/memcheck/docs/mc-manual.xml 2006-01-11 21:54:19 UTC (rev 5515)
+++ branches/COMPVBITS/memcheck/docs/mc-manual.xml 2006-01-11 21:56:19 UTC (rev 5516)
@@ -1074,8 +1074,7 @@
       you to get and set the V (validity) bits for an address
       range.  You should probably only set V bits that you have got
       with <computeroutput>VALGRIND_GET_VBITS</computeroutput>.
-      Only for those who really know what they are doing.  Note: currently
-      disabled in Valgrind 3.1.0.</para>
+      Only for those who really know what they are doing.</para>
     </listitem>
 
   </itemizedlist>
Modified: branches/COMPVBITS/memcheck/mc_main.c
===================================================================
--- branches/COMPVBITS/memcheck/mc_main.c 2006-01-11 21:54:19 UTC (rev 5515)
+++ branches/COMPVBITS/memcheck/mc_main.c 2006-01-11 21:56:19 UTC (rev 5516)
@@ -521,7 +521,7 @@
    return 0xf & vabits32;  // mask out the rest
 }
 
-// Note that these two are only used in slow cases.  The fast cases do
+// Note that these four are only used in slow cases.  The fast cases do
 // clever things like combine the auxmap check (in
 // get_secmap_{read,writ}able) with alignment checks.
 
@@ -542,7 +542,57 @@
    return extract_vabits8_from_vabits32(a, vabits32);
 }
 
+// Forward declarations
+static UWord get_sec_vbits8(Addr a);
+static void  set_sec_vbits8(Addr a, UWord vbits8);
 
+// Returns False if there was an addressability error.
+static inline
+Bool set_vbits8 ( Addr a, UChar vbits8 )
+{
+   Bool  ok      = True;
+   UChar vabits8 = get_vabits8(a);
+   if ( VA_BITS8_NOACCESS != vabits8 ) {
+      // Addressable.  Convert in-register format to in-memory format.
+      // Also remove any existing sec V bit entry for the byte if no
+      // longer necessary.
+      if      ( V_BITS8_VALID   == vbits8 ) { vabits8 = VA_BITS8_READABLE; }
+      else if ( V_BITS8_INVALID == vbits8 ) { vabits8 = VA_BITS8_WRITABLE; }
+      else                                  { vabits8 = VA_BITS8_OTHER;
+                                              set_sec_vbits8(a, vbits8); }
+      set_vabits8(a, vabits8);
+
+   } else {
+      // Unaddressable!  Do nothing -- when writing to unaddressable
+      // memory it acts as a black hole, and the V bits can never be seen
+      // again.  So we don't have to write them at all.
+      ok = False;
+   }
+   return ok;
+}
+
+// Returns False if there was an addressability error.  In that case, we put
+// all defined bits into vbits8.
+static inline
+Bool get_vbits8 ( Addr a, UChar* vbits8 )
+{
+   Bool  ok      = True;
+   UChar vabits8 = get_vabits8(a);
+
+   // Convert the in-memory format to in-register format.
+   if      ( VA_BITS8_READABLE == vabits8 ) { *vbits8 = V_BITS8_VALID;   }
+   else if ( VA_BITS8_WRITABLE == vabits8 ) { *vbits8 = V_BITS8_INVALID; }
+   else if ( VA_BITS8_NOACCESS == vabits8 ) {
+      *vbits8 = V_BITS8_VALID;    // Make V bits defined!
+      ok = False;
+   } else {
+      tl_assert( VA_BITS8_OTHER == vabits8 );
+      *vbits8 = get_sec_vbits8(a);
+   }
+   return ok;
+}
+
+
/* --------------- Secondary V bit table ------------ */
=20
// This table holds the full V bit pattern for partially-defined bytes
@@ -768,7 +818,8 @@
    SizeT n_addrs_bad = 0;
    Addr  ai;
    Bool  partial_load_exemption_applies;
-   UWord vbits8, vabits8;
+   UChar vbits8;
+   Bool  ok;
 
    PROF_EVENT(30, "mc_LOADVn_slow");
    tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
@@ -776,17 +827,8 @@
for (i =3D szB-1; i >=3D 0; i--) {
PROF_EVENT(31, "mc_LOADVn_slow(loop)");
ai =3D a+byte_offset_w(szB,bigendian,i);
- vabits8 =3D get_vabits8(ai);
- // Convert the in-memory format to in-register format.
- if ( VA_BITS8_READABLE =3D=3D vabits8 ) { vbits8 =3D V_BITS8_=
VALID; }
- else if ( VA_BITS8_WRITABLE =3D=3D vabits8 ) { vbits8 =3D V_BITS8_=
INVALID; }
- else if ( VA_BITS8_NOACCESS =3D=3D vabits8 ) {
- vbits8 =3D V_BITS8_VALID; // Make V bits defined!
- n_addrs_bad++;
- } else {
- tl_assert( VA_BITS8_OTHER =3D=3D vabits8 );
- vbits8 =3D get_sec_vbits8(ai);
- }
+ ok =3D get_vbits8(ai, &vbits8);
+ if (!ok) n_addrs_bad++;
vbits64 <<=3D 8;=20
vbits64 |=3D vbits8;
}
@@ -820,8 +862,9 @@
 void mc_STOREVn_slow ( Addr a, SizeT szB, ULong vbytes, Bool bigendian )
 {
    SizeT i, n_addrs_bad = 0;
-   UWord vbits8, vabits8;
+   UChar vbits8;
    Addr  ai;
+   Bool  ok;
 
    PROF_EVENT(35, "mc_STOREVn_slow");
    tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
@@ -831,25 +874,10 @@
location. */
for (i =3D 0; i < szB; i++) {
PROF_EVENT(36, "mc_STOREVn_slow(loop)");
- ai =3D a+byte_offset_w(szB,bigendian,i);
- vbits8 =3D vbytes & 0xff;
- vabits8 =3D get_vabits8(ai);
- if ( VA_BITS8_NOACCESS !=3D vabits8 ) {
- // Addressable. Convert in-register format to in-memory format=
.
- // Also remove any existing sec V bit entry for the byte if no
- // longer necessary.
- if ( V_BITS8_VALID =3D=3D vbits8 ) { vabits8 =3D VA_BITS=
8_READABLE; }
- else if ( V_BITS8_INVALID =3D=3D vbits8 ) { vabits8 =3D VA_BITS=
8_WRITABLE; }
- else { vabits8 =3D VA_BITS8_OT=
HER;
- set_sec_vbits8(ai, vbit=
s8); }
- set_vabits8(ai, vabits8);
-
- } else {
- // Unaddressable! Do nothing -- when writing to unaddressable
- // memory it acts as a black hole, and the V bits can never be =
seen
- // again. So we don't have to write them at all.
- n_addrs_bad++;
- }
+ ai =3D a+byte_offset_w(szB,bigendian,i);
+ vbits8 =3D vbytes & 0xff;
+ ok =3D set_vbits8(ai, vbits8);
+ if (!ok) n_addrs_bad++;
vbytes >>=3D 8;
}
=20
@@ -859,52 +887,6 @@
}
=20
=20
-//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses.=
*/
-//zz=20
-//zz static INLINE UChar get_abits4_ALIGNED ( Addr a )
-//zz {
-//zz SecMap* sm;
-//zz UInt sm_off;
-//zz UChar abits8;
-//zz PROF_EVENT(24);
-//zz # ifdef VG_DEBUG_MEMORY
-//zz tl_assert(VG_IS_4_ALIGNED(a));
-//zz # endif
-//zz sm =3D primary_map[PM_IDX(a)];
-//zz sm_off =3D SM_OFF(a);
-//zz abits8 =3D sm->abits[sm_off >> 3];
-//zz abits8 >>=3D (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
-//zz abits8 &=3D 0x0F;
-//zz return abits8;
-//zz }
-//zz=20
-//zz static UInt INLINE get_vbytes4_ALIGNED ( Addr a )
-//zz {
-//zz SecMap* sm =3D primary_map[PM_IDX(a)];
-//zz UInt sm_off =3D SM_OFF(a);
-//zz PROF_EVENT(25);
-//zz # ifdef VG_DEBUG_MEMORY
-//zz tl_assert(VG_IS_4_ALIGNED(a));
-//zz # endif
-//zz return ((UInt*)(sm->vbyte))[sm_off >> 2];
-//zz }
-//zz=20
-//zz=20
-//zz static void INLINE set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
-//zz {
-//zz SecMap* sm;
-//zz UInt sm_off;
-//zz ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
-//zz sm =3D primary_map[PM_IDX(a)];
-//zz sm_off =3D SM_OFF(a);
-//zz PROF_EVENT(23);
-//zz # ifdef VG_DEBUG_MEMORY
-//zz tl_assert(VG_IS_4_ALIGNED(a));
-//zz # endif
-//zz ((UInt*)(sm->vbyte))[sm_off >> 2] =3D vbytes;
-//zz }
-
-
/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges. ---*/
/*------------------------------------------------------------*/
@@ -3191,79 +3173,72 @@
}
=20
=20
-//zz /*------------------------------------------------------------*/
-//zz /*--- Metadata get/set functions, for client requests. ---*/
-//zz /*------------------------------------------------------------*/
-//zz=20
-//zz /* Copy Vbits for src into vbits. Returns: 1 =3D=3D OK, 2 =3D=3D al=
ignment
-//zz error, 3 =3D=3D addressing error. */
-//zz static Int mc_get_or_set_vbits_for_client (=20
-//zz ThreadId tid,
-//zz Addr dataV,=20
-//zz Addr vbitsV,=20
-//zz SizeT size,=20
-//zz Bool setting /* True <=3D> set vbits, False <=3D> get vbits */=20
-//zz )
-//zz {
-//zz Bool addressibleD =3D True;
-//zz Bool addressibleV =3D True;
-//zz UInt* data =3D (UInt*)dataV;
-//zz UInt* vbits =3D (UInt*)vbitsV;
-//zz SizeT szW =3D size / 4; /* sigh */
-//zz SizeT i;
-//zz UInt* dataP =3D NULL; /* bogus init to keep gcc happy */
-//zz UInt* vbitsP =3D NULL; /* ditto */
-//zz=20
-//zz /* Check alignment of args. */
-//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
-//zz return 2;
-//zz if ((size & 3) !=3D 0)
-//zz return 2;
-//zz =20
-//zz /* Check that arrays are addressible. */
-//zz for (i =3D 0; i < szW; i++) {
-//zz dataP =3D &data[i];
-//zz vbitsP =3D &vbits[i];
-//zz if (get_abits4_ALIGNED((Addr)dataP) !=3D V_NIBBLE_VALID) {
-//zz addressibleD =3D False;
-//zz break;
-//zz }
-//zz if (get_abits4_ALIGNED((Addr)vbitsP) !=3D V_NIBBLE_VALID) {
-//zz addressibleV =3D False;
-//zz break;
-//zz }
-//zz }
-//zz if (!addressibleD) {
-//zz mc_record_address_error( tid, (Addr)dataP, 4,=20
-//zz setting ? True : False );
-//zz return 3;
-//zz }
-//zz if (!addressibleV) {
-//zz mc_record_address_error( tid, (Addr)vbitsP, 4,=20
-//zz setting ? False : True );
-//zz return 3;
-//zz }
-//zz =20
-//zz /* Do the copy */
-//zz if (setting) {
-//zz /* setting */
-//zz for (i =3D 0; i < szW; i++) {
-//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) !=3D V_WORD_VAL=
ID)
-//zz mc_record_value_error(tid, 4);
-//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
-//zz }
-//zz } else {
-//zz /* getting */
-//zz for (i =3D 0; i < szW; i++) {
-//zz vbits[i] =3D get_vbytes4_ALIGNED( (Addr)&data[i] );
-//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], V_WORD_VALID );
-//zz }
-//zz }
-//zz=20
-//zz return 1;
-//zz }
+/*------------------------------------------------------------*/
+/*--- Metadata get/set functions, for client requests.     ---*/
+/*------------------------------------------------------------*/
 
+// Nb: this expands the V+A bits out into register-form V bits, even though
+// they're in memory.  This is for backward compatibility, and because it's
+// probably what the user wants.
 
+/* Copy Vbits from/to address 'a'.  Returns: 1 == OK, 2 == alignment
+   error [no longer used], 3 == addressing error. */
+static Int mc_get_or_set_vbits_for_client (
+   ThreadId tid,
+   Addr a,
+   Addr vbits,
+   SizeT szB,
+   Bool setting /* True <=> set vbits,  False <=> get vbits */
+)
+{
+   SizeT i;
+   Bool  ok;
+   UChar vbits8;
+
+   /* Check that arrays are addressible before doing any getting/setting. */
+   for (i = 0; i < szB; i++) {
+      if (VA_BITS8_NOACCESS == get_vabits8(a + i)) {
+         mc_record_address_error( tid, a + i,     1, setting ? True : False );
+         return 3;
+      }
+      if (VA_BITS8_NOACCESS == get_vabits8(vbits + i)) {
+         mc_record_address_error( tid, vbits + i, 1, setting ? False : True );
+         return 3;
+      }
+   }
+
+   /* Do the copy */
+   if (setting) {
+
+      // It's actually a tool ClientReq, but Vg_CoreClientReq is the closest
+      // thing we have... XXX: actually, mc_check_is_readable() can't handle
+      // that...
+      mc_check_is_readable(Vg_CoreClientReq, tid, "SET_VBITS(vbits)",
+                           vbits, szB);
+
+      /* setting */
+      for (i = 0; i < szB; i++) {
+         ok = set_vbits8(a + i, ((UChar*)vbits)[i]);
+         tl_assert(ok);
+      }
+   } else {
+      /* getting */
+      for (i = 0; i < szB; i++) {
+         ok = get_vbits8(a + i, &vbits8);
+         tl_assert(ok);
+// XXX: used to do this, but it's a pain
+//         if (V_BITS8_VALID != vbits8)
+//            mc_record_value_error(tid, 1);
+         ((UChar*)vbits)[i] = vbits8;
+      }
+      // The bytes in vbits[] have now been set, so mark them as such.
+      MC_(make_readable)(vbits, szB);
+   }
+
+   return 1;
+}
+
+
/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
/*------------------------------------------------------------*/
@@ -3745,22 +3720,22 @@
          }
          break;
 
-//zz       case VG_USERREQ__GET_VBITS:
-//zz          /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
-//zz             error. */
-//zz          /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
-//zz          *ret = mc_get_or_set_vbits_for_client
-//zz                    ( tid, arg[1], arg[2], arg[3], False /* get them */ );
-//zz          break;
-//zz 
-//zz       case VG_USERREQ__SET_VBITS:
-//zz          /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
-//zz             error. */
-//zz          /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
-//zz          *ret = mc_get_or_set_vbits_for_client
-//zz                    ( tid, arg[1], arg[2], arg[3], True /* set them */ );
-//zz          break;
+      case VG_USERREQ__GET_VBITS:
+         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
+            error. */
+         /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
+         *ret = mc_get_or_set_vbits_for_client
+                   ( tid, arg[1], arg[2], arg[3], False /* get them */ );
+         break;
 
+      case VG_USERREQ__SET_VBITS:
+         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
+            error. */
+         /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
+         *ret = mc_get_or_set_vbits_for_client
+                   ( tid, arg[1], arg[2], arg[3], True /* set them */ );
+         break;
+
       case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
          UWord** argp = (UWord**)arg;
         // MC_(bytes_leaked) et al were set by the last leak check (or zero
@@ -4089,3 +4064,4 @@
/*--------------------------------------------------------------------*/
/*--- end ---*/
/*--------------------------------------------------------------------*/
+
Modified: branches/COMPVBITS/memcheck/memcheck.h
===================================================================
--- branches/COMPVBITS/memcheck/memcheck.h 2006-01-11 21:54:19 UTC (rev 5515)
+++ branches/COMPVBITS/memcheck/memcheck.h 2006-01-11 21:56:19 UTC (rev 5516)
@@ -211,43 +211,41 @@
 }
 
 
-/* Get in zzvbits the validity data for the zznbytes starting at
-   zzsrc.  Return values:
+/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
+   into the provided zzvbits array.  Return values:
       0   if not running on valgrind
       1   success
-      2   if zzsrc/zzvbits arrays are not aligned 0 % 4, or
-          zznbytes is not 0 % 4.
+      2   [previously indicated unaligned arrays;  these are now allowed]
       3   if any parts of zzsrc/zzvbits are not addressible.
    The metadata is not copied in cases 0, 2 or 3 so it should be
    impossible to segfault your system by using this call.
 */
-#define VALGRIND_GET_VBITS(zzsrc,zzvbits,zznbytes)               \
+#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes)                 \
    (__extension__({unsigned int _qzz_res;                        \
-    char* czzsrc   = (char*)zzsrc;                               \
+    char* czza     = (char*)zza;                                 \
     char* czzvbits = (char*)zzvbits;                             \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0,                         \
                             VG_USERREQ__GET_VBITS,               \
-                            czzsrc, czzvbits, zznbytes,0 );      \
+                            czza, czzvbits, zznbytes,0 );        \
     _qzz_res;                                                    \
    }))
 
-/* Apply the validity data in zzvbits to the zznbytes starting at
-   zzdst.  Return values:
+/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
+   from the provided zzvbits array.  Return values:
       0   if not running on valgrind
       1   success
-      2   if zzdst/zzvbits arrays are not aligned 0 % 4, or
-          zznbytes is not 0 % 4.
-      3   if any parts of zzdst/zzvbits are not addressible.
+      2   [previously indicated unaligned arrays;  these are now allowed]
+      3   if any parts of zza/zzvbits are not addressible.
    The metadata is not copied in cases 0, 2 or 3 so it should be
    impossible to segfault your system by using this call.
 */
-#define VALGRIND_SET_VBITS(zzdst,zzvbits,zznbytes)               \
+#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes)                 \
    (__extension__({unsigned int _qzz_res;                        \
-    char* czzdst   = (char*)zzdst;                               \
+    char* czza     = (char*)zza;                                 \
     char* czzvbits = (char*)zzvbits;                             \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0,                         \
                             VG_USERREQ__SET_VBITS,               \
-                            czzdst, czzvbits, zznbytes,0 );      \
+                            czza, czzvbits, zznbytes,0 );        \
     _qzz_res;                                                    \
    }))
 
|