|
From: <sv...@va...> - 2005-12-06 23:19:43
|
Author: njn
Date: 2005-12-06 23:19:39 +0000 (Tue, 06 Dec 2005)
New Revision: 5303
Log:
Got STOREV8 working. Fixed the same bug that was affecting the unused case
in STOREV4.
Modified:
branches/COMPVBITS/memcheck/mc_main.c
Modified: branches/COMPVBITS/memcheck/mc_main.c
===================================================================
--- branches/COMPVBITS/memcheck/mc_main.c	2005-12-06 22:19:08 UTC (rev 5302)
+++ branches/COMPVBITS/memcheck/mc_main.c	2005-12-06 23:19:39 UTC (rev 5303)
@@ -491,7 +491,10 @@
{
SecMap* sm = get_secmap_writable(a);
UWord sm_off = SM_OFF(a);
+// VG_(printf)("se:%p, %d\n", a, sm_off);
+// VG_(printf)("s1:%p (0x%x)\n", &(sm->vabits32[sm_off]), vabits8);
insert_vabit8_into_vabits32( a, vabits8, &(sm->vabits32[sm_off]) );
+// VG_(printf)("s2: 0x%x\n", sm->vabits32[sm_off]);
}

static inline
@@ -1741,19 +1744,16 @@
static inline __attribute__((always_inline))
void mc_STOREV8 ( Addr aA, ULong vbytes, Bool isBigEndian )
{
-// UWord mask, a, sec_no, sm_off64, vabits64;
-// SecMap* sm;
+ UWord mask, a, sec_no, sm_off64, vabits64;
+ SecMap* sm;

PROF_EVENT(210, "mc_STOREV8");

-// XXX: enable
-// if (VG_DEBUG_MEMORY >= 2) {
+ if (VG_DEBUG_MEMORY >= 2) {
mc_STOREVn_slow( aA, 8, vbytes, isBigEndian );
return;
-// }
+ }

-// XXX: not working, I haven't yet worked out why
-#if 0
mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
a = (UWord)aA;

@@ -1776,47 +1776,18 @@
sm_off64 = SM_OFF_64(a);
vabits64 = ((UShort*)(sm->vabits32))[sm_off64];

- VG_(printf)("BAR: 0x%lx\n", vabits64);
- VG_(printf)("BAZ: %lx %lx\n", sm->vabits32[SM_OFF(a)], sm->vabits32[SM_OFF(a)+1]);
if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
- MC_BITS64_NOACCESS != vabits64 ))
+ (MC_BITS64_READABLE == vabits64 ||
+ MC_BITS64_WRITABLE == vabits64) ))
{
/* Handle common case quickly: a is suitably aligned, */
/* is mapped, and is addressible. */
// Convert full V-bits in register to compact 2-bit form.
// XXX: is it best to check for VALID before INVALID?
if (VGM_WORD64_VALID == vbytes) {
- //((UShort*)(sm->vabits32))[sm_off64] = (UShort)MC_BITS64_READABLE;
- OINK(8);
- mc_STOREVn_slow( aA, 8, vbytes, isBigEndian );
+ ((UShort*)(sm->vabits32))[sm_off64] = (UShort)MC_BITS64_READABLE;
} else if (VGM_WORD64_INVALID == vbytes) {
-// ((UShort*)(sm->vabits32))[sm_off64] = (UShort)MC_BITS64_WRITABLE;
-
- VG_(printf)("0: %lx %lx, %llx\n",
- sm->vabits32[SM_OFF(a)+0], sm->vabits32[SM_OFF(a)+1],
- vbytes);
- mc_STOREVn_slow( aA, 8, vbytes, isBigEndian );
-// VG_(printf)("FOO: %p, 0x%lx\n", &( ((UShort*)(sm->vabits32))[sm_off64] ),
-// vbytes);
-{
- UWord x1, x2, y1, y2;
- VG_(printf)("a: %lx %lx, %p\n",
- sm->vabits32[SM_OFF(a)+0], sm->vabits32[SM_OFF(a)+1], a);
-
- x1 = sm->vabits32[SM_OFF(a)+0];
- x2 = sm->vabits32[SM_OFF(a)+1];
-
- sm->vabits32[SM_OFF(a)+0] = MC_BITS32_WRITABLE;
- sm->vabits32[SM_OFF(a)+1] = MC_BITS32_WRITABLE;
- VG_(printf)("c: %lx %lx\n",
- sm->vabits32[SM_OFF(a)+0], sm->vabits32[SM_OFF(a)+1]);
-
- y1 = sm->vabits32[SM_OFF(a)+0];
- y2 = sm->vabits32[SM_OFF(a)+1];
-
- tl_assert2(x1==y1 && x2==y2,
- "%lx %lx, %lx %lx\n", x1, y1, x2, y2);
-}
+ ((UShort*)(sm->vabits32))[sm_off64] = (UShort)MC_BITS64_WRITABLE;
} else {
/* Slow but general case -- writing partially defined bytes. */
PROF_EVENT(212, "mc_STOREV8-slow2");
@@ -1827,7 +1798,6 @@
PROF_EVENT(213, "mc_STOREV8-slow3");
mc_STOREVn_slow( aA, 8, vbytes, isBigEndian );
}
-#endif
}

VG_REGPARM(1)
@@ -1972,7 +1942,8 @@
//---------------------------------------------------------------------------
#else
if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
- MC_BITS32_NOACCESS != vabits32 ))
+ (MC_BITS32_READABLE == vabits32 ||
+ MC_BITS32_WRITABLE == vabits32) ))
{
/* Handle common case quickly: a is suitably aligned, */
/* is mapped, and is addressible. */
|