|
From: <sv...@va...> - 2005-12-06 22:19:13
|
Author: njn
Date: 2005-12-06 22:19:08 +0000 (Tue, 06 Dec 2005)
New Revision: 5302
Log:
Fill in helperc_MAKE_STACK_UNINIT. In theory it should now work on AMD64,
though I haven't tried it.
Modified:
branches/COMPVBITS/memcheck/mc_main.c
Modified: branches/COMPVBITS/memcheck/mc_main.c
===================================================================
--- branches/COMPVBITS/memcheck/mc_main.c 2005-12-06 21:47:38 UTC (rev 5301)
+++ branches/COMPVBITS/memcheck/mc_main.c 2005-12-06 22:19:08 UTC (rev 5302)
@@ -1117,14 +1117,40 @@
);
=20
=20
+/* The AMD64 ABI says:
+
+ "The 128-byte area beyond the location pointed to by %rsp is considered
+ to be reserved and shall not be modified by signal or interrupt
+ handlers. Therefore, functions may use this area for temporary data
+ that is not needed across function calls. In particular, leaf functions
+ may use this area for their entire stack frame, rather than adjusting
+ the stack pointer in the prologue and epilogue. This area is known as
+ red zone [sic]."
+
+ So after any call or return we need to mark this redzone as containing
+ undefined values.
+
+ Consider this: we're in function f. f calls g. g moves rsp down
+ modestly (say 16 bytes) and writes stuff all over the red zone, making it
+ defined. g returns. f is buggy and reads from parts of the red zone
+ that it didn't write on. But because g filled that area in, f is going
+ to be picking up defined V bits and so any errors from reading bits of
+ the red zone it didn't write, will be missed. The only solution I could
+ think of was to make the red zone undefined when g returns to f.
+
+ This is in accordance with the ABI, which makes it clear the redzone
+ is volatile across function calls.
+
+ The problem occurs the other way round too: f could fill the RZ up
+ with defined values and g could mistakenly read them. So the RZ
+ also needs to be nuked on function calls.
+*/
void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
{
tl_assert(sizeof(UWord) == sizeof(SizeT));
if (0)
VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );
=20
-tl_assert(0); // XXX
-
# if 0
/* Really slow version */
mc_make_writable(base, len);
@@ -1164,20 +1190,13 @@
* the address range falls entirely with a single
secondary map
* the SM is modifiable
- If all those conditions hold, just update the V bits
- by writing directly on the v-bit array. We don't care
- about A bits; if the address range is marked invalid,
- any attempt to access it will elicit an addressing error,
- and that's good enough.
+ If all those conditions hold, just update the V+A bits
+ by writing directly into the vabits array.
*/
-// XXX: Real version that I commented out -- njn
-tl_assert(0);
-#if 0
if (EXPECTED_TAKEN( len == 128
&& VG_IS_8_ALIGNED(base)
)) {
- /* Now we know the address range is suitably sized and
- aligned. */
+ /* Now we know the address range is suitably sized and aligned. */
UWord a_lo = (UWord)base;
UWord a_hi = (UWord)(base + 127);
UWord sec_lo = a_lo >> 16;
@@ -1194,24 +1213,24 @@
if (EXPECTED_TAKEN( !is_distinguished_sm(sm) )) {
/* And finally, now we know that the secondary in question
is modifiable. */
- UWord v_off = a_lo & 0xFFFF;
- ULong* p = (ULong*)(&sm->vbyte[v_off]);
- p[ 0] = VGM_WORD64_INVALID;
- p[ 1] = VGM_WORD64_INVALID;
- p[ 2] = VGM_WORD64_INVALID;
- p[ 3] = VGM_WORD64_INVALID;
- p[ 4] = VGM_WORD64_INVALID;
- p[ 5] = VGM_WORD64_INVALID;
- p[ 6] = VGM_WORD64_INVALID;
- p[ 7] = VGM_WORD64_INVALID;
- p[ 8] = VGM_WORD64_INVALID;
- p[ 9] = VGM_WORD64_INVALID;
- p[10] = VGM_WORD64_INVALID;
- p[11] = VGM_WORD64_INVALID;
- p[12] = VGM_WORD64_INVALID;
- p[13] = VGM_WORD64_INVALID;
- p[14] = VGM_WORD64_INVALID;
- p[15] = VGM_WORD64_INVALID;
+ UWord v_off = SM_OFF(a_lo);
+ UShort* p = (UShort*)(&sm->vabits32[v_off]);
+ p[ 0] = MC_BITS64_WRITABLE;
+ p[ 1] = MC_BITS64_WRITABLE;
+ p[ 2] = MC_BITS64_WRITABLE;
+ p[ 3] = MC_BITS64_WRITABLE;
+ p[ 4] = MC_BITS64_WRITABLE;
+ p[ 5] = MC_BITS64_WRITABLE;
+ p[ 6] = MC_BITS64_WRITABLE;
+ p[ 7] = MC_BITS64_WRITABLE;
+ p[ 8] = MC_BITS64_WRITABLE;
+ p[ 9] = MC_BITS64_WRITABLE;
+ p[10] = MC_BITS64_WRITABLE;
+ p[11] = MC_BITS64_WRITABLE;
+ p[12] = MC_BITS64_WRITABLE;
+ p[13] = MC_BITS64_WRITABLE;
+ p[14] = MC_BITS64_WRITABLE;
+ p[15] = MC_BITS64_WRITABLE;
return;
}
}
@@ -1219,7 +1238,6 @@
=20
/* else fall into slow case */
mc_make_writable(base, len);
-#endif
}
=20
=20
|