|
From: <sv...@va...> - 2005-12-21 01:29:52
|
Author: njn
Date: 2005-12-21 01:29:37 +0000 (Wed, 21 Dec 2005)
New Revision: 5395
Log:
Make it compile again.
Modified:
branches/COMPVBITS/include/pub_tool_libcbase.h
branches/COMPVBITS/memcheck/mc_main.c
Modified: branches/COMPVBITS/include/pub_tool_libcbase.h
===================================================================
--- branches/COMPVBITS/include/pub_tool_libcbase.h	2005-12-20 23:02:43 UTC (rev 5394)
+++ branches/COMPVBITS/include/pub_tool_libcbase.h	2005-12-21 01:29:37 UTC (rev 5395)
@@ -90,6 +90,7 @@
------------------------------------------------------------------ */
 
// Check if an address/whatever is aligned
+#define VG_IS_2_ALIGNED(aaa_p)  (0 == (((Addr)(aaa_p)) & ((Addr)0x1)))
 #define VG_IS_4_ALIGNED(aaa_p)  (0 == (((Addr)(aaa_p)) & ((Addr)0x3)))
 #define VG_IS_8_ALIGNED(aaa_p)  (0 == (((Addr)(aaa_p)) & ((Addr)0x7)))
 #define VG_IS_16_ALIGNED(aaa_p) (0 == (((Addr)(aaa_p)) & ((Addr)0xf)))
Modified: branches/COMPVBITS/memcheck/mc_main.c
===================================================================
--- branches/COMPVBITS/memcheck/mc_main.c	2005-12-20 23:02:43 UTC (rev 5394)
+++ branches/COMPVBITS/memcheck/mc_main.c	2005-12-21 01:29:37 UTC (rev 5395)
@@ -34,6 +34,9 @@
 
test whether it would be faster, for LOADV4, to check
only for 8-byte validity on the fast path
+
+   (That could only be faster for 64-bit platforms, right?
+ --njn Dec-20-2005)
*/
 
#include "pub_tool_basics.h"
@@ -575,7 +578,9 @@
static inline
 void insert_vabits16_into_vabits32 ( Addr a, UChar vabits16, UChar* vabits32 )
 {
-   UInt shift = (a & 2) << 1;          // shift by 0 or 4
+   UInt shift;
+   tl_assert(VG_IS_2_ALIGNED(a));      // Must be 2-aligned
+   shift = (a & 2) << 1;               // shift by 0 or 4
    *vabits32 &= ~(0xf << shift);       // mask out the four old bits
    *vabits32 |= (vabits16 << shift);   // mask in the four new bits
}
@@ -583,11 +588,21 @@
static inline
UChar extract_vabits8_from_vabits32 ( Addr a, UChar vabits32 )
{
-   UInt shift = (a & 3) << 1;   // use (a % 4) for the offset
-   vabits32 >>= shift;          // shift the two bits to the bottom
-   return 0x3 & vabits32;       // mask out the rest
+   UInt shift = (a & 3) << 1;   // shift by 0, 2, 4, or 6
+   vabits32 >>= shift;          // shift the two bits to the bottom
+   return 0x3 & vabits32;       // mask out the rest
}
 
+static inline
+UChar extract_vabits16_from_vabits32 ( Addr a, UChar vabits32 )
+{
+ UInt shift;
+ tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
+   shift = (a & 2) << 1;        // shift by 0 or 4
+   vabits32 >>= shift;          // shift the four bits to the bottom
+ return 0xf & vabits32; // mask out the rest
+}
+
// Note that these two are only used in slow cases. The fast cases do
// clever things like combine the auxmap check (in
// get_secmap_{read,writ}able) with alignment checks.
|