[xtensa-cvscommit] linux/include/asm-xtensa uaccess.h,1.3,1.4
Brought to you by:
zankel
|
From: <joe...@us...> - 2003-07-07 22:42:02
|
Update of /cvsroot/xtensa/linux/include/asm-xtensa
In directory sc8-pr-cvs1:/tmp/cvs-serv22563/include/asm-xtensa
Modified Files:
uaccess.h
Log Message:
Check for user unaligned addresses before trying to access user space.
Index: uaccess.h
===================================================================
RCS file: /cvsroot/xtensa/linux/include/asm-xtensa/uaccess.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -C2 -d -r1.3 -r1.4
*** uaccess.h 12 Jun 2003 23:43:54 -0000 1.3
--- uaccess.h 7 Jul 2003 22:41:59 -0000 1.4
***************
*** 255,277 ****
retval = 0; \
switch (size) { \
! case 1: __put_user_asm(x,ptr,retval,"s8i"); break; \
! case 2: __put_user_asm(x,ptr,retval,"s16i"); break; \
! case 4: __put_user_asm(x,ptr,retval,"s32i"); break; \
! case 8: { \
__typeof__(*ptr) __v64 = x; \
retval = __copy_to_user(ptr,&__v64,8); \
break; \
! } \
! default: __put_user_bad(); \
} \
} while (0)
/*
* We don't tell gcc that we are accessing memory, but this is OK
* because we do not write to any memory gcc knows about, so there
* are no aliasing issues.
*/
! #define __put_user_asm(x, addr, err, insn) \
__asm__ __volatile__( \
"1: "insn" %1, %2, 0 \n" \
"2: \n" \
--- 255,315 ----
retval = 0; \
switch (size) { \
! case 1: __put_user_asm(x,ptr,retval,1,"s8i"); break; \
! case 2: __put_user_asm(x,ptr,retval,2,"s16i"); break; \
! case 4: __put_user_asm(x,ptr,retval,4,"s32i"); break; \
! case 8: { \
__typeof__(*ptr) __v64 = x; \
retval = __copy_to_user(ptr,&__v64,8); \
break; \
! } \
! default: __put_user_bad(); \
} \
} while (0)
+
+ /* Consider a case where a single user load/store would cause both an
+ * unaligned exception and an MMU-related exception (unaligned
+ * exceptions happen first):
+ *
+ * User code passes a bad variable ptr to a system call.
+ * Kernel tries to access the variable.
+ * Unaligned exception occurs.
+ * Unaligned exception handler tries to make aligned accesses.
+ * Double exception occurs for MMU-related cause (e.g., page not mapped).
+ * do_page_fault() thinks the fault address belongs to the kernel, not the user, and panics.
+ *
+ * The kernel currently prohibits user unaligned accesses. We use the
+ * __check_align_* macros to check for unaligned addresses before
+ * accessing user space so we don't crash the kernel. Both
+ * __put_user_asm and __get_user_asm use these alignment macros, so
+ * the macro-specific labels (0f, 1f) and asm operands (%0, %2, %3)
+ * must stay in sync.
+ */
+
+ #define __check_align_1 ""
+
+ #define __check_align_2 \
+ " _bbci.l %2, 0, 1f \n" \
+ " movi %0, %3 \n" \
+ " _j 2f \n"
+
+ #define __check_align_4 \
+ " _bbsi.l %2, 0, 0f \n" \
+ " _bbci.l %2, 1, 1f \n" \
+ "0: movi %0, %3 \n" \
+ " _j 2f \n"
+
+
/*
* We don't tell gcc that we are accessing memory, but this is OK
* because we do not write to any memory gcc knows about, so there
* are no aliasing issues.
+ *
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
*/
! #define __put_user_asm(x, addr, err, align, insn) \
__asm__ __volatile__( \
+ __check_align_##align \
"1: "insn" %1, %2, 0 \n" \
"2: \n" \
***************
*** 291,295 ****
:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
-
#define __get_user_nocheck(x,ptr,size) \
({ \
--- 329,332 ----
***************
*** 312,329 ****
extern long __get_user_bad(void);
! #define __get_user_size(x,ptr,size,retval) \
! do { \
! retval = 0; \
! switch (size) { \
! case 1: __get_user_asm(x,ptr,retval,"l8ui"); break; \
! case 2: __get_user_asm(x,ptr,retval,"l16ui"); break; \
! case 4: __get_user_asm(x,ptr,retval,"l32i"); break; \
! case 8: retval = __copy_from_user(&x,ptr,8); break; \
! default: (x) = __get_user_bad(); \
! } \
} while (0)
! #define __get_user_asm(x, addr, err, insn) \
__asm__ __volatile__( \
"1: "insn" %1, %2, 0 \n" \
"2: \n" \
--- 349,372 ----
extern long __get_user_bad(void);
! #define __get_user_size(x,ptr,size,retval) \
! do { \
! retval = 0; \
! switch (size) { \
! case 1: __get_user_asm(x,ptr,retval,1,"l8ui"); break; \
! case 2: __get_user_asm(x,ptr,retval,2,"l16ui"); break; \
! case 4: __get_user_asm(x,ptr,retval,4,"l32i"); break; \
! case 8: retval = __copy_from_user(&x,ptr,8); break; \
! default: (x) = __get_user_bad(); \
! } \
} while (0)
!
! /*
! * WARNING: If you modify this macro at all, verify that the
! * __check_align_* macros still work.
! */
! #define __get_user_asm(x, addr, err, align, insn) \
__asm__ __volatile__( \
+ __check_align_##align \
"1: "insn" %1, %2, 0 \n" \
"2: \n" \
|