From: David H. <dho...@re...> - 2000-11-23 16:03:03
|
Here's another patch to traps.c. This one uses set_fs() and changes all __copy_user() to copy_from_user() or copy_to_user() to protect against userspace programs trying to be sneaky and access kernel space on unaligned addresses. David Howells ============= diff -uNr -x CVS -x .* linuxsh/arch/sh/kernel/traps.c my-linuxsh/arch/sh/kernel/traps.c --- linuxsh/arch/sh/kernel/traps.c Thu Nov 23 09:23:46 2000 +++ my-linuxsh/arch/sh/kernel/traps.c Thu Nov 23 14:59:59 2000 @@ -133,7 +133,7 @@ *(unsigned long*)dst = 0; #ifdef __LITTLE_ENDIAN__ - if (__copy_user(dst, src, count)) + if (copy_from_user(dst, src, count)) goto fetch_fault; if ((count == 2) && dst[1] & 0x80) { @@ -160,7 +160,7 @@ dst = (unsigned char*) *rn; dst += regs->regs[0]; - if (__copy_user(dst, src, count)) + if (copy_to_user(dst, src, count)) goto fetch_fault; } ret = 0; @@ -171,7 +171,7 @@ dst = (unsigned char*) *rn; dst += (instruction&0x000F)<<2; - if (__copy_user(dst,src,4)) + if (copy_to_user(dst,src,4)) goto fetch_fault; ret = 0; break; @@ -184,7 +184,7 @@ #if !defined(__LITTLE_ENDIAN__) src += 4-count; #endif - if (__copy_user(dst, src, count)) + if (copy_to_user(dst, src, count)) goto fetch_fault; ret = 0; break; @@ -195,7 +195,7 @@ dst = (unsigned char*) rn; *(unsigned long*)dst = 0; - if (__copy_user(dst,src,4)) + if (copy_from_user(dst,src,4)) goto fetch_fault; ret = 0; break; @@ -208,7 +208,7 @@ *(unsigned long*)dst = 0; #ifdef __LITTLE_ENDIAN__ - if (__copy_user(dst, src, count)) + if (copy_from_user(dst, src, count)) goto fetch_fault; if ((count == 2) && dst[1] & 0x80) { @@ -218,7 +218,7 @@ #else dst += 4-count; - if (__copy_user(dst, src, count)) + if (copy_from_user(dst, src, count)) goto fetch_fault; if ((count == 2) && dst[2] & 0x80) { @@ -239,7 +239,7 @@ dst = (unsigned char*) *rm; /* called Rn in the spec */ dst += (instruction&0x000F)<<1; - if (__copy_user(dst, src, 2)) + if (copy_to_user(dst, src, 2)) goto fetch_fault; ret = 0; break; @@ -254,7 +254,7 @@ dst += 2; #endif - if (__copy_user(dst, src, 2)) + if (copy_from_user(dst, src, 2)) goto fetch_fault; #ifdef __LITTLE_ENDIAN__ @@ -290,7 +290,7 @@ { u16 instruction; - if (__copy_user(&instruction, (u16 *)(regs->pc+2), 2)) { + if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) { /* the instruction-fetch faulted */ if (user_mode(regs)) return -EFAULT; @@ -442,11 +442,14 @@ unsigned long address) { unsigned long error_code; + mm_segment_t oldfs; u16 instruction; int tmp; asm volatile("stc r2_bank,%0": "=r" (error_code)); + oldfs = get_fs(); + if (user_mode(regs)) { sti(); current->thread.error_code = error_code; @@ -456,14 +459,17 @@ if (regs->pc & 1) goto uspace_segv; - if (__copy_user(&instruction, (u16 *)(regs->pc), 2)) { + set_fs(USER_DS); + if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { /* Argh. Fault on the instruction itself. This should never happen non-SMP */ + set_fs(oldfs); goto uspace_segv; } tmp = handle_unaligned_access(instruction, regs); + set_fs(oldfs); if (tmp==0) return; /* sorted */ @@ -475,14 +481,17 @@ if (regs->pc & 1) die("unaligned program counter", regs, error_code); - if (__copy_user(&instruction, (u16 *)(regs->pc), 2)) { + set_fs(KERNEL_DS); + if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { /* Argh. Fault on the instruction itself. This should never happen non-SMP */ + set_fs(oldfs); die("insn faulting in do_address_error", regs, 0); } handle_unaligned_access(instruction, regs); + set_fs(oldfs); } } |