From: Jesper S. <js...@re...> - 2000-11-07 14:15:27
The patch below is based on an old patch from Philipp Rumpf. It allows
the kernel to make unaligned accesses if it should happen to think
that's a good thing to do. handle_unaligned got a bit ugly to look at -
somebody with nothing better to do could probably have some fun
rewriting it :)

Below the patch is the code I used to test it. It needs someone to
eyeball the output to make sure it's sensible - oh, and you probably
need some time to look at the test code to understand it; it's not
exactly beautiful.

Cheers,
Jesper

* arch/sh/kernel/traps.c: Added bad alignment handling, based on
  original code by Philipp Rumpf, bug fixed, and enhanced to handle
  r0-indexed access as well.

* arch/sh/kernel/entry.S: Same.
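(A reading aid before the patch - not part of it: the mov forms that
handle_unaligned fixes up all keep the register numbers and the access
size in the same bit positions, which is what its shift-and-mask decode
relies on. This is from my reading of the SH programming manual, so
double-check it; the names in the sketch are mine, not the patch's:)

#include <stdio.h>

/*
 * Instruction groups being emulated (nnnn = Rn field, mmmm = Rm field,
 * ss = size with 1<<ss = 1/2/4 bytes, d = pre-decrement/post-increment):
 *
 *   0000nnnnmmmm11ss   mov.[bwl] @(r0,rm),rn    r0-indexed load
 *   0000nnnnmmmm01ss   mov.[bwl] rm,@(r0,rn)    r0-indexed store
 *   0010nnnnmmmm0dss   mov.[bwl] rm,@rn / @-rn  store
 *   0110nnnnmmmm0dss   mov.[bwl] @rm,rn / @rm+  load
 */
static void decode(unsigned short insn)
{
        printf("group %d: rn=r%d rm=r%d count=%d\n",
               insn >> 12,        /* 0, 2 or 6 above       */
               (insn >> 8) & 15,  /* nnnn                  */
               (insn >> 4) & 15,  /* mmmm                  */
               1 << (insn & 3));  /* ss -> 1, 2 or 4 bytes */
}

int main(void)
{
        decode(0x6211);         /* mov.w @r1,r2  */
        decode(0x6546);         /* mov.l @r4+,r5 */
        decode(0x2635);         /* mov.w r3,@-r6 */
        return 0;
}

(Group 0 uses bit 3 to tell loads from stores, hence the
'instruction & 8' test in the patch.)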
diff -urN --exclude-from=/home/jskov/lib/diff-excludes -P /opt/RH-linuxsh/LinuxSH/kernel/arch/sh/kernel/entry.S ./entry.S
--- /opt/RH-linuxsh/LinuxSH/kernel/arch/sh/kernel/entry.S  Fri Oct 27 13:27:41 2000
+++ ./entry.S   Mon Nov  6 17:01:13 2000
@@ -199,6 +199,28 @@
 2:     .long   SYMBOL_NAME(__do_page_fault)
 3:     .long   SYMBOL_NAME(do_page_fault)

+       .align  2
+address_error_load:
+       bra     call_dae
+       mov     #0,r5           ! writeaccess = 0
+
+       .align  2
+address_error_store:
+       bra     call_dae
+       mov     #1,r5           ! writeaccess = 1
+
+call_dae:
+       STI()
+       mov.l   1f, r0
+       mov.l   @r0, r6         ! address
+       mov.l   2f, r0
+       jmp     @r0
+       mov     r15, r4         ! regs
+
+       .align  2
+1:     .long   MMU_TEA
+2:     .long   SYMBOL_NAME(do_address_error)
+
 #if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
        .align  2
        /* Unwind the stack and jmp to the debug entry */
@@ -833,8 +855,8 @@
        .long   initial_page_write
        .long   tlb_protection_violation_load
        .long   tlb_protection_violation_store
-       .long   error   ! address_error_load (filled by trap_init)
-       .long   error   ! address_error_store (filled by trap_init)
+       .long   address_error_load
+       .long   address_error_store
 #if defined(__SH4__)
        .long   SYMBOL_NAME(do_fpu_error)
 #else
diff -urN --exclude-from=/home/jskov/lib/diff-excludes -P /opt/RH-linuxsh/LinuxSH/kernel/arch/sh/kernel/traps.c ./traps.c
--- /opt/RH-linuxsh/LinuxSH/kernel/arch/sh/kernel/traps.c  Fri Oct 27 13:27:41 2000
+++ ./traps.c   Tue Nov  7 12:49:30 2000
@@ -2,7 +2,8 @@
  *
  * linux/arch/sh/traps.c
  *
- * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ *                 Copyright (C) 2000 Philipp Rumpf
  */

 /*
@@ -89,8 +90,126 @@
        }
 }

-DO_ERROR( 7, SIGSEGV, "address error (load)", address_error_load, current)
-DO_ERROR( 8, SIGSEGV, "address error (store)", address_error_store, current)
+static void handle_unaligned(u16 instruction, struct pt_regs *regs)
+{
+       int index, count;
+       unsigned long *rm, *rn;
+       unsigned char *src, *dst;
+
+       index = (instruction>>8)&15;
+       rn = &regs->regs[index];
+
+       index = (instruction>>4)&15;
+       rm = &regs->regs[index];
+
+       count = 1<<(instruction&3);
+
+       switch(instruction>>12) {
+       case 0: /* mov.[bwl] to/from memory via r0+rn */
+               if (instruction & 8) {
+                       /* from memory */
+                       src = (unsigned char*) *rm;
+                       src += regs->regs[0];
+                       dst = (unsigned char*) rn;
+                       *(unsigned long*)dst = 0;
+
+#ifdef __BIG_ENDIAN
+                       dst += 4-count;
+
+                       memcpy(dst, src, count);
+
+                       if ((count == 2) && dst[0] & 0x80) {
+                               dst[-2] = 0xff;
+                               dst[-1] = 0xff;
+                       }
+#else
+                       memcpy(dst, src, count);
+
+                       if ((count == 2) && dst[1] & 0x80) {
+                               dst[2] = 0xff;
+                               dst[3] = 0xff;
+                       }
+#endif
+               } else {
+                       /* to memory */
+                       src = (unsigned char*) rm;
+#ifdef __BIG_ENDIAN
+                       src += 4-count;
+#endif
+                       dst = (unsigned char*) *rn;
+                       dst += regs->regs[0];
+
+                       memcpy(dst, src, count);
+               }
+               break;
+
+       case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
+               if(instruction & 4)
+                       *rn -= count;
+               src = (unsigned char*) rm;
+               dst = (unsigned char*) *rn;
+#ifdef __BIG_ENDIAN
+               src += 4-count;
+#endif
+
+               memcpy(dst, src, count);
+
+               break;
+       case 6:
+               /* mov.[bwl] from memory, possibly with post-increment */
+               src = (unsigned char*) *rm;
+               if(instruction & 4)
+                       *rm += count;
+               dst = (unsigned char*) rn;
+               *(unsigned long*)dst = 0;
+
+#ifdef __BIG_ENDIAN
+               dst += 4-count;
+
+               memcpy(dst, src, count);
+
+               if ((count == 2) && dst[0] & 0x80) {
+                       dst[-2] = 0xff;
+                       dst[-1] = 0xff;
+               }
+#else
+               memcpy(dst, src, count);
+
+               if ((count == 2) && dst[1] & 0x80) {
+                       dst[2] = 0xff;
+                       dst[3] = 0xff;
+               }
+#endif
+       }
+
+       regs->pc += 2;
+}
+
+asmlinkage void do_address_error(struct pt_regs *regs,
+                                unsigned long writeaccess,
+                                unsigned long address)
+{
+       unsigned long error_code;
+
+       asm volatile("stc r2_bank,%0": "=r" (error_code));
+
+       if(user_mode(regs)) {
+               sti();
+               current->thread.error_code = error_code;
+               current->thread.trap_no = (writeaccess) ? 8 : 7;
+               force_sig(SIGSEGV, current);
+       } else {
+               u16 instruction;
+
+               if(regs->pc & 1)
+                       die("unaligned program counter", regs, error_code);
+
+               instruction = *(u16 *)(regs->pc);
+
+               handle_unaligned(instruction, regs);
+       }
+}
+
 DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
 DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
@@ -108,8 +227,6 @@
        extern void *vbr_base;
        extern void *exception_handling_table[14];

-       exception_handling_table[7] = (void *)do_address_error_load;
-       exception_handling_table[8] = (void *)do_address_error_store;
        exception_handling_table[12] = (void *)do_reserved_inst;
        exception_handling_table[13] = (void *)do_illegal_slot_inst;
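(Another aside, not part of the patch: the 0xff stores in
handle_unaligned are there because mov.b and mov.w sign-extend into the
full 32-bit register. A byte access can never be unaligned, and a
four-byte copy fills the whole register anyway, so only the count == 2
case needs the sign bit replicated by hand. On little-endian the word
case boils down to the sketch below - the function name is mine, and it
assumes a 32-bit unsigned long as on sh:)

#include <string.h>

/* Little-endian sketch of the emulated mov.w load: copy the two bytes
 * into a zeroed register image, then replicate the sign bit (bit 7 of
 * dst[1], i.e. bit 15 of the loaded value) into the upper half. */
static unsigned long emulate_word_load(const unsigned char *src)
{
        unsigned long reg = 0;
        unsigned char *dst = (unsigned char *)&reg;

        memcpy(dst, src, 2);    /* the unaligned 16-bit access */
        if (dst[1] & 0x80) {    /* negative? sign-extend       */
                dst[2] = 0xff;
                dst[3] = 0xff;
        }
        return reg;
}

(The big-endian branches are the mirror image: the copy lands at offset
4-count and the sign bit sits in the first copied byte.)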
--------------------------- testing code ----------------------------

unsigned char tst[] = {0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0x83};
unsigned char tst2[16];

#define LOAD(p) \
    asm volatile ("mov.b @%3, %0; mov.w @%3, %1; mov.l @%3, %2;" \
                  : "=&r" (b), "=&r" (w), "=&r" (l) : "r" (p)); \
    printk("b %08x w %08x l %08x\n", b, w, l);

#define STORE(v, pw, pl) \
    asm volatile ("mov.w %0, @%1; mov.l %0, @%2;" \
                  : : "r" (v), "r" (pw), "r" (pl));

#define LOADI(p, i) \
    asm volatile ("mov.b @(r0,%3), %0; mov.w @(r0,%3), %1; mov.l @(r0,%3), %2;" \
                  : "=&r" (b), "=&r" (w), "=&r" (l) : "r" (p), "z" (i)); \
    printk("b %08x w %08x l %08x\n", b, w, l);

#define STOREI(v, pw, pl, i) \
    asm volatile ("mov.w %0, @(r0,%1); mov.l %0, @(r0,%2);" \
                  : : "r" (v), "r" (pw), "r" (pl), "z" (i));

#define DUMP() \
    printk("%02x %02x %02x %02x %02x %02x %02x %02x ", \
           tst2[0], tst2[1], tst2[2], tst2[3], \
           tst2[4], tst2[5], tst2[6], tst2[7]); \
    printk("%02x %02x %02x %02x %02x %02x %02x %02x\n", \
           tst2[8], tst2[9], tst2[10], tst2[11], \
           tst2[12], tst2[13], tst2[14], tst2[15]);

#define LOADP(p) \
    asm volatile ("mov %4, %0; mov.l @%0+, %1; mov.w @%0+, %2; mov.b @%0+, %3;" \
                  : "=r" (tmp), "=&r" (l), "=&r" (w), "=&r" (b) : "r" (p)); \
    printk("b %08x w %08x l %08x\n", b, w, l);

#define STOREP(v, p) \
    asm volatile ("mov %2,%0 ; mov.b %1,@-%2; mov.w %1,@-%2 ;mov.l %1, @-%2;" \
                  : "=r" (tmp) : "r" (v), "0" (p));

static void crashme(void)
{
    unsigned char* p = tst;
    unsigned char* p2 = tst2;
    unsigned int b, w, l;
    unsigned long tmp;

    printk("@rm load test\n");
    LOAD(p); LOAD(p+1); LOAD(p+2); LOAD(p+5); LOAD(p+6);

    printk("@rm store test\n");
    STORE(0x11223344, p2+1, p2+5); DUMP();
    STORE(0x55667788, p2+2, p2+6); DUMP();
    STORE(0xaabbccdd, p2+3, p2+7); DUMP();

    printk("@-rm+ store/load test\n");
    STOREP(0x01020304, p2+7); LOADP(p2+7-7); DUMP();
    STOREP(0x08090a0b, p2+8); LOADP(p2+8-7); DUMP();
    STOREP(0x0c0d0e0f, p2+9); LOADP(p2+9-7); DUMP();

    printk("r0,rx test, load\n");
    LOADI(p, 1); LOADI(p, 2); LOADI(p, 3);

    printk("r0,rx test, store\n");
    STOREI(0x11223344, p2, p2+4, 1); DUMP();
    STOREI(0x55667788, p2, p2+4, 2); DUMP();
    STOREI(0x99aabbcc, p2, p2+4, 3); DUMP();

    for(;;);
}
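(One last note: do_address_error only emulates when the fault happens in
kernel mode - user space still gets SIGSEGV, same as before, which is
why the test above has to run as kernel code. A hypothetical user-mode
check, assuming the compiler really emits a mov.w for the volatile
access:)

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_segv(int sig)
{
        /* under the patch, user-mode unaligned access stays fatal */
        printf("got SIGSEGV - no emulation for user space, as expected\n");
        exit(0);
}

int main(void)
{
        static unsigned char buf[8];
        volatile unsigned short *p = (volatile unsigned short *)(buf + 1);

        signal(SIGSEGV, on_segv);
        (void)*p;               /* odd address: mov.w should trap */
        printf("unaligned load succeeded?!\n");
        return 0;
}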