From: peter g. <pga...@li...> - 2003-03-25 07:51:23
This patch applies Stuart Menefy's patch to 2.5.13. I have done it in a
mostly parrot-like fashion, so be warned. It doesn't seem to make my
setup crash.

---------------------------------------------------------------------------------------------------
diff -ruN linuxsh-2.5.13.notlb/arch/sh/kernel/entry.S linuxsh-2.5.13/arch/sh/kernel/entry.S
--- linuxsh-2.5.13.notlb/arch/sh/kernel/entry.S	Fri Mar 21 09:33:09 2003
+++ linuxsh-2.5.13/arch/sh/kernel/entry.S	Fri Mar 21 18:26:28 2003
@@ -9,10 +9,14 @@
  * for more details.
  *
  */
+#define NEW_TLB
 
 #include <linux/sys.h>
 #include <linux/linkage.h>
 #include <linux/config.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
 #include <asm/thread_info.h>
 
 #if !defined(CONFIG_NFSD) && !defined(CONFIG_NFSD_MODULE)
@@ -66,12 +70,10 @@
 #else
 INTEVT = 0xffffffd8
 #endif
-MMU_TEA = 0xfffffffc	! TLB Exception Address Register
 #elif defined(__SH4__)
 TRA = 0xff000020
 EXPEVT = 0xff000024
 INTEVT = 0xff000028
-MMU_TEA = 0xff00000c	! TLB Exception Address Register
 #endif
 
 /* Offsets to the stack */
@@ -159,6 +161,14 @@
 	mov	#1, r5
 
 call_dpf:
+#ifdef NEW_TLB
+	mov.l	1f, r0
+	mov.l	@r0, r6		! address
+	mov.l	3f, r0
+	STI()
+	jmp	@r0
+	 mov	r15, r4		! regs
+#else
 	mov.l	1f, r0
 	mov	r5, r8
 	mov.l	@r0, r6
@@ -179,10 +189,14 @@
 	mov	r8, r5
 	jmp	@r0
 	 mov	r15, r4
+#endif
+
 	.align 2
 1:	.long	MMU_TEA
+#ifndef NEW_TLB
 2:	.long	__do_page_fault
+#endif
 3:	.long	do_page_fault
 
 	.align	2
@@ -534,9 +548,130 @@
 2:	.long	ret_from_exception
 !
 !
+
+/* This code makes some assumptions to improve performance.
+ * Make sure they are still true. */
+#if PTRS_PER_PGD != PTRS_PER_PTE
+#error PGD and PTE sizes don't match
+#endif
+#if PTRS_PER_PMD != 1
+#error PMD is not folded into the pgd
+#endif
+
+/* gas doesn't flag impossible values for mov #immediate as an error */
+#if (_PAGE_PRESENT >> 2) > 0x7f
+#error cannot load PAGE_PRESENT as an immediate
+#endif
+
 	.balign	1024,0,1024
 tlb_miss:
-	mov.l	1f, k2
+
+#ifdef NEW_TLB
+	! Increment the counts
+#ifdef COUNT_EXCEPTIONS
+	mov.l	9f, k1
+	mov.l	@k1, k2
+	add	#1, k2
+	mov.l	k2, @k1
+#endif
+
+	! k0	scratch
+	! k1	page table pointer
+	! k2	faulting address
+	! k3	pgd and pte index masks
+	! k4	shift
+
+#if defined(__SH4__)
+	mov.l	8f, k0			! 9 LS (latency=2)
+#else
+	mov	#MMU_PTEH, k0
+#endif
+
+	mov.l	4f, k3			! 9 LS (latency=2)
+	mov	#-(PGDIR_SHIFT-2), k4	! 6 EX
+
+	mov.l	@(MMU_TEA-MMU_PTEH,k0), k2	! 18 LS (latency=2)
+
+	cmp/pz	k2
+	mov.l	@(MMU_TTB-MMU_PTEH,k0), k1	! 18 LS (latency=2)
+
+	bt	18f
+
+	mov.l	1f, k1
+18:
+
+	! Load up the pgd entry (k1)
+	mov	k2, k0			! 5 MT (latency=0)
+	shld	k4, k0			! 99 EX
+
+	and	k3, k0			! 78 EX
+
+	mov.l	@(k0, k1), k1		! 21 LS (latency=2)
+
+	mov	#-(PAGE_SHIFT-2), k4	! 6 EX
+
+	tst	k1, k1			! 86 MT
+
+	bt	20f			! 110 BR
+
+	! Load up the pte entry (k1)
+	mov	k2, k0			! 5 MT (latency=0)
+	shld	k4, k0			! 99 EX
+
+	and	k3, k0			! 78 EX
+
+	mov.l	@(k0, k1), k1		! 21 LS
+	mov	#(_PAGE_PRESENT>>2), k4	! 6 EX
+
+	! Test the entry for none and present
+	tst	k1, k1			! 86 MT
+	shll2	k4			! 101 EX
+
+#if defined(__SH4__)
+	mov.l	8f, k0			! 9 LS (latency=2)
+#else
+	mov	#MMU_PTEH, k0
+#endif
+	bt	20f			! 110 BR
+
+	tst	k4, k1			! 68 MT
+	mov.l	7f, k3			! 9 LS (latency=2)
+
+	! stall for load
+
+	and	k3, k1			! 78 EX
+	bt	20f			! 110 BR
+
+	! We could check for writes to non-writable pages,
+	! or writes to clean pages here...
+	! We could also make the page young...
+
+	! Set up the entry
+	! Should also be setting up MMU.PTEA here for SH4
+
+	mov.l	k1, @(MMU_PTEL-MMU_PTEH,k0)	! 27 LS
+	ldtlb				! 128 CO
+
+	! At least one instruction between ldtlb and rte
+	nop
+
+	rte
+	 nop
+
+	.align 2
+#define SYMBOL_NAME(x) x
+1:	.long	SYMBOL_NAME(swapper_pg_dir)
+4:	.long	(PTRS_PER_PGD-1) << 2
+7:	.long	_PAGE_FLAGS_HARDWARE_MASK
+8:	.long	MMU_PTEH
+/*9:	.long	SYMBOL_NAME(exception_count_miss)*/
+
+#endif
+	! Either pgd or pte not present
+20:	mov.l	1f, k2
 	mov.l	4f, k3
 	bra	handle_exception
 	mov.l	@k2, k2
@@ -613,6 +748,15 @@
 	stc	k_ex_code, r8
 	shlr2	r8
 	shlr	r8
+
+#ifdef COUNT_EXCEPTIONS
+	mov.l	2f, r9
+	add	r8, r9
+	mov.l	@r9, r10
+	add	#1, r10
+	mov.l	r10, @r9
+#endif
+
 	mov.l	4f, r9
 	add	r8, r9
 	mov.l	@r9, r9
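To make the scheduled assembly above easier to check: the fast path is
nothing more than a two-level page-table walk done in software. A
negative (P3, kernel) faulting address is walked through
swapper_pg_dir, while a user address is walked through the current pgd
fetched from MMU.TTB; an empty pgd slot, an empty pte, or a pte with
_PAGE_PRESENT clear all branch to the old slow path at label 20. Below
is a minimal userspace C model of that walk. It is a sketch only: the
type names, the tlb_refill_walk function and the constant values are
invented for illustration, and the real handler of course operates on
the hardware registers and the live page tables instead of these toy
arrays.

#include <stdio.h>
#include <stdint.h>

#define PGDIR_SHIFT  22            /* assumed: 4MB covered per pgd slot */
#define PAGE_SHIFT   12            /* assumed: 4kB pages */
#define PTRS_PER_PGD 1024
#define PTRS_PER_PTE 1024          /* must equal PTRS_PER_PGD; the #error
                                      check in the patch enforces this */
#define PAGE_PRESENT 0x100UL       /* invented stand-in for _PAGE_PRESENT */

typedef uintptr_t pte_t;
typedef pte_t *pgd_slot_t;         /* folded pmd: a pgd slot is simply a
                                      pointer to a pte table */

/* Return the value the handler would write to MMU.PTEL before ldtlb,
 * or 0 to take the slow path (label "20:" in the assembly). */
static pte_t tlb_refill_walk(pgd_slot_t *pgd, uintptr_t address)
{
        pgd_slot_t pte_table =
                pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)];
        if (!pte_table)            /* "tst k1, k1; bt 20f" */
                return 0;

        pte_t pte = pte_table[(address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
        if (!(pte & PAGE_PRESENT)) /* pte none, or present bit clear */
                return 0;
        return pte;                /* the handler additionally masks with
                                      _PAGE_FLAGS_HARDWARE_MASK */
}

int main(void)
{
        static pte_t pte_table[PTRS_PER_PTE];
        static pgd_slot_t pgd[PTRS_PER_PGD];
        uintptr_t addr = (5UL << PGDIR_SHIFT) | (7UL << PAGE_SHIFT) | 0x123;

        pgd[5] = pte_table;        /* fake one mapping */
        pte_table[7] = 0x12345000UL | PAGE_PRESENT;

        printf("refill for %#lx -> pte %#lx\n", (unsigned long)addr,
               (unsigned long)tlb_refill_walk(pgd, addr));
        return 0;
}

Note that because pmd entries now hold virtual (P1) addresses (see the
pgalloc.h and pgtable.h changes further down), the assembly can
dereference the pgd slot directly, with no physical-to-virtual
conversion in the refill path.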
diff -ruN linuxsh-2.5.13.notlb/arch/sh/mm/fault.c linuxsh-2.5.13/arch/sh/mm/fault.c
--- linuxsh-2.5.13.notlb/arch/sh/mm/fault.c	Fri Mar 21 17:07:53 2003
+++ linuxsh-2.5.13/arch/sh/mm/fault.c	Fri Mar 21 18:26:28 2003
@@ -99,6 +99,40 @@
 	tsk = current;
 	mm = tsk->mm;
 
+	if (address >= TASK_SIZE) {
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 */
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pmd_t *pmd, *pmd_k;
+
+		if (!mm) {
+			panic("kernel fault with no valid mm\n");
+		}
+
+		pgd = mm->pgd + offset;
+		pgd_k = init_mm.pgd + offset;
+
+		/* This will never happen with the folded page table. */
+		if (!pgd_present(*pgd)) {
+			if (!pgd_present(*pgd_k))
+				goto bad_area_nosemaphore;
+			set_pgd(pgd, *pgd_k);
+			return;
+		}
+
+		pmd = pmd_offset(pgd, address);
+		pmd_k = pmd_offset(pgd_k, address);
+
+		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+			goto bad_area_nosemaphore;
+		set_pmd(pmd, *pmd_k);
+		return;
+	}
+
 	/*
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
@@ -159,6 +193,7 @@
 
 bad_area:
 	up_read(&mm->mmap_sem);
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
 		tsk->thread.address = address;
 		tsk->thread.error_code = writeaccess;
@@ -234,6 +269,7 @@
 	goto no_context;
 }
 
+#if 0
 /*
  * Called with interrupt disabled.
  */
@@ -282,6 +318,9 @@
 	update_mmu_cache(NULL, address, entry);
 	return 0;
 }
+#endif
+
 
 void update_mmu_cache(struct vm_area_struct * vma,
 		      unsigned long address, pte_t pte)
@@ -293,9 +332,14 @@
 	unsigned long ptea;
 #endif
 
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
+	/* vma can be null when called for a P3 address from
+	 * copy_user_page */
+	if (vma) {
+		/* Ptrace may call this routine to access an address
+		 * in the process being debugged. */
+		if (current->active_mm != vma->vm_mm)
+			return;
+	}
 
 #if defined(__SH4__)
 	page = pte_page(pte);
@@ -313,7 +357,7 @@
 	ctrl_outl(vpn, MMU_PTEH);
 
 	pteval = pte_val(pte);
-#if defined(__SH4__)
+#if defined(__SH4__) && !defined(CONFIG_CPU_SUBTYPE_ST40)
 	/* Set PTEA register */
 	/* TODO: make this look less hacky */
 	ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1);
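The do_page_fault() hunk above is the usual lazy vmalloc-fault trick:
kernel-range mappings are created only in init_mm's page table, and a
process pgd picks up a missing top-level slot the first time it faults
on that range. Because the second-level tables themselves are shared,
copying the single slot cures the fault for every future access. Here
is a toy model of just that decision logic; sync_kernel_slot, SLOTS and
the addresses are invented, not kernel code:

#include <stdio.h>

#define SLOTS 8                    /* toy PTRS_PER_PGD */

static long master_pgd[SLOTS];     /* stands in for init_mm.pgd */
static long task_pgd[SLOTS];       /* stands in for current->mm->pgd */

/* Mirrors the added code: 0 means the fault is cured by copying the
 * master slot, -1 means it is a real fault (bad_area_nosemaphore). */
static int sync_kernel_slot(int idx)
{
        if (task_pgd[idx])         /* already present, so not a missing-sync
                                      fault: pmd_present(*pmd) */
                return -1;
        if (!master_pgd[idx])      /* master empty too, bad access:
                                      !pmd_present(*pmd_k) */
                return -1;
        task_pgd[idx] = master_pgd[idx];   /* set_pmd(pmd, *pmd_k) */
        return 0;
}

int main(void)
{
        master_pgd[7] = 0x12345000L;   /* e.g. a fresh vmalloc mapping */

        printf("slot 7: %s\n", sync_kernel_slot(7) ? "fault" : "synced");
        printf("slot 6: %s\n", sync_kernel_slot(6) ? "fault" : "synced");
        return 0;
}

With the folded page table the pgd_present() branch in the real code
can never trigger, as the patch's own comment says; all the work
happens at the pmd level.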
diff -ruN linuxsh-2.5.13.notlb/arch/sh/mm/init.c linuxsh-2.5.13/arch/sh/mm/init.c
--- linuxsh-2.5.13.notlb/arch/sh/mm/init.c	Fri Mar 21 09:33:09 2003
+++ linuxsh-2.5.13/arch/sh/mm/init.c	Fri Mar 21 18:26:28 2003
@@ -90,20 +90,19 @@
 
 /*
  * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
  */
 void __init paging_init(void)
 {
 	int i;
 	pgd_t * pg_dir;
 
-	/* We don't need kernel mapping as hardware support that. */
+	/* We don't need to map the kernel through the TLB, as it is
+	 * permanently mapped using P1. So clear the entire pgd (actually
+	 * the pmd, because we have a folded page table).
+	 */
 	pg_dir = swapper_pg_dir;
 
 	for (i=0; i < PTRS_PER_PGD; i++)
-		pgd_val(pg_dir[i]) = 0;
+		pmd_clear(pmd_offset(&pg_dir[i], 0));
 
 	/* Enable MMU */
 	ctrl_outl(MMU_CONTROL_INIT, MMUCR);
@@ -114,6 +113,9 @@
 	mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
 	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
 
+	/* set up the initial value of the MMU.TTB */
+	activate_mm(NULL, &init_mm);
+
 	{
 		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
 		unsigned long max_dma, low, start_pfn;
@@ -219,12 +221,16 @@
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
+	unsigned int pgd_size = (PTRS_PER_PGD * sizeof(pgd_t));
 	pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
 
-	if (pgd)
-		memset(pgd, 0, pgd_size);
+	if (pgd) {
+		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+		memcpy(pgd + USER_PTRS_PER_PGD,
+		       swapper_pg_dir + USER_PTRS_PER_PGD,
+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	}
 
 	return pgd;
 }
diff -ruN linuxsh-2.5.13.notlb/include/asm-sh/mmu_context.h linuxsh-2.5.13/include/asm-sh/mmu_context.h
--- linuxsh-2.5.13.notlb/include/asm-sh/mmu_context.h	Fri Mar 21 09:33:09 2003
+++ linuxsh-2.5.13/include/asm-sh/mmu_context.h	Fri Mar 21 18:26:28 2003
@@ -14,6 +14,8 @@
  * (b) ASID (Address Space IDentifier)
  */
 
+#ifndef __ASSEMBLY__
+
 /*
  * Cache of MMU context last used.
  */
@@ -84,6 +86,8 @@
 	/* Do nothing */
 }
 
+#endif /* __ASSEMBLY__ */
+
 /* Other MMU related constants. */
 
 #if defined(__sh3__)
@@ -127,6 +131,8 @@
 #define MMU_ITLB_VALID	0x100
 #endif
 
+#ifndef __ASSEMBLY__
+
 static __inline__ void set_asid(unsigned long asid)
 {
 	unsigned long __dummy;
@@ -161,8 +167,8 @@
 	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
 }
 
-/* MMU_TTB can be used for optimizing the fault handling.
-   (Currently not used) */
+/* MMU.TTB is used for optimizing the fault handling.
+ * It always points to the base of the current pgd. */
 static __inline__ void switch_mm(struct mm_struct *prev,
 				 struct mm_struct *next,
 				 struct task_struct *tsk, unsigned int cpu)
@@ -184,5 +190,7 @@
 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
 {
 }
+
+#endif /* __ASSEMBLY__ */
 #endif /* __ASM_SH_MMU_CONTEXT_H */
diff -ruN linuxsh-2.5.13.notlb/include/asm-sh/pgalloc.h linuxsh-2.5.13/include/asm-sh/pgalloc.h
--- linuxsh-2.5.13.notlb/include/asm-sh/pgalloc.h	Fri Mar 21 09:33:09 2003
+++ linuxsh-2.5.13/include/asm-sh/pgalloc.h	Mon Mar 24 08:58:30 2003
@@ -11,12 +11,12 @@
 #define pgtable_cache_size	0L
 
 #define pmd_populate_kernel(mm, pmd, pte) \
-	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+	set_pmd(pmd, __pmd(pte))
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				struct page *pte)
 {
-	set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
+	set_pmd(pmd, __pmd(page_address(pte)));
 }
 
 /*
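The pgalloc.h change just above is the representational trick that
keeps the assembly walker cheap: a pmd entry now holds the kernel
virtual (P1) address of the pte table instead of _PAGE_TABLE plus a
physical address, so pmd_present() degenerates to a non-zero test and
the fast path can follow the entry directly. A sketch of the
difference, using invented constants (the real _PAGE_TABLE bits and the
P1 offset conversions live in pgtable.h and page.h):

#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET 0x80000000UL   /* P1 base on SH; __pa/__va are plain
                                      offset conversions there */
#define _PAGE_TABLE 0x1e3UL        /* invented stand-in for the old
                                      software flag bits */

#define __pa(v) ((uintptr_t)(v) - PAGE_OFFSET)
#define __va(p) ((uintptr_t)(p) + PAGE_OFFSET)

int main(void)
{
        uintptr_t pte_table = PAGE_OFFSET + 0x00400000UL;

        /* Old scheme: flags + physical address; a walker has to strip
         * the flags and apply __va() before it can follow the pointer. */
        uintptr_t old_pmd = _PAGE_TABLE + __pa(pte_table);

        /* New scheme: the virtual address itself, so the handler's
         * "mov.l @(k0, k1), k1" follows it with no conversion at all. */
        uintptr_t new_pmd = pte_table;

        printf("old pmd %#lx -> table at %#lx\n", (unsigned long)old_pmd,
               (unsigned long)__va(old_pmd & ~0xfffUL));
        printf("new pmd %#lx -> table at %#lx\n", (unsigned long)new_pmd,
               (unsigned long)new_pmd);
        return 0;
}

The cost is that pmd_bad() can no longer check flag bits, which is why
the pgtable.h diff below redefines it as a simple range check between
PAGE_OFFSET and high_memory.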
diff -ruN linuxsh-2.5.13.notlb/include/asm-sh/pgtable.h linuxsh-2.5.13/include/asm-sh/pgtable.h
--- linuxsh-2.5.13.notlb/include/asm-sh/pgtable.h	Fri Mar 21 09:33:09 2003
+++ linuxsh-2.5.13/include/asm-sh/pgtable.h	Mon Mar 24 08:57:59 2003
@@ -43,7 +43,6 @@
 
 #define PTE_PHYS_MASK	0x1ffff000
 
-#ifndef __ASSEMBLY__
 /*
  * First 1MB map is used by fixed purpose.
  * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c)
@@ -95,8 +94,8 @@
 
 #define _PAGE_SHARED	_PAGE_U0_SHARED
 
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#ifndef __ASSEMBLY__
+
 #define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_SHARED)
 
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
@@ -137,9 +136,11 @@
 #define pte_clear(xp)	do { set_pte(xp, __pte(0)); } while (0)
 
 #define pmd_none(x)	(!pmd_val(x))
-#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_present(x)	(pmd_val(x))
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+#define pmd_bad(x)	((pmd_page_kernel(x) > (unsigned long) high_memory) || \
+			 (pmd_page_kernel(x) < PAGE_OFFSET))
 
 #define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
 #define pte_page(x)	phys_to_page(pte_val(x)&PTE_PHYS_MASK)
@@ -202,11 +203,9 @@
 
 #define page_pte(page) page_pte_prot(page, __pgprot(0))
 
-#define pmd_page_kernel(pmd) \
-((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page_kernel(pmd)	pmd_val(pmd)
 
-#define pmd_page(pmd) \
-	(phys_to_page(pmd_val(pmd)))
+#define pmd_page(pmd)	(virt_to_page(pmd_val(pmd)))
 
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
diff -ruN linuxsh-2.5.13.notlb/include/asm-sh/tlbflush.h linuxsh-2.5.13/include/asm-sh/tlbflush.h
--- linuxsh-2.5.13.notlb/include/asm-sh/tlbflush.h	Fri Apr 12 14:26:29 2002
+++ linuxsh-2.5.13/include/asm-sh/tlbflush.h	Fri Mar 21 18:26:28 2003
@@ -1,6 +1,6 @@
 #ifndef __ASM_SH_TLBFLUSH_H
 #define __ASM_SH_TLBFLUSH_H
-
+#ifndef __ASSEMBLY__
 /*
  * TLB flushing:
  *
@@ -28,4 +28,5 @@
 extern void flush_tlb_kernel_range(unsigned long start,
 				   unsigned long end);
 
+#endif /* __ASSEMBLY__ */
 #endif /* __ASM_SH_TLBFLUSH_H */
-------------------------------------------------------------------------------------------------