From: NIIBE Y. <gn...@ch...> - 2000-10-13 02:20:09
NIIBE Yutaka wrote:
 > I'll do the changes of optimizing PGD and removing set_pgdir for SuperH.

Done.  I'll commit this change when I import the test10-pre2 changes.

2000-10-13  NIIBE Yutaka  <gn...@m1...>

	* arch/sh/mm/fault.c (__do_page_fault): Removed (now it's
	call_dpf in entry.S).
	(__do_page_fault): Rename from __do_page_fault1.

	* arch/sh/kernel/entry.S (call_dpf): New entry.
	(tlb_miss_load, tlb_miss_store, initial_page_write,
	tlb_protection_violation_load, tlb_protection_violation_store):
	Use call_dpf.

	* include/asm-sh/pgalloc-2level.h (get_pmd_fast, free_pmd_fast,
	free_pmd_slow, pmd_alloc): Make them static inline.

	* arch/sh/mm/ioremap.c (remap_area_pages): Use pgd_offset_k.
	(remap_area_pte): Use _PAGE_HW_SHARED.
	(remap_area_pages): Remove set_pgdir.

	* include/asm-sh/pgalloc.h (set_pgdir): Removed.
	(get_pgd_slow, get_pgd_fast, free_pgd_fast, free_pgd_slow,
	get_pte_fast, free_pte_fast, free_pte_slow, pte_alloc_kernel,
	pte_alloc, pmd_free, flush_tlb_pgtables): Make them static inline.
	(get_pgd_slow, free_pgd_slow): Use 2KB PGD.

Index: arch/sh/kernel/entry.S
===================================================================
RCS file: /cvsroot/linuxsh/kernel/arch/sh/kernel/entry.S,v
retrieving revision 1.26
diff -u -p -r1.26 entry.S
--- arch/sh/kernel/entry.S	2000/09/30 03:43:30	1.26
+++ arch/sh/kernel/entry.S	2000/10/13 02:14:16
@@ -149,52 +149,55 @@ SYSCALL_NR = (16*4+6*4)
 
 	.align	2
 tlb_miss_load:
-	mov.l	2f, $r0
-	mov.l	@$r0, $r6
-	mov	$r15, $r4
-	mov.l	1f, $r0
-	jmp	@$r0
+	bra	call_dpf
 	 mov	#0, $r5
 
 	.align	2
 tlb_miss_store:
-	mov.l	2f, $r0
-	mov.l	@$r0, $r6
-	mov	$r15, $r4
-	mov.l	1f, $r0
-	jmp	@$r0
+	bra	call_dpf
 	 mov	#1, $r5
 
 	.align	2
 initial_page_write:
-	mov.l	2f, $r0
-	mov.l	@$r0, $r6
-	mov	$r15, $r4
-	mov.l	1f, $r0
-	jmp	@$r0
+	bra	call_dpf
 	 mov	#1, $r5
 
 	.align	2
 tlb_protection_violation_load:
-	mov.l	2f, $r0
-	mov.l	@$r0, $r6
-	mov	$r15, $r4
-	mov.l	1f, $r0
-	jmp	@$r0
+	bra	call_dpf
 	 mov	#0, $r5
 
 	.align	2
 tlb_protection_violation_store:
-	mov.l	2f, $r0
-	mov.l	@$r0, $r6
-	mov	$r15, $r4
+	bra	call_dpf
+	 mov	#1, $r5
+
+call_dpf:
 	mov.l	1f, $r0
+	mov.l	@$r0, $r6
+	! r4, r5 and r6 may be clobbered
+	mov	$r6, $r9
+	mov	$r5, $r8
+	!
+	mov.l	2f, $r0
+	jsr	@$r0
+	 mov	$r15, $r4
+	!
+	cmp/eq	#0, $r0
+	bf	0f
+	rts
+	 nop
+0:	STI()
+	mov.l	3f, $r0
+	mov	$r9, $r6
+	mov	$r8, $r5
 	jmp	@$r0
-	mov	#1, $r5
+	 mov	$r15, $r4
 
 	.align 2
-1:	.long	SYMBOL_NAME(__do_page_fault)
-2:	.long	MMU_TEA
+1:	.long	MMU_TEA
+2:	.long	SYMBOL_NAME(__do_page_fault)
+3:	.long	SYMBOL_NAME(do_page_fault)
 
 #if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
 	.align	2
Index: arch/sh/mm/fault.c
===================================================================
RCS file: /cvsroot/linuxsh/kernel/arch/sh/mm/fault.c,v
retrieving revision 1.21
diff -u -p -r1.21 fault.c
--- arch/sh/mm/fault.c	2000/09/03 03:40:12	1.21
+++ arch/sh/mm/fault.c	2000/10/13 02:14:17
@@ -231,7 +231,10 @@ do_sigbus:
 	goto no_context;
 }
 
-static int __do_page_fault1(struct pt_regs *regs, unsigned long writeaccess,
+/*
+ * Called with interrupt disabled.
+ */
+asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 			    unsigned long address)
 {
 	pgd_t *dir;
@@ -240,8 +243,6 @@ static int __do_page_fault1(struct pt_re
 	pte_t entry;
 
 	if (address >= VMALLOC_START && address < VMALLOC_END)
-		/* We can change the implementation of P3 area pte entries.
-		   set_pgdir and such. */
 		dir = pgd_offset_k(address);
 	else
 		dir = pgd_offset(current->mm, address);
@@ -273,23 +274,6 @@ static int __do_page_fault1(struct pt_re
 	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
 	return 0;
-}
-
-/*
- * Called with interrupt disabled.
- */
-asmlinkage void __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-				unsigned long address)
-{
-	/*
-	 * XXX: Could you please implement this (calling __do_page_fault1)
-	 * in assembler language in entry.S?
-	 */
-	if (__do_page_fault1(regs, writeaccess, address) == 0)
-		/* Done. */
-		return;
-	sti();
-	do_page_fault(regs, writeaccess, address);
 }
 
 void update_mmu_cache(struct vm_area_struct * vma,
Index: arch/sh/mm/ioremap.c
===================================================================
RCS file: /cvsroot/linuxsh/kernel/arch/sh/mm/ioremap.c,v
retrieving revision 1.1.1.1
diff -u -p -r1.1.1.1 ioremap.c
--- arch/sh/mm/ioremap.c	2000/04/14 16:49:01	1.1.1.1
+++ arch/sh/mm/ioremap.c	2000/10/13 02:14:17
@@ -17,6 +17,9 @@ static inline void remap_area_pte(pte_t 
 	unsigned long phys_addr, unsigned long flags)
 {
 	unsigned long end;
+	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
+				   _PAGE_DIRTY | _PAGE_ACCESSED |
+				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
 
 	address &= ~PMD_MASK;
 	end = address + size;
@@ -25,8 +28,7 @@ static inline void remap_area_pte(pte_t 
 	do {
 		if (!pte_none(*pte))
 			printk("remap_area_pte: page already exists\n");
-		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
+		set_pte(pte, mk_pte_phys(phys_addr, pgprot));
 		address += PAGE_SIZE;
 		phys_addr += PAGE_SIZE;
 		pte++;
@@ -55,22 +57,21 @@ static inline int remap_area_pmd(pmd_t *
 }
 
 static int remap_area_pages(unsigned long address, unsigned long phys_addr,
-			    unsigned long size, unsigned long flags)
+			 unsigned long size, unsigned long flags)
 {
 	pgd_t * dir;
 	unsigned long end = address + size;
 
 	phys_addr -= address;
-	dir = pgd_offset(&init_mm, address);
+	dir = pgd_offset_k(address);
 	flush_cache_all();
 	while (address < end) {
 		pmd_t *pmd = pmd_alloc_kernel(dir, address);
 		if (!pmd)
 			return -ENOMEM;
 		if (remap_area_pmd(pmd, address, end - address,
-			 phys_addr + address, flags))
+				   phys_addr + address, flags))
 			return -ENOMEM;
-		set_pgdir(address, *dir);
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	}
Index: include/asm-sh/pgalloc-2level.h
===================================================================
RCS file: /cvsroot/linuxsh/kernel/include/asm-sh/pgalloc-2level.h,v
retrieving revision 1.1.1.1
diff -u -p -r1.1.1.1 pgalloc-2level.h
--- include/asm-sh/pgalloc-2level.h	2000/04/14 16:48:21	1.1.1.1
+++ include/asm-sh/pgalloc-2level.h	2000/10/13 02:14:18
@@ -5,15 +5,15 @@
  * traditional two-level paging, page table allocation routines:
  */
 
-extern __inline__ pmd_t *get_pmd_fast(void)
+static __inline__ pmd_t *get_pmd_fast(void)
 {
 	return (pmd_t *)0;
 }
 
-extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
-extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
+static __inline__ void free_pmd_fast(pmd_t *pmd) { }
+static __inline__ void free_pmd_slow(pmd_t *pmd) { }
 
-extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
+static __inline__ pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
 {
 	if (!pgd)
 		BUG();
Index: include/asm-sh/pgalloc.h
===================================================================
RCS file: /cvsroot/linuxsh/kernel/include/asm-sh/pgalloc.h,v
retrieving revision 1.1.1.1
diff -u -p -r1.1.1.1 pgalloc.h
--- include/asm-sh/pgalloc.h	2000/04/14 16:48:21	1.1.1.1
+++ include/asm-sh/pgalloc.h	2000/10/13 02:14:18
@@ -17,18 +17,18 @@
  * if any.
  */
 
-extern __inline__ pgd_t *get_pgd_slow(void)
+static __inline__ pgd_t *get_pgd_slow(void)
 {
-	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+	unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
+	pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
 
-	if (ret) {
-		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-		memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-	}
+	if (ret)
+		memset(ret, 0, pgd_size);
+
 	return ret;
 }
 
-extern __inline__ pgd_t *get_pgd_fast(void)
+static __inline__ pgd_t *get_pgd_fast(void)
 {
 	unsigned long *ret;
 
@@ -41,22 +41,22 @@ extern __inline__ pgd_t *get_pgd_fast(vo
 	return (pgd_t *)ret;
 }
 
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
+static __inline__ void free_pgd_fast(pgd_t *pgd)
 {
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
 	pgd_quicklist = (unsigned long *) pgd;
 	pgtable_cache_size++;
 }
 
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
+static __inline__ void free_pgd_slow(pgd_t *pgd)
 {
-	free_page((unsigned long)pgd);
+	kfree(pgd);
 }
 
 extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
 extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
 
-extern __inline__ pte_t *get_pte_fast(void)
+static __inline__ pte_t *get_pte_fast(void)
 {
 	unsigned long *ret;
 
@@ -68,14 +68,14 @@ extern __inline__ pte_t *get_pte_fast(vo
 	return (pte_t *)ret;
 }
 
-extern __inline__ void free_pte_fast(pte_t *pte)
+static __inline__ void free_pte_fast(pte_t *pte)
 {
 	*(unsigned long *)pte = (unsigned long) pte_quicklist;
 	pte_quicklist = (unsigned long *) pte;
 	pgtable_cache_size++;
 }
 
-extern __inline__ void free_pte_slow(pte_t *pte)
+static __inline__ void free_pte_slow(pte_t *pte)
 {
 	free_page((unsigned long)pte);
 }
@@ -85,7 +85,7 @@ extern __inline__ void free_pte_slow(pte
 #define pgd_free(pgd)		free_pgd_slow(pgd)
 #define pgd_alloc()		get_pgd_fast()
 
-extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
+static __inline__ pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
 {
 	if (!pmd)
 		BUG();
@@ -105,7 +105,7 @@ extern inline pte_t * pte_alloc_kernel(p
 	return (pte_t *) pmd_page(*pmd) + address;
 }
 
-extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
+static __inline__ pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
 {
 	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 
@@ -132,7 +132,7 @@ fix:
  * allocating and freeing a pmd is trivial: the 1-entry pmd is
  * inside the pgd, so has no extra memory associated with it.
  */
-extern inline void pmd_free(pmd_t * pmd)
+static __inline__ void pmd_free(pmd_t * pmd)
 {
 }
 
@@ -141,22 +141,6 @@ extern inline void pmd_free(pmd_t * pmd)
 
 extern int do_check_pgt_cache(int, int);
 
-extern inline void set_pgdir(unsigned long address, pgd_t entry)
-{
-	struct task_struct * p;
-	pgd_t *pgd;
-
-	read_lock(&tasklist_lock);
-	for_each_task(p) {
-		if (!p->mm)
-			continue;
-		*pgd_offset(p->mm,address) = entry;
-	}
-	read_unlock(&tasklist_lock);
-	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
-		pgd[address >> PGDIR_SHIFT] = entry;
-}
-
 /*
  * TLB flushing:
  *
@@ -174,8 +158,9 @@ extern void flush_tlb_mm(struct mm_struc
 
 extern void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
+
+static __inline__ void flush_tlb_pgtables(struct mm_struct *mm,
+					  unsigned long start, unsigned long end)
 {
 }
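
For anyone reading the entry.S hunk: the new call_dpf trampoline is the
assembly counterpart of the C wrapper this patch removes from fault.c.
A rough C sketch of the control flow (illustrative only; the name
fault_entry_sketch is invented here, and the faulting address is really
loaded from MMU_TEA by the assembly):

	/*
	 * Sketch only -- the real implementation is the call_dpf code
	 * above.  Try the fast TLB refill with interrupts still
	 * disabled; fall back to the generic handler on failure.
	 */
	static void fault_entry_sketch(struct pt_regs *regs,
				       unsigned long writeaccess,
				       unsigned long address /* MMU_TEA */)
	{
		if (__do_page_fault(regs, writeaccess, address) == 0)
			return;		/* TLB entry set up, just resume */

		sti();			/* slow path runs with interrupts on */
		do_page_fault(regs, writeaccess, address);
	}

Saving r5 and r6 into r8 and r9 before the jsr is what allows the slow
path to be entered with the same writeaccess/address arguments, since
r4-r6 may be clobbered across the C call.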