From: James S. <jsi...@us...> - 2002-02-26 17:35:30
|
Update of /cvsroot/linux-mips/linux/arch/mips64/mm In directory usw-pr-cvs1:/tmp/cvs-serv23768/arch/mips64/mm Added Files: tlb-dbg-r4k.c tlb-glue-r4k.S tlbex-r4k.S Log Message: Most of it was a collection of fixes and cleanups for mips64 and SMP stuff --- NEW FILE: tlb-dbg-r4k.c --- /* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1999 Ralf Baechle * Copyright (C) 1999 Silicon Graphics, Inc. * * TLB debugging routines. These perform horribly slow but can easily be * modified for debugging purposes. */ #include <linux/linkage.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/ptrace.h> #include <asm/system.h> asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, unsigned long address); asmlinkage void tlb_refill_debug(struct pt_regs regs) { show_regs(®s); panic(__FUNCTION__ " called. This Does Not Happen (TM)."); } asmlinkage void xtlb_refill_debug(struct pt_regs *regs) { unsigned long addr; pgd_t *pgd; pmd_t *pmd; pte_t *pte; addr = regs->cp0_badvaddr & ~((PAGE_SIZE << 1) - 1); pgd = pgd_offset(current->active_mm, addr); pmd = pmd_offset(pgd, addr); pte = pte_offset(pmd, addr); set_entrylo0(pte_val(pte[0]) >> 6); set_entrylo1(pte_val(pte[1]) >> 6); __asm__ __volatile__("nop;nop;nop"); tlb_write_random(); } asmlinkage void xtlb_mod_debug(struct pt_regs *regs) { unsigned long addr; addr = regs->cp0_badvaddr; do_page_fault(regs, 1, addr); } asmlinkage void xtlb_tlbl_debug(struct pt_regs *regs) { unsigned long addr; addr = regs->cp0_badvaddr; do_page_fault(regs, 0, addr); } asmlinkage void xtlb_tlbs_debug(struct pt_regs *regs) { unsigned long addr; addr = regs->cp0_badvaddr; do_page_fault(regs, 1, addr); } --- NEW FILE: tlb-glue-r4k.S --- /* * This file is subject to the terms and conditions of the GNU General Public * License. 
 * See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#define __ASSEMBLY__
#include <linux/init.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

	/* Name-concatenation shims so tlb_handler's "interruptible"
	   argument selects the CLI or STI stackframe macro. */
	.macro	__BUILD_cli
	CLI
	.endm

	.macro	__BUILD_sti
	STI
	.endm

	/*
	 * Build one C-level fault dispatcher __<name>:
	 *   - save the full register frame (SAVE_ALL),
	 *   - grab BadVAddr and stash it in the frame (PT_BVADDR),
	 *   - set interrupt state per \interruptible,
	 *   - call do_page_fault(regs=a0, write=\writebit, address=a2),
	 *   - return through ret_from_exception.
	 * NOTE(review): the j after jal relies on jal's delay-slot/return
	 * discipline of the surrounding stackframe macros - confirm
	 * against the mips64 entry code before touching the ordering.
	 */
	.macro	tlb_handler name interruptible writebit
	NESTED(__\name, PT_SIZE, sp)
	SAVE_ALL
	dmfc0	a2, CP0_BADVADDR
	__BUILD_\interruptible
	li	a1, \writebit
	sd	a2, PT_BVADDR(sp)
	move	a0, sp
	jal	do_page_fault
	j	ret_from_exception
	END(__\name)
	.endm

	tlb_handler	xtlb_mod sti 1		/* TLB modified -> write fault */
	tlb_handler	xtlb_tlbl sti 0		/* TLB load miss -> read fault */
	tlb_handler	xtlb_tlbs sti 1		/* TLB store miss -> write fault */

--- NEW FILE: tlbex-r4k.S ---
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Written by Ulf Carlsson (ul...@en...)
 */
#define __ASSEMBLY__
#include <linux/config.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/pgtable.h>
#include <asm/stackframe.h>

	.data
	/* Per-CPU pointer to the current pgd; 8 bytes per CPU, 8-aligned. */
	.comm	pgd_current, NR_CPUS * 8, 8

/*
 * After this macro runs we have a pointer to the pte of the address
 * that caused the fault in PTR.
 */
	.macro	LOAD_PTE2, ptr, tmp
#ifdef CONFIG_SMP
	/* Index pgd_current[] by CPU number taken from CP0_CONTEXT
	   (id stored above bit 23 - see how CONTEXT is set up at boot). */
	dmfc0	\tmp, CP0_CONTEXT
	dla	\ptr, pgd_current
	dsrl	\tmp, 23
	daddu	\ptr, \tmp
#else
	dla	\ptr, pgd_current
#endif
	dmfc0	\tmp, CP0_BADVADDR
	ld	\ptr, (\ptr)
	/* Negative (sign-extended kernel segment) addresses are not
	   covered by the user page tables - punt to kaddr. */
	bltz	\tmp, kaddr
	dsrl	\tmp, (PGDIR_SHIFT-3)		# get pgd offset in bytes (delay slot)
	andi	\tmp, ((PTRS_PER_PGD - 1)<<3)
	daddu	\ptr, \tmp			# add in pgd offset
	dmfc0	\tmp, CP0_BADVADDR
	ld	\ptr, (\ptr)			# get pmd pointer
	dsrl	\tmp, (PMD_SHIFT-3)		# get pmd offset in bytes
	andi	\tmp, ((PTRS_PER_PMD - 1)<<3)
	daddu	\ptr, \tmp			# add in pmd offset
	/* XCONTEXT's BadVPN2 field gives the pte-pair offset directly. */
	dmfc0	\tmp, CP0_XCONTEXT
	ld	\ptr, (\ptr)			# get pte pointer
	andi	\tmp, 0xff0			# get pte offset
	daddu	\ptr, \tmp
	.endm

/*
 * This places the even/odd pte pair in the page table at the pte
 * entry pointed to by PTE into ENTRYLO0 and ENTRYLO1.
 */
	.macro	PTE_RELOAD, pte0, pte1
	dsrl	\pte0, 6			# convert to entrylo0
	dmtc0	\pte0, CP0_ENTRYLO0		# load it
	dsrl	\pte1, 6			# convert to entrylo1
	dmtc0	\pte1, CP0_ENTRYLO1		# load it
	.endm

	.text
	.set	noreorder
	.set	mips3

	__INIT

	/* 32-bit refill vector - unused on a 64-bit kernel. */
	.align	5
FEXPORT(except_vec0)
	.set	noat
	PANIC("Unused vector called")
1:	b	1b
	nop					# branch delay slot

/*
 * TLB refill handler for the R10000.
 * Attention: We may only use 32 instructions / 128 bytes.
 */
	.align	5
LEAF(except_vec1_r10k)
	.set	noat
	LOAD_PTE2 k1 k0
	ld	k0, 0(k1)			# get even pte
	ld	k1, 8(k1)			# get odd pte
	PTE_RELOAD k0 k1
	nop					# CP0 hazard before tlbwr
	tlbwr
	eret

kaddr:
	/* Jump (not branch) so this works when the kernel is MAPPED
	   and the target is out of branch range. */
	dla	k0, handle_vmalloc_address	# MAPPED kernel needs this
	jr	k0
	nop					# jump delay slot
	END(except_vec1_r10k)

	__FINIT

	.align	5
LEAF(handle_vmalloc_address)
	.set	noat
	/*
	 * First, determine that the address is in/above vmalloc range.
	 */
	dmfc0	k0, CP0_BADVADDR
	dli	k1, VMALLOC_START

	/*
	 * Now find offset into kptbl.
	 */
	dsubu	k0, k0, k1
	dla	k1, kptbl
	dsrl	k0, (PAGE_SHIFT+1)		# get vpn2
	dsll	k0, 4				# byte offset of pte
	daddu	k1, k1, k0

	/*
	 * Determine that fault address is within vmalloc range.
	 */
	dla	k0, ekptbl
	sltu	k0, k1, k0
	beqz	k0, not_vmalloc
	/*
	 * Load cp0 registers.
	 */
	ld	k0, 0(k1)			# get even pte (runs in beqz delay slot)
	ld	k1, 8(k1)			# get odd pte
not_vmalloc:
	/* NOTE(review): on the not_vmalloc path k0/k1 hold a pointer and
	   a stale value rather than ptes - this still writes a (garbage)
	   TLB entry.  Verify that is the intended out-of-range behavior. */
	PTE_RELOAD k0 k1
	nop					# CP0 hazard before tlbwr
	tlbwr
	eret
	END(handle_vmalloc_address)
|