From: James S. <jsi...@us...> - 2002-01-28 20:32:31
Update of /cvsroot/linux-mips/linux/arch/mips64/mm
In directory usw-pr-cvs1:/tmp/cvs-serv7656/arch/mips64/mm

Modified Files:
	andes.c fault.c loadmmu.c r4xx0.c
Added Files:
	Makefile c-sb1.c pg-sb1.c tlb-sb1.c
Removed Files:
	sb1.c
Log Message:
Big overhaul of the 64-bit kernel along the lines of what we already have
for the 32-bit kernel, just more radical.

--- NEW FILE: Makefile ---
#
# Makefile for the Linux/MIPS-specific parts of the memory manager.
#

O_TARGET := mm.o

export-objs += umap.o

obj-y := extable.o init.o fault.o loadmmu.o

obj-$(CONFIG_CPU_R4300)		+= r4xx0.o
obj-$(CONFIG_CPU_R4X00)		+= r4xx0.o
obj-$(CONFIG_CPU_R5000)		+= r4xx0.o
obj-$(CONFIG_CPU_NEVADA)	+= r4xx0.o
obj-$(CONFIG_CPU_R10000)	+= andes.o
obj-$(CONFIG_CPU_SB1)		+= pg-sb1.o c-sb1.o tlb-sb1.o
obj-$(CONFIG_SGI_IP22)		+= umap.o

include $(TOPDIR)/Rules.make

--- NEW FILE: c-sb1.c ---
/*
 * Copyright (C) 1996 David S. Miller (dm...@en...)
 * Copyright (C) 1997, 2001 Ralf Baechle (ra...@gn...)
 * Copyright (C) 2000, 2001 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>

/* These are probed at ld_mmu time */
static unsigned int icache_size;
static unsigned int dcache_size;

static unsigned int icache_line_size;
static unsigned int dcache_line_size;

static unsigned int icache_index_mask;

static unsigned int icache_assoc;
static unsigned int dcache_assoc;

static unsigned int icache_sets;
static unsigned int dcache_sets;

/*
 * The dcache is fully coherent to the system, with one
 * big caveat: the instruction stream.  In other words,
 * if we miss in the icache, and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it
 */

static void sb1_flush_cache_all(void)
{
}

static void local_sb1___flush_cache_all(void)
{
	/*
	 * Haven't worried too much about speed here; given that we're flushing
	 * the icache, the time to invalidate is dwarfed by the time it's going
	 * to take to refill it.
	 * Register usage:
	 *
	 * $1 - moving cache index
	 * $2 - set count
	 */
	__asm__ __volatile__ (
		".set push                  \n"
		".set noreorder             \n"
		".set noat                  \n"
		".set mips4                 \n"
		"     move   $1, %2         \n" /* Start at index 0 */
		"1:   cache  %3, 0($1)      \n" /* WB/Invalidate this index */
		"     daddiu %1, %1, -1     \n" /* Decrement loop count */
		"     bnez   %1, 1b         \n" /* loop test */
		"     addu   $1, $1, %0     \n" /* Next address */
		".set pop                   \n"
		:
		: "r" (dcache_line_size), "r" (dcache_sets * dcache_assoc),
		  "r" (KSEG0), "i" (Index_Writeback_Inv_D));

	__asm__ __volatile__ (
		".set push                  \n"
		".set noreorder             \n"
		".set mips2                 \n"
		"sync                       \n"
#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS		/* Bug 1384 */
		"sync                       \n"
#endif
		".set pop                   \n");

	__asm__ __volatile__ (
		".set push                  \n"
		".set noreorder             \n"
		".set noat                  \n"
		".set mips4                 \n"
		"     move   $1, %2         \n" /* Start at index 0 */
		"1:   cache  %3, 0($1)      \n" /* Invalidate this index */
		"     daddiu %1, %1, -1     \n" /* Decrement loop count */
		"     bnez   %1, 1b         \n" /* loop test */
		"     addu   $1, $1, %0     \n" /* Next address */
		".set pop                   \n"
		:
		: "r" (icache_line_size), "r" (icache_sets * icache_assoc),
		  "r" (KSEG0), "i" (Index_Invalidate_I));
}

#ifdef CONFIG_SMP
extern void sb1___flush_cache_all_ipi(void *ignored);
asm("sb1___flush_cache_all_ipi = local_sb1___flush_cache_all");

static void sb1___flush_cache_all(void)
{
	smp_call_function(sb1___flush_cache_all_ipi, 0, 1, 1);
	local_sb1___flush_cache_all();
}
#else
extern void sb1___flush_cache_all(void);
asm("sb1___flush_cache_all = local_sb1___flush_cache_all");
#endif

/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.  Also, if the flush is very
 * large, just flush the whole cache rather than spinning in here
 * forever.  Fills from the (always coherent) L2 come in relatively
 * quickly.
 *
 * Also, at the moment we just hit-writeback the dcache instead
 * of writeback-invalidating it.  Not doing the invalidates
 * doesn't cost us anything, since we're coherent
 *
 */
static void local_sb1_flush_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
	unsigned long flags;
	local_irq_save(flags);
#endif

	__asm__ __volatile__ (
		".set push                  \n"
		".set noreorder             \n"
		".set noat                  \n"
		".set mips4                 \n"
		"     move   $1, %0         \n"
		"1:                         \n"
#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
		".align 3                   \n"
		"     lw     $0, 0($1)      \n" /* Bug 1370, 1368 */
		"     sync                  \n"
		"     cache  0x15, 0($1)    \n" /* Hit-WB-inval this address */
#else
		"     cache  0x19, 0($1)    \n" /* Hit-WB this address */
#endif
		"     bne    $1, %1, 1b     \n" /* loop test */
		"     addu   $1, $1, %2     \n" /* next line */
		".set pop                   \n"
		:
		: "r" (start & ~(dcache_line_size - 1)),
		  "r" ((end - 1) & ~(dcache_line_size - 1)),
		  "r" (dcache_line_size));

	__asm__ __volatile__ (
		".set push                  \n"
		".set noreorder             \n"
		".set mips2                 \n"
		"sync                       \n"
#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS		/* Bug 1384 */
		"sync                       \n"
#endif
		".set pop                   \n");

#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
	local_irq_restore(flags);
#endif

	/*
	 * Guess what: these Kseg0 addresses aren't enough to let us figure
	 * out what may be in the cache under mapped Useg tags.  The situation
	 * is even worse, because bit 12 belongs to both the page number AND
	 * the cache index, which means the Kseg0 page number may have a
	 * different cache index than the Useg address.  For these two reasons,
	 * we have to flush the entire thing.  Since the Dcache is physically
	 * tagged, we *can* use hit operations.
	 */
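	/*
	 * To put numbers on that (illustrative only; the real geometry is
	 * probed from config1 at boot): with a 32KB, 4-way, 32-byte-line
	 * icache, each way spans 8KB, so the index covers address bits
	 * [12:5].  4KB pages only guarantee that bits [11:0] of a Useg
	 * address match its Kseg0 alias, so bit 12 - and therefore the
	 * cache index - can differ, which is why we fall through to the
	 * full index-invalidate below.
	 */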
	start = 0;
	end = icache_index_mask;

	__asm__ __volatile__ (
		".set push                  \n"
		".set noreorder             \n"
		".set noat                  \n"
		".set mips4                 \n"
		"     move   $1, %0         \n"
		".align 3                   \n"
		"1:   cache  0, (0<<13)($1) \n" /* Index-inval this address */
		"     cache  0, (1<<13)($1) \n" /* Index-inval this address */
		"     cache  0, (2<<13)($1) \n" /* Index-inval this address */
		"     cache  0, (3<<13)($1) \n" /* Index-inval this address */
		"     bne    $1, %1, 1b     \n" /* loop test */
		"     addu   $1, $1, %2     \n" /* next line */
		".set pop                   \n"
		:
		: "r" (start & ~(icache_line_size - 1)),
		  "r" ((end - 1) & ~(dcache_line_size - 1)),
		  "r" (icache_line_size));
}

#ifdef CONFIG_SMP
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
	struct flush_icache_range_args *args = info;

	local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;
	smp_call_function(sb1_flush_icache_range_ipi, &args, 1, 1);
	local_sb1_flush_icache_range(start, end);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end);
asm("sb1_flush_icache_range = local_sb1_flush_icache_range");
#endif

/*
 * If there's no context yet, or the page isn't executable, no icache flush
 * is needed
 */
static void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long addr;

	if ((vma->vm_mm->context == 0) || !(vma->vm_flags & VM_EXEC)) {
		return;
	}
	addr = (unsigned long)page_address(page);
	/*
	 * XXXKW addr is a Kseg0 address, whereas hidden higher up the call
	 * stack, we may really need to flush a Useg address.  Our Icache is
	 * virtually tagged, which means we have to be super conservative.
	 * See comments in sb1_flush_icache_range.
	 */
	sb1_flush_icache_range(addr, addr + PAGE_SIZE);
}

static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		"    .set push                \n"
		"    .set noreorder           \n"
		"    .set mips4               \n"
		"1:  cache 0x10, (%0)         \n"
		"2:  .set pop                 \n"
		"    .section __ex_table,\"a\"\n"
		"    .word 1b, 2b             \n"
		"    .previous"
		:
		: "r" (addr));
}

static inline void protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
	/* Have to be sure the TLB entry exists for the cache op, so we
	   have to be sure that nothing happens in between the lw and
	   the cache op */
	unsigned long flags;
	local_irq_save(flags);
#endif
	__asm__ __volatile__(
		"    .set push                \n"
		"    .set noreorder           \n"
		"    .set mips4               \n"
		"1:                           \n"
#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
		"     lw    $0, (%0)          \n"
		"     sync                    \n"
		"     cache 0x15, 0(%0)       \n" /* Hit-WB-inval this address */
#else
		"     cache 0x19, 0(%0)       \n" /* Hit-WB this address */
#endif
		/*
		 * XXX: should be able to do this after both dcache cache
		 * ops, but there's no guarantee that this will be inlined,
		 * and the pass1 restriction checker can't detect syncs
		 * following cache ops except in the following basic block.
		 */
		"     sync                    \n"
#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS		/* Bug 1384 */
		"     sync                    \n"
#endif
		"2:  .set pop                 \n"
		"    .section __ex_table,\"a\"\n"
		"    .word 1b, 2b             \n"
		"    .previous"
		:
		: "r" (addr));
#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
	local_irq_restore(flags);
#endif
}

/*
 * XXX - Still need to really understand this.
 * This is mostly just derived from the r10k and r4k implementations, and
 * seems to work but things that "seem to work" when I don't understand
 * *why* they "seem to work" disturb me greatly...JDC
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long daddr, iaddr;

	daddr = addr & ~(dcache_line_size - 1);
	protected_writeback_dcache_line(daddr);
	protected_writeback_dcache_line(daddr + dcache_line_size);
	iaddr = addr & ~(icache_line_size - 1);
	protected_flush_icache_line(iaddr);
	protected_flush_icache_line(iaddr + icache_line_size);
}

#ifdef CONFIG_SMP
extern void sb1_flush_cache_sigtramp_ipi(void *ignored);
asm("sb1_flush_cache_sigtramp_ipi = local_sb1_flush_cache_sigtramp");

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
	smp_call_function(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
	local_sb1_flush_cache_sigtramp(addr);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr);
asm("sb1_flush_cache_sigtramp = local_sb1_flush_cache_sigtramp");
#endif

static void sb1_flush_icache_all(void)
{
	/*
	 * Haven't worried too much about speed here; given that we're flushing
	 * the icache, the time to invalidate is dwarfed by the time it's going
	 * to take to refill it.  Register usage:
	 *
	 * $1 - moving cache index
	 * $2 - set count
	 */
	__asm__ __volatile__ (
		".set push                  \n"
		".set noreorder             \n"
		".set noat                  \n"
		".set mips4                 \n"
		"     move   $1, %2         \n" /* Start at index 0 */
		"1:   cache  %3, 0($1)      \n" /* Invalidate this index */
		"     daddiu %1, %1, -1     \n" /* Decrement loop count */
		"     bnez   %1, 1b         \n" /* loop test */
		"     addu   $1, $1, %0     \n" /* Next address */
		".set pop                   \n"
		:
		: "r" (icache_line_size), "r" (icache_sets * icache_assoc),
		  "r" (KSEG0), "i" (Index_Invalidate_I));
}

/*
 * Anything that just flushes dcache state can be ignored, as we're always
 * coherent in dcache space.  This is just a dummy function that all the
 * nop'ed routines point to
 */
static void sb1_nop(void)
{
}

/*
 * This only needs to make sure stores done up to this
 * point are visible to other agents outside the CPU.  Given
 * the coherent nature of the ZBus, all that's required here is
 * a sync to make sure the data gets out to the caches and is
 * visible to an arbitrary A Phase from an external agent
 *
 * Actually, I'm not even sure that's necessary; the semantics
 * of this function aren't clear.  If it's supposed to serve as
 * a memory barrier, this is needed.  If it's only meant to
 * prevent data from being invisible to non-cpu memory accessors
 * for some indefinite period of time (e.g. in a non-coherent
 * dcache) then this function would be a complete nop.
 */
static void sb1_flush_page_to_ram(struct page *page)
{
	__asm__ __volatile__(
		"     sync  \n"  /* Short pipe */
		:::"memory");
}

/*
 * Cache set values (from the mips64 spec)
 * 0 - 64
 * 1 - 128
 * 2 - 256
 * 3 - 512
 * 4 - 1024
 * 5 - 2048
 * 6 - 4096
 * 7 - Reserved
 */
static unsigned int decode_cache_sets(unsigned int config_field)
{
	if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1<<(config_field + 6));
}

/*
 * Cache line size values (from the mips64 spec)
 * 0 - No cache present.
 * 1 - 4 bytes
 * 2 - 8 bytes
 * 3 - 16 bytes
 * 4 - 32 bytes
 * 5 - 64 bytes
 * 6 - 128 bytes
 * 7 - Reserved
 */
static unsigned int decode_cache_line_size(unsigned int config_field)
{
	if (config_field == 0) {
		return 0;
	} else if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1<<(config_field + 1));
}
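/*
 * Worked example (hypothetical field values, for illustration only):
 * config1 fields IS=2, IL=4, IA=3, DS=2, DL=4, DA=3 decode via the two
 * helpers above to 256 sets, 32-byte lines and 4-way associativity for
 * each cache, i.e. 256 * 32 * 4 = 32KB per cache, which is exactly the
 * arithmetic probe_cache_sizes() below performs on the real register.
 */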
/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs)
 *
 * 24:22 Icache sets per way
 * 21:19 Icache line size
 * 18:16 Icache Associativity
 * 15:13 Dcache sets per way
 * 12:10 Dcache line size
 *  9:7  Dcache Associativity
 */
static __init void probe_cache_sizes(void)
{
	u32 config1;

	config1 = read_mips32_cp0_config1();
	icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
	dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
	icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
	dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
	icache_assoc = ((config1 >> 16) & 0x7) + 1;
	dcache_assoc = ((config1 >> 7) & 0x7) + 1;
	icache_size = icache_line_size * icache_sets * icache_assoc;
	dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
	icache_index_mask = (icache_sets - 1) * icache_line_size;
}

/*
 * This is called from loadmmu.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs
 */
void ld_mmu_sb1(void)
{
	probe_cache_sizes();

	_clear_page = sb1_clear_page;
	_copy_page = sb1_copy_page;

	_flush_cache_all = sb1_flush_cache_all;
	___flush_cache_all = sb1___flush_cache_all;
	_flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
	_flush_cache_range = (void *) sb1_nop;
	_flush_page_to_ram = sb1_flush_page_to_ram;
	_flush_icache_page = sb1_flush_icache_page;
	_flush_icache_range = sb1_flush_icache_range;

	/* None of these are needed for the sb1 */
	_flush_cache_page = (void *) sb1_nop;

	_flush_cache_sigtramp = sb1_flush_cache_sigtramp;
	_flush_icache_all = sb1_flush_icache_all;

	change_cp0_config(CONF_CM_CMASK, CONF_CM_CACHABLE_COW);
	flush_cache_all();
}

--- NEW FILE: pg-sb1.c ---
/*
 * Copyright (C) 1996 David S. Miller (dm...@en...)
 * Copyright (C) 1997, 2001 Ralf Baechle (ra...@gn...)
 * Copyright (C) 2000 Sibyte
 *
 * Written by Justin Carlson (ca...@si...)
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/config.h>
#include <asm/page.h>

#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
#define SB1_PREF_LOAD_STREAMED_HINT "0"
#define SB1_PREF_STORE_STREAMED_HINT "1"
#else
#define SB1_PREF_LOAD_STREAMED_HINT "4"
#define SB1_PREF_STORE_STREAMED_HINT "5"
#endif
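/*
 * Background on the hint values (MIPS64 pref hints, noted here for
 * clarity): 0/1 are the plain "prepare for load/store" hints, while 4/5
 * are the "streamed" variants that mark the fetched lines as transient
 * so they are evicted sooner; the pass-1 workaround above simply falls
 * back to the non-streamed hints.
 */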
/* These are the functions hooked by the memory management function pointers */
void sb1_clear_page(void *page)
{
	/*
	 * JDCXXX - This should be bottlenecked by the write buffer, but these
	 * things tend to be mildly unpredictable...should check this on the
	 * performance model
	 */

	/*
	 * We prefetch 4 lines ahead.  We're also "cheating" slightly here...
	 * since we know we're on an SB1, we force the assembler to take
	 * 64-bit operands to speed things up
	 */
	__asm__ __volatile__(
		".set push                  \n"
		".set noreorder             \n"
		".set noat                  \n"
		".set mips4                 \n"
		"     daddiu  $1, %0, %2    \n"  /* Calculate the end of the page to clear */
		"     pref    " SB1_PREF_STORE_STREAMED_HINT ",  0(%0)  \n"  /* Prefetch the first 4 lines */
		"     pref    " SB1_PREF_STORE_STREAMED_HINT ", 32(%0)  \n"
		"     pref    " SB1_PREF_STORE_STREAMED_HINT ", 64(%0)  \n"
		"     pref    " SB1_PREF_STORE_STREAMED_HINT ", 96(%0)  \n"
		"1:   sd      $0,  0(%0)    \n"  /* Throw out a cacheline of 0's */
		"     sd      $0,  8(%0)    \n"
		"     sd      $0, 16(%0)    \n"
		"     sd      $0, 24(%0)    \n"
		"     pref    " SB1_PREF_STORE_STREAMED_HINT ", 128(%0) \n"  /* Prefetch 4 lines ahead */
		"     bne     $1, %0, 1b    \n"
		"     daddiu  %0, %0, 32    \n"  /* Next cacheline (This instruction better be short piped!) */
		".set pop                   \n"
		: "=r" (page)
		: "0" (page), "I" (PAGE_SIZE-32)
		: "$1", "memory");
}

void sb1_copy_page(void *to, void *from)
{
	/*
	 * This should be optimized in assembly...can't use ld/sd, though,
	 * because the top 32 bits could be nuked if we took an interrupt
	 * during the routine.  And this is not a good place to be cli()'ing
	 */

	/*
	 * The pref's used here are using "streaming" hints, which cause the
	 * copied data to be kicked out of the cache sooner.  A page copy often
	 * ends up copying a lot more data than is commonly used, so this seems
	 * to make sense in terms of reducing cache pollution, but I've no real
	 * performance data to back this up
	 */
	__asm__ __volatile__(
		".set push                  \n"
		".set noreorder             \n"
		".set noat                  \n"
		".set mips4                 \n"
		"     daddiu  $1, %0, %4    \n"  /* Calculate the end of the page to copy */
		"     pref    " SB1_PREF_LOAD_STREAMED_HINT  ",  0(%0)  \n"  /* Prefetch the first 3 lines */
		"     pref    " SB1_PREF_STORE_STREAMED_HINT ",  0(%1)  \n"
		"     pref    " SB1_PREF_LOAD_STREAMED_HINT  ", 32(%0)  \n"
		"     pref    " SB1_PREF_STORE_STREAMED_HINT ", 32(%1)  \n"
		"     pref    " SB1_PREF_LOAD_STREAMED_HINT  ", 64(%0)  \n"
		"     pref    " SB1_PREF_STORE_STREAMED_HINT ", 64(%1)  \n"
		"1:   lw      $2,  0(%0)    \n"  /* Block copy a cacheline */
		"     lw      $3,  4(%0)    \n"
		"     lw      $4,  8(%0)    \n"
		"     lw      $5, 12(%0)    \n"
		"     lw      $6, 16(%0)    \n"
		"     lw      $7, 20(%0)    \n"
		"     lw      $8, 24(%0)    \n"
		"     lw      $9, 28(%0)    \n"
		"     pref    " SB1_PREF_LOAD_STREAMED_HINT  ", 96(%0)  \n"  /* Prefetch ahead */
		"     pref    " SB1_PREF_STORE_STREAMED_HINT ", 96(%1)  \n"
		"     sw      $2,  0(%1)    \n"
		"     sw      $3,  4(%1)    \n"
		"     sw      $4,  8(%1)    \n"
		"     sw      $5, 12(%1)    \n"
		"     sw      $6, 16(%1)    \n"
		"     sw      $7, 20(%1)    \n"
		"     sw      $8, 24(%1)    \n"
		"     sw      $9, 28(%1)    \n"
		"     daddiu  %1, %1, 32    \n"  /* Next cacheline */
		"     nop                   \n"  /* Force next add to short pipe */
		"     nop                   \n"  /* Force next add to short pipe */
		"     bne     $1, %0, 1b    \n"
		"     daddiu  %0, %0, 32    \n"  /* Next cacheline */
		".set pop                   \n"
		: "=r" (to), "=r" (from)
		: "0" (from), "1" (to), "I" (PAGE_SIZE-32)
		: "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "memory");
/*
	unsigned long *src = from;
	unsigned long *dest = to;
	unsigned long *target = (unsigned long *)
		(((unsigned long)src) + PAGE_SIZE);

	while (src != target) {
		*dest++ = *src++;
	}
*/
}

--- NEW FILE: tlb-sb1.c ---
/*
 * Copyright (C) 1996 David S. Miller (dm...@en...)
 * Copyright (C) 1997, 2001 Ralf Baechle (ra...@gn...)
 * Copyright (C) 2000, 2001 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <asm/mmu_context.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

/* Dump the current entry* and pagemask registers */
static inline void dump_cur_tlb_regs(void)
{
	unsigned int entryhihi, entryhilo, entrylo0hi, entrylo0lo, entrylo1hi;
	unsigned int entrylo1lo, pagemask;

	__asm__ __volatile__ (
		".set push              \n"
		".set noreorder         \n"
		"#.set mips64           \n"
		".set mips4             \n"
		".set noat              \n"
		"     tlbr              \n"
		"     dmfc0  $1, $10    \n"
		"     dsrl32 %0, $1, 0  \n"
		"     sra    %1, $1, 0  \n"
		"     dmfc0  $1, $2     \n"
		"     dsrl32 %2, $1, 0  \n"
		"     sra    %3, $1, 0  \n"
		"     dmfc0  $1, $3     \n"
		"     dsrl32 %4, $1, 0  \n"
		"     sra    %5, $1, 0  \n"
		"     mfc0   %6, $5     \n"
		".set pop               \n"
		: "=r" (entryhihi), "=r" (entryhilo),
		  "=r" (entrylo0hi), "=r" (entrylo0lo),
		  "=r" (entrylo1hi), "=r" (entrylo1lo),
		  "=r" (pagemask));

	printk("%08X%08X %08X%08X %08X%08X %08X",
	       entryhihi, entryhilo,
	       entrylo0hi, entrylo0lo,
	       entrylo1hi, entrylo1lo,
	       pagemask);
}

void sb1_dump_tlb(void)
{
	int entry;

	printk("Current TLB registers state:\n"
	       " EntryHi EntryLo0 EntryLo1 PageMask Index\n"
	       "--------------------------------------------------------------------\n");
	dump_cur_tlb_regs();
	printk(" %08X\n", read_32bit_cp0_register(CP0_INDEX));
	printk("\n\nFull TLB Dump:"
	       "Idx EntryHi EntryLo0 EntryLo1 PageMask\n"
	       "--------------------------------------------------------------\n");
	for (entry = 0; entry < mips_cpu.tlbsize; entry++) {
		set_index(entry);
		printk("\n%02i ", entry);
		__asm__ __volatile__ (
			".set push      \n"
			"#.set mips64   \n"
			".set mips4     \n"
			"     tlbr      \n"
			".set pop       \n");
		dump_cur_tlb_regs();
	}
	printk("\n");
}

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	__save_and_cli(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = (get_entryhi() & 0xff);
	set_entrylo0(0);
	set_entrylo1(0);
	for (entry = 0; entry < mips_cpu.tlbsize; entry++) {
		set_entryhi(KSEG0 + (PAGE_SIZE << 1) * entry);
		set_index(entry);
		tlb_write_indexed();
	}
	set_entryhi(old_ctx);
	__restore_flags(flags);
}

/*
 * Use a bogus region of memory (starting at 0) to sanitize the TLB's.
 * Use increments of the maximum page size (16MB), and check for duplicate
 * entries before doing a given write.  Then, when we're safe from collisions
 * with the firmware, go back and give all the entries invalid addresses with
 * the normal flush routine.
 */
void sb1_sanitize_tlb(void)
{
	int entry;
	long addr = 0;
	long inc = 1<<24;  /* 16MB */

	/* Save old context and create impossible VPN2 value */
	set_entrylo0(0);
	set_entrylo1(0);
	for (entry = 0; entry < mips_cpu.tlbsize; entry++) {
		do {
			addr += inc;
			set_entryhi(addr);
			tlb_probe();
		} while ((int)(get_index()) >= 0);
		set_index(entry);
		tlb_write_indexed();
	}
	/*
	 * Now that we know we're safe from collisions, we can safely flush
	 * the TLB with the "normal" routine.
	 */
	local_flush_tlb_all();
}

void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	int cpu;

	__save_and_cli(flags);
	cpu = smp_processor_id();
	if (CPU_CONTEXT(cpu, mm) != 0) {
		int size;
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= (mips_cpu.tlbsize/2)) {
			int oldpid = (get_entryhi() & 0xff);
			int newpid = (CPU_CONTEXT(cpu, mm) & 0xff);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				set_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = get_index();
				set_entrylo0(0);
				set_entrylo1(0);
				set_entryhi(KSEG0 + (idx << (PAGE_SHIFT+1)));
				if (idx < 0)
					continue;
				tlb_write_indexed();
			}
			set_entryhi(oldpid);
		} else {
			get_new_mmu_context(mm, cpu);
			if (mm == current->active_mm)
				set_entryhi(CPU_CONTEXT(cpu, mm) & 0xff);
		}
	}
	__restore_flags(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;
#ifdef CONFIG_SMP
	/*
	 * This variable is eliminated from CPU_CONTEXT() if SMP isn't defined,
	 * so conditional it to get rid of silly "unused variable" compiler
	 * complaints
	 */
	int cpu = smp_processor_id();
#endif

	__save_and_cli(flags);
	if (CPU_CONTEXT(cpu, vma->vm_mm) != 0) {
		int oldpid, newpid, idx;
#ifdef DEBUG_TLB
		printk("[tlbpage<%d,%08lx>]", CPU_CONTEXT(cpu, vma->vm_mm), page);
#endif
		newpid = (CPU_CONTEXT(cpu, vma->vm_mm) & 0xff);
		page &= (PAGE_MASK << 1);
		oldpid = (get_entryhi() & 0xff);
		set_entryhi(page | newpid);
		tlb_probe();
		idx = get_index();
		set_entrylo0(0);
		set_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		set_entryhi(KSEG0+(idx<<(PAGE_SHIFT+1)));
		tlb_write_indexed();
	finish:
		set_entryhi(oldpid);
	}
	__restore_flags(flags);
}

/*
 * All entries common to a mm share an asid.  To effectively flush these
 * entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;
	int cpu;

	__save_and_cli(flags);
	cpu = smp_processor_id();
	if (CPU_CONTEXT(cpu, mm) != 0) {
		get_new_mmu_context(mm, smp_processor_id());
		if (mm == current->active_mm) {
			set_entryhi(CPU_CONTEXT(cpu, mm) & 0xff);
		}
	}
	__restore_flags(flags);
}

/* Stolen from mips32 routines */
void sb1_update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
			  pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	__save_and_cli(flags);

	pid = get_entryhi() & 0xff;
#ifdef DEBUG_TLB
	if ((pid != (CPU_CONTEXT(cpu, vma->vm_mm) & 0xff)) ||
	    (CPU_CONTEXT(cpu, vma->vm_mm) == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n",
		       (int) (CPU_CONTEXT(cpu, vma->vm_mm) & 0xff), pid);
	}
#endif

	address &= (PAGE_MASK << 1);
	set_entryhi(address | (pid));
	pgdp = pgd_offset(vma->vm_mm, address);
	tlb_probe();
	pmdp = pmd_offset(pgdp, address);
	idx = get_index();
	ptep = pte_offset(pmdp, address);
	set_entrylo0(pte_val(*ptep++) >> 6);
	set_entrylo1(pte_val(*ptep) >> 6);
	set_entryhi(address | (pid));
	if (idx < 0) {
		tlb_write_random();
	} else {
		tlb_write_indexed();
	}
	set_entryhi(pid);
	__restore_flags(flags);
}
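/*
 * Background note on the "pte_val(*ptep) >> 6" above (an architecture
 * detail, not specific to this file): EntryLo keeps the G/V/D bits and
 * the cache attribute in bits 5:0 with the PFN starting at bit 6, so
 * the software PTE layout is arranged to yield a valid EntryLo value
 * once shifted right by six bits.
 */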
/*
 * This is called from loadmmu.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs
 */
void sb1_tlb_init(void)
{
	u32 config1;

	config1 = read_mips32_cp0_config1();
	mips_cpu.tlbsize = ((config1 >> 25) & 0x3f) + 1;
	/*
	 * We don't know what state the firmware left the TLB's in, so this is
	 * the ultra-conservative way to flush the TLB's and avoid machine
	 * check exceptions due to duplicate TLB entries
	 */
	sb1_sanitize_tlb();
	_update_mmu_cache = sb1_update_mmu_cache;
}

Index: andes.c
===================================================================
RCS file: /cvsroot/linux-mips/linux/arch/mips64/mm/andes.c,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -d -r1.8 -r1.9
--- andes.c	2001/11/20 17:57:32	1.8
+++ andes.c	2002/01/28 20:31:57	1.9
@@ -358,7 +358,7 @@
 		while(1);
 	}
 
-	update_mmu_cache = andes_update_mmu_cache;
+	_update_mmu_cache = andes_update_mmu_cache;
 
 	_show_regs = andes_show_regs;
 
@@ -372,9 +372,10 @@
 	 * be set for 4kb pages.
 	 */
 	write_32bit_cp0_register(CP0_PAGEMASK, PM_4K);
+	write_32bit_cp0_register(CP0_FRAMEMASK, 0);
 
 	/* From this point on the ARC firmware is dead.  */
 	local_flush_tlb_all();
 
-	/* Did I tell you that ARC SUCKS?  */
+	/* Did I tell you that ARC SUCKS?  */
 }

Index: fault.c
===================================================================
RCS file: /cvsroot/linux-mips/linux/arch/mips64/mm/fault.c,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -d -r1.8 -r1.9
--- fault.c	2001/12/07 19:28:38	1.8
+++ fault.c	2002/01/28 20:31:57	1.9
@@ -115,7 +115,7 @@
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
-	if (address >= TASK_SIZE)
+	if (address >= VMALLOC_START)
 		goto vmalloc_fault;
 
 	info.si_code = SEGV_MAPERR;

Index: loadmmu.c
===================================================================
RCS file: /cvsroot/linux-mips/linux/arch/mips64/mm/loadmmu.c,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -d -r1.3 -r1.4
--- loadmmu.c	2001/11/26 17:17:26	1.3
+++ loadmmu.c	2002/01/28 20:31:57	1.4
@@ -24,14 +24,19 @@
 void (*_copy_page)(void * to, void * from);
 
 /* Cache operations. */
+void (*_flush_cache_all)(void);
+void (*___flush_cache_all)(void);
 void (*_flush_cache_mm)(struct mm_struct *mm);
 void (*_flush_cache_range)(struct mm_struct *mm, unsigned long start,
 			   unsigned long end);
 void (*_flush_cache_page)(struct vm_area_struct *vma, unsigned long page);
+void (*_flush_cache_sigtramp)(unsigned long addr);
+void (*_flush_icache_range)(unsigned long start, unsigned long end);
+void (*_flush_icache_page)(struct vm_area_struct *vma, struct page *page);
 void (*_flush_page_to_ram)(struct page * page);
+void (*_flush_icache_all)(void);
 
 /* MIPS specific cache operations */
-void (*_flush_cache_sigtramp)(unsigned long addr);
 void (*_flush_cache_l2)(void);
 void (*_flush_cache_l1)(void);
 
@@ -42,8 +47,8 @@
 void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 
 /* Miscellaneous. */
-void (*update_mmu_cache)(struct vm_area_struct * vma,
-                         unsigned long address, pte_t pte);
+void (*_update_mmu_cache)(struct vm_area_struct * vma,
+                          unsigned long address, pte_t pte);
 
 void (*_show_regs)(struct pt_regs *);
 
Index: r4xx0.c
===================================================================
RCS file: /cvsroot/linux-mips/linux/arch/mips64/mm/r4xx0.c,v
retrieving revision 1.11
retrieving revision 1.12
diff -u -d -r1.11 -r1.12
--- r4xx0.c	2001/11/30 18:34:10	1.11
+++ r4xx0.c	2002/01/28 20:31:57	1.12
@@ -2388,7 +2388,7 @@
 
 	_flush_cache_l2 = r4k_flush_cache_l2;
 
-	update_mmu_cache = r4k_update_mmu_cache;
+	_update_mmu_cache = r4k_update_mmu_cache;
 
 	_show_regs = r4k_show_regs;
 
--- sb1.c DELETED ---