Update of /cvsroot/linux-vax/kernel-2.4/arch/mips64/mm
In directory usw-pr-cvs1:/tmp/cvs-serv18937/mips64/mm
Modified Files:
Makefile andes.c extable.c fault.c init.c loadmmu.c r4xx0.c
umap.c
Log Message:
Synch with 2.4.15, commit 38
Index: Makefile
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/mips64/mm/Makefile,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- Makefile 25 Feb 2001 23:15:22 -0000 1.1.1.2
+++ Makefile 10 Apr 2002 14:43:21 -0000 1.2
@@ -4,7 +4,8 @@
O_TARGET := mm.o
-obj-y := extable.o init.o fault.o loadmmu.o
+export-objs += umap.o
+obj-y := extable.o init.o fault.o loadmmu.o
obj-$(CONFIG_CPU_R4300) += r4xx0.o
obj-$(CONFIG_CPU_R4X00) += r4xx0.o
Index: andes.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/mips64/mm/andes.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- andes.c 14 Jan 2001 19:54:18 -0000 1.1.1.1
+++ andes.c 10 Apr 2002 14:43:21 -0000 1.2
@@ -125,14 +125,8 @@
static void
andes_flush_cache_sigtramp(unsigned long addr)
{
- unsigned long daddr, iaddr;
-
- daddr = addr & ~(dc_lsize - 1);
- protected_writeback_dcache_line(daddr);
- protected_writeback_dcache_line(daddr + dc_lsize);
- iaddr = addr & ~(ic_lsize - 1);
- protected_flush_icache_line(iaddr);
- protected_flush_icache_line(iaddr + ic_lsize);
+ protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+ protected_flush_icache_line(addr & ~(ic_lsize - 1));
}
#define NTLB_ENTRIES 64
@@ -284,8 +278,8 @@
if((pid != (CPU_CONTEXT(smp_processor_id(), vma->vm_mm) & 0xff)) ||
(CPU_CONTEXT(smp_processor_id(), vma->vm_mm) == 0)) {
- printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d
- tlbpid=%d\n", (int) (CPU_CONTEXT(smp_processor_id(),
+ printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d "
+ "tlbpid=%d\n", (int) (CPU_CONTEXT(smp_processor_id(),
vma->vm_mm) & 0xff), pid);
}
@@ -332,8 +326,8 @@
printk("Lo : %016lx\n", regs->lo);
/* Saved cp0 registers. */
- printk("epc : %016lx\nbadvaddr: %016lx\n",
- regs->cp0_epc, regs->cp0_badvaddr);
+ printk("epc : %016lx %s\nbadvaddr: %016lx\n",
+ regs->cp0_epc, print_tainted(), regs->cp0_badvaddr);
printk("Status : %08x\nCause : %08x\n",
(unsigned int) regs->cp0_status, (unsigned int) regs->cp0_cause);
}
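
A note on the two sigtramp hunks above: the rewrite drops the daddr/iaddr temporaries and the flush of the following line, so it now relies on the signal trampoline fitting within a single cache line from the rounded-down address. The rounding itself is the usual power-of-two mask; a standalone illustration (the 32-byte line size is illustrative):

    #include <assert.h>
    #include <stdio.h>

    /* Round an address down to the start of its cache line.  Works for
     * any power-of-two line size: lsize - 1 sets the low bits, ~ clears
     * them. */
    static unsigned long line_align(unsigned long addr, unsigned long lsize)
    {
        return addr & ~(lsize - 1);
    }

    int main(void)
    {
        unsigned long dc_lsize = 32;    /* illustrative line size */

        assert(line_align(0x1234, dc_lsize) == 0x1220);
        assert(line_align(0x1220, dc_lsize) == 0x1220); /* already aligned */
        printf("0x%lx\n", line_align(0x1234, dc_lsize));
        return 0;
    }
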
Index: extable.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/mips64/mm/extable.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- extable.c 14 Jan 2001 19:54:18 -0000 1.1.1.1
+++ extable.c 10 Apr 2002 14:43:21 -0000 1.2
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -9,6 +8,7 @@
*/
#include <linux/config.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
#include <asm/uaccess.h>
extern const struct exception_table_entry __start___ex_table[];
@@ -35,26 +35,31 @@
return 0;
}
-unsigned long
-search_exception_table(unsigned long addr)
-{
- unsigned long ret;
+extern spinlock_t modlist_lock;
+unsigned long search_exception_table(unsigned long addr)
+{
+ unsigned long ret = 0;
+ unsigned long flags;
+
#ifndef CONFIG_MODULES
/* There is only the kernel to search. */
ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
- if (ret) return ret;
+ return ret;
#else
/* The kernel is the last "module" -- no need to treat it special. */
struct module *mp;
+
+ spin_lock_irqsave(&modlist_lock, flags);
for (mp = module_list; mp != NULL; mp = mp->next) {
if (mp->ex_table_start == NULL)
continue;
ret = search_one_table(mp->ex_table_start,
mp->ex_table_end - 1, addr);
- if (ret) return ret;
+ if (ret)
+ break;
}
+ spin_unlock_irqrestore(&modlist_lock, flags);
+ return ret;
#endif
-
- return 0;
}
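
For reference, search_one_table() (unchanged by this patch) is the usual binary search over a table the linker keeps sorted by faulting-instruction address; the patch only changes who gets searched and under which lock, taking modlist_lock so the module list cannot change mid-walk. A self-contained sketch of the lookup -- the insn/nextinsn field names follow the mips64 2.4 headers and are an assumption here (other ports call the second field "fixup"):

    #include <stdio.h>

    struct exception_table_entry {
        unsigned long insn;      /* address of the faulting instruction */
        unsigned long nextinsn;  /* where to resume after the fixup     */
    };

    static unsigned long
    search_one_table(const struct exception_table_entry *first,
                     const struct exception_table_entry *last,
                     unsigned long value)
    {
        while (first <= last) {
            const struct exception_table_entry *mid =
                first + (last - first) / 2;
            long diff = mid->insn - value;

            if (diff == 0)
                return mid->nextinsn;
            else if (diff < 0)
                first = mid + 1;
            else
                last = mid - 1;
        }
        return 0;   /* no fixup entry: the fault is fatal */
    }

    int main(void)
    {
        /* The table must be sorted by insn, as the linker script
         * guarantees for the real __ex_table section. */
        static const struct exception_table_entry ex[] = {
            { 0x1000, 0x2000 }, { 0x1008, 0x2010 }, { 0x1020, 0x2020 },
        };
        printf("fixup for 0x1008: 0x%lx\n",
               search_one_table(ex, ex + 2, 0x1008)); /* -> 0x2010 */
        return 0;
    }
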
Index: fault.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/mips64/mm/fault.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- fault.c 14 Jan 2001 19:54:18 -0000 1.1.1.1
+++ fault.c 10 Apr 2002 14:43:21 -0000 1.2
@@ -6,6 +6,7 @@
* Copyright (C) 1995 - 2000 by Ralf Baechle
* Copyright (C) 1999, 2000 by Silicon Graphics, Inc.
*/
+#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
@@ -57,17 +58,33 @@
printk("Got exception 0x%lx at 0x%lx\n", retaddr, regs.cp0_epc);
}
-extern spinlock_t console_lock, timerlist_lock;
+extern spinlock_t timerlist_lock;
/*
* Unlock any spinlocks which will prevent us from getting the
- * message out (timerlist_lock is aquired through the
+ * message out (timerlist_lock is acquired through the
* console unblank code)
*/
-void bust_spinlocks(void)
+void bust_spinlocks(int yes)
{
- spin_lock_init(&console_lock);
spin_lock_init(&timerlist_lock);
+ if (yes) {
+ oops_in_progress = 1;
+ } else {
+ int loglevel_save = console_loglevel;
+#ifdef CONFIG_VT
+ unblank_screen();
+#endif
+ oops_in_progress = 0;
+ /*
+ * OK, the message is on the console. Now we call printk()
+ * without oops_in_progress set so that printk will give klogd
+ * a poke. Hold onto your hats...
+ */
+ console_loglevel = 15; /* NMI oopser may have shut the console up */
+ printk(" ");
+ console_loglevel = loglevel_save;
+ }
}
/*
@@ -84,6 +101,18 @@
unsigned long fixup;
siginfo_t info;
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (address >= TASK_SIZE)
+ goto vmalloc_fault;
+
info.si_code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user
@@ -95,7 +124,7 @@
printk("Cpu%d[%s:%d:%08lx:%ld:%08lx]\n", smp_processor_id(), current->comm,
current->pid, address, write, regs->cp0_epc);
#endif
- down(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -138,7 +167,7 @@
goto out_of_memory;
}
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
return;
/*
@@ -146,15 +175,9 @@
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
- /*
- * Quickly check for vmalloc range faults.
- */
- if ((!vma) && (address >= VMALLOC_START) && (address < VMALLOC_END)) {
- printk("Fix vmalloc invalidate fault\n");
- while(1);
- }
+bad_area_nosemaphore:
if (user_mode(regs)) {
tsk->thread.cp0_badvaddr = address;
tsk->thread.error_code = write;
@@ -195,7 +218,7 @@
* terminate things with extreme prejudice.
*/
- bust_spinlocks();
+ bust_spinlocks(1);
printk(KERN_ALERT "Cpu %d Unable to handle kernel paging request at "
"address %08lx, epc == %08x, ra == %08x\n",
@@ -203,20 +226,21 @@
(unsigned int) regs->regs[31]);
die("Oops", regs, write);
do_exit(SIGKILL);
+ bust_spinlocks(0);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
printk("VM: killing process %s\n", tsk->comm);
if (user_mode(regs))
do_exit(SIGKILL);
goto no_context;
do_sigbus:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
@@ -232,4 +256,9 @@
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
+
+ return;
+
+vmalloc_fault:
+ panic("Pagefault for kernel virtual memory");
}
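
The new TASK_SIZE check routes every kernel-segment fault to vmalloc_fault, which for now simply panics. Other 2.4 ports instead service these faults by copying the missing top-level entry from init_mm's master page table, exactly as the comment added above describes. A simplified, hedged sketch in the i386 handler's vocabulary -- the names are i386's, not this file's, and the real handler also syncs the pmd level before returning:

    /* Kernel-context sketch, not a standalone program. */
    vmalloc_fault:
        {
            int offset   = __pgd_offset(address);
            pgd_t *pgd   = tsk->active_mm->pgd + offset;
            pgd_t *pgd_k = init_mm.pgd + offset;

            if (!pgd_present(*pgd_k))
                goto no_context;   /* not mapped in init_mm either */
            set_pgd(pgd, *pgd_k);  /* sync and let the access retry */
            return;
        }
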
Index: init.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/mips64/mm/init.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- init.c 14 Jan 2001 19:54:20 -0000 1.1.1.1
+++ init.c 10 Apr 2002 14:43:22 -0000 1.2
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -37,27 +36,11 @@
#include <asm/sgialib.h>
#endif
#include <asm/mmu_context.h>
+#include <asm/tlb.h>
-unsigned long totalram_pages;
-
-void __bad_pte_kernel(pmd_t *pmd)
-{
- printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, BAD_PAGETABLE);
-}
-
-void __bad_pte(pmd_t *pmd)
-{
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, BAD_PAGETABLE);
-}
+mmu_gather_t mmu_gathers[NR_CPUS];
-/* Fixme, we need something like BAD_PMDTABLE ... */
-void __bad_pmd(pgd_t *pgd)
-{
- printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
- pgd_set(pgd, empty_bad_pmd_table);
-}
+unsigned long totalram_pages;
void pgd_init(unsigned long page)
{
@@ -113,72 +96,6 @@
}
}
-pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
-{
- pmd_t *pmd;
-
- pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 1);
- if (pgd_none(*pgd)) {
- if (pmd) {
- pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
- pgd_set(pgd, pmd);
- return pmd + offset;
- }
- pgd_set(pgd, BAD_PMDTABLE);
- return NULL;
- }
- free_page((unsigned long)pmd);
- if (pgd_bad(*pgd)) {
- __bad_pmd(pgd);
- return NULL;
- }
- return (pmd_t *) pgd_page(*pgd) + offset;
-}
-
-pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
-{
- pte_t *page;
-
- page = (pte_t *) __get_free_pages(GFP_USER, 1);
- if (pmd_none(*pmd)) {
- if (page) {
- clear_page(page);
- pmd_set(pmd, page);
- return page + offset;
- }
- pmd_set(pmd, BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long)page);
- if (pmd_bad(*pmd)) {
- __bad_pte_kernel(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + offset;
-}
-
-pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
-{
- pte_t *page;
-
- page = (pte_t *) __get_free_pages(GFP_KERNEL, 0);
- if (pmd_none(*pmd)) {
- if (page) {
- clear_page(page);
- pmd_val(*pmd) = (unsigned long)page;
- return page + offset;
- }
- pmd_set(pmd, BAD_PAGETABLE);
- return NULL;
- }
- free_pages((unsigned long)page, 0);
- if (pmd_bad(*pmd)) {
- __bad_pte(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + offset;
-}
-
int do_check_pgt_cache(int low, int high)
{
int freed = 0;
@@ -205,7 +122,7 @@
}
/*
- * We have upto 8 empty zeroed pages so we can map one of the right colour
+ * We have up to 8 empty zeroed pages so we can map one of the right colour
* when needed. This is necessary only on R4000 / R4400 SC and MC versions
* where we have to avoid VCED / VECI exceptions for good performance at
* any price. Since page is never written to after the initialization we
@@ -247,32 +164,155 @@
return 1UL << order;
}
-/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving a inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-pmd_t * __bad_pmd_table(void)
+void __init add_memory_region(unsigned long start, unsigned long size,
+ long type)
{
- return empty_bad_pmd_table;
-}
+ int x = boot_mem_map.nr_map;
-pte_t * __bad_pagetable(void)
-{
- return empty_bad_page_table;
+ if (x == BOOT_MEM_MAP_MAX) {
+ printk("Ooops! Too many entries in the memory map!\n");
+ return;
+ }
+
+ boot_mem_map.map[x].addr = start;
+ boot_mem_map.map[x].size = size;
+ boot_mem_map.map[x].type = type;
+ boot_mem_map.nr_map++;
+}
+
+static void __init print_memory_map(void)
+{
+ int i;
+
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ printk(" memory: %08lx @ %08lx ",
+ boot_mem_map.map[i].size, boot_mem_map.map[i].addr);
+ switch (boot_mem_map.map[i].type) {
+ case BOOT_MEM_RAM:
+ printk("(usable)\n");
+ break;
+ case BOOT_MEM_ROM_DATA:
+ printk("(ROM data)\n");
+ break;
+ case BOOT_MEM_RESERVED:
+ printk("(reserved)\n");
+ break;
+ default:
+ printk("type %lu\n", boot_mem_map.map[i].type);
+ break;
+ }
+ }
}
-pte_t __bad_page(void)
-{
- return __pte(0);
+void bootmem_init(void) {
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long tmp;
+ unsigned long *initrd_header;
+#endif
+ unsigned long bootmap_size;
+ unsigned long start_pfn, max_pfn;
+ int i;
+ extern int _end;
+
+#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+
+ /*
+ * Partially used pages are not usable - thus
+ * we are rounding upwards.
+ */
+ start_pfn = PFN_UP(__pa(&_end));
+
+ /* Find the highest page frame number we have available. */
+ max_pfn = 0;
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ unsigned long start, end;
+
+ if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+ continue;
+
+ start = PFN_UP(boot_mem_map.map[i].addr);
+ end = PFN_DOWN(boot_mem_map.map[i].addr
+ + boot_mem_map.map[i].size);
+
+ if (start >= end)
+ continue;
+ if (end > max_pfn)
+ max_pfn = end;
+ }
+
+ /* Initialize the boot-time allocator. */
+ bootmap_size = init_bootmem(start_pfn, max_pfn);
+
+ /*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ unsigned long curr_pfn, last_pfn, size;
+
+ /*
+ * Reserve usable memory.
+ */
+ if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+ continue;
+
+ /*
+ * We are rounding up the start address of usable memory:
+ */
+ curr_pfn = PFN_UP(boot_mem_map.map[i].addr);
+ if (curr_pfn >= max_pfn)
+ continue;
+ if (curr_pfn < start_pfn)
+ curr_pfn = start_pfn;
+
+ /*
+ * ... and at the end of the usable range downwards:
+ */
+ last_pfn = PFN_DOWN(boot_mem_map.map[i].addr
+ + boot_mem_map.map[i].size);
+
+ if (last_pfn > max_pfn)
+ last_pfn = max_pfn;
+
+ /*
+ * ... finally, did all the rounding and playing
+ * around just make the area go away?
+ */
+ if (last_pfn <= curr_pfn)
+ continue;
+
+ size = last_pfn - curr_pfn;
+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+ }
+
+ /* Reserve the bootmap memory. */
+ reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#error "Initrd is broken, please fix it."
+ tmp = (((unsigned long)&_end + PAGE_SIZE-1) & PAGE_MASK) - 8;
+ if (tmp < (unsigned long)&_end)
+ tmp += PAGE_SIZE;
+ initrd_header = (unsigned long *)tmp;
+ if (initrd_header[0] == 0x494E5244) {
+ initrd_start = (unsigned long)&initrd_header[2];
+ initrd_end = initrd_start + initrd_header[1];
+ initrd_below_start_ok = 1;
+ if (initrd_end > memory_end) {
+ printk("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ initrd_end,memory_end);
+ initrd_start = 0;
+ } else
+ *memory_start_p = initrd_end;
+ }
+#endif
+
+#undef PFN_UP
+#undef PFN_DOWN
+#undef PFN_PHYS
+
}
void show_mem(void)
@@ -312,27 +352,39 @@
void __init paging_init(void)
{
+ pmd_t *pmd = kpmdtbl;
+ pte_t *pte = kptbl;
+
unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
unsigned long max_dma, low;
+ int i;
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
memset((void *)invalid_pte_table, 0, sizeof(pte_t) * PTRS_PER_PTE);
- pmd_init((unsigned long)empty_bad_pmd_table, (unsigned long)empty_bad_page_table);
- memset((void *)empty_bad_page_table, 0, sizeof(pte_t) * PTRS_PER_PTE);
max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
low = max_low_pfn;
+#if defined(CONFIG_PCI) || defined(CONFIG_ISA)
if (low < max_dma)
zones_size[ZONE_DMA] = low;
else {
zones_size[ZONE_DMA] = max_dma;
zones_size[ZONE_NORMAL] = low - max_dma;
}
+#else
+ zones_size[ZONE_DMA] = low;
+#endif
free_area_init(zones_size);
+
+ memset((void *)kptbl, 0, PAGE_SIZE << KPTBL_PAGE_ORDER);
+ memset((void *)kpmdtbl, 0, PAGE_SIZE);
+ pgd_set(swapper_pg_dir, kpmdtbl);
+ for (i = 0; i < (1 << KPTBL_PAGE_ORDER); pmd++,i++,pte+=PTRS_PER_PTE)
+ pmd_val(*pmd) = (unsigned long)pte;
}
extern int page_is_ram(unsigned long pagenr);
@@ -411,7 +463,7 @@
si_meminfo(struct sysinfo *val)
{
val->totalram = totalram_pages;
- val->sharedram = 0;
+ val->sharedram = atomic_read(&shmem_nrpages);
val->freeram = nr_free_pages();
val->bufferram = atomic_read(&buffermem_pages);
val->totalhigh = 0;
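
The PFN_UP/PFN_DOWN pair does the rounding the comments in bootmem_init() describe: partially used pages must round toward the inside of a region, so only whole pages reach the bootmem allocator. A standalone illustration, assuming the 4 KB page size these configurations use:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* 4 KB pages assumed */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_PHYS(x) ((x) << PAGE_SHIFT)

    int main(void)
    {
        /* A RAM region [0x1234, 0x8000): round the start up and the end
         * down, leaving whole pages pfn 2 .. 7 as usable. */
        unsigned long start = 0x1234UL;
        unsigned long end   = 0x8000UL;   /* first byte past the region */

        assert(PFN_UP(start) == 2);       /* partial first page skipped */
        assert(PFN_DOWN(end) == 8);       /* pages 2..7 fully usable    */
        assert(PFN_PHYS(2) == 0x2000UL);

        printf("usable: pfn %lu .. %lu\n",
               PFN_UP(start), PFN_DOWN(end) - 1);
        return 0;
    }
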
Index: loadmmu.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/mips64/mm/loadmmu.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- loadmmu.c 14 Jan 2001 19:54:20 -0000 1.1.1.1
+++ loadmmu.c 10 Apr 2002 14:43:22 -0000 1.2
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
Index: r4xx0.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/mips64/mm/r4xx0.c,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- r4xx0.c 25 Feb 2001 23:15:22 -0000 1.1.1.2
+++ r4xx0.c 10 Apr 2002 14:43:22 -0000 1.2
@@ -6,7 +6,7 @@
* r4xx0.c: R4000 processor variant specific MMU/Cache routines.
*
* Copyright (C) 1996 David S. Miller (dm...@en...)
- * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle (ra...@gn...)
+ * Copyright (C) 1997, 1998, 1999, 2000, 2001 Ralf Baechle (ra...@gn...)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/init.h>
@@ -377,7 +377,7 @@
"ld\t%3,-8(%1)\n\t"
"sd\t%2,-16(%0)\n\t"
"bne\t$1,%0,1b\n\t"
- " sd\t%4,-8(%0)\n\t"
+ " sd\t%3,-8(%0)\n\t"
".set\tat\n\t"
".set\treorder"
:"=r" (dummy1), "=r" (dummy2), "=&r" (reg1), "=&r" (reg2)
@@ -776,9 +776,9 @@
__restore_flags(flags);
}
-static void
-r4k_flush_cache_range_s16d16i16(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+static void r4k_flush_cache_range_s16d16i16(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
struct vm_area_struct *vma;
unsigned long flags;
@@ -815,9 +815,9 @@
}
}
-static void
-r4k_flush_cache_range_s32d16i16(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+static void r4k_flush_cache_range_s32d16i16(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
struct vm_area_struct *vma;
unsigned long flags;
@@ -854,9 +854,9 @@
}
}
-static void
-r4k_flush_cache_range_s64d16i16(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+static void r4k_flush_cache_range_s64d16i16(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
struct vm_area_struct *vma;
unsigned long flags;
@@ -893,9 +893,9 @@
}
}
-static void
-r4k_flush_cache_range_s128d16i16(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+static void r4k_flush_cache_range_s128d16i16(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
struct vm_area_struct *vma;
unsigned long flags;
@@ -932,9 +932,9 @@
}
}
-static void
-r4k_flush_cache_range_s32d32i32(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+static void r4k_flush_cache_range_s32d32i32(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
struct vm_area_struct *vma;
unsigned long flags;
@@ -971,9 +971,9 @@
}
}
-static void
-r4k_flush_cache_range_s64d32i32(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+static void r4k_flush_cache_range_s64d32i32(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
struct vm_area_struct *vma;
unsigned long flags;
@@ -1010,14 +1010,14 @@
}
}
-static void
-r4k_flush_cache_range_s128d32i32(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+static void r4k_flush_cache_range_s128d32i32(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
struct vm_area_struct *vma;
unsigned long flags;
- if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {
+ if (CPU_CONTEXT(smp_processor_id(), mm) != 0)
return;
start &= PAGE_MASK;
@@ -1049,9 +1049,9 @@
}
}
-static void
-r4k_flush_cache_range_d16i16(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+static void r4k_flush_cache_range_d16i16(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {
unsigned long flags;
@@ -1065,9 +1065,9 @@
}
}
-static void
-r4k_flush_cache_range_d32i32(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+static void r4k_flush_cache_range_d32i32(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {
unsigned long flags;
@@ -1176,8 +1176,8 @@
}
}
-static void
-r4k_flush_cache_page_s16d16i16(struct vm_area_struct *vma, unsigned long page)
+static void r4k_flush_cache_page_s16d16i16(struct vm_area_struct *vma,
+ unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -1213,8 +1213,8 @@
* for every cache flush operation. So we do indexed flushes
* in that case, which doesn't overly flush the cache too much.
*/
- if (CPU_CONTEXT(smp_processor_id(), mm) !=
- CPU_CONTEXT(smp_processor_id(), current->mm)) {
+ if (CPU_CONTEXT(smp_processor_id(), mm) !=
+ CPU_CONTEXT(smp_processor_id(), current->mm)) {
/* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
@@ -1227,8 +1227,8 @@
__restore_flags(flags);
}
-static void
-r4k_flush_cache_page_s32d16i16(struct vm_area_struct *vma, unsigned long page)
+static void r4k_flush_cache_page_s32d16i16(struct vm_area_struct *vma,
+ unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -1263,8 +1263,8 @@
* for every cache flush operation. So we do indexed flushes
* in that case, which doesn't overly flush the cache too much.
*/
- if (CPU_CONTEXT(smp_processor_id(), mm) !=
- CPU_CONTEXT(smp_processor_id(), current->mm)) {
+ if (CPU_CONTEXT(smp_processor_id(), mm) !=
+ CPU_CONTEXT(smp_processor_id(), current->mm)) {
/* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
@@ -1277,8 +1277,8 @@
__restore_flags(flags);
}
-static void
-r4k_flush_cache_page_s64d16i16(struct vm_area_struct *vma, unsigned long page)
+static void r4k_flush_cache_page_s64d16i16(struct vm_area_struct *vma,
+ unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -1315,7 +1315,7 @@
* in that case, which doesn't overly flush the cache too much.
*/
if (CPU_CONTEXT(smp_processor_id(), mm) !=
- CPU_CONTEXT(smp_processor_id(), current->mm)) {
+ CPU_CONTEXT(smp_processor_id(), current->mm)) {
/* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
@@ -1328,8 +1328,8 @@
__restore_flags(flags);
}
-static void
-r4k_flush_cache_page_s128d16i16(struct vm_area_struct *vma, unsigned long page)
+static void r4k_flush_cache_page_s128d16i16(struct vm_area_struct *vma,
+ unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -1366,7 +1366,7 @@
* in that case, which doesn't overly flush the cache too much.
*/
if (CPU_CONTEXT(smp_processor_id(), mm) !=
- CPU_CONTEXT(smp_processor_id(), current->mm)) {
+ CPU_CONTEXT(smp_processor_id(), current->mm)) {
/*
* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
@@ -1380,8 +1380,8 @@
__restore_flags(flags);
}
-static void
-r4k_flush_cache_page_s32d32i32(struct vm_area_struct *vma, unsigned long page)
+static void r4k_flush_cache_page_s32d32i32(struct vm_area_struct *vma,
+ unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -1419,7 +1419,7 @@
* in that case, which doesn't overly flush the cache too much.
*/
if (CPU_CONTEXT(smp_processor_id(), mm) !=
- CPU_CONTEXT(smp_processor_id(), current->mm)) {
+ CPU_CONTEXT(smp_processor_id(), current->mm)) {
/*
* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
@@ -1433,8 +1433,8 @@
__restore_flags(flags);
}
-static void
-r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma, unsigned long page)
+static void r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma,
+ unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -1472,7 +1472,7 @@
* in that case, which doesn't overly flush the cache too much.
*/
if (CPU_CONTEXT(smp_processor_id(), mm) !=
- CPU_CONTEXT(smp_processor_id(), current->mm)) {
+ CPU_CONTEXT(smp_processor_id(), current->mm)) {
/*
* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
@@ -1486,8 +1486,8 @@
__restore_flags(flags);
}
-static void
-r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma, unsigned long page)
+static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma,
+ unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -1524,7 +1524,7 @@
* in that case, which doesn't overly flush the cache too much.
*/
if (CPU_CONTEXT(smp_processor_id(), mm) !=
- CPU_CONTEXT(smp_processor_id(), current->mm)) {
+ CPU_CONTEXT(smp_processor_id(), current->mm)) {
/* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
@@ -1537,8 +1537,8 @@
__restore_flags(flags);
}
-static void
-r4k_flush_cache_page_d16i16(struct vm_area_struct *vma, unsigned long page)
+static void r4k_flush_cache_page_d16i16(struct vm_area_struct *vma,
+ unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -1587,8 +1587,8 @@
__restore_flags(flags);
}
-static void
-r4k_flush_cache_page_d32i32(struct vm_area_struct *vma, unsigned long page)
+static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma,
+ unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -1639,9 +1639,8 @@
__restore_flags(flags);
}
-static void
-r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
- unsigned long page)
+static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
+ unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -1741,21 +1740,20 @@
* (Revision 2.0 device errata from IDT available on http://www.idt.com/
* in .pdf format.)
*/
-static void
-r4k_dma_cache_wback_inv_pc(unsigned long addr, unsigned long size)
+static void r4k_dma_cache_wback_inv_pc(unsigned long addr, unsigned long size)
{
unsigned long end, a;
unsigned int flags;
- if (size >= dcache_size) {
+ if (size >= (unsigned long)dcache_size) {
flush_cache_l1();
} else {
/* Workaround for R4600 bug. See comment above. */
__save_and_cli(flags);
*(volatile unsigned long *)KSEG1;
- a = addr & ~(dc_lsize - 1);
- end = (addr + size) & ~(dc_lsize - 1);
+ a = addr & ~((unsigned long)dc_lsize - 1);
+ end = (addr + size) & ~((unsigned long)dc_lsize - 1);
while (1) {
flush_dcache_line(a); /* Hit_Writeback_Inv_D */
if (a == end) break;
@@ -1766,18 +1764,17 @@
bc_wback_inv(addr, size);
}
-static void
-r4k_dma_cache_wback_inv_sc(unsigned long addr, unsigned long size)
+static void r4k_dma_cache_wback_inv_sc(unsigned long addr, unsigned long size)
{
unsigned long end, a;
- if (size >= scache_size) {
+ if (size >= (unsigned long)scache_size) {
flush_cache_l1();
return;
}
- a = addr & ~(sc_lsize - 1);
- end = (addr + size) & ~(sc_lsize - 1);
+ a = addr & ~((unsigned long)sc_lsize - 1);
+ end = (addr + size) & ~((unsigned long)sc_lsize - 1);
while (1) {
flush_scache_line(a); /* Hit_Writeback_Inv_SD */
if (a == end) break;
@@ -1785,21 +1782,20 @@
}
}
-static void
-r4k_dma_cache_inv_pc(unsigned long addr, unsigned long size)
+static void r4k_dma_cache_inv_pc(unsigned long addr, unsigned long size)
{
unsigned long end, a;
unsigned int flags;
- if (size >= dcache_size) {
+ if (size >= (unsigned long)dcache_size) {
flush_cache_l1();
} else {
/* Workaround for R4600 bug. See comment above. */
__save_and_cli(flags);
*(volatile unsigned long *)KSEG1;
- a = addr & ~(dc_lsize - 1);
- end = (addr + size) & ~(dc_lsize - 1);
+ a = addr & ~((unsigned long)dc_lsize - 1);
+ end = (addr + size) & ~((unsigned long)dc_lsize - 1);
while (1) {
flush_dcache_line(a); /* Hit_Writeback_Inv_D */
if (a == end) break;
@@ -1811,18 +1807,17 @@
bc_inv(addr, size);
}
-static void
-r4k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
+static void r4k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
{
unsigned long end, a;
- if (size >= scache_size) {
+ if (size >= (unsigned long)scache_size) {
flush_cache_l1();
return;
}
- a = addr & ~(sc_lsize - 1);
- end = (addr + size) & ~(sc_lsize - 1);
+ a = addr & ~((unsigned long)sc_lsize - 1);
+ end = (addr + size) & ~((unsigned long)sc_lsize - 1);
while (1) {
flush_scache_line(a); /* Hit_Writeback_Inv_SD */
if (a == end) break;
@@ -1830,8 +1825,7 @@
}
}
-static void
-r4k_dma_cache_wback(unsigned long addr, unsigned long size)
+static void r4k_dma_cache_wback(unsigned long addr, unsigned long size)
{
panic("r4k_dma_cache called - should not happen.\n");
}
@@ -1843,33 +1837,24 @@
*/
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
- unsigned long daddr, iaddr;
-
- daddr = addr & ~(dc_lsize - 1);
__asm__ __volatile__("nop;nop;nop;nop"); /* R4600 V1.7 */
- protected_writeback_dcache_line(daddr);
- protected_writeback_dcache_line(daddr + dc_lsize);
- iaddr = addr & ~(ic_lsize - 1);
- protected_flush_icache_line(iaddr);
- protected_flush_icache_line(iaddr + ic_lsize);
+
+ protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+ protected_flush_icache_line(addr & ~(ic_lsize - 1));
}
static void r4600v20k_flush_cache_sigtramp(unsigned long addr)
{
- unsigned long daddr, iaddr;
unsigned int flags;
- daddr = addr & ~(dc_lsize - 1);
__save_and_cli(flags);
/* Clear internal cache refill buffer */
*(volatile unsigned int *)KSEG1;
- protected_writeback_dcache_line(daddr);
- protected_writeback_dcache_line(daddr + dc_lsize);
- iaddr = addr & ~(ic_lsize - 1);
- protected_flush_icache_line(iaddr);
- protected_flush_icache_line(iaddr + ic_lsize);
+ protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+ protected_flush_icache_line(addr & ~(ic_lsize - 1));
+
__restore_flags(flags);
}
@@ -2011,8 +1996,7 @@
}
}
-static void
-r4k_flush_cache_l2(void)
+static void r4k_flush_cache_l2(void)
{
}
@@ -2125,8 +2109,8 @@
printk("Lo : %016lx\n", regs->lo);
/* Saved cp0 registers. */
- printk("epc : %016lx\nbadvaddr: %016lx\n",
- regs->cp0_epc, regs->cp0_badvaddr);
+ printk("epc : %016lx %s\nbadvaddr: %016lx\n",
+ regs->cp0_epc, print_tainted(), regs->cp0_badvaddr);
printk("Status : %08x\nCause : %08x\n",
(unsigned int) regs->cp0_status, (unsigned int) regs->cp0_cause);
}
@@ -2384,7 +2368,11 @@
printk("CPU revision is: %08x\n", read_32bit_cp0_register(CP0_PRID));
+#ifdef CONFIG_MIPS_UNCACHED
+ set_cp0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
+#else
set_cp0_config(CONF_CM_CMASK, CONF_CM_CACHABLE_NONCOHERENT);
+#endif /* UNCACHED */
probe_icache(config);
probe_dcache(config);
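
The (unsigned long) casts added to the DMA flush routines are more than warning suppression on a 64-bit kernel: if the line-size variable has a 32-bit unsigned type, ~(lsize - 1) is computed in 32 bits and then zero-extends, silently clearing the upper half of a 64-bit address. (With a plain signed int the sign extension happens to do the right thing, so the demonstration below assumes an unsigned 32-bit variable to show the failure mode; an LP64 host is assumed, matching the 64-bit kernel.)

    #include <stdio.h>

    int main(void)
    {
        unsigned long addr = 0xffffffff80001234UL; /* KSEG0-style address */
        unsigned int  dc_lsize = 32;               /* 32-bit line size    */

        /* 32-bit arithmetic: ~(dc_lsize - 1) == 0xffffffe0u, which
         * zero-extends and wipes the top 32 bits of addr. */
        unsigned long bad  = addr & ~(dc_lsize - 1);

        /* Widen first, as the patch does: the mask stays all-ones
         * in the upper half. */
        unsigned long good = addr & ~((unsigned long)dc_lsize - 1);

        printf("bad : %016lx\n", bad);   /* 0000000080001220 */
        printf("good: %016lx\n", good);  /* ffffffff80001220 */
        return 0;
    }
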
Index: umap.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/mips64/mm/umap.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- umap.c 14 Jan 2001 19:54:29 -0000 1.1.1.1
+++ umap.c 10 Apr 2002 14:43:22 -0000 1.2
@@ -1,11 +1,11 @@
-/* $Id$
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 Linus Torvalds
* Copyright (C) 1997 Miguel de Icaza
+ * Copyright (C) 2001 Ralf Baechle
*/
#include <linux/stat.h>
#include <linux/sched.h>
@@ -16,6 +16,7 @@
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
+#include <linux/module.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
@@ -24,8 +25,8 @@
#include <asm/pgalloc.h>
#include <asm/page.h>
-static inline void
-remove_mapping_pte_range (pmd_t *pmd, unsigned long address, unsigned long size)
+static inline void remove_mapping_pte_range (pmd_t *pmd, unsigned long address,
+ unsigned long size)
{
pte_t *pte;
unsigned long end;
@@ -33,7 +34,8 @@
if (pmd_none (*pmd))
return;
if (pmd_bad (*pmd)){
- printk ("remove_graphics_pte_range: bad pmd (%08lx)\n", pmd_val (*pmd));
+ printk ("remove_graphics_pte_range: bad pmd (%08lx)\n",
+ pmd_val (*pmd));
pmd_clear (pmd);
return;
}
@@ -52,8 +54,8 @@
}
-static inline void
-remove_mapping_pmd_range (pgd_t *pgd, unsigned long address, unsigned long size)
+static inline void remove_mapping_pmd_range (pgd_t *pgd, unsigned long address,
+ unsigned long size)
{
pmd_t *pmd;
unsigned long end;
@@ -62,7 +64,8 @@
return;
if (pgd_bad (*pgd)){
- printk ("remove_graphics_pmd_range: bad pgd (%08lx)\n", pgd_val (*pgd));
+ printk ("remove_graphics_pmd_range: bad pgd (%08lx)\n",
+ pgd_val (*pgd));
pgd_clear (pgd);
return;
}
@@ -83,13 +86,13 @@
* This routine is called from the page fault handler to remove a
* range of active mappings at this point
*/
-void
-remove_mapping (struct task_struct *task, unsigned long start, unsigned long end)
+void remove_mapping (struct task_struct *task, unsigned long start,
+ unsigned long end)
{
unsigned long beg = start;
pgd_t *dir;
- down (&task->mm->mmap_sem);
+ down_write (&task->mm->mmap_sem);
dir = pgd_offset (task->mm, start);
flush_cache_range (task->mm, beg, end);
while (start < end){
@@ -98,12 +101,15 @@
dir++;
}
flush_tlb_range (task->mm, beg, end);
- up (&task->mm->mmap_sem);
+ up_write (&task->mm->mmap_sem);
}
+EXPORT_SYMBOL(remove_mapping);
+
void *vmalloc_uncached (unsigned long size)
{
- return vmalloc_prot (size, PAGE_KERNEL_UNCACHED);
+ return __vmalloc (size, GFP_KERNEL | __GFP_HIGHMEM,
+ PAGE_KERNEL_UNCACHED);
}
static inline void free_pte(pte_t page)
@@ -133,8 +139,8 @@
* maps a range of vmalloc()ed memory into the requested pages. the old
* mappings are removed.
*/
-static inline void
-vmap_pte_range (pte_t *pte, unsigned long address, unsigned long size, unsigned long vaddr)
+static inline void vmap_pte_range (pte_t *pte, unsigned long address,
+ unsigned long size, unsigned long vaddr)
{
unsigned long end;
pgd_t *vdir;
@@ -163,8 +169,8 @@
} while (address < end);
}
-static inline int
-vmap_pmd_range (pmd_t *pmd, unsigned long address, unsigned long size, unsigned long vaddr)
+static inline int vmap_pmd_range (pmd_t *pmd, unsigned long address,
+ unsigned long size, unsigned long vaddr)
{
unsigned long end;
@@ -174,7 +180,7 @@
end = PGDIR_SIZE;
vaddr -= address;
do {
- pte_t * pte = pte_alloc(pmd, address);
+ pte_t * pte = pte_alloc(current->mm, pmd, address);
if (!pte)
return -ENOMEM;
vmap_pte_range(pte, address, end - address, address + vaddr);
@@ -184,8 +190,8 @@
return 0;
}
-int
-vmap_page_range (unsigned long from, unsigned long size, unsigned long vaddr)
+int vmap_page_range (unsigned long from, unsigned long size,
+ unsigned long vaddr)
{
int error = 0;
pgd_t * dir;
@@ -196,7 +202,7 @@
dir = pgd_offset(current->mm, from);
flush_cache_range(current->mm, beg, end);
while (from < end) {
- pmd_t *pmd = pmd_alloc(dir, from);
+ pmd_t *pmd = pmd_alloc(current->mm, dir, from);
error = -ENOMEM;
if (!pmd)
break;
|