|
From: Andy P. <at...@us...> - 2002-04-09 17:08:26
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/cris/mm
In directory usw-pr-cvs1:/tmp/cvs-serv13825/cris/mm
Modified Files:
Makefile extable.c fault.c init.c tlb.c
Added Files:
ioremap.c
Log Message:
synch 2.4.15 commit 29
--- NEW FILE ---
/*
* arch/cris/mm/ioremap.c
*
* Re-map IO memory to kernel address space so that we can access it.
* Needed for memory-mapped I/O devices mapped outside our normal DRAM
* window (that is, all memory-mapped I/O devices).
*
* (C) Copyright 1995 1996 Linus Torvalds
* CRIS-port by Axis Communications AB
*/
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | __READABLE |
                                                             __WRITEABLE | _PAGE_GLOBAL |
                                                             _PAGE_KERNEL | flags)));
                address += PAGE_SIZE;
                phys_addr += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t * pte = pte_alloc(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                            unsigned long size, unsigned long flags)
{
        int error;
        pgd_t * dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        flush_cache_all();
        if (address >= end)
                BUG();
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                error = -ENOMEM;
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void * addr;
        struct vm_struct * area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

#if 0
        /* TODO: Here we can put checks for driver-writer abuse... */

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0xA0000 && last_addr < 0x100000)
                return phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if(!PageReserved(page))
                                return NULL;
        }
#endif

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = area->addr;
        if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
                vfree(addr);
                return NULL;
        }
        return (void *) (offset + (char *)addr);
}

void iounmap(void *addr)
{
        if (addr > high_memory)
                return vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
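
A minimal usage sketch of the functions above (not part of the commit; the
device base address, window size, and register layout are hypothetical
placeholders):

/* Map a hypothetical device register window, touch one register, and
 * unmap it again. __ioremap() hands back the page-aligned mapping plus
 * the sub-page offset, so callers never see the alignment fixup done
 * internally. The flags argument is simply OR'd into the PTE protection
 * bits, so 0 adds nothing beyond the kernel defaults.
 */
static int example_dev_setup(void)
{
        volatile unsigned long *regs;

        regs = (volatile unsigned long *) __ioremap(0x80000000, 0x100, 0);
        if (!regs)
                return -ENOMEM;

        regs[0] = 1;                    /* hypothetical "enable" register */

        iounmap((void *) regs);
        return 0;
}
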
Index: Makefile
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/cris/mm/Makefile,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- Makefile 25 Feb 2001 23:15:23 -0000 1.1.1.1
+++ Makefile 9 Apr 2002 17:03:16 -0000 1.2
@@ -8,6 +8,6 @@
# Note 2! The CFLAGS definition is now in the main makefile...
O_TARGET := mm.o
-obj-y := init.o fault.o tlb.o extable.o
+obj-y := init.o fault.o tlb.o extable.o ioremap.o
include $(TOPDIR)/Rules.make
Index: extable.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/cris/mm/extable.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- extable.c 25 Feb 2001 23:15:23 -0000 1.1.1.1
+++ extable.c 9 Apr 2002 17:03:16 -0000 1.2
@@ -1,13 +1,22 @@
/*
* linux/arch/cris/mm/extable.c
+ *
+ * $Log$
+ * Revision 1.2 2002/04/09 17:03:16 atp
+ * synch 2.4.15 commit 29
+ *
+ * Revision 1.3 2001/09/27 13:52:40 bjornw
+ * Harmonize underscore-ness with other parts
+ *
+ *
*/
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
-extern const struct exception_table_entry _start___ex_table[];
-extern const struct exception_table_entry _stop___ex_table[];
+extern const struct exception_table_entry __start___ex_table[];
+extern const struct exception_table_entry __stop___ex_table[];
static inline unsigned long
search_one_table(const struct exception_table_entry *first,
@@ -37,8 +46,7 @@
#ifndef CONFIG_MODULES
/* There is only the kernel to search. */
- ret = search_one_table(_start___ex_table, _stop___ex_table-1, addr);
- if (ret) return ret;
+ return search_one_table(__start___ex_table, __stop___ex_table-1, addr);
#else
/* The kernel is the last "module" -- no need to treat it special. */
struct module *mp;
Index: fault.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/cris/mm/fault.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- fault.c 25 Feb 2001 23:15:23 -0000 1.1.1.1
+++ fault.c 9 Apr 2002 17:03:16 -0000 1.2
@@ -1,13 +1,48 @@
/*
* linux/arch/cris/mm/fault.c
*
- * Copyright (C) 2000 Axis Communications AB
+ * Copyright (C) 2000, 2001 Axis Communications AB
*
* Authors: Bjorn Wesen
*
* $Log$
- * Revision 1.1.1.1 2001/02/25 23:15:23 kenn
- * Import official 2.4.2 Linus tree
+ * Revision 1.2 2002/04/09 17:03:16 atp
+ * synch 2.4.15 commit 29
+ *
+ * Revision 1.18 2001/07/18 22:14:32 bjornw
+ * Enable interrupts in the bulk of do_page_fault
+ *
+ * Revision 1.17 2001/07/18 13:07:23 bjornw
+ * * Detect non-existent PTE's in vmalloc pmd synchronization
+ * * Remove comment about fast-paths for VMALLOC_START etc, because all that
+ * was totally bogus anyway it turned out :)
+ * * Fix detection of vmalloc-area synchronization
+ * * Add some comments
+ *
+ * Revision 1.16 2001/06/13 00:06:08 bjornw
+ * current_pgd should be volatile
+ *
+ * Revision 1.15 2001/06/13 00:02:23 bjornw
+ * Use a separate variable to store the current pgd to avoid races in schedule
+ *
+ * Revision 1.14 2001/05/16 17:41:07 hp
+ * Last comment tweak further tweaked.
+ *
+ * Revision 1.13 2001/05/15 00:58:44 hp
+ * Expand a bit on the comment why we compare address >= TASK_SIZE rather
+ * than >= VMALLOC_START.
+ *
+ * Revision 1.12 2001/04/04 10:51:14 bjornw
+ * mmap_sem is grabbed for reading
+ *
+ * Revision 1.11 2001/03/23 07:36:07 starvik
+ * Corrected according to review remarks
+ *
+ * Revision 1.10 2001/03/21 16:10:11 bjornw
+ * CRIS_FRAME_FIXUP not needed anymore, use FRAME_NORMAL
+ *
+ * Revision 1.9 2001/03/05 13:22:20 bjornw
+ * Spell-fix and fix in vmalloc_fault handling
*
* Revision 1.8 2000/11/22 14:45:31 bjornw
* * 2.4.0-test10 removed the set_pgdir instantaneous kernel global mapping
@@ -50,7 +85,13 @@
/* debug of higher-level faults */
#define DPG(x)
-/* fast TLB-fill fault handler */
+/* current active page directory */
+
+volatile pgd_t *current_pgd;
+
+/* fast TLB-fill fault handler
+ * this is called from entry.S with interrupts disabled
+ */
void
handle_mmu_bus_fault(struct pt_regs *regs)
@@ -59,10 +100,9 @@
int index;
int page_id;
int miss, we, acc, inv;
- struct mm_struct *mm = current->active_mm;
pmd_t *pmd;
pte_t pte;
- int errcode = 0;
+ int errcode;
unsigned long address;
cause = *R_MMU_CAUSE;
@@ -70,13 +110,27 @@
address = cause & PAGE_MASK; /* get faulting address */
- page_id = IO_EXTRACT(R_MMU_CAUSE, page_id, cause);
+ D(page_id = IO_EXTRACT(R_MMU_CAUSE, page_id, cause));
+ D(acc = IO_EXTRACT(R_MMU_CAUSE, acc_excp, cause));
+ D(inv = IO_EXTRACT(R_MMU_CAUSE, inv_excp, cause));
+ D(index = IO_EXTRACT(R_TLB_SELECT, index, select));
miss = IO_EXTRACT(R_MMU_CAUSE, miss_excp, cause);
we = IO_EXTRACT(R_MMU_CAUSE, we_excp, cause);
- acc = IO_EXTRACT(R_MMU_CAUSE, acc_excp, cause);
- inv = IO_EXTRACT(R_MMU_CAUSE, inv_excp, cause);
- index = IO_EXTRACT(R_TLB_SELECT, index, select);
+ /* Note: the reason we don't set errcode's r/w flag here
+ * using the 'we' flag, is because the latter is only given
+ * if there is a write-protection exception, not given as a
+ * general r/w access mode flag. It is currently not possible
+ * to get this from the MMU (TODO: check if this is the case
+ * for LXv2).
+ *
+ * The page-fault code won't care, but there will be two page-
+ * faults instead of one for the case of a write to a non-tabled
+ * page (miss, then write-protection).
+ */
+
+ errcode = 0;
+
D(printk("bus_fault from IRP 0x%x: addr 0x%x, miss %d, inv %d, we %d, acc %d, "
"idx %d pid %d\n",
regs->irp, address, miss, inv, we, acc, index, page_id));
@@ -85,9 +139,11 @@
if(miss) {
- /* see if the pte exists at all */
+ /* see if the pte exists at all
+ * refer through current_pgd, don't use mm->pgd
+ */
- pmd = (pmd_t *)pgd_offset(mm, address);
+ pmd = (pmd_t *)(current_pgd + pgd_index(address));
if(pmd_none(*pmd))
goto dofault;
if(pmd_bad(*pmd)) {
@@ -129,14 +185,14 @@
* the write to R_TLB_LO also writes the vpn and page_id fields from
* R_MMU_CAUSE, which we in this case obviously want to keep
*/
-
+
*R_TLB_LO = pte_val(pte);
return;
}
- errcode = 0x01 | (we << 1);
-
+ errcode = 1 | (we << 1);
+
dofault:
/* leave it to the MM system fault handler below */
D(printk("do_page_fault %p errcode %d\n", address, errcode));
@@ -149,7 +205,7 @@
* routines.
*
* Notice that the address we're given is aligned to the page the fault
- * occured in, since we only get the PFN in R_MMU_CAUSE not the complete
+ * occurred in, since we only get the PFN in R_MMU_CAUSE not the complete
* address.
*
* error_code:
@@ -186,23 +242,21 @@
* NOTE2: This is done so that, when updating the vmalloc
* mappings we don't have to walk all processes pgdirs and
* add the high mappings all at once. Instead we do it as they
- * are used.
+ * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
+ * bit set so sometimes the TLB can use a lingering entry.
*
- * TODO: On CRIS, we have a PTE Global bit which should be set in
- * all the PTE's related to vmalloc in all processes - that means if
- * we switch process and a vmalloc PTE is still in the TLB, it won't
- * need to be reloaded. It's an optimization.
- *
- * Linux/CRIS's kernel is not page-mapped, so the comparision below
- * should really be >= VMALLOC_START, however, kernel fixup errors
- * will be handled more quickly by going through vmalloc_fault and then
- * into bad_area_nosemaphore than falling through the find_vma user-mode
- * tests.
+ * This verifies that the fault happens in kernel space
+ * and that the fault was not a protection error (error_code & 1).
*/
- if (address >= TASK_SIZE)
+ if (address >= VMALLOC_START &&
+ !(error_code & 1) &&
+ !user_mode(regs))
goto vmalloc_fault;
+ /* we can and should enable interrupts at this point */
+ sti();
+
mm = tsk->mm;
writeaccess = error_code & 2;
info.si_code = SEGV_MAPERR;
@@ -215,7 +269,7 @@
if (in_interrupt() || !mm)
goto no_context;
- down(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -273,7 +327,7 @@
goto out_of_memory;
}
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
return;
/*
@@ -283,7 +337,7 @@
bad_area:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
bad_area_nosemaphore:
DPG(show_registers(regs));
@@ -311,16 +365,25 @@
*/
if ((fixup = search_exception_table(regs->irp)) != 0) {
+ /* Adjust the instruction pointer in the stackframe */
+
regs->irp = fixup;
- regs->frametype = CRIS_FRAME_FIXUP;
+
+ /* We do not want to return by restoring the CPU-state
+ * anymore, so switch frame-types (see ptrace.h)
+ */
+
+ regs->frametype = CRIS_FRAME_NORMAL;
+
D(printk("doing fixup to 0x%x\n", fixup));
return;
}
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
if ((unsigned long) (address) < PAGE_SIZE)
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
else
@@ -337,14 +400,14 @@
*/
out_of_memory:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
printk("VM: killing process %s\n", tsk->comm);
if(user_mode(regs))
do_exit(SIGKILL);
goto no_context;
do_sigbus:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
@@ -366,28 +429,52 @@
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
+ *
+ * Use current_pgd instead of tsk->active_mm->pgd
+ * since the latter might be unavailable if this
+ * code is executed in a misfortunately run irq
+ * (like inside schedule() between switch_mm and
+ * switch_to...).
*/
+
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
- pgd = tsk->active_mm->pgd + offset;
+ pgd = current_pgd + offset;
pgd_k = init_mm.pgd + offset;
- if (!pgd_present(*pgd)) {
- if (!pgd_present(*pgd_k))
- goto bad_area_nosemaphore;
- set_pgd(pgd, *pgd_k);
- return;
- }
+ /* Since we're two-level, we don't need to do both
+ * set_pgd and set_pmd (they do the same thing). If
+ * we go three-level at some point, do the right thing
+ * with pgd_present and set_pgd here.
+ *
+ * Also, since the vmalloc area is global, we don't
+ * need to copy individual PTE's, it is enough to
+ * copy the pgd pointer into the pte page of the
+ * root task. If that is there, we'll find our pte if
+ * it exists.
+ */
pmd = pmd_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address);
- if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+ if (!pmd_present(*pmd_k))
goto bad_area_nosemaphore;
+
set_pmd(pmd, *pmd_k);
+
+ /* Make sure the actual PTE exists as well to
+ * catch kernel vmalloc-area accesses to non-mapped
+ * addresses. If we don't do this, this will just
+ * silently loop forever.
+ */
+
+ pte_k = pte_offset(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+
return;
}
-
}
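
Condensed, the vmalloc_fault path added above copies one entry from the
reference (init_mm) page table into the faulting pgd and then verifies the
PTE really exists (sketch only; identifiers as in the diff, error labels
elided):

        int offset = pgd_index(address);
        pgd_t *pgd = current_pgd + offset;      /* not tsk->active_mm->pgd */
        pgd_t *pgd_k = init_mm.pgd + offset;    /* the reference table */
        pmd_t *pmd = pmd_offset(pgd, address);
        pmd_t *pmd_k = pmd_offset(pgd_k, address);

        if (!pmd_present(*pmd_k))
                goto bad_area_nosemaphore;      /* kernel has no mapping either */
        set_pmd(pmd, *pmd_k);                   /* two-level: this syncs the pgd slot */

        if (!pte_present(*pte_offset(pmd_k, address)))
                goto no_context;                /* vmalloc access to unmapped page */
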
Index: init.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/cris/mm/init.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- init.c 25 Feb 2001 23:15:23 -0000 1.1.1.1
+++ init.c 9 Apr 2002 17:03:16 -0000 1.2
@@ -2,13 +2,58 @@
* linux/arch/cris/mm/init.c
*
* Copyright (C) 1995 Linus Torvalds
- * Copyright (C) 2000 Axis Communications AB
+ * Copyright (C) 2000,2001 Axis Communications AB
*
* Authors: Bjorn Wesen (bj...@ax...)
*
* $Log$
- * Revision 1.1.1.1 2001/02/25 23:15:23 kenn
- * Import official 2.4.2 Linus tree
+ * Revision 1.2 2002/04/09 17:03:16 atp
+ * synch 2.4.15 commit 29
+ *
+ * Revision 1.29 2001/07/25 16:09:50 bjornw
+ * val->sharedram will stay 0
+ *
+ * Revision 1.28 2001/06/28 16:30:17 bjornw
+ * Oops. This needs to wait until 2.4.6 is merged
+ *
+ * Revision 1.27 2001/06/28 14:04:07 bjornw
+ * Fill in sharedram
+ *
+ * Revision 1.26 2001/06/18 06:36:02 hp
+ * Enable free_initmem of __init-type pages
+ *
+ * Revision 1.25 2001/06/13 00:02:23 bjornw
+ * Use a separate variable to store the current pgd to avoid races in schedule
+ *
+ * Revision 1.24 2001/05/15 00:52:20 hp
+ * Only map segment 0xa as seg if CONFIG_JULIETTE
+ *
+ * Revision 1.23 2001/04/04 14:35:40 bjornw
+ * * Removed get_pte_slow and friends (2.4.3 change)
+ * * Removed bad_pmd handling (2.4.3 change)
+ *
+ * Revision 1.22 2001/04/04 13:38:04 matsfg
+ * Moved ioremap to a separate function instead
+ *
+ * Revision 1.21 2001/03/27 09:28:33 bjornw
+ * ioremap used too early - lets try it in mem_init instead
+ *
+ * Revision 1.20 2001/03/23 07:39:21 starvik
+ * Corrected according to review remarks
+ *
+ * Revision 1.19 2001/03/15 14:25:17 bjornw
+ * More general shadow registers and ioremaped addresses for external I/O
+ *
+ * Revision 1.18 2001/02/23 12:46:44 bjornw
+ * * 0xc was not CSE1; 0x8 is, same as uncached flash, so we move the uncached
+ * flash during CRIS_LOW_MAP from 0xe to 0x8 so both the flash and the I/O
+ * is mapped straight over (for !CRIS_LOW_MAP the uncached flash is still 0xe)
+ *
+ * Revision 1.17 2001/02/22 15:05:21 bjornw
+ * Map 0x9 straight over during LOW_MAP to allow for memory mapped LEDs
+ *
+ * Revision 1.16 2001/02/22 15:02:35 bjornw
+ * Map 0xc straight over during LOW_MAP to allow for memory mapped I/O
*
* Revision 1.15 2001/01/10 21:12:10 bjornw
* loops_per_sec -> loops_per_jiffy
@@ -60,6 +105,8 @@
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/svinto.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
static unsigned long totalram_pages;
@@ -71,128 +118,13 @@
extern void show_net_buffers(void);
extern void tlb_init(void);
-/*
- * empty_bad_page is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving a inode
- * unused etc..
- *
- * the main point is that when a page table error occurs, we want to get
- * out of the kernel safely before killing the process, so we need something
- * to feed the MMU with when the fault occurs even if we don't have any
- * real PTE's or page tables.
- *
- * empty_bad_page_table is the accompanying page-table: it is initialized
- * to point to empty_bad_page writable-shared entries.
- *
- * empty_zero_page is a special page that is used for zero-initialized
- * data and COW.
- */
-unsigned long empty_bad_page_table;
-unsigned long empty_bad_page;
unsigned long empty_zero_page;
-pte_t * __bad_pagetable(void)
-{
- /* somehow it is enough to just clear it and not fill it with
- * bad page PTE's...
- */
- memset((void *)empty_bad_page_table, 0, PAGE_SIZE);
-
- return (pte_t *) empty_bad_page_table;
-}
-
-pte_t __bad_page(void)
-{
-
- /* clear the empty_bad_page page. this should perhaps be
- * a more simple inlined loop like it is on the other
- * architectures.
- */
-
- memset((void *)empty_bad_page, 0, PAGE_SIZE);
-
- return pte_mkdirty(__mk_pte((void *)empty_bad_page, PAGE_SHARED));
-}
-
-static pte_t * get_bad_pte_table(void)
-{
- pte_t *empty_bad_pte_table = (pte_t *)empty_bad_page_table;
- pte_t v;
- int i;
-
- v = __bad_page();
-
- for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
- empty_bad_pte_table[i] = v;
-
- return empty_bad_pte_table;
-}
-
-void __handle_bad_pmd(pmd_t *pmd)
-{
- pmd_ERROR(*pmd);
- pmd_set(pmd, get_bad_pte_table());
-}
-
-void __handle_bad_pmd_kernel(pmd_t *pmd)
-{
- pmd_ERROR(*pmd);
- pmd_set_kernel(pmd, get_bad_pte_table());
-}
-
-pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
-{
- pte_t *pte;
-
- pte = (pte_t *) __get_free_page(GFP_KERNEL);
- if (pmd_none(*pmd)) {
- if (pte) {
- clear_page(pte);
- pmd_set_kernel(pmd, pte);
- return pte + offset;
- }
- pmd_set_kernel(pmd, get_bad_pte_table());
- return NULL;
- }
- free_page((unsigned long)pte);
- if (pmd_bad(*pmd)) {
- __handle_bad_pmd_kernel(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + offset;
-}
-
-pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
-{
- pte_t *pte;
-
- pte = (pte_t *) __get_free_page(GFP_KERNEL);
- if (pmd_none(*pmd)) {
- if (pte) {
- clear_page(pte);
- pmd_set(pmd, pte);
- return pte + offset;
- }
- pmd_set(pmd, get_bad_pte_table());
- return NULL;
- }
- free_page((unsigned long)pte);
- if (pmd_bad(*pmd)) {
- __handle_bad_pmd(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + offset;
-}
-
-#ifndef CONFIG_NO_PGT_CACHE
-struct pgtable_cache_struct quicklists;
-
/* trim the page-table cache if necessary */
-int do_check_pgt_cache(int low, int high)
+int
+do_check_pgt_cache(int low, int high)
{
int freed = 0;
@@ -203,25 +135,20 @@
freed++;
}
if(pmd_quicklist) {
- free_pmd_slow(get_pmd_fast());
+ pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
freed++;
}
if(pte_quicklist) {
- free_pte_slow(get_pte_fast());
- freed++;
+ pte_free_slow(pte_alloc_one_fast(NULL, 0));
+ freed++;
}
} while(pgtable_cache_size > low);
}
return freed;
}
-#else
-int do_check_pgt_cache(int low, int high)
-{
- return 0;
-}
-#endif
-void show_mem(void)
+void
+show_mem(void)
{
int i,free = 0,total = 0,cached = 0, reserved = 0, nonshared = 0;
int shared = 0;
@@ -272,6 +199,13 @@
for(i = 0; i < PTRS_PER_PGD; i++)
swapper_pg_dir[i] = __pgd(0);
+
+ /* make sure the current pgd table points to something sane
+ * (even if it is most probably not used until the next
+ * switch_mm)
+ */
+
+ current_pgd = init_mm.pgd;
/* initialise the TLB (tlb.c) */
@@ -279,27 +213,70 @@
/* see README.mm for details on the KSEG setup */
-#ifdef CONFIG_CRIS_LOW_MAP
+#ifndef CONFIG_CRIS_LOW_MAP
+ /* This code is for the corrected Etrax-100 LX version 2... */
+
+ *R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg ) | /* cached flash */
+ IO_STATE(R_MMU_KSEG, seg_e, seg ) | /* uncached flash */
+ IO_STATE(R_MMU_KSEG, seg_d, page ) | /* vmalloc area */
+ IO_STATE(R_MMU_KSEG, seg_c, seg ) | /* kernel area */
+ IO_STATE(R_MMU_KSEG, seg_b, seg ) | /* kernel reg area */
+ IO_STATE(R_MMU_KSEG, seg_a, page ) | /* user area */
+ IO_STATE(R_MMU_KSEG, seg_9, page ) |
+ IO_STATE(R_MMU_KSEG, seg_8, page ) |
+ IO_STATE(R_MMU_KSEG, seg_7, page ) |
+ IO_STATE(R_MMU_KSEG, seg_6, page ) |
+ IO_STATE(R_MMU_KSEG, seg_5, page ) |
+ IO_STATE(R_MMU_KSEG, seg_4, page ) |
+ IO_STATE(R_MMU_KSEG, seg_3, page ) |
+ IO_STATE(R_MMU_KSEG, seg_2, page ) |
+ IO_STATE(R_MMU_KSEG, seg_1, page ) |
+ IO_STATE(R_MMU_KSEG, seg_0, page ) );
+ *R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
+ IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
+ IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
+ IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );
+
+ *R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
+#else
/* Etrax-100 LX version 1 has a bug so that we cannot map anything
* across the 0x80000000 boundary, so we need to shrink the user-virtual
* area to 0x50000000 instead of 0xb0000000 and map things slightly
* different. The unused areas are marked as paged so that we can catch
* freak kernel accesses there.
*
- * The Juliette chip is mapped at 0xa so we pass that segment straight
+ * The ARTPEC chip is mapped at 0xa so we pass that segment straight
* through. We cannot vremap it because the vmalloc area is below 0x8
* and Juliette needs an uncached area above 0x8.
+ *
+ * Same thing with 0xc and 0x9, which is memory-mapped I/O on some boards.
+ * We map them straight over in LOW_MAP, but use vremap in LX version 2.
*/
*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, page ) |
- IO_STATE(R_MMU_KSEG, seg_e, seg ) | /* uncached flash */
+ IO_STATE(R_MMU_KSEG, seg_e, page ) |
IO_STATE(R_MMU_KSEG, seg_d, page ) |
- IO_STATE(R_MMU_KSEG, seg_c, page ) |
+ IO_STATE(R_MMU_KSEG, seg_c, page ) |
IO_STATE(R_MMU_KSEG, seg_b, seg ) | /* kernel reg area */
- IO_STATE(R_MMU_KSEG, seg_a, seg ) | /* Juliette etc. */
- IO_STATE(R_MMU_KSEG, seg_9, page ) |
- IO_STATE(R_MMU_KSEG, seg_8, page ) |
+#ifdef CONFIG_JULIETTE
+ IO_STATE(R_MMU_KSEG, seg_a, seg ) | /* ARTPEC etc. */
+#else
+ IO_STATE(R_MMU_KSEG, seg_a, page ) |
+#endif
+ IO_STATE(R_MMU_KSEG, seg_9, seg ) | /* LED's on some boards */
+ IO_STATE(R_MMU_KSEG, seg_8, seg ) | /* CSE0/1, flash and I/O */
IO_STATE(R_MMU_KSEG, seg_7, page ) | /* kernel vmalloc area */
IO_STATE(R_MMU_KSEG, seg_6, seg ) | /* kernel DRAM area */
IO_STATE(R_MMU_KSEG, seg_5, seg ) | /* cached flash */
@@ -310,60 +287,27 @@
IO_STATE(R_MMU_KSEG, seg_0, page ) ); /* user area */
*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
+ IO_FIELD(R_MMU_KBASE_HI, base_e, 0x0 ) |
IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
+#ifdef CONFIG_JULIETTE
IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
- IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );
-
- *R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
- IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#else
- /* This code is for the hopefully corrected Etrax-100 LX version 2... */
-
- *R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg ) | /* cached flash */
- IO_STATE(R_MMU_KSEG, seg_e, seg ) | /* uncached flash */
- IO_STATE(R_MMU_KSEG, seg_d, page ) | /* vmalloc area */
- IO_STATE(R_MMU_KSEG, seg_c, seg ) | /* kernel area */
- IO_STATE(R_MMU_KSEG, seg_b, seg ) | /* kernel reg area */
- IO_STATE(R_MMU_KSEG, seg_a, page ) | /* user area */
- IO_STATE(R_MMU_KSEG, seg_9, page ) |
- IO_STATE(R_MMU_KSEG, seg_8, page ) |
- IO_STATE(R_MMU_KSEG, seg_7, page ) |
- IO_STATE(R_MMU_KSEG, seg_6, page ) |
- IO_STATE(R_MMU_KSEG, seg_5, page ) |
- IO_STATE(R_MMU_KSEG, seg_4, page ) |
- IO_STATE(R_MMU_KSEG, seg_3, page ) |
- IO_STATE(R_MMU_KSEG, seg_2, page ) |
- IO_STATE(R_MMU_KSEG, seg_1, page ) |
- IO_STATE(R_MMU_KSEG, seg_0, page ) );
-
- *R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
- IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
- IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );
+#endif
+ IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
+ IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );
*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
- IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
+ IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
-#endif
+#endif
*R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );
@@ -382,8 +326,6 @@
* to a couple of allocated pages
*/
- empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
- empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
@@ -401,6 +343,7 @@
*/
free_area_init_node(0, 0, 0, zones_size, PAGE_OFFSET, 0);
+
}
extern unsigned long loops_per_jiffy; /* init/main.c */
@@ -451,7 +394,7 @@
datasize >> 10,
initsize >> 10
);
-
+
/* HACK alert - calculate a loops_per_usec for asm/delay.h here
* since this is called just after calibrate_delay in init/main.c
* but before places which use udelay. cannot be in time.c since
@@ -463,15 +406,53 @@
return;
}
+/* Initialize remaps of some I/O-ports. This is designed to be callable
+ * multiple times from the drivers' init-sections, because we don't know
+ * beforehand which driver will get initialized first.
+ */
+
+void
+init_ioremap(void)
+{
+
+ /* Give the external I/O-port addresses their values */
+
+ static int initialized = 0;
+
+ if( !initialized ) {
+ initialized++;
+
+#ifdef CONFIG_CRIS_LOW_MAP
+ /* Simply a linear map (see the KSEG map above in paging_init) */
+ port_cse1_addr = (volatile unsigned long *)(MEM_CSE1_START |
+ MEM_NON_CACHEABLE);
+ port_csp0_addr = (volatile unsigned long *)(MEM_CSP0_START |
+ MEM_NON_CACHEABLE);
+ port_csp4_addr = (volatile unsigned long *)(MEM_CSP4_START |
+ MEM_NON_CACHEABLE);
+#else
+ /* Note that nothing blows up just because we do this remapping;
+ * it's ok even if the ports are not used or connected
+ * to anything (or connected to a non-I/O thing) */
+ port_cse1_addr = (volatile unsigned long *)
+ ioremap((unsigned long)(MEM_CSE1_START |
+ MEM_NON_CACHEABLE), 16);
+ port_csp0_addr = (volatile unsigned long *)
+ ioremap((unsigned long)(MEM_CSP0_START |
+ MEM_NON_CACHEABLE), 16);
+ port_csp4_addr = (volatile unsigned long *)
+ ioremap((unsigned long)(MEM_CSP4_START |
+ MEM_NON_CACHEABLE), 16);
+#endif
+ }
+}
+
+
/* free the pages occupied by initialization code */
-void free_initmem(void)
+void
+free_initmem(void)
{
-#if 0
- /* currently this is a bad idea since the cramfs image is catted onto
- * the vmlinux image, and the end of that image is not page-padded so
- * part of the cramfs image will be freed here
- */
unsigned long addr;
addr = (unsigned long)(&__init_begin);
@@ -483,10 +464,10 @@
}
printk ("Freeing unused kernel memory: %dk freed\n",
(&__init_end - &__init_begin) >> 10);
-#endif
}
-void si_meminfo(struct sysinfo *val)
+void
+si_meminfo(struct sysinfo *val)
{
int i;
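
The init_ioremap() function added above relies on a simple run-once guard so
that whichever driver initializes first triggers the setup and later callers
fall straight through. The pattern in isolation (the function name here is
illustrative, not from the commit):

/* Run-once init guard, as used by init_ioremap(): safe to call from
 * every driver's init section; only the first call does the work.
 */
void example_setup_io(void)
{
        static int initialized = 0;     /* survives across calls */

        if (!initialized) {
                initialized++;
                /* one-time ioremap() calls go here */
        }
}
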
Index: tlb.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/cris/mm/tlb.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- tlb.c 25 Feb 2001 23:15:23 -0000 1.1.1.1
+++ tlb.c 9 Apr 2002 17:03:16 -0000 1.2
@@ -1,7 +1,7 @@
/*
* linux/arch/cris/mm/tlb.c
*
- * Copyright (C) 2000 Axis Communications AB
+ * Copyright (C) 2000, 2001 Axis Communications AB
*
* Authors: Bjorn Wesen (bj...@ax...)
*
@@ -21,6 +21,7 @@
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/svinto.h>
+#include <asm/mmu_context.h>
#define D(x)
@@ -39,6 +40,10 @@
*
* The last page_id is never running - it is used as an invalid page_id
* so we can make TLB entries that will never match.
+ *
+ * Notice that we need to make the flushes atomic, otherwise an interrupt
+ * handler that uses vmalloced memory might cause a TLB load in the middle
+ * of a flush, causing inconsistent TLB state.
*/
struct mm_struct *page_id_map[NUM_PAGEID];
@@ -48,17 +53,18 @@
/* invalidate all TLB entries */
void
-flush_tlb_all()
+flush_tlb_all(void)
{
int i;
+ unsigned long flags;
/* the vpn of i & 0xf is so we dont write similar TLB entries
* in the same 4-way entry group. details..
*/
+ save_and_cli(flags); /* flush needs to be atomic */
for(i = 0; i < NUM_TLB_ENTRIES; i++) {
*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
-
*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
@@ -68,6 +74,7 @@
IO_STATE(R_TLB_LO, we, no ) |
IO_FIELD(R_TLB_LO, pfn, 0 ) );
}
+ restore_flags(flags);
D(printk("tlb: flushed all\n"));
}
@@ -78,6 +85,7 @@
{
int i;
int page_id = mm->context;
+ unsigned long flags;
D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));
@@ -89,6 +97,7 @@
* global pages. is it worth the extra I/O ?
*/
+ save_and_cli(flags); /* flush needs to be atomic */
for(i = 0; i < NUM_TLB_ENTRIES; i++) {
*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
@@ -102,6 +111,7 @@
IO_FIELD(R_TLB_LO, pfn, 0 ) );
}
}
+ restore_flags(flags);
}
/* invalidate a single page */
@@ -113,6 +123,7 @@
struct mm_struct *mm = vma->vm_mm;
int page_id = mm->context;
int i;
+ unsigned long flags;
D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm));
@@ -125,6 +136,7 @@
* and the virtual address requested
*/
+ save_and_cli(flags); /* flush needs to be atomic */
for(i = 0; i < NUM_TLB_ENTRIES; i++) {
unsigned long tlb_hi;
*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
@@ -141,6 +153,7 @@
IO_FIELD(R_TLB_LO, pfn, 0 ) );
}
}
+ restore_flags(flags);
}
/* invalidate a page range */
@@ -152,6 +165,7 @@
{
int page_id = mm->context;
int i;
+ unsigned long flags;
D(printk("tlb: flush range %p<->%p in context %d (%p)\n",
start, end, page_id, mm));
@@ -166,6 +180,7 @@
* and the virtual address range
*/
+ save_and_cli(flags); /* flush needs to be atomic */
for(i = 0; i < NUM_TLB_ENTRIES; i++) {
unsigned long tlb_hi, vpn;
*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
@@ -183,8 +198,30 @@
IO_FIELD(R_TLB_LO, pfn, 0 ) );
}
}
+ restore_flags(flags);
}
+/* dump the entire TLB for debug purposes */
+
+#if 0
+void
+dump_tlb_all(void)
+{
+ int i;
+ unsigned long flags;
+
+ printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");
+
+ save_and_cli(flags);
+ for(i = 0; i < NUM_TLB_ENTRIES; i++) {
+ *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
+ printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
+ i, *R_TLB_HI, *R_TLB_LO);
+ }
+ restore_flags(flags);
+}
+#endif
+
/*
* Initialize the context related info for a new mm_struct
* instance.
@@ -227,8 +264,7 @@
map_replace_ptr++;
if(map_replace_ptr == INVALID_PAGEID)
- map_replace_ptr = 0; /* wrap around */
-
+ map_replace_ptr = 0; /* wrap around */
}
/*
@@ -252,6 +288,15 @@
get_mmu_context(next);
+ /* remember the pgd for the fault handlers
+ * this is similar to the pgd register in some other CPU's.
+ * we need our own copy of it because current and active_mm
+ * might be invalid at points where we still need to dereference
+ * the pgd.
+ */
+
+ current_pgd = next->pgd;
+
/* switch context in the MMU */
D(printk("switching mmu_context to %d (%p)\n", next->context, next));
@@ -288,7 +333,7 @@
/* clear the page_id map */
- for(i = 0; i < 64; i++)
+ for (i = 1; i < sizeof (page_id_map) / sizeof (page_id_map[0]); i++)
page_id_map[i] = NULL;
/* invalidate the entire TLB */
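
All of the save_and_cli()/restore_flags() brackets added in tlb.c follow the
same shape: disable interrupts around the R_TLB_SELECT/R_TLB_HI/R_TLB_LO
write sequence so an interrupt handler touching vmalloced memory cannot
trigger a TLB load halfway through an invalidation. A minimal sketch of the
bracket (the helper name is illustrative, not from the commit):

/* Invalidate one TLB entry atomically, using the same register
 * sequence as the flush routines in this diff.
 */
void example_invalidate_entry(int i)
{
        unsigned long flags;

        save_and_cli(flags);    /* flush needs to be atomic */
        *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
        restore_flags(flags);
}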
|