From: Andy P. <at...@us...> - 2002-04-09 17:07:28
Update of /cvsroot/linux-vax/kernel-2.4/arch/sh/mm
In directory usw-pr-cvs1:/tmp/cvs-serv15874/sh/mm
Modified Files:
Makefile extable.c fault.c init.c ioremap.c
Added Files:
__clear_user_page-sh4.S __copy_user_page-sh4.S cache-sh3.c
cache-sh4.c clear_page.S copy_page.S
Removed Files:
cache.c
Log Message:
synch 2.4.15 commit 30
--- NEW FILE ---
/* $Id: __clear_user_page-sh4.S,v 1.1 2002/04/09 17:07:20 atp Exp $
*
* __clear_user_page implementation for SuperH
*
* Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
*
*/
/*
* __clear_user_page
* @to: P3 address (with the same cache color as the user mapping)
* @orig_to: P1 address
*
* void __clear_user_page(void *to, void *orig_to)
*/
/*
* r0 --- scratch
* r4 --- to
* r5 --- orig_to
* r6 --- to + 4096
*/
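/*
 * How the loop works: movca.l allocates a cache line for the write
 * without first fetching it from memory, ocbi discards the stale line
 * still visible through the kernel's P1 alias (@orig_to), and ocbwb
 * writes the completed line back so the zeroes reach memory and are
 * seen through every alias. (A gloss on the SH-4 instruction
 * semantics; see the caller in cache-sh4.c for how @to/@orig_to are
 * chosen.)
 */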
#include <linux/linkage.h>
ENTRY(__clear_user_page)
        mov     r4,r6
        mov.w   .L4096,r0
        add     r0,r6
        mov     #0,r0
        !
1:      ocbi    @r5
        add     #32,r5
        movca.l r0,@r4
        mov     r4,r1
        add     #32,r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        add     #28,r4
        cmp/eq  r6,r4
        bf/s    1b
        ocbwb   @r1
        !
        rts
        nop
.L4096: .word   4096
--- NEW FILE ---
/* $Id: __copy_user_page-sh4.S,v 1.1 2002/04/09 17:07:20 atp Exp $
*
* __copy_user_page implementation for SuperH
*
* Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
*
*/
/*
* __copy_user_page
* @to: P3 address (with the same cache color as the user mapping)
* @from: P1 address
* @orig_to: P1 address
*
* void __copy_user_page(void *to, void *from, void *orig_to)
*/
/*
* r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
* r8 --- from + 4096
* r9 --- orig_to
* r10 --- to
* r11 --- from
*/
#include <linux/linkage.h>
ENTRY(__copy_user_page)
        mov.l   r8,@-r15
        mov.l   r9,@-r15
        mov.l   r10,@-r15
        mov.l   r11,@-r15
        mov     r4,r10
        mov     r5,r11
        mov     r6,r9
        mov     r5,r8
        mov.w   .L4096,r0
        add     r0,r8
        !
1:      ocbi    @r9
        add     #32,r9
        mov.l   @r11+,r0
        mov.l   @r11+,r1
        mov.l   @r11+,r2
        mov.l   @r11+,r3
        mov.l   @r11+,r4
        mov.l   @r11+,r5
        mov.l   @r11+,r6
        mov.l   @r11+,r7
        movca.l r0,@r10
        mov     r10,r0
        add     #32,r10
        mov.l   r7,@-r10
        mov.l   r6,@-r10
        mov.l   r5,@-r10
        mov.l   r4,@-r10
        mov.l   r3,@-r10
        mov.l   r2,@-r10
        mov.l   r1,@-r10
        ocbwb   @r0
        cmp/eq  r11,r8
        bf/s    1b
        add     #28,r10
        !
        mov.l   @r15+,r11
        mov.l   @r15+,r10
        mov.l   @r15+,r9
        mov.l   @r15+,r8
        rts
        nop
.L4096: .word   4096
--- NEW FILE ---
/* $Id: cache-sh3.c,v 1.1 2002/04/09 17:07:20 atp Exp $
*
* linux/arch/sh/mm/cache-sh3.c
*
* Copyright (C) 1999, 2000 Niibe Yutaka
*
*/
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#define CCR 0xffffffec /* Address of Cache Control Register */
#define CCR_CACHE_CE 0x01 /* Cache Enable */
#define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */
#define CCR_CACHE_CB 0x04 /* Write-Back (for P1) (else writethrough) */
#define CCR_CACHE_CF 0x08 /* Cache Flush */
#define CCR_CACHE_RA 0x20 /* RAM mode */
#define CCR_CACHE_VAL (CCR_CACHE_CB|CCR_CACHE_CE) /* 8k-byte cache, P1-wb, enable */
#define CCR_CACHE_INIT (CCR_CACHE_CF|CCR_CACHE_VAL) /* 8k-byte cache, CF, P1-wb, enable */
#define CACHE_OC_ADDRESS_ARRAY 0xf0000000
#define CACHE_VALID 1
#define CACHE_UPDATED 2
#define CACHE_PHYSADDR_MASK 0x1ffffc00
/* The 7709A/7729 have a 16K cache (256 entries), while the 7702 has
   only 2K (direct-mapped). The 7702 is not supported (yet). */
struct _cache_system_info {
        int way_shift;
        int entry_mask;
        int num_entries;
};
/* The BSS is cleared after this variable has been set, so it must not
   be placed in the BSS section. Giving it an initializer places it in
   the data section. */
static struct _cache_system_info cache_system_info = {0,};
#define CACHE_OC_WAY_SHIFT (cache_system_info.way_shift)
#define CACHE_OC_ENTRY_SHIFT 4
#define CACHE_OC_ENTRY_MASK (cache_system_info.entry_mask)
#define CACHE_OC_NUM_ENTRIES (cache_system_info.num_entries)
#define CACHE_OC_NUM_WAYS 4
#define CACHE_OC_ASSOC_BIT (1<<3)
/*
 * Write back all of the cache.
 *
 * The SH-3 cache is unified, so writing back the operand side of the
 * address array covers everything; there is no separate instruction
 * cache holding "updated" data.
 *
 * Assumes that this is called with interrupts disabled and from P2.
 * Should be an inline function.
 */
static inline void cache_wback_all(void)
{
        unsigned long addr, data, i, j;

        for (i = 0; i < CACHE_OC_NUM_ENTRIES; i++) {
                for (j = 0; j < CACHE_OC_NUM_WAYS; j++) {
                        addr = CACHE_OC_ADDRESS_ARRAY |
                               (j << CACHE_OC_WAY_SHIFT) |
                               (i << CACHE_OC_ENTRY_SHIFT);
                        data = ctrl_inl(addr);
                        if ((data & (CACHE_UPDATED|CACHE_VALID))
                            == (CACHE_UPDATED|CACHE_VALID))
                                ctrl_outl(data & ~CACHE_UPDATED, addr);
                }
        }
}
static void __init
detect_cpu_and_cache_system(void)
{
        unsigned long addr0, addr1, data0, data1, data2, data3;

        jump_to_P2();
        /*
         * Check if the entry shadows or not.
         * When shadowed, it's a 128-entry system.
         * Otherwise, it's a 256-entry system.
         */
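        /*
         * Why this works (inferred from the address-array layout): on
         * the smaller 128-entry cache the address array decodes fewer
         * index/way bits, so the two probe offsets below (1 << 12 and
         * 3 << 12, which differ only in bit 13) select the same cell
         * and a write through one is visible through the other; on the
         * 256-entry cache bit 13 selects a different way, so the two
         * probes hit distinct cells.
         */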
        addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
        addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);

        /* First, write back & invalidate */
        data0 = ctrl_inl(addr0);
        ctrl_outl(data0 & ~(CACHE_VALID|CACHE_UPDATED), addr0);
        data1 = ctrl_inl(addr1);
        ctrl_outl(data1 & ~(CACHE_VALID|CACHE_UPDATED), addr1);

        /* Next, check if there's shadow or not */
        data0 = ctrl_inl(addr0);
        data0 ^= CACHE_VALID;
        ctrl_outl(data0, addr0);
        data1 = ctrl_inl(addr1);
        data2 = data1 ^ CACHE_VALID;
        ctrl_outl(data2, addr1);
        data3 = ctrl_inl(addr0);

        /* Lastly, invalidate them. */
        ctrl_outl(data0 & ~CACHE_VALID, addr0);
        ctrl_outl(data2 & ~CACHE_VALID, addr1);
        back_to_P1();

        if (data0 == data1 && data2 == data3) { /* Shadow */
                cache_system_info.way_shift = 11;
                cache_system_info.entry_mask = 0x7f0;
                cache_system_info.num_entries = 128;
                cpu_data->type = CPU_SH7708;
        } else {                                /* 7709A or 7729 */
                cache_system_info.way_shift = 12;
                cache_system_info.entry_mask = 0xff0;
                cache_system_info.num_entries = 256;
                cpu_data->type = CPU_SH7729;
        }
}
void __init cache_init(void)
{
        unsigned long ccr;

        detect_cpu_and_cache_system();

        jump_to_P2();
        ccr = ctrl_inl(CCR);
        if (ccr & CCR_CACHE_CE)
                /*
                 * XXX: Should check RA here.
                 * If RA was 1, we only need to flush half of the caches.
                 */
                cache_wback_all();
        ctrl_outl(CCR_CACHE_INIT, CCR);
        back_to_P1();
}
/*
* Write back the dirty D-caches, but not invalidate them.
*
* Is this really worth it, or should we just alias this routine
* to __flush_purge_region too?
*
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
void __flush_wback_region(void *start, int size)
{
        unsigned long v, j;
        unsigned long begin, end;
        unsigned long flags;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
              & ~(L1_CACHE_BYTES-1);

        for (v = begin; v < end; v += L1_CACHE_BYTES) {
                for (j = 0; j < CACHE_OC_NUM_WAYS; j++) {
                        unsigned long data, addr, p;

                        p = __pa(v);
                        addr = CACHE_OC_ADDRESS_ARRAY |
                               (j << CACHE_OC_WAY_SHIFT) |
                               (v & CACHE_OC_ENTRY_MASK);
                        save_and_cli(flags);
                        data = ctrl_inl(addr);
                        if ((data & CACHE_PHYSADDR_MASK) ==
                            (p & CACHE_PHYSADDR_MASK)) {
                                data &= ~CACHE_UPDATED;
                                ctrl_outl(data, addr);
                                restore_flags(flags);
                                break;
                        }
                        restore_flags(flags);
                }
        }
}
/*
* Write back the dirty D-caches and invalidate them.
*
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
void __flush_purge_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
              & ~(L1_CACHE_BYTES-1);

        for (v = begin; v < end; v += L1_CACHE_BYTES) {
                unsigned long data, addr;

                data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
                addr = CACHE_OC_ADDRESS_ARRAY | (v & CACHE_OC_ENTRY_MASK) |
                       CACHE_OC_ASSOC_BIT;
                ctrl_outl(data, addr);
        }
}
/*
* No write back please
*
* Except I don't think there's any way to avoid the writeback. So we
* just alias it to __flush_purge_region(). dwmw2.
*/
void __flush_invalidate_region(void *start, int size)
        __attribute__((alias("__flush_purge_region")));
--- NEW FILE ---
/* $Id: cache-sh4.c,v 1.1 2002/04/09 17:07:20 atp Exp $
*
* linux/arch/sh/mm/cache-sh4.c
*
* Copyright (C) 1999, 2000 Niibe Yutaka
*
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#define CCR 0xff00001c /* Address of Cache Control Register */
#define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */
#define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/
#define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */
#define CCR_CACHE_OCI 0x0008 /* OC Invalidate */
#define CCR_CACHE_ORA 0x0020 /* OC RAM Mode */
#define CCR_CACHE_OIX 0x0080 /* OC Index Enable */
#define CCR_CACHE_ICE 0x0100 /* Instruction Cache Enable */
#define CCR_CACHE_ICI 0x0800 /* IC Invalidate */
#define CCR_CACHE_IIX 0x8000 /* IC Index Enable */
/* Default CCR setup: 8k+16k-byte cache, P1-wb, enable */
#define CCR_CACHE_VAL (CCR_CACHE_ICE|CCR_CACHE_CB|CCR_CACHE_OCE)
#define CCR_CACHE_INIT (CCR_CACHE_VAL|CCR_CACHE_OCI|CCR_CACHE_ICI)
#define CCR_CACHE_ENABLE (CCR_CACHE_OCE|CCR_CACHE_ICE)
#define CACHE_IC_ADDRESS_ARRAY 0xf0000000
#define CACHE_OC_ADDRESS_ARRAY 0xf4000000
#define CACHE_VALID 1
#define CACHE_UPDATED 2
#define CACHE_OC_WAY_SHIFT 13
#define CACHE_IC_WAY_SHIFT 13
#define CACHE_OC_ENTRY_SHIFT 5
#define CACHE_IC_ENTRY_SHIFT 5
#define CACHE_OC_ENTRY_MASK 0x3fe0
#define CACHE_OC_ENTRY_PHYS_MASK 0x0fe0
#define CACHE_IC_ENTRY_MASK 0x1fe0
#define CACHE_IC_NUM_ENTRIES 256
#define CACHE_OC_NUM_ENTRIES 512
static void __init
detect_cpu_and_cache_system(void)
{
#ifdef CONFIG_CPU_SUBTYPE_ST40STB1
        cpu_data->type = CPU_ST40STB1;
#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751)
        cpu_data->type = CPU_SH7750;
#else
#error Unknown SH4 CPU type
#endif
}
void __init cache_init(void)
{
        unsigned long ccr;

        detect_cpu_and_cache_system();

        jump_to_P2();
        ccr = ctrl_inl(CCR);
        if (ccr & CCR_CACHE_ENABLE) {
                /*
                 * XXX: Should check RA here.
                 * If RA was 1, we only need to flush half of the caches.
                 */
                unsigned long addr, data;

                for (addr = CACHE_OC_ADDRESS_ARRAY;
                     addr < (CACHE_OC_ADDRESS_ARRAY +
                             (CACHE_OC_NUM_ENTRIES << CACHE_OC_ENTRY_SHIFT));
                     addr += (1 << CACHE_OC_ENTRY_SHIFT)) {
                        data = ctrl_inl(addr);
                        if ((data & (CACHE_UPDATED|CACHE_VALID))
                            == (CACHE_UPDATED|CACHE_VALID))
                                ctrl_outl(data & ~CACHE_UPDATED, addr);
                }
        }
        ctrl_outl(CCR_CACHE_INIT, CCR);
        back_to_P1();
}
/*
* SH-4 has virtually indexed and physically tagged cache.
*/
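/*
 * With the 16KB operand cache and 4KB pages assumed by the constants
 * above, cache index bits 12-13 lie above the page offset, so a single
 * physical page can be cached under four different "colors".
 * p3_cache_init() therefore maps four pages at P3SEG, one window per
 * color, and p3map_sem[] serializes the use of each window (see
 * clear_user_page()/copy_user_page() below).
 */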
static struct semaphore p3map_sem[4];
void __init p3_cache_init(void)
{
        /* In ioremap.c */
        extern int remap_area_pages(unsigned long address,
                                    unsigned long phys_addr,
                                    unsigned long size, unsigned long flags);

        if (remap_area_pages(P3SEG, 0, PAGE_SIZE*4, _PAGE_CACHABLE))
                panic("p3_cache_init failed.");

        sema_init(&p3map_sem[0], 1);
        sema_init(&p3map_sem[1], 1);
        sema_init(&p3map_sem[2], 1);
        sema_init(&p3map_sem[3], 1);
}
/*
* Write back the dirty D-caches, but not invalidate them.
*
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
void __flush_wback_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
              & ~(L1_CACHE_BYTES-1);
        for (v = begin; v < end; v += L1_CACHE_BYTES) {
                asm volatile("ocbwb %0"
                             : /* no output */
                             : "m" (__m(v)));
        }
}
/*
* Write back the dirty D-caches and invalidate them.
*
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
void __flush_purge_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
              & ~(L1_CACHE_BYTES-1);
        for (v = begin; v < end; v += L1_CACHE_BYTES) {
                asm volatile("ocbp %0"
                             : /* no output */
                             : "m" (__m(v)));
        }
}
/*
* No write back please
*/
void __flush_invalidate_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
              & ~(L1_CACHE_BYTES-1);
        for (v = begin; v < end; v += L1_CACHE_BYTES) {
                asm volatile("ocbi %0"
                             : /* no output */
                             : "m" (__m(v)));
        }
}
/*
* Write back the range of D-cache, and purge the I-cache.
*
* Called from kernel/module.c:sys_init_module and routine for a.out format.
*/
void flush_icache_range(unsigned long start, unsigned long end)
{
        flush_cache_all();
}
/*
* Write back the D-cache and purge the I-cache for signal trampoline.
*/
void flush_cache_sigtramp(unsigned long addr)
{
        unsigned long v, index;
        unsigned long flags;

        v = addr & ~(L1_CACHE_BYTES-1);
        asm volatile("ocbwb %0"
                     : /* no output */
                     : "m" (__m(v)));

        index = CACHE_IC_ADDRESS_ARRAY | (v & CACHE_IC_ENTRY_MASK);
        save_and_cli(flags);
        jump_to_P2();
        ctrl_outl(0, index);    /* Clear out Valid-bit */
        back_to_P1();
        restore_flags(flags);
}
/*
* Writeback&Invalidate the D-cache of the page
*/
static void __flush_dcache_page(unsigned long phys)
{
        unsigned long addr, data;
        unsigned long flags;

        phys |= CACHE_VALID;

        save_and_cli(flags);
        jump_to_P2();

        /* Loop all the D-cache */
        for (addr = CACHE_OC_ADDRESS_ARRAY;
             addr < (CACHE_OC_ADDRESS_ARRAY
                     + (CACHE_OC_NUM_ENTRIES << CACHE_OC_ENTRY_SHIFT));
             addr += (1 << CACHE_OC_ENTRY_SHIFT)) {
                data = ctrl_inl(addr) & (0x1ffff000|CACHE_VALID);
                if (data == phys)
                        ctrl_outl(0, addr);
        }

#if 0 /* DEBUG DEBUG */
        /* Loop all the I-cache */
        for (addr = CACHE_IC_ADDRESS_ARRAY;
             addr < (CACHE_IC_ADDRESS_ARRAY
                     + (CACHE_IC_NUM_ENTRIES << CACHE_IC_ENTRY_SHIFT));
             addr += (1 << CACHE_IC_ENTRY_SHIFT)) {
                data = ctrl_inl(addr) & (0x1ffff000|CACHE_VALID);
                if (data == phys) {
                        printk(KERN_INFO "__flush_dcache_page: I-cache entry found\n");
                        ctrl_outl(0, addr);
                }
        }
#endif

        back_to_P1();
        restore_flags(flags);
}
/*
* Write back & invalidate the D-cache of the page.
* (To avoid "alias" issues)
*/
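/*
 * PG_mapped is set the first time a page is mapped into user space:
 * see clear_user_page()/copy_user_page() below and update_mmu_cache()
 * in arch/sh/mm/fault.c. A page that has never been user-mapped can
 * have no aliased lines, so the expensive address-array scan above is
 * skipped for it.
 */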
void flush_dcache_page(struct page *page)
{
        if (test_bit(PG_mapped, &page->flags))
                __flush_dcache_page(PHYSADDR(page_address(page)));
}
void flush_cache_all(void)
{
        extern unsigned long empty_zero_page[1024];
        unsigned long flags;
        unsigned long addr;

        save_and_cli(flags);

        /* Prefetch the data to write back D-cache */
        for (addr = (unsigned long)empty_zero_page;
             addr < (unsigned long)empty_zero_page + 1024*16;
             addr += L1_CACHE_BYTES)
                asm volatile("pref @%0"::"r" (addr));

        jump_to_P2();
        /* Flush D-cache/I-cache */
        ctrl_outl(CCR_CACHE_INIT, CCR);
        back_to_P1();
        restore_flags(flags);
}
void flush_cache_mm(struct mm_struct *mm)
{
        /* Is there any good way? */
        /* XXX: possibly call flush_cache_range for each vm area */
        flush_cache_all();
}
/*
* Write back and invalidate D-caches.
*
* START, END: Virtual Address (U0 address)
*
* NOTE: We need to flush the _physical_ page entry.
* Flushing the cache lines for U0 only isn't enough.
* We need to flush for P1 too, which may contain aliases.
*/
void flush_cache_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end)
{
        /*
         * We could call flush_cache_page for each page in this range,
         * but that is not efficient (it would scan the caches every
         * time).
         *
         * We can't use the A-bit magic either, as there may be no
         * valid entry in the TLB.
         */
        flush_cache_all();
}
/*
* Write back and invalidate I/D-caches for the page.
*
* ADDR: Virtual Address (U0 address)
*/
void flush_cache_page(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *dir;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        unsigned long phys, addr, data;
        unsigned long flags;

        dir = pgd_offset(vma->vm_mm, address);
        pmd = pmd_offset(dir, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return;
        pte = pte_offset(pmd, address);
        entry = *pte;
        if (pte_none(entry) || !pte_present(entry))
                return;

        phys = pte_val(entry) & PTE_PHYS_MASK;
        phys |= CACHE_VALID;

        save_and_cli(flags);
        jump_to_P2();

        /* We only need to flush D-cache when we have alias */
        if ((address ^ phys) & CACHE_ALIAS) {
                /* Loop 4K of the D-cache */
                for (addr = CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS);
                     addr < (CACHE_OC_ADDRESS_ARRAY + (address & CACHE_ALIAS)
                             + (CACHE_OC_NUM_ENTRIES/4 << CACHE_OC_ENTRY_SHIFT));
                     addr += (1 << CACHE_OC_ENTRY_SHIFT)) {
                        data = ctrl_inl(addr) & (0x1ffff000|CACHE_VALID);
                        if (data == phys)
                                ctrl_outl(0, addr);
                }
                /* Loop another 4K of the D-cache */
                for (addr = CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS);
                     addr < (CACHE_OC_ADDRESS_ARRAY + (phys & CACHE_ALIAS)
                             + (CACHE_OC_NUM_ENTRIES/4 << CACHE_OC_ENTRY_SHIFT));
                     addr += (1 << CACHE_OC_ENTRY_SHIFT)) {
                        data = ctrl_inl(addr) & (0x1ffff000|CACHE_VALID);
                        if (data == phys)
                                ctrl_outl(0, addr);
                }
        }

        if (vma->vm_flags & VM_EXEC)
                /* Loop 4K of the I-cache */
                for (addr = CACHE_IC_ADDRESS_ARRAY | (address & 0x1000);
                     addr < ((CACHE_IC_ADDRESS_ARRAY | (address & 0x1000))
                             + (CACHE_IC_NUM_ENTRIES/2 << CACHE_IC_ENTRY_SHIFT));
                     addr += (1 << CACHE_IC_ENTRY_SHIFT)) {
                        data = ctrl_inl(addr) & (0x1ffff000|CACHE_VALID);
                        if (data == phys)
                                ctrl_outl(0, addr);
                }

        back_to_P1();
        restore_flags(flags);
}
/*
* clear_user_page
* @to: P1 address
* @address: U0 address to be mapped
*/
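/*
 * If the kernel's P1 view of the page has the same cache color as the
 * user address (the CACHE_ALIAS test below), a plain clear_page() is
 * safe. Otherwise the page is temporarily mapped at P3SEG with the
 * user's color and cleared through that window, so the dirty lines
 * land in the cache sets the user mapping will actually hit.
 * copy_user_page() below uses the same trick.
 */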
void clear_user_page(void *to, unsigned long address)
{
        struct page *page = virt_to_page(to);

        __set_bit(PG_mapped, &page->flags);
        if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
                clear_page(to);
        else {
                pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
                                           _PAGE_RW | _PAGE_CACHABLE |
                                           _PAGE_DIRTY | _PAGE_ACCESSED |
                                           _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
                unsigned long phys_addr = PHYSADDR(to);
                unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
                pgd_t *dir = pgd_offset_k(p3_addr);
                pmd_t *pmd = pmd_offset(dir, p3_addr);
                pte_t *pte = pte_offset(pmd, p3_addr);
                pte_t entry;
                unsigned long flags;

                entry = mk_pte_phys(phys_addr, pgprot);
                down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
                set_pte(pte, entry);
                save_and_cli(flags);
                __flush_tlb_page(get_asid(), p3_addr);
                restore_flags(flags);
                update_mmu_cache(NULL, p3_addr, entry);
                __clear_user_page((void *)p3_addr, to);
                pte_clear(pte);
                up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
        }
}
/*
* copy_user_page
* @to: P1 address
* @from: P1 address
* @address: U0 address to be mapped
*/
void copy_user_page(void *to, void *from, unsigned long address)
{
        struct page *page = virt_to_page(to);

        __set_bit(PG_mapped, &page->flags);
        if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
                copy_page(to, from);
        else {
                pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
                                           _PAGE_RW | _PAGE_CACHABLE |
                                           _PAGE_DIRTY | _PAGE_ACCESSED |
                                           _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
                unsigned long phys_addr = PHYSADDR(to);
                unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
                pgd_t *dir = pgd_offset_k(p3_addr);
                pmd_t *pmd = pmd_offset(dir, p3_addr);
                pte_t *pte = pte_offset(pmd, p3_addr);
                pte_t entry;
                unsigned long flags;

                entry = mk_pte_phys(phys_addr, pgprot);
                down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
                set_pte(pte, entry);
                save_and_cli(flags);
                __flush_tlb_page(get_asid(), p3_addr);
                restore_flags(flags);
                update_mmu_cache(NULL, p3_addr, entry);
                __copy_user_page((void *)p3_addr, from, to);
                pte_clear(pte);
                up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
        }
}
--- NEW FILE ---
/* $Id: clear_page.S,v 1.1 2002/04/09 17:07:20 atp Exp $
*
* clear_page implementation for SuperH
*
* Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
*
*/
/*
* clear_page
* @to: P1 address
*
* void clear_page(void *to)
*/
/*
* r0 --- scratch
* r4 --- to
* r5 --- to + 4096
*/
#include <linux/linkage.h>
ENTRY(clear_page)
        mov     r4,r5
        mov.w   .Llimit,r0
        add     r0,r5
        mov     #0,r0
        !
1:
#if defined(__sh3__)
        mov.l   r0,@r4
#elif defined(__SH4__)
        movca.l r0,@r4
        mov     r4,r1
#endif
        add     #32,r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
#if defined(__SH4__)
        ocbwb   @r1
#endif
        cmp/eq  r5,r4
        bf/s    1b
        add     #28,r4
        !
        rts
        nop
.Llimit:        .word   (4096-28)
--- NEW FILE ---
/* $Id: copy_page.S,v 1.1 2002/04/09 17:07:20 atp Exp $
*
* copy_page implementation for SuperH
*
* Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
*
*/
/*
* copy_page
* @to: P1 address
* @from: P1 address
*
* void copy_page(void *to, void *from)
*/
/*
* r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
* r8 --- from + 4096
* r9 --- not used
* r10 --- to
* r11 --- from
*/
#include <linux/linkage.h>
ENTRY(copy_page)
        mov.l   r8,@-r15
        mov.l   r10,@-r15
        mov.l   r11,@-r15
        mov     r4,r10
        mov     r5,r11
        mov     r5,r8
        mov.w   .L4096,r0
        add     r0,r8
        !
1:      mov.l   @r11+,r0
        mov.l   @r11+,r1
        mov.l   @r11+,r2
        mov.l   @r11+,r3
        mov.l   @r11+,r4
        mov.l   @r11+,r5
        mov.l   @r11+,r6
        mov.l   @r11+,r7
#if defined(__sh3__)
        mov.l   r0,@r10
#elif defined(__SH4__)
        movca.l r0,@r10
        mov     r10,r0
#endif
        add     #32,r10
        mov.l   r7,@-r10
        mov.l   r6,@-r10
        mov.l   r5,@-r10
        mov.l   r4,@-r10
        mov.l   r3,@-r10
        mov.l   r2,@-r10
        mov.l   r1,@-r10
#if defined(__SH4__)
        ocbwb   @r0
#endif
        cmp/eq  r11,r8
        bf/s    1b
        add     #28,r10
        !
        mov.l   @r15+,r11
        mov.l   @r15+,r10
        mov.l   @r15+,r8
        rts
        nop
.L4096: .word   4096
Index: Makefile
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/sh/mm/Makefile,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- Makefile 14 Jan 2001 19:48:50 -0000 1.1.1.1
+++ Makefile 9 Apr 2002 17:07:20 -0000 1.2
@@ -8,6 +8,11 @@
# Note 2! The CFLAGS definition is now in the main makefile...
O_TARGET := mm.o
-obj-y := init.o fault.o extable.o cache.o # ioremap.o
+obj-y := init.o fault.o extable.o clear_page.o copy_page.o
+
+obj-$(CONFIG_CPU_SH3) += cache-sh3.o
+obj-$(CONFIG_CPU_SH4) += cache-sh4.o __clear_user_page-sh4.o __copy_user_page-sh4.o ioremap.o
+
+USE_STANDARD_AS_RULE := true
include $(TOPDIR)/Rules.make
Index: extable.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/sh/mm/extable.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- extable.c 14 Jan 2001 19:48:50 -0000 1.1.1.1
+++ extable.c 9 Apr 2002 17:07:20 -0000 1.2
@@ -46,7 +46,7 @@
/* The kernel is the last "module" -- no need to treat it special. */
struct module *mp;
for (mp = module_list; mp != NULL; mp = mp->next) {
- if (mp->ex_table_start == NULL)
+ if (mp->ex_table_start == NULL || !(mp->flags&(MOD_RUNNING|MOD_INITIALIZING)))
continue;
ret = search_one_table(mp->ex_table_start,
mp->ex_table_end - 1, addr);
Index: fault.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/sh/mm/fault.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- fault.c 14 Jan 2001 19:48:52 -0000 1.1.1.1
+++ fault.c 9 Apr 2002 17:07:20 -0000 1.2
@@ -28,10 +28,6 @@
#include <asm/mmu_context.h>
extern void die(const char *,struct pt_regs *,long);
-static void __flush_tlb_page(unsigned long asid, unsigned long page);
-#if defined(__SH4__)
-static void __flush_tlb_phys(unsigned long phys);
-#endif
/*
* Ugly, ugly, but the goto's result in better assembly..
@@ -109,7 +105,7 @@
if (in_interrupt() || !mm)
goto no_context;
- down(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
@@ -138,6 +134,7 @@
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
+survive:
switch (handle_mm_fault(mm, vma, address, writeaccess)) {
case 1:
tsk->min_flt++;
@@ -151,7 +148,7 @@
goto out_of_memory;
}
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
return;
/*
@@ -159,7 +156,7 @@
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
if (user_mode(regs)) {
tsk->thread.address = address;
@@ -208,14 +205,20 @@
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
+ if (current->pid == 1) {
+ current->policy |= SCHED_YIELD;
+ schedule();
+ down_read(&mm->mmap_sem);
+ goto survive;
+ }
printk("VM: killing process %s\n", tsk->comm);
if (user_mode(regs))
do_exit(SIGKILL);
goto no_context;
do_sigbus:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
@@ -242,8 +245,10 @@
pte_t *pte;
pte_t entry;
- if (address >= VMALLOC_START && address < VMALLOC_END)
+ if (address >= P3SEG && address < P4SEG)
dir = pgd_offset_k(address);
+ else if (address >= TASK_SIZE)
+ return 1;
else
dir = pgd_offset(current->mm, address);
@@ -257,7 +262,7 @@
}
pte = pte_offset(pmd, address);
entry = *pte;
- if (pte_none(entry) || !pte_present(entry)
+ if (pte_none(entry) || pte_not_present(entry)
|| (writeaccess && !pte_write(entry)))
return 1;
@@ -281,37 +286,35 @@
{
unsigned long flags;
unsigned long pteval;
- unsigned long pteaddr;
+ unsigned long vpn;
+#if defined(__SH4__)
+ struct page *page;
unsigned long ptea;
+#endif
- save_and_cli(flags);
+ /* Ptrace may call this routine. */
+ if (vma && current->active_mm != vma->vm_mm)
+ return;
#if defined(__SH4__)
- if (pte_shared(pte)) {
- struct page *pg;
-
- pteval = pte_val(pte);
- pteval &= PAGE_MASK; /* Physicall page address */
- __flush_tlb_phys(pteval);
- pg = virt_to_page(__va(pteval));
- flush_dcache_page(pg);
+ page = pte_page(pte);
+ if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
+ unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+ __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
+ __set_bit(PG_mapped, &page->flags);
}
#endif
- /* Ptrace may call this routine. */
- if (vma && current->active_mm != vma->vm_mm) {
- restore_flags(flags);
- return;
- }
+ save_and_cli(flags);
/* Set PTEH register */
- pteaddr = (address & MMU_VPN_MASK) | get_asid();
- ctrl_outl(pteaddr, MMU_PTEH);
+ vpn = (address & MMU_VPN_MASK) | get_asid();
+ ctrl_outl(vpn, MMU_PTEH);
- /* Set PTEA register */
- /* TODO: make this look less hacky */
pteval = pte_val(pte);
#if defined(__SH4__)
+ /* Set PTEA register */
+ /* TODO: make this look less hacky */
ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1);
ctrl_outl(ptea, MMU_PTEA);
#endif
@@ -326,7 +329,7 @@
restore_flags(flags);
}
-static void __flush_tlb_page(unsigned long asid, unsigned long page)
+void __flush_tlb_page(unsigned long asid, unsigned long page)
{
unsigned long addr, data;
@@ -341,40 +344,13 @@
data = (page & 0xfffe0000) | asid; /* VALID bit is off */
ctrl_outl(data, addr);
#elif defined(__SH4__)
- jump_to_P2();
addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
data = page | asid; /* VALID bit is off */
+ jump_to_P2();
ctrl_outl(data, addr);
back_to_P1();
#endif
}
-
-#if defined(__SH4__)
-static void __flush_tlb_phys(unsigned long phys)
-{
- int i;
- unsigned long addr, data;
-
- jump_to_P2();
- for (i = 0; i < MMU_UTLB_ENTRIES; i++) {
- addr = MMU_UTLB_DATA_ARRAY | (i<<MMU_U_ENTRY_SHIFT);
- data = ctrl_inl(addr);
- if ((data & MMU_UTLB_VALID) && (data&PAGE_MASK) == phys) {
- data &= ~MMU_UTLB_VALID;
- ctrl_outl(data, addr);
- }
- }
- for (i = 0; i < MMU_ITLB_ENTRIES; i++) {
- addr = MMU_ITLB_DATA_ARRAY | (i<<MMU_I_ENTRY_SHIFT);
- data = ctrl_inl(addr);
- if ((data & MMU_ITLB_VALID) && (data&PAGE_MASK) == phys) {
- data &= ~MMU_ITLB_VALID;
- ctrl_outl(data, addr);
- }
- }
- back_to_P1();
-}
-#endif
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
Index: init.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/sh/mm/init.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- init.c 14 Jan 2001 19:48:53 -0000 1.1.1.1
+++ init.c 9 Apr 2002 17:07:20 -0000 1.2
@@ -34,120 +34,23 @@
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
+#include <asm/tlb.h>
+
+mmu_gather_t mmu_gathers[NR_CPUS];
/*
* Cache of MMU context last used.
*/
unsigned long mmu_context_cache;
+#ifdef CONFIG_DISCONTIGMEM
+pg_data_t discontig_page_data[NR_NODES];
+bootmem_data_t discontig_node_bdata[NR_NODES];
+#endif
+
static unsigned long totalram_pages;
static unsigned long totalhigh_pages;
-extern unsigned long init_smp_mappings(unsigned long);
-
-/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving an inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-
-unsigned long empty_bad_page[1024];
-pte_t empty_bad_pte_table[PTRS_PER_PTE];
-extern unsigned long empty_zero_page[1024];
-
-static pte_t * get_bad_pte_table(void)
-{
- pte_t v;
- int i;
-
- v = pte_mkdirty(mk_pte_phys(__pa(empty_bad_page), PAGE_SHARED));
-
- for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
- empty_bad_pte_table[i] = v;
-
- return empty_bad_pte_table;
-}
-
-void __handle_bad_pmd(pmd_t *pmd)
-{
- pmd_ERROR(*pmd);
- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(get_bad_pte_table())));
-}
-
-void __handle_bad_pmd_kernel(pmd_t *pmd)
-{
- pmd_ERROR(*pmd);
- set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(get_bad_pte_table())));
-}
-
-pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
-{
- pte_t *pte;
-
- pte = (pte_t *) __get_free_page(GFP_KERNEL);
- if (pmd_none(*pmd)) {
- if (pte) {
- clear_page(pte);
- set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
- return pte + offset;
- }
- set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(get_bad_pte_table())));
- return NULL;
- }
- free_page((unsigned long)pte);
- if (pmd_bad(*pmd)) {
- __handle_bad_pmd_kernel(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + offset;
-}
-
-pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
-{
- unsigned long pte;
-
- pte = (unsigned long) __get_free_page(GFP_KERNEL);
- if (pmd_none(*pmd)) {
- if (pte) {
- clear_page((void *)pte);
- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));
- return (pte_t *)pte + offset;
- }
- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(get_bad_pte_table())));
- return NULL;
- }
- free_page(pte);
- if (pmd_bad(*pmd)) {
- __handle_bad_pmd(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + offset;
-}
-
-int do_check_pgt_cache(int low, int high)
-{
- int freed = 0;
- if (pgtable_cache_size > high) {
- do {
- if (pgd_quicklist)
- free_pgd_slow(get_pgd_fast()), freed++;
- if (pmd_quicklist)
- free_pmd_slow(get_pmd_fast()), freed++;
- if (pte_quicklist)
- free_pte_slow(get_pte_fast()), freed++;
- } while (pgtable_cache_size > low);
- }
- return freed;
-}
-
void show_mem(void)
{
int i, total = 0, reserved = 0;
@@ -170,7 +73,6 @@
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
printk("%d pages swap cached\n",cached);
- printk("%ld pages in page table cache\n",pgtable_cache_size);
show_buffers();
}
@@ -179,7 +81,7 @@
extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;
-pgd_t swapper_pg_dir[1024];
+pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* It'd be good if these lines were in the standard header file. */
#define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
@@ -199,7 +101,7 @@
/* We don't need kernel mapping as hardware support that. */
pg_dir = swapper_pg_dir;
- for (i=0; i < USER_PTRS_PER_PGD*2; i++)
+ for (i=0; i < PTRS_PER_PGD; i++)
pgd_val(pg_dir[i]) = 0;
/* Enable MMU */
@@ -225,12 +127,18 @@
zones_size[ZONE_DMA] = max_dma - start_pfn;
zones_size[ZONE_NORMAL] = low - max_dma;
}
- free_area_init_node(0, 0, 0, zones_size, __MEMORY_START, 0);
+ free_area_init_node(0, NODE_DATA(0), 0, zones_size, __MEMORY_START, 0);
+#ifdef CONFIG_DISCONTIGMEM
+ zones_size[ZONE_DMA] = __MEMORY_SIZE_2ND >> PAGE_SHIFT;
+ zones_size[ZONE_NORMAL] = 0;
+ free_area_init_node(1, NODE_DATA(1), 0, zones_size, __MEMORY_START_2ND, 0);
+#endif
}
}
void __init mem_init(void)
{
+ extern unsigned long empty_zero_page[1024];
int codesize, reservedpages, datasize, initsize;
int tmp;
@@ -239,10 +147,13 @@
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
- flush_page_to_ram(virt_to_page(empty_zero_page));
+ __flush_wback_region(empty_zero_page, PAGE_SIZE);
/* this will put all low memory onto the freelists */
- totalram_pages += free_all_bootmem();
+ totalram_pages += free_all_bootmem_node(NODE_DATA(0));
+#ifdef CONFIG_DISCONTIGMEM
+ totalram_pages += free_all_bootmem_node(NODE_DATA(1));
+#endif
reservedpages = 0;
for (tmp = 0; tmp < num_physpages; tmp++)
/*
@@ -261,6 +172,8 @@
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10);
+
+ p3_cache_init();
}
void free_initmem(void)
Index: ioremap.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/sh/mm/ioremap.c,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- ioremap.c 14 Jan 2001 19:48:53 -0000 1.1.1.1
+++ ioremap.c 9 Apr 2002 17:07:20 -0000 1.2
@@ -13,8 +13,8 @@
#include <asm/io.h>
#include <asm/pgalloc.h>
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
+static inline void remap_area_pte(pte_t * pte, unsigned long address,
+ unsigned long size, unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
@@ -25,18 +25,22 @@
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
+ if (address >= end)
+ BUG();
do {
- if (!pte_none(*pte))
+ if (!pte_none(*pte)) {
printk("remap_area_pte: page already exists\n");
+ BUG();
+ }
set_pte(pte, mk_pte_phys(phys_addr, pgprot));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pte++;
- } while (address < end);
+ } while (address && (address < end));
}
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
+static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
+ unsigned long size, unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
@@ -45,38 +49,48 @@
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
phys_addr -= address;
+ if (address >= end)
+ BUG();
do {
- pte_t * pte = pte_alloc_kernel(pmd, address);
+ pte_t * pte = pte_alloc(&init_mm, pmd, address);
if (!pte)
return -ENOMEM;
remap_area_pte(pte, address, end - address, address + phys_addr, flags);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
- } while (address < end);
+ } while (address && (address < end));
return 0;
}
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
- unsigned long size, unsigned long flags)
+int remap_area_pages(unsigned long address, unsigned long phys_addr,
+ unsigned long size, unsigned long flags)
{
+ int error;
pgd_t * dir;
unsigned long end = address + size;
phys_addr -= address;
dir = pgd_offset_k(address);
flush_cache_all();
- while (address < end) {
- pmd_t *pmd = pmd_alloc_kernel(dir, address);
+ if (address >= end)
+ BUG();
+ spin_lock(&init_mm.page_table_lock);
+ do {
+ pmd_t *pmd;
+ pmd = pmd_alloc(&init_mm, dir, address);
+ error = -ENOMEM;
if (!pmd)
- return -ENOMEM;
+ break;
if (remap_area_pmd(pmd, address, end - address,
phys_addr + address, flags))
- return -ENOMEM;
+ break;
+ error = 0;
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
- }
+ } while (address && (address < end));
+ spin_unlock(&init_mm.page_table_lock);
flush_tlb_all();
- return 0;
+ return error;
}
/*
@@ -92,7 +106,7 @@
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
void * addr;
struct vm_struct * area;
@@ -106,7 +120,7 @@
/*
* Don't remap the low PCI/ISA area, it's always mapped..
*/
- if (phys_addr >= 0xA0000 && last_addr <= 0x100000)
+ if (phys_addr >= 0xA0000 && last_addr < 0x100000)
return phys_to_virt(phys_addr);
/*
@@ -136,7 +150,7 @@
return (void *) (offset + (char *)addr);
}
-void iounmap(void *addr)
+void p3_iounmap(void *addr)
{
if (addr > high_memory)
return vfree((void *) (PAGE_MASK & (unsigned long) addr));
--- cache.c DELETED ---