From: Andy P. <at...@us...> - 2002-04-09 12:34:04
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-arm/proc-armv
In directory usw-pr-cvs1:/tmp/cvs-serv9454/asm-arm/proc-armv
Modified Files:
cache.h pgtable.h system.h uaccess.h
Added Files:
pgalloc.h
Log Message:
Synch to 2.4.15 commit 1
--- NEW FILE ---
/*
* linux/include/asm-arm/proc-armv/pgalloc.h
*
* Copyright (C) 2001 Russell King
*
* Page table allocation/freeing primitives for 32-bit ARM processors.
*/
/* unfortunately, this includes linux/mm.h and the rest of the universe. */
#include <linux/slab.h>
extern kmem_cache_t *pte_cache;
/*
* Allocate one PTE table.
*
* Note that we keep the processor copy of the PTE entries separate
* from the Linux copy. The processor copies are offset by -PTRS_PER_PTE
* words from the Linux copy.
*/
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = kmem_cache_alloc(pte_cache, GFP_KERNEL);
	if (pte)
		pte += PTRS_PER_PTE;

	return pte;
}
/*
* Free one PTE table.
*/
static inline void pte_free_slow(pte_t *pte)
{
	if (pte) {
		pte -= PTRS_PER_PTE;
		kmem_cache_free(pte_cache, pte);
	}
}
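To make the comment above concrete: one pte_cache object holds both tables back to back, so the two views are reached by simple pointer arithmetic. A minimal sketch (illustration only, not part of this file; the helper name is made up):

	/*
	 * Layout of one pte_cache object (sketch):
	 *
	 *   object start                 object start + PTRS_PER_PTE
	 *   [ processor PTE table ..... ][ Linux PTE table .......... ]
	 *
	 * pte_alloc_one() hands out the Linux half; the processor half
	 * sits PTRS_PER_PTE entries below it.
	 */
	static inline pte_t *hw_pte_table(pte_t *linux_pte)	/* hypothetical */
	{
		return linux_pte - PTRS_PER_PTE;
	}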
/*
* Populate the pmdp entry with a pointer to the pte. This pmd is part
* of the mm address space.
*
* If 'mm' is the init task's mm, then we are doing a vmalloc, and we
* need to set stuff up correctly for it.
*/
#define pmd_populate(mm,pmdp,pte)				\
	do {							\
		unsigned long __prot;				\
		if (mm == &init_mm)				\
			__prot = _PAGE_KERNEL_TABLE;		\
		else						\
			__prot = _PAGE_USER_TABLE;		\
		set_pmd(pmdp, __mk_pmd(pte, __prot));		\
	} while (0)
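As a usage sketch of the two primitives together (illustration only; the real caller is the generic 2.4 pte_alloc() path, which also takes page_table_lock and re-checks the pmd - both elided here):

	/* Hypothetical, simplified caller of pte_alloc_one()/pmd_populate(). */
	static pte_t *example_pte_alloc(struct mm_struct *mm, pmd_t *pmdp,
					unsigned long address)
	{
		if (pmd_none(*pmdp)) {
			pte_t *new = pte_alloc_one(mm, address);
			if (!new)
				return NULL;
			/* picks _PAGE_KERNEL_TABLE or _PAGE_USER_TABLE from 'mm' */
			pmd_populate(mm, pmdp, new);
		}
		return pte_offset(pmdp, address);
	}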
Index: cache.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-arm/proc-armv/cache.h,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- cache.h 14 Jan 2001 16:58:46 -0000 1.1.1.1
+++ cache.h 9 Apr 2002 12:33:10 -0000 1.2
@@ -1,7 +1,7 @@
/*
* linux/include/asm-arm/proc-armv/cache.h
*
- * Copyright (C) 1999-2000 Russell King
+ * Copyright (C) 1999-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -10,6 +10,12 @@
#include <asm/mman.h>
/*
+ * This flag is used to indicate that the page pointed to by a pte
+ * is dirty and requires cleaning before returning it to the user.
+ */
+#define PG_dcache_dirty PG_arch_1
+
+/*
* Cache handling for 32-bit ARM processors.
*
* Note that on ARM, we have a more accurate specification than that
@@ -54,6 +60,33 @@
/*
* This flushes back any buffered write data. We have to clean the entries
* in the cache for this page. This does not invalidate either I or D caches.
+ *
+ * Called from:
+ * 1. mm/filemap.c:filemap_nopage
+ * 2. mm/filemap.c:filemap_nopage
+ * [via do_no_page - ok]
+ *
+ * 3. mm/memory.c:break_cow
+ * [copy_cow_page doesn't do anything to the cache; insufficient cache
+ * handling. Need to add flush_dcache_page() here]
+ *
+ * 4. mm/memory.c:do_swap_page
+ * [read_swap_cache_async doesn't do anything to the cache: insufficient
+ * cache handling. Need to add flush_dcache_page() here]
+ *
+ * 5. mm/memory.c:do_anonymous_page
+ * [zero page, never written by kernel - ok]
+ *
+ * 6. mm/memory.c:do_no_page
+ * [we will be calling update_mmu_cache, which will catch on PG_dcache_dirty]
+ *
+ * 7. mm/shmem.c:shmem_nopage
+ * 8. mm/shmem.c:shmem_nopage
+ * [via do_no_page - ok]
+ *
+ * 9. fs/exec.c:put_dirty_page
+ * [we call flush_dcache_page prior to this, which will flush out the
+ * kernel virtual addresses from the dcache - ok]
*/
static __inline__ void flush_page_to_ram(struct page *page)
{
@@ -69,26 +102,71 @@
#define flush_dcache_range(_s,_e) cpu_cache_clean_invalidate_range((_s),(_e),0)
/*
- * FIXME: We currently clean the dcache for this page. Should we
- * also invalidate the Dcache? And what about the Icache? -- rmk
+ * flush_dcache_page is used when the kernel has written to the page
+ * cache page at virtual address page->virtual.
+ *
+ * If this page isn't mapped (i.e., page->mapping == NULL), or it has
+ * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
+ * then we _must_ always clean + invalidate the dcache entries associated
+ * with the kernel mapping.
+ *
+ * Otherwise we can defer the operation, and clean the cache when we are
+ * about to change to user space. This is the same method as used on SPARC64.
+ * See update_mmu_cache for the user space part.
*/
-#define flush_dcache_page(page) cpu_dcache_clean_page(page_address(page))
+static inline void flush_dcache_page(struct page *page)
+{
+ if (page->mapping && !(page->mapping->i_mmap) &&
+ !(page->mapping->i_mmap_shared))
+ set_bit(PG_dcache_dirty, &page->flags);
+ else {
+ unsigned long virt = (unsigned long)page_address(page);
+ cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
+ }
+}
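The pattern this is aimed at, as a rough sketch (illustration only; the function and parameter names here are made up):

	/*
	 * Sketch: a read path writes file data into the page-cache page
	 * through the kernel mapping, then notifies the cache layer.
	 * flush_dcache_page() either cleans the lines now, or just marks
	 * the page PG_dcache_dirty for update_mmu_cache() to handle later.
	 */
	static void example_fill_page(struct page *page, const void *src,
				      unsigned long len)
	{
		memcpy(page_address(page), src, len);
		flush_dcache_page(page);
	}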
#define clean_dcache_entry(_s) cpu_dcache_clean_entry((unsigned long)(_s))
/*
- * I cache only
+ * I cache coherency stuff.
+ *
+ * This *is not* just icache. It is to make data written to memory
+ * consistent such that instructions fetched from the region are what
+ * we expect.
+ *
+ * This generally means that we have to clean out the Dcache and write
+ * buffers, and maybe flush the Icache in the specified range.
*/
#define flush_icache_range(_s,_e) \
do { \
cpu_icache_invalidate_range((_s), (_e)); \
} while (0)
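A sketch of the classic consumer of flush_icache_range() as described above (illustration only, not code from this patch):

	/*
	 * Sketch: after copying instructions into RAM (module load,
	 * trampoline setup, ...), push them out of the Dcache/write
	 * buffer and invalidate the Icache before executing them.
	 */
	static void example_install_code(void *dst, const void *src,
					 unsigned long len)
	{
		memcpy(dst, src, len);
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + len);
	}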
-#define flush_icache_page(vma,pg) \
- do { \
- if ((vma)->vm_flags & PROT_EXEC) \
- cpu_icache_invalidate_page(page_address(pg)); \
- } while (0)
+/*
+ * This function is misnamed IMHO. There are three places where it
+ * is called, each of which is preceded immediately by a call to
+ * flush_page_to_ram:
+ *
+ * 1. kernel/ptrace.c:access_one_page
+ * called after we have written to the kernel view of a user page.
+ * The user page has been expunged from the cache by flush_cache_page.
+ * [we don't need to do anything here if we add a call to
+ * flush_dcache_page]
+ *
+ * 2. mm/memory.c:do_swap_page
+ * called after we have (possibly) written to the kernel view of a
+ * user page, which has previously been removed (ie, has been through
+ * the swap cache).
+ * [if the flush_page_to_ram() conditions are satisfied, then ok]
+ *
+ * 3. mm/memory.c:do_no_page
+ * [if the flush_page_to_ram() conditions are satisfied, then ok]
+ *
+ * Invalidating the icache at the kernel's virtual page isn't really
+ * going to do us much good, since we wouldn't have executed any
+ * instructions there.
+ */
+#define flush_icache_page(vma,pg) do { } while (0)
/*
* Old ARM MEMC stuff. This supports the reversed mapping handling that
@@ -154,3 +232,10 @@
cpu_tlb_invalidate_page((_page), \
((_vma)->vm_flags & VM_EXEC)); \
} while (0)
+
+/*
+ * if PG_dcache_dirty is set for the page, we need to ensure that any
+ * cache entries for the kernel's virtual memory range are written
+ * back to the page.
+ */
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
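The definition of update_mmu_cache() lives in the arch mm code rather than in this header; a rough sketch of what the deferred-flush half is expected to look like (an assumption on my part, not the code added by this commit):

	void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
			      pte_t pte)
	{
		struct page *page = pte_page(pte);

		if (VALID_PAGE(page) && page->mapping &&
		    test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
			unsigned long virt = (unsigned long)page_address(page);

			/* write back the kernel-mapping lines that
			   flush_dcache_page() above chose to defer */
			cpu_cache_clean_invalidate_range(virt,
							 virt + PAGE_SIZE, 0);
		}
	}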
Index: pgtable.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-arm/proc-armv/pgtable.h,v
retrieving revision 1.1.1.2
retrieving revision 1.2
diff -u -r1.1.1.2 -r1.2
--- pgtable.h 25 Feb 2001 23:14:53 -0000 1.1.1.2
+++ pgtable.h 9 Apr 2002 12:33:10 -0000 1.2
@@ -1,7 +1,7 @@
/*
* linux/include/asm-arm/proc-armv/pgtable.h
*
- * Copyright (C) 1995-1999 Russell King
+ * Copyright (C) 1995-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -48,7 +48,7 @@
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define set_pmd(pmdp,pmd) cpu_set_pmd(pmdp,pmd)
-extern __inline__ pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
+static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
{
unsigned long pte_ptr = (unsigned long)ptep;
pmd_t pmd;
@@ -64,11 +64,7 @@
return pmd;
}
-/* these are aliases for the above function */
-#define mk_user_pmd(ptep) __mk_pmd(ptep, _PAGE_USER_TABLE)
-#define mk_kernel_pmd(ptep) __mk_pmd(ptep, _PAGE_KERNEL_TABLE)
-
-extern __inline__ unsigned long pmd_page(pmd_t pmd)
+static inline unsigned long pmd_page(pmd_t pmd)
{
unsigned long ptr;
@@ -149,7 +145,7 @@
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define PTE_BIT_FUNC(fn,op) \
-extern inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread, |= L_PTE_USER);*/
@@ -161,7 +157,6 @@
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
-PTE_BIT_FUNC(nocache, &= ~L_PTE_CACHEABLE);
/*
* Mark the prot value as uncacheable and unbufferable.
Index: system.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-arm/proc-armv/system.h,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- system.h 14 Jan 2001 16:58:45 -0000 1.1.1.1
+++ system.h 9 Apr 2002 12:33:10 -0000 1.2
@@ -17,9 +17,31 @@
"mcr p15, 0, %0, c1, c0 @ set CR" \
: : "r" (x))
+#define CR_M (1 << 0) /* MMU enable */
+#define CR_A (1 << 1) /* Alignment abort enable */
+#define CR_C (1 << 2) /* Dcache enable */
+#define CR_W (1 << 3) /* Write buffer enable */
+#define CR_P (1 << 4) /* 32-bit exception handler */
+#define CR_D (1 << 5) /* 32-bit data address range */
+#define CR_L (1 << 6) /* Implementation defined */
+#define CR_B (1 << 7) /* Big endian */
+#define CR_S (1 << 8) /* System MMU protection */
+#define CR_R (1 << 9) /* ROM MMU protection */
+#define CR_F (1 << 10) /* Implementation defined */
+#define CR_Z (1 << 11) /* Implementation defined */
+#define CR_I (1 << 12) /* Icache enable */
+#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
+#define CR_RR (1 << 14) /* Round Robin cache replacement */
+
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
+#ifdef __ARM_ARCH_4__
+#define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0)
+#else
+#define vectors_base() (0)
+#endif
+
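For reference, how vectors_base() would typically be consumed (sketch only; the real consumer is the arch trap-initialisation code, which may differ in detail):

	/* Sketch: place the exception vectors where the CPU will fetch them. */
	static void example_init_vectors(const void *vectors, unsigned long size)
	{
		unsigned long base = vectors_base();	/* 0 or 0xffff0000, per CR_V */

		memcpy((void *)base, vectors, size);
		flush_icache_range(base, base + size);
	}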
/*
* A couple of speedups for the ARM
*/
@@ -135,7 +157,7 @@
#define swp_is_buggy
#endif
-extern __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
extern void __bad_xchg(volatile void *, int);
unsigned long ret;
Index: uaccess.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-arm/proc-armv/uaccess.h,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -u -r1.1.1.1 -r1.2
--- uaccess.h 14 Jan 2001 16:58:46 -0000 1.1.1.1
+++ uaccess.h 9 Apr 2002 12:33:10 -0000 1.2
@@ -14,7 +14,7 @@
#define KERNEL_DS 0x00000000
#define USER_DS PAGE_OFFSET
-extern __inline__ void set_fs (mm_segment_t fs)
+static inline void set_fs (mm_segment_t fs)
{
current->addr_limit = fs;
@@ -24,7 +24,7 @@
/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
- __asm__ __volatile__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
+ __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current->addr_limit) \
: "cc"); \
@@ -32,7 +32,7 @@
#define __addr_ok(addr) ({ \
unsigned long flag; \
- __asm__ __volatile__("cmp %2, %0; movlo %0, #0" \
+ __asm__("cmp %2, %0; movlo %0, #0" \
: "=&r" (flag) \
: "0" (current->addr_limit), "r" (addr) \
: "cc"); \
@@ -57,24 +57,9 @@
#define __put_user_asm_half(x,addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
- __asm__ __volatile__( \
- "1: strbt %1,[%3],#0\n" \
- "2: strbt %2,[%4],#0\n" \
- "3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .align 2\n" \
- "4: mov %0, %5\n" \
- " b 3b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 4b\n" \
- " .long 2b, 4b\n" \
- " .previous" \
- : "=r" (err) \
- : "r" (__temp), "r" (__temp >> 8), \
- "r" (addr), "r" ((int)(addr) + 1), \
- "i" (-EFAULT), "0" (err)); \
+ unsigned long __ptr = (unsigned long)(addr); \
+ __put_user_asm_byte(__temp, __ptr, err); \
+ __put_user_asm_byte(__temp >> 8, __ptr + 1, err); \
})
#define __put_user_asm_word(x,addr,err) \
@@ -107,31 +92,15 @@
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
- : "=r" (err), "=r" (x) \
+ : "=r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT), "0" (err))
#define __get_user_asm_half(x,addr,err) \
({ \
- unsigned long __temp; \
- __asm__ __volatile__( \
- "1: ldrbt %1,[%3],#0\n" \
- "2: ldrbt %2,[%4],#0\n" \
- " orr %1, %1, %2, lsl #8\n" \
- "3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .align 2\n" \
- "4: mov %0, %5\n" \
- " mov %1, #0\n" \
- " b 3b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 4b\n" \
- " .long 2b, 4b\n" \
- " .previous" \
- : "=r" (err), "=r" (x), "=&r" (__temp) \
- : "r" (addr), "r" ((int)(addr) + 1), \
- "i" (-EFAULT), "0" (err)); \
+ unsigned long __b1, __b2, __ptr = (unsigned long)addr; \
+ __get_user_asm_byte(__b1, __ptr, err); \
+ __get_user_asm_byte(__b2, __ptr + 1, err); \
+ (x) = __b1 | (__b2 << 8); \
})
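With the half-word accessors now built from __get_user_asm_byte()/__put_user_asm_byte(), each byte access carries its own exception-table entry, so the hand-written fixup blocks above are no longer needed. A sketch of the user-visible effect (illustration only; assumes the 2-byte case routes through __get_user_asm_half()):

	/* Sketch: a 16-bit __get_user() now ends up as two faultable byte loads. */
	static int example_read_halfword(unsigned short *kval,
					 const unsigned short *uaddr)
	{
		return __get_user(*kval, uaddr);	/* 0 on success, -EFAULT on fault */
	}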
@@ -149,7 +118,7 @@
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
- : "=r" (err), "=r" (x) \
+ : "=r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT), "0" (err))
extern unsigned long __arch_copy_from_user(void *to, const void *from, unsigned long n);