Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv1125/mm
Modified Files:
mmu_context.h pagelet.h pagelet_pgd.h pagelet_pmd.h
pagelet_pte.h pgalloc.h pgtable.h tlb.h
Added Files:
task.h
Log Message:
New mm layer + start of signal handling + misc fixes
--- NEW FILE ---
#ifndef __VAX_MM_TASK_H
#define __VAX_MM_TASK_H
/* task.h - task memory map defines */
/* atp July 2001. */
/* These constants size the per-task structures in the system
 * page table; they are consumed in paging_init (arch/vax/mm/init.c).
 */
/* currently allocate 32mb of virtual memory */
/* These defines cover the process memory map, and are in bytes. */
/* Please remember to make them a multiple of PAGE_SIZE, or it's going to
 * get weird here. */
/* TASK_WSMAX is the max virtual address space in P0 (the working set). */
/* TASK_WSMAX must not be larger than 768MB. In the unlikely event that
 * you really want to allocate that much to a process, change PGD_SPECIAL below. */
#define TASK_WSMAX (40*1024*1024)
/* TASK_STKMAX is the max space for the stack in P1. */
/* Like WSMAX above, the upper limit for this is set by PGD_SPECIAL below. If this
 * is above 256MB change PGD_SPECIAL.
 */
#define TASK_STKMAX (8*1024*1024)
/* TASK_MMAPMAX is the max space in P0 for the mmap() function,
   contiguous with (directly above) the TASK_WSMAX region. */
#define TASK_MMAPMAX (8*1024*1024)
/* TASK_MAXUPRC is the maximum number of user processes on the system.
 * Think of this like BALSETCNT on VMS; it sizes the task PTE slot area.
 * NOTE(review): this should also set/be set by the Linux max task
 * variable -- at present it is an independent hard limit.
 */
#define TASK_MAXUPRC (32)
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's: immediately above the working-set region.
 */
#define TASK_UNMAPPED_BASE TASK_WSMAX
/* Calculations based on the above, for sizing the SPT (system page table). */
/* N_HWPTE_TASK_* = the number of HWPTEs (one per pagelet, i.e. per
 * 1<<PAGELET_SHIFT bytes) needed to map each region of a process. */
#define N_HWPTE_TASK_P0 ((TASK_WSMAX+TASK_MMAPMAX)>>PAGELET_SHIFT)
#define N_HWPTE_TASK_P1 ((TASK_STKMAX)>>PAGELET_SHIFT)
/* There are 4 4096-byte pages in the pmd = 4x1024 hwpte-sized entries,
 * placed at the start of each task slot (see P0PMD/P1PMD offsets below). */
#define N_HWPTE_TASK_PMD ((4*1024))
/* Total hwptes per task: both pmds plus the P0 and P1 page tables. */
#define N_HWPTE_TASK (N_HWPTE_TASK_P0+N_HWPTE_TASK_P1+N_HWPTE_TASK_PMD)
/* The alignment we want - at present double page (8kb) so that
 * pte_alloc/pte_offset work ok. */
#define PTE_TASK_MASK (~(8191))
/* Round x up to the next 8kb boundary. */
#define PTE_TASK_ALIGN(x) (((x)+8191)&PTE_TASK_MASK)
/* Size in bytes of an aligned task pte region ("slot");
 * each hwpte is 4 bytes, hence the << 2. */
#define PTE_TASK_SLOTSIZE PTE_TASK_ALIGN(N_HWPTE_TASK<<2)
/* The number of pagelets, or SPTEs, needed to map this number of HWPTEs;
 * the +1 covers a partial final pagelet. */
#define SPTE_MAX_TASKPTE ((N_HWPTE_TASK>>(PAGELET_SHIFT-2))+1)
/* Byte offsets into the page table area from the start of this slot.
 * Slot layout: [P0 pmd][P1 pmd][P0 ptes][P1 ptes] -- presumably the two
 * pmds occupy two pages each (P1PMD_OFFSET = PAGE_SIZE*2); verify against
 * the pmd handling in pagelet.h/pgalloc.c. */
#define P0PTE_OFFSET (N_HWPTE_TASK_PMD<<2)
#define P1PTE_OFFSET ((N_HWPTE_TASK_P0+N_HWPTE_TASK_PMD)<<2)
#define P0PMD_OFFSET (0)
#define P1PMD_OFFSET (PAGE_SIZE*2)
/*
 * This is a special index into the pmd. This stores a back pointer to the
 * pgd in the pmd. The default value of 1536 allows 768 MB for WSMAX and 256
 * MB for stack. If you want to change that allocation, bear in mind that you
 * have to trade WSMAX for STKMAX. Unless I think of a cleverer way of doing this.
 */
#define PGD_SPECIAL 1536
/*
 * User space process size: 2GB (default).
 * This is a bit bogus - a generic Linux thing.
 */
#define TASK_SIZE (PAGE_OFFSET)
#endif /* __VAX_MM_TASK_H */
Index: mmu_context.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/mmu_context.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- mmu_context.h 2001/06/09 18:00:25 1.3
+++ mmu_context.h 2001/07/31 17:33:26 1.4
@@ -1,8 +1,6 @@
-/* Copyright (C) May 2001 - Dave Airlie - Vax project - ai...@li...
- derived I'm sure from somewhere ... */
-
#ifndef _ASM_VAX_MMU_CONTEXT_H
#define _ASM_VAX_MMU_CONTEXT_H
+/* atp Jan 2001 */
#include <asm/mm/tlb.h>
@@ -12,6 +10,26 @@
#define destroy_context(mm) flush_tlb_mm(mm)
+static inline void set_vaxmm_regs_p0(pgd_t *pgdp)
+{
+ __mtpr(pgdp->br, PR_P0BR);
+ __mtpr( (pgdp->lr * 8), PR_P0LR);
+}
+
+static inline void set_vaxmm_regs_p1(pgd_t *pgdp)
+{
+ __mtpr(pgdp->br, PR_P1BR);
+ __mtpr( (pgdp->lr * 8), PR_P1LR);
+}
+
+static inline void set_vaxmm_regs(pgd_t *pgdp)
+{
+ __mtpr((pgdp[0]).br, PR_P0BR);
+ __mtpr( ((pgdp[0]).lr * 8), PR_P0LR);
+ __mtpr((pgdp[1]).br, PR_P1BR);
+ __mtpr( ((pgdp[1]).lr * 8), PR_P1LR);
+}
+
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk, unsigned cpu)
{
@@ -30,15 +48,12 @@
clear_bit(cpu, &prev->cpu_vm_mask);
tsk->thread.pcb.p0br = (next->pgd[0]).br;
- tsk->thread.pcb.p0lr = (next->pgd[0]).lr * 8 | 0x04000000;
+ tsk->thread.pcb.p0lr = (next->pgd[0]).lr * 8 /*| 0x04000000*/;
tsk->thread.pcb.p1br = (next->pgd[1]).br;
tsk->thread.pcb.p1lr = (next->pgd[1]).lr * 8;
- __mtpr(next->pgd[0].br, PR_P0BR);
- __mtpr((next->pgd[0].lr * 8), PR_P0LR);
- __mtpr(next->pgd[1].br, PR_P1BR);
- __mtpr((next->pgd[1].lr * 8), PR_P1LR);
-
+ set_vaxmm_regs(next->pgd);
+
flush_tlb_all();
}
set_bit(cpu, &next->cpu_vm_mask);
Index: pagelet.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pagelet.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- pagelet.h 2001/02/15 16:50:54 1.2
+++ pagelet.h 2001/07/31 17:33:26 1.3
@@ -73,11 +73,18 @@
struct vax_pgd_descriptor {
unsigned long br;
unsigned long lr;
+ unsigned long pmd; /* first four pages of the task PTE slot are the pmds
+ * There are two pmd's one for p0 and one for p1 */
+ unsigned long pmd2; /* This is just a place holder, as we pretend that
+ * our pmds hold 2048 entries and are 2 pages long */
+ unsigned long slot; /* the base address of this slot */
+ unsigned long segment; /* The segment index - used in pgd_clear */
};
/* pgd_t definitions */
typedef struct vax_pgd_descriptor pgd_t;
-#define pgd_val(x) ((x).br)
+/* the .pmd is not a typo */
+#define pgd_val(x) ((x).pmd)
#define __pgd(x) ((pgd_t) { (x) } )
/* definition of pmd_t */
@@ -90,6 +97,11 @@
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
+
+/* hwpte_t */
+typedef struct { unsigned long hwpte; } hwpte_t;
+#define hwpte_val(x) ((x).hwpte)
+#define __hwpte(x) ((hwpte_t) { (x) } )
/* and pgprot_t */
typedef struct { unsigned long pgprot; } pgprot_t;
Index: pagelet_pgd.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pagelet_pgd.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- pagelet_pgd.h 2001/02/15 01:17:23 1.1
+++ pagelet_pgd.h 2001/07/31 17:33:26 1.2
@@ -45,10 +45,11 @@
* into the pgd entry)
* All the actual stuff is done by the pmd_xxx functions
*/
-extern inline int pgd_none(pgd_t pgd) { return 0; }
-extern inline int pgd_bad(pgd_t pgd) { return 0; }
-extern inline int pgd_present(pgd_t pgd) { return 1; }
-extern inline void pgd_clear(pgd_t * pgdp) { }
+extern inline int pgd_none(pgd_t pgd) { return !(pgd).pmd; }
+extern inline int pgd_bad(pgd_t pgd) { return !(pgd).br; }
+extern inline int pgd_present(pgd_t pgd) { return ((pgd).pmd != 0); }
+
+extern void pgd_clear(pgd_t * pgdp);
/* to set the page-dir (p0br/p0lr) (p1br/p1lr) see arch/vax/mm/pgtable.c */
extern void set_page_dir(struct task_struct *task, pgd_t *pgdir);
Index: pagelet_pmd.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pagelet_pmd.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- pagelet_pmd.h 2001/06/17 12:34:05 1.2
+++ pagelet_pmd.h 2001/07/31 17:33:26 1.3
@@ -1,61 +1,65 @@
/*
* pagelet_pmd.h
*
- * Defines the page mid level directory in our fake 2 level paging scheme.
- * As for all the 2 level schemes, this is folded away by the compiler.
+ * Defines the page mid level directory in our fake 3 level paging scheme.
*
* Copyright atp Jan 2001.
+ * atp Jul 2001. Go to a fake 3 level.
*/
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
-/* 128 * 512. 128 ptes/page */
-#define PMD_SHIFT 30
+/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
+/* 1 page of ptes maps 128x4096 bytes = 512kb.
+ * Each "pmd" here is infact a 2 page = 8kb region at the start of the
+ * process page table region. It makes the accounting a lot easier.
+ */
+#define PMD_SHIFT 19
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
/*
* entries per page directory level: the VAX is single level, so
* we don't really have any PMD directory physically, or real pgd for
- * that matter.
+ * that matter. Its just an 8kb region.
*/
-#define PTRS_PER_PMD 1
+#define PTRS_PER_PMD 2048
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
/* pmd_xxx functions */
-/* These are really operating on the pgd_t */
-/* These are just testing the br in each pgd_t for the presence/absence of info */
-
-/* set_pmd: for the moment, I'm not going to use this. Each pgd_t in the
- * pgd should be set by hand at process initialisation. It doesnt need to
- * ever change, except for the length register, which is handled in pte_alloc */
-#define set_pmd(pmdptr, pmdval)
+/* These are really operating on the first two pages of a balance slot */
+/*
+ * we dont want linux mucking about with our pmd pages. It will get it
+ * wrong. pmd_alloc and pmd_free do the business there.
+ */
+#define set_pmd(pmdptr, pmdval)
/* Fixme:, check the length as well as the base register. */
-extern inline int pmd_none(pmd_t pmd) { return (pmd_val(pmd) == 0); }
+extern inline int pmd_none(pmd_t pmd)
+{
+ if (pmd_val(pmd) & 0x1) return 1;
+ return (pmd_val(pmd) == 0);
+}
extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) == 0); }
extern inline int pmd_present(pmd_t pmd) { return (pmd_val(pmd) != 0); }
-/* This is just zeroing out the base and length registers */
-/* FIXME: or validate code - I removed the zero'ing of the pmd,
- pmd are parts of pgds, and if we clear the br/lr of the P0 pmd,
- the zeroth member of pgd, we lose the vmalloc address so can't
- do vfree. - D.A. June 2001
-*/
-extern inline void pmd_clear(pmd_t * pmdp) {
- /* pmd_val(pmdp[0]) = 0;
- pmd_val(pmdp[1]) = 0;*/
- }
+/* clear the pmd entry */
+extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }
-
/* Find an entry in the second-level page table.. */
+#define pmd_index(address) ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
- return (pmd_t *) dir;
+ pmd_t *ptr;
+ ptr = (pmd_t *)pmd_val(*dir) + pmd_index(address);
+ /* locate the pmd entry according to address */
+// printk("pmd_offset: pgd %8p, pmd_val %8lx, address %8lx, index %8lx, offset %8p\n",dir,pmd_val(*dir),address,pmd_index(address),ptr);
+ return ptr;
}
Index: pagelet_pte.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pagelet_pte.h,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- pagelet_pte.h 2001/06/16 14:26:36 1.5
+++ pagelet_pte.h 2001/07/31 17:33:26 1.6
@@ -33,17 +33,17 @@
* Note that the first hwpte is the one that linux sees.
* The first hwpte is used for all tests except
* the dirty test, which has to be applied to all */
-typedef unsigned long hwpte_t;
+/*typedef unsigned long hwpte_t;*/
typedef struct pagelet_pagecluster {
- hwpte_t pte;
- hwpte_t pte1;
- hwpte_t pte2;
- hwpte_t pte3;
- hwpte_t pte4;
- hwpte_t pte5;
- hwpte_t pte6;
- hwpte_t pte7;
+ unsigned long pte;
+ unsigned long pte1;
+ unsigned long pte2;
+ unsigned long pte3;
+ unsigned long pte4;
+ unsigned long pte5;
+ unsigned long pte6;
+ unsigned long pte7;
} pagecluster_t;
/* each ptr is 32 bytes in size */
@@ -70,7 +70,7 @@
/* to find an entry in a page-table */
#define PAGE_PTR(address) \
-((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTE_LOG2)&PTE_MASK&~PAGE_MASK)
+((unsigned long)(((address)>>PAGE_SHIFT)<<SIZEOF_PTE_LOG2)&PTE_MASK&~PAGE_MASK)
/* Certain architectures need to do special things when PTEs
@@ -101,6 +101,11 @@
ptep->pte6 = pte_val(pte)+6;
ptep->pte7 = pte_val(pte)+7;
}
+
+static inline void print_pte(pte_t *ptep)
+{
+ printk(KERN_DEBUG "%8p: %8lx %8lx %8lx %8lx %8lx %8lx %8lx %8lx\n", ptep, ptep->pte,ptep->pte1,ptep->pte2,ptep->pte3,ptep->pte4,ptep->pte5,ptep->pte6,ptep->pte7);
+}
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -112,21 +117,24 @@
*
* See asm-i386/pgtable-3level.h for background.
*/
-/* D.A. May 2001 - FIXME: this needs cleaning up, hacked in a mk_pte and __mk_pte... will clean up later.. our mk_pte is being use incorrectly in some VAX code so I needed __mk_pte.
+/* D.A. May 2001 - FIXME: this needs cleaning up, hacked in a mk_pte and
+ __mk_pte... will clean up later.. our mk_pte is being use incorrectly
+ in some VAX code so I needed __mk_pte.
*/
-static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
+
+static inline pte_t __mk_pte(unsigned long int page, pgprot_t pgprot)
{
pte_t pte;
- pte.pte = (__pa(page) >> PAGELET_SHIFT) | pgprot_val(pgprot);
+ pte_val(pte) = (__pa(page) >> PAGELET_SHIFT) | pgprot_val(pgprot);
return pte;
}
#define mk_pte(page, pgprot) __mk_pte(((page)-mem_map)<<PAGE_SHIFT,(pgprot))
/* This takes a physical page address that is used by the remapping functions */
-static inline pte_t mk_pte_phys(void *physpage, pgprot_t pgprot)
+static inline pte_t mk_pte_phys(unsigned long int physpage, pgprot_t pgprot)
{
pte_t pte;
- pte.pte = ((unsigned long)(physpage) >> PAGELET_SHIFT) | pgprot_val(pgprot);
+ pte_val(pte) = ((unsigned long)(physpage) >> PAGELET_SHIFT) | pgprot_val(pgprot);
return pte;
}
@@ -196,17 +204,16 @@
/* who needs that
-extern inline int pte_read(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); }
-extern inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); }
-extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
-extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
-extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; }
-extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; }
-*/
+ * extern inline int pte_read(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); }
+ * extern inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); }
+ * extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
+ * extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
+ * extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; }
+ * extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; }
+ */
/*
* these manipulate various bits in each hwpte.
- *
*/
static inline pte_t pte_wrprotect(pte_t pte)
{
@@ -235,6 +242,12 @@
return pte;
}
+static inline pte_t pte_mkinvalid(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_VALID;
+ return pte;
+}
+
/* software only - only bother with first pagelet pte in the pagecluster */
static inline pte_t pte_mkold(pte_t pte)
{
@@ -261,9 +274,31 @@
static inline int pte_none(pte_t pte) { return (!pte_val(pte)); }
static inline int pte_present(pte_t pte) { return (pte_val(pte) & _PAGE_VALID); }
+
+extern pte_t * pte_offset(pmd_t * dir, unsigned long address);
+
+/* items to manipulate a hwpte (for the S0 tables ) */
+
+static inline void set_hwpte(hwpte_t *ptep, hwpte_t pte)
+{
+ *ptep = pte;
+}
+
+static inline hwpte_t mk_hwpte(void *page, pgprot_t pgprot)
+{
+ hwpte_t hwpte;
+ hwpte_val(hwpte) = (__pa(page) >> PAGELET_SHIFT) | pgprot_val(pgprot);
+ return hwpte;
+}
+
+static inline int hwpte_none(hwpte_t pte) { return !hwpte_val(pte); }
+static inline int hwpte_present(hwpte_t pte) { return hwpte_val(pte) & _PAGE_VALID; }
+
+static inline hwpte_t hwpte_mkinvalid(hwpte_t pte)
+{
+ hwpte_val(pte) &= ~_PAGE_VALID;
+ return pte;
+}
-/* find an entry in a pagetable */
-#define pte_offset(pmd, address) \
-((pte_t *) ( ((pgd_t *)(pmd))->br + ((address & 0x3fffffff)>> PAGE_SHIFT)*BYTES_PER_PTE_T))
#endif
Index: pgalloc.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pgalloc.h,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -r1.8 -r1.9
--- pgalloc.h 2001/06/26 19:01:11 1.8
+++ pgalloc.h 2001/07/31 17:33:26 1.9
@@ -1,7 +1,7 @@
#ifndef __ASM_VAX_MM_PGALLOC_H
#define __ASM_VAX_MM_PGALLOC_H
-/* atp 2001. pgalloc.h for VAX architecture. */
+/* Copyright atp 1998-2001. pgalloc.h for VAX architecture. */
/*
* Fixmes:
* 1) the pte_alloc/freeing stuff. Check Constraints here
@@ -16,6 +16,14 @@
/*
* (c) Copyright Dave Airlie 2001 - ai...@li...
* -- re-write for fixed sized processes
+ *
+ * atp Jun 2001 remove fixed size processes, use 3 level page table and pte slots.
+ * atp Jun-Jul 2001 - complete rewrite.
+ *
+ * each 'pgd' spans an address range of 0x40000000 bytes.
+ * each page of 'ptes' spans an address range of 0x80000 bytes
+ * So, there are 0x800 pages of 'ptes' per pgd. Keeping track of which page
+ * is mapped where, requires a pmd with 0x800 entries.
*/
#include <asm/processor.h>
#include <linux/threads.h>
@@ -25,6 +33,7 @@
#ifndef CONFIG_SMP
extern struct pgtable_cache_struct {
unsigned long *pgd_cache;
+ unsigned long pgd_slots_used;
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
} quicklists;
@@ -38,102 +47,66 @@
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (quicklists.pte_cache)
#define pgtable_cache_size (quicklists.pgtable_cache_sz)
+#define pgd_slots_used (quicklists.pgd_slots_used)
-
-/*
- * traditional two-level paging, page table allocation routines:
- */
-
-extern __inline__ pmd_t *get_pmd_fast(void)
-{
- return (pmd_t *)0;
-}
-
-extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
-extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
-
-extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
-{
- if (!pgd)
- BUG();
- return (pmd_t *) pgd;
-}
-
-
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
-#if 0
-extern pgd_t *get_pgd_slow(void);
-#else
-extern __inline__ pgd_t *get_pgd_slow(void)
+
+extern pgd_t *pgd_alloc(void);
+extern pgd_t *get_pgd_fast(void);
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
- /*
- * this is rather wasteful, as only 6 longwords are
- * used in the entire 4kb page. Perhaps we can do something
- * smarter here by using the quicklists to pack the pgds into
- * a single page.
- */
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
-
- if (ret) {
- /* Allocate space for the p0/p1 page tables */
- /* allocate 192 pages at 4096 bytes each for page tables? */
- ret[0].br = (unsigned long)vmalloc(192 * PAGE_SIZE);
- if (ret[0].br==0)
- {
- printk("page_tables:vmalloc failed to allocate a page directory\n");
- BUG();
- return NULL;
- }
- memset((void *)ret[0].br, 0, 192*PAGE_SIZE);
- ret[0].lr = ((160*PAGE_SIZE)>>SIZEOF_PTE_LOG2);
- /* the p1br needs to be set back from the end of the p1 ptes */
- ret[1].br = (ret[0].br - 0x800000) + (192*PAGE_SIZE);
- ret[1].lr = 0x40000-((32*PAGE_SIZE)>>SIZEOF_PTE_LOG2);
-
- printk("get_pgd: p0: %8lX, %8lX, p1: %8lX, %8lx\n", ret[0].br, ret[0].lr, ret[1].br, ret[1].lr);
-
- /* set the s0 region, from the master copy in swapper_pg_dir */
- memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
- return ret;
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
}
-#endif
-extern __inline__ pgd_t *get_pgd_fast(void)
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
- unsigned long *ret;
+ /* we dont do this at present */
+}
- if ((ret = pgd_quicklist) != NULL) {
- pgd_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
+extern pmd_t *get_pmd_slow(void);
- return (pgd_t *)ret;
+/* Page Mid level directory handling routines. */
+static inline pmd_t *get_pmd_fast(void)
+{
+ unsigned long *ret;
+
+ if ((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pmd_t *)ret;
}
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
+static inline void free_pmd_fast(pmd_t *pmd)
{
- *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
- pgd_quicklist = (unsigned long *) pgd;
- pgtable_cache_size++;
+ *(unsigned long *)pmd = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pmd;
+ pgtable_cache_size++;
}
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline void free_pmd_slow(pmd_t *pmd)
{
- vfree((void *)pgd[0].br);
- free_page((unsigned long)pgd);
+ free_page((unsigned long)pmd);
}
+/* in arch/vax/mm/pgalloc.c */
+extern pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address);
+extern void pmd_free(pmd_t *pmd);
+extern void pte_free(pte_t *pte);
+extern unsigned long get_pageaddr_from_pte(pte_t *ptep);
-extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset);
+extern pte_t *get_pte_slow(void);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset);
-extern void remap_and_clear_pte_page(pgd_t *pagetable, pte_t *page, unsigned long pte_page);
+extern void remap_and_clear_pte_page(pmd_t *s0addr, pte_t *page);
+extern void remap_pte_invalidate(pmd_t *s0addr);
extern __inline__ pte_t *get_pte_fast(void)
{
@@ -158,49 +131,30 @@
extern __inline__ void free_pte_slow(pte_t *pte)
{
pte_clear(pte);
-/* free_page((unsigned long)pte);*/
+ free_page((unsigned long)pte);
}
-#define pte_free_kernel(pte) free_pte_slow(pte)
-#define pte_free(pte) free_pte_slow(pte)
-#define pgd_free(pgd) free_pgd_slow(pgd)
-#define pgd_alloc() get_pgd_fast()
+extern __inline__ void page_clear(pte_t *pte) {memset(pte, 0, PAGE_SIZE);}
-/* atp jun 01, moved these to arch/vax/mm/pgalloc.c */
-/* Allocate a new page for a page table for the kernel */
-extern pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address);
-extern pte_t *pte_alloc(pmd_t *pmd, unsigned long address);
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
-#define pmd_free(pmd) free_pmd_slow(pmd)
-
#define pmd_free_kernel pmd_free
#define pmd_alloc_kernel pmd_alloc
-extern int do_check_pgt_cache(int, int);
+/* atp jun 01, moved these to arch/vax/mm/pgalloc.c */
+/* Allocate a new page for a page table for the kernel */
+extern pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+extern pte_t *pte_alloc(pmd_t *pmd, unsigned long address);
+extern pte_t * pte_alloc_one(pmd_t *pmd);
-/* I cant find a reference to this in the generic or arch specific code
- * -- it used to be called from linux/mm/vmalloc.c, but is no longer */
-/* extern inline void set_pgdir(unsigned long address, pgd_t entry)
- * {
- * struct task_struct * p;
- * pgd_t *pgd;
- *
- * read_lock(&tasklist_lock);
- * for_each_task(p) {
- * if (!p->mm)
- * continue;
- * *pgd_offset(p->mm,address) = entry;
- * }
- * read_unlock(&tasklist_lock);
- * for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- * pgd[address >> PGDIR_SHIFT] = entry;
- * }
- */
+extern int do_check_pgt_cache(int, int);
/* tlb routines */
Index: pgtable.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pgtable.h,v
retrieving revision 1.13
retrieving revision 1.14
diff -u -r1.13 -r1.14
--- pgtable.h 2001/07/21 11:58:51 1.13
+++ pgtable.h 2001/07/31 17:33:26 1.14
@@ -1,4 +1,7 @@
/* (c) 2001 Vax Porting Project, atp, kenn, airlied */
+
+/* FIXME: this is a mess its confusing and badly documented
+ * - needs cleaning up atp jul 2001 */
#ifndef _VAX_MM_PGTABLE_H
#define _VAX_MM_PGTABLE_H
@@ -20,6 +23,9 @@
/* the pagelet stuff */
#include <asm/mm/pgtable_pagelet.h>
+/* TASK address space sizing, for sizing SPT and so forth */
+#include <asm/mm/task.h>
+
/*
* See Documentation/vax/memory.txt
* for up to date memory layout
@@ -42,23 +48,29 @@
/* entries is (1024 * 1024) >> PAGELET_SIZE */
#define SPT_HWPTES_IOMAP (SPT_MAX_IOMAP<<1)
#define SPT_PTES_IOMAP (SPT_MAX_IOMAP >> 2)
- /*>> (PAGE_SHIFT-10)) */
+ /*/>> (PAGE_SHIFT-10)) */
/* FIXME: (PAGE_SHIFT-10) is hardwired here to 2. asm bug in head.S */
#define SPT_HWPTES_VMALLOC (SPT_MAX_VMALLOC << 1)
#define SPT_PTES_VMALLOC (SPT_MAX_VMALLOC >> 2)
#define SPT_BASE ((unsigned long)( (swapper_pg_dir[2]).br ))
-/* Length register is in words.. shift left 2 to get bytes */
-#define SPT_SIZE ((unsigned long)( (swapper_pg_dir[2]).lr ) << 2)
+/* SPT_LEN can be an lvalue, and is the length in longwords */
+#define SPT_LEN ((unsigned long)( (swapper_pg_dir[2]).lr ))
+/* SPT_SIZE is the size in BYTES */
+#define SPT_SIZE ((unsigned long)( (swapper_pg_dir[2]).lr ) << 2)
-/* I'm not sure these are ok. I've only tested the results of
- * These in the interrupt guard page routine in arch/vax/kernel/interrupt.c
- * if they are 4k ptes then set_pte needs to be used on the
- * results,
+/*
+ * Macros to get page table addresses + offsets.
+ *
+ * if they are 4k ptes then set_pte needs to be used on the results,
*/
+
/* macro to get linear page table entry for a physical address */
-#define GET_HWSPTE_PHYS(x) ((hwpte_t *)(SPT_BASE + ((x) >> (PAGELET_SHIFT-SIZEOF_PTR_LOG2))))
-#define GET_SPTE_PHYS(x) ((pte_t *)(SPT_BASE + ((x) >> (PAGE_SHIFT-SIZEOF_PTE_LOG2))))
+#define GET_HWSPTE_PHYS(x) ((hwpte_t *)(SPT_BASE + ( ((x) >> PAGELET_SHIFT) << SIZEOF_PTR_LOG2) ))
+
+/* this is like it is for a reason - we need to wipe out the lower bits, the old
+ * calculation using page_shift-sizeof_pte_log2 gave the wrong answer sometimes */
+#define GET_SPTE_PHYS(x) ((pte_t *)(SPT_BASE + ( ((x) >> PAGE_SHIFT) << SIZEOF_PTE_LOG2)))
/* macro to get linear page table entry for a virtual address
(only works for addresses in S0 space) */
@@ -76,23 +88,39 @@
space waste precious SPTEs.
*/
- /* the previous definition of VMALLOC START relied on the
- * VAX phy memory being an exact 4k multiple,
- * my VAX has 7f1f hw-pages so isn't aligned on 4K
- * workout the VMALLOC_START from the vmallocmap_base and the
- * system base register.-
- */
+/* the previous definition of VMALLOC START relied on the
+ * VAX phy memory being an exact 4k multiple,
+ * my VAX has 7f1f hw-pages so isn't aligned on 4K
+ * workout the VMALLOC_START from the vmallocmap_base and the
+ * system base register.-
+ */
+
+/* VMALLOC_OFFSET is the gap between the end of mapping of physical
+ * ram and the start of VMALLOC ?? */
#define VMALLOC_OFFSET (SPT_MAX_IOMAP * 1024)
- /*#define VMALLOC_START ((unsigned long) high_memory + VMALLOC_OFFSET)*/
#define VMALLOC_START (PAGE_OFFSET+((vmallocmap_base-swapper_pg_dir[2].br)<<(PAGELET_SHIFT-2)))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END (VMALLOC_START + (SPT_MAX_VMALLOC * 1024))
+/* Start of task page table area - the variables this is based on
+ * are defined in asm-vax/mm/task.h */
+
+/* address in S0 space of where the process page table area starts and ends.*/
+#define TASKPTE_START PTE_TASK_ALIGN(VMALLOC_END)
+#define TASKPTE_END (TASKPTE_START+(PTE_TASK_SLOTSIZE * TASK_MAXUPRC))
+/* the number of hwptes to map this space */
+#define SPT_HWPTES_TASKPTE (((PTE_TASK_SLOTSIZE)>>PAGELET_SHIFT)*TASK_MAXUPRC)
+#define SPT_PTES_TASKPTE (SPT_HWPTES_TASKPTE >> 3)
+
+/* find a slot in the pagetable area for pgd (x), x is 0->TASK_MAXUPRC-1 */
+#define GET_TASKSLOT(x) (TASKPTE_START+((x) * PTE_TASK_SLOTSIZE))
+
+
/* page table for 0-4MB for everybody */
/* This is a c reference to the start of the system page table
* (see arch/vax/boot/head.S). The spt is initialised to cover physical
* memory by early boot code, based on VMB supplied information. Further
- * expansion happens later in the boot sequence */
+ * expansion happens later in the boot sequence in paging_init */
extern pte_t *pg0;
/* Number of SPTEs in system page table */
@@ -135,7 +163,7 @@
#define SWP_TYPE(x) (((x).val >> 1) & 0x3f)
#define SWP_OFFSET(x) ((x).val >> 8)
#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define pte_to_swp_entry(x) ((swp_entry_t) { (x).pte })
+#define pte_to_swp_entry(x) ((swp_entry_t) { pte_val(x) })
#define swp_entry_to_pte(x) ((pte_t) { (x).val })
/* Memory sizing. You'll need to #include <asm/rpb.h> to get
Index: tlb.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/tlb.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- tlb.h 2001/02/15 01:17:23 1.1
+++ tlb.h 2001/07/31 17:33:26 1.2
@@ -16,7 +16,6 @@
* VAX hw ref manual pg 216. can use mtpr to either invalidate single
* (TBIS) or all (TBIA) TLB entries. In addition LDPCTX will
* invalidate all process virtual address translations.
- * FIXME: adopting sledgehammer (trust me i know what I'm doing) approach
*/
#define __flush_tlb() \
|