From: Andy P. <at...@us...> - 2001-07-31 17:28:30
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv32129/mm
Modified Files:
fault.c init.c pgalloc.c pgtable.c
Log Message:
New mm layer, start of signals implementation + other misc fixes
Index: fault.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/fault.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- fault.c 2001/06/26 18:59:00 1.6
+++ fault.c 2001/07/31 17:28:26 1.7
@@ -35,6 +35,7 @@
* it off to handle_mm_fault().
*
* reason:
+ * reason == 0 means a kernel translation-not-valid fault in the SPT.
* bit 0 = length violation
* bit 1 = fault during PPTE reference
* bit 2 = fault-on-read if 0, fault-on-write if 1
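
For reference, the reason code documented above decodes along these lines.
This is an illustrative sketch only: REASON_LENGTH and REASON_PPTE are
invented names (only REASON_WRITE actually appears in this file), and the
bit values simply follow the comment.

    #define REASON_LENGTH 0x1  /* bit 0: length violation */
    #define REASON_PPTE   0x2  /* bit 1: fault during PPTE reference */
    #define REASON_WRITE  0x4  /* bit 2: clear = read, set = write */

    static void describe_reason(unsigned int reason)
    {
            if (reason == 0) {
                    printk("kernel translation-not-valid fault in SPT\n");
                    return;
            }
            if (reason & REASON_LENGTH)
                    printk("length violation\n");
            if (reason & REASON_PPTE)
                    printk("fault during PPTE reference\n");
            printk("fault on %s\n",
                   (reason & REASON_WRITE) ? "write" : "read");
    }
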
@@ -57,8 +58,9 @@
struct mm_struct *mm = NULL;
unsigned fixup;
-#if VAX_MM_DEBUG
- printk("mmfault: fault at %8X\n", address);
+#ifdef VAX_MM_DEBUG
+ printk("mmfault: fault at %8x, pc %8x, psl %8x, reason %8x\n",info->addr, info->pc, info->psl, info->reason);
+ printk("mmfault:p0br %8lx, p0lr %8lx, p1br %8lx, p1lr %8lx\n",Xmfpr(PR_P0BR),Xmfpr(PR_P0LR),Xmfpr(PR_P1BR),Xmfpr(PR_P1LR));
#endif
/* This check, and the mm != NULL checks later, will be removed
later, once we actually have a 'current' properly defined */
@@ -72,20 +74,27 @@
goto no_context;
down (&mm->mmap_sem);
+
vma = find_vma(mm, address);
+
if (!vma)
goto bad_area;
+
if (vma->vm_start <= address)
goto good_area;
+
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
+
if (expand_stack(vma, address))
goto bad_area;
+
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
+
if (reason & REASON_WRITE) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
@@ -96,7 +105,8 @@
}
survive:
{
- int fault = handle_mm_fault(mm, vma, address, reason & REASON_WRITE);
+ int fault;
+ fault = handle_mm_fault(mm, vma, address, reason & REASON_WRITE);
if (!fault)
goto do_sigbus;
if (fault < 0)
@@ -111,10 +121,16 @@
*/
bad_area:
up(&mm->mmap_sem);
+ printk("\nStack dump\n");
+ hex_dump((void *)(regs->r1), 256);
+ show_regs(regs);
+ show_cpu_regs();
- if (user_mode(regs)) {
+ if (user_mode(regs)) {
printk("do_page_fault: sending SIGSEGV\n");
force_sig(SIGSEGV, current);
+ /* signals aren't implemented yet */
+ machine_halt();
return;
}
@@ -212,13 +228,14 @@
struct accvio_info *info = (struct accvio_info *)excep_info;
static int active;
+
/* This active flag is just a temporary hack to help catch
accvios in the page fault handler. It will have to
go eventually as it's not SMP safe */
if (!active) {
/* active = 1;*/
do_page_fault(info, regs);
-#if VAX_MM_DEBUG
+#ifdef VAX_MM_DEBUG
printk("finished fault\n");
#endif
active = 0;
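
As the comment says, the static 'active' flag is not SMP safe. If it has to
live longer than planned, one option is the kernel's atomic bitops; a sketch
only (accvio_active is an invented name, and the surrounding handler shape
is assumed):

    static unsigned long accvio_active;

    /* test_and_set_bit is atomic across CPUs, unlike the plain static int */
    if (!test_and_set_bit(0, &accvio_active)) {
            do_page_fault(info, regs);
            clear_bit(0, &accvio_active);
    }
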
Index: init.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/init.c,v
retrieving revision 1.14
retrieving revision 1.15
diff -u -r1.14 -r1.15
--- init.c 2001/06/26 18:59:00 1.14
+++ init.c 2001/07/31 17:28:26 1.15
@@ -14,15 +14,16 @@
#include <asm/pgalloc.h>
#include <asm/rpb.h>
-#undef VAX_INIT_DEBUG
+#define VAX_INIT_DEBUG
static unsigned long totalram_pages;
-unsigned long max_pfn; /* number of 4k pfns */
unsigned long empty_zero_page[PAGE_SIZE /
sizeof(unsigned long)]
__attribute__ ((__aligned__(PAGE_SIZE)));
+pte_t *pg0;
+
#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif
@@ -38,23 +39,51 @@
/*
* In other architectures, paging_init sets up the kernel's page tables.
- * In Linux/VAX, this is already done by the early boot code.
+ * In Linux/VAX, this is already done by the early boot code for
+ * physical RAM. In this routine we initialise the remaining areas
+ * of memory and the system page table.
*/
void __init paging_init()
{
- unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };
- unsigned int max_dma, max_norm;
+ hwpte_t *pte, *lastpte;
+ unsigned int ii;
- max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ /* sort out page table. */
+ pg0 = (pte_t *)SPT_BASE;
+
+ /* FIXME: This is where the VMALLOC stuff from head.S should go */
- /* max_pfn is the number of 4k ptes */
- if (max_pfn < max_dma) {
- zones_size[ZONE_DMA] = max_pfn;
- } else {
- zones_size[ZONE_DMA] = max_dma;
- zones_size[ZONE_NORMAL] = max_pfn - max_dma;
+ printk("VAXMM: Initialising mm layer for %d tasks of size %dMB\n",TASK_MAXUPRC,(TASK_WSMAX>>20));
+ /* Size the process page table slots. See asm/mm/task.h for details
+ * The _START and _END macros are from pgtable.h
+ * This is all in PAGELETS and HWPTES, hence no set_pte
+ */
+ pte = (hwpte_t *)GET_SPTE_VIRT(VMALLOC_END);
+ lastpte = (hwpte_t *)GET_SPTE_VIRT(TASKPTE_START);
+ ii=0;
+ /* clear this area */
+ while (pte<lastpte) {
+ *pte++ = __hwpte(0x00000000);
+ ii++;
+ }
+ /* this is stored in hwptes */
+ SPT_LEN += ii;
+
+ pte = (hwpte_t *)GET_SPTE_VIRT(TASKPTE_START);
+ lastpte = pte + SPT_HWPTES_TASKPTE;
+ /* clear this area */
+ while (pte<lastpte) {
+ *pte++ = __hwpte(0x00000000);
}
- free_area_init(zones_size);
+ /* this is stored in hwptes */
+ SPT_LEN += SPT_HWPTES_TASKPTE;
+ __mtpr(SPT_LEN, PR_SLR);
+ flush_tlb();
+
+ printk("VAXMM: system page table base %8lx, length (bytes) %8lx length (ptes) %8lx\n",SPT_BASE,SPT_SIZE,SPT_LEN);
+
+ /* clear the quicklists structure */
+ memset(&quicklists,0,sizeof(quicklists));
}
#if DEBUG_POISON
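
The PAGELETS/HWPTES distinction in paging_init is central to this port: the
VAX hardware page ("pagelet") is 512 bytes, so each 4kB Linux page is
described by 8 consecutive hardware PTEs. As a worked note (not code from
the patch):

    /* 1 Linux page  = 1 << PAGE_SHIFT    = 4096 bytes
     * 1 VAX pagelet = 1 << PAGELET_SHIFT = 512 bytes
     * hwptes per Linux page = 4096 / 512 = 8
     *
     * Hence SPT_LEN is counted in hwptes, and one pte_t (8 hwptes of
     * 4 bytes each) occupies 32 bytes - the BYTES_PER_PTE_T used in
     * pgalloc.c below. */
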
@@ -107,73 +136,6 @@
}
printk("Freeing unused kernel memory: %dk freed\n",
(&__init_end - &__init_begin) >> 10);
-}
-
-
-/* page table stuff */
-
-/* get_pte_kernel_slow. allocate a page of PTEs for the S0 pagetable.
- * See comments in include/asm/mm/pgalloc.h for get_pte_kernel.
- */
-pte_t *get_pte_kernel_slow(pmd_t * pmd, unsigned long address)
-{
- return (pte_t *) NULL;
-}
-
-/* just want a page here - quite simple */
-pte_t *get_pte_slow(pmd_t * pmd, unsigned long address)
-{
- unsigned long pte;
-
- pte = (unsigned long) __get_free_page(GFP_KERNEL);
-
- if (pte) {
- return (pte_t *) pte;
- } else {
- return NULL;
- }
- return NULL;
-}
-
-/* remap a given page to be part of a contiguous page table for p0/1 space */
-void remap_and_clear_pte_page(pgd_t *pagetable, pte_t *page, unsigned long pte_page)
-{
- unsigned long page_physical_address, page_virtual_address, page_s0_address;
- pte_t *S0pte;
- pte_t tpte;
- pte_t *newpage;
- pgd_t *sys_pgd = swapper_pg_dir+2;
- /* address in S0 space is page pointer */
- /* find the entry in the SPTE corresponding to this page */
-
- page_physical_address=__pa(page);
- S0pte = pte_offset(sys_pgd, page_physical_address);
-
- /* S0pte = (pte_t *)((__pa(page) >> PAGELET_SHIFT)+sys_pgd->br);*/
-
-#if VAX_INIT_DEBUG
- printk("remap: virt addr %p, pteval %8lX , S0pte %p, %8lX\n", page, pte_val(*page), S0pte, pte_val(*S0pte));
-#endif
- if (!pte_present(*S0pte))
- {
- unsigned long phy_addr;
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
- /* we have address in S0 of free page */
- /* need to remap this free page to the address in S0 where we are */
- /* Get the phy address of the page */
- //spte = pte_offset(sys_pgd, ((unsigned long)ret - PAGE_OFFSET));
- clear_page((void *)ret);
- pte_clear(S0pte);
- tpte = __mk_pte((void *)ret, (pgprot_t)PAGE_KERNEL);
-
- set_pte(S0pte, tpte);
- /* grab a free page */
- printk("S0 page invalid, %p %8lX\n", ret, pte_val(tpte));
- return;
- }
- /* zero out these pte's */
- // clear_page((void *) page);
- return;
}
void
Index: pgalloc.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/pgalloc.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- pgalloc.c 2001/06/27 09:17:41 1.1
+++ pgalloc.c 2001/07/31 17:28:26 1.2
@@ -3,23 +3,294 @@
* pgalloc.c Routines from include/asm-vax/mm/pgalloc.h
* Allocation of page table entries and so forth.
*
- * Copyright atp Jun 2001
+ * Copyright atp Jun 2001 - complete rewrite.
+ *
* GNU GPL
*/
#include <linux/sched.h>
#include <linux/mm.h>
-
+#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+
+#undef VAX_MM_PGALLOC_DEBUG
+
+/*
+ * Allocate a pgd. We don't, at present, need to worry about
+ * maintaining a bitmap, as pgds that are finished with go back
+ * on our quicklist pool.
+ */
+pgd_t *get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if ((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0;
+ pgtable_cache_size--;
+ }
+ return (pgd_t *)ret;
+}
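
For context, the matching fast-free path is presumably the usual 2.4
quicklist idiom (the real version lives in the pgalloc headers; this is a
sketch): the first longword of each free page links to the previous list
head, which is why get_pgd_fast() above clears ret[0] when it pops a page.

    static inline void free_pgd_fast(pgd_t *pgd)
    {
            /* link this page to the old head, then make it the head */
            *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
            pgd_quicklist = (unsigned long *) pgd;
            pgtable_cache_size++;
    }
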
+
+/* allocate a pgd */
+pgd_t *pgd_alloc(void)
+{
+ /* this is rather wasteful, as only a few longwords are
+ * used in the entire 4kb page. Perhaps we can do something
+ * smarter here by using the quicklists to pack the pgds into
+ * a single page. */
+ pgd_t *ret;
+ unsigned long taskslot;
+
+ /* grab a pgd off the cache */
+ ret = get_pgd_fast();
+
+ if (!ret) {
+ /* check if we have run out of balance slots */
+ if (pgd_slots_used >= TASK_MAXUPRC) return (pgd_t *)NULL;
+ ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+ taskslot = GET_TASKSLOT(pgd_slots_used);
+ /* one more slot used */
+ pgd_slots_used++;
+ ret[0].pmd = 0; /* These are blank */
+ ret[1].pmd = 0;
+
+ } else {
+ /* pgd_clear keeps this */
+ taskslot=ret->slot;
+ }
+
+ if (ret) {
+
+ /* set the values of the base + length registers */
+ ret[0].br = taskslot+ (P0PTE_OFFSET); /* skip the PMD */
+ ret[0].lr = 0x0;
+ /* this comes in handy later */
+ ret[0].slot = taskslot;
+ /* p1br points at what would be page mapping 0x40000000 */
+ ret[1].br = taskslot+ (P1PTE_OFFSET) - 0x800000 ;
+ /* This is the unmapped number of PTEs */
+ ret[1].lr = 0x40000;
+ ret[1].slot = taskslot;
+
+ ret[0].segment = 0;
+ ret[1].segment = 1;
+
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM:pgd_alloc: p0: %8lX, %8lX, p1: %8lX, %8lx, slot %ld, taskslot %8lx\n", ret[0].br, ret[0].lr, ret[1].br, ret[1].lr, pgd_slots_used-1, ret[0].slot);
+#endif
+ /* set the s0 region, from the master copy in swapper_pg_dir */
+ memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return ret;
+}
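
The magic constants in pgd_alloc fall out of the P1 geometry; worked
arithmetic (assuming 512-byte pagelets and 4-byte hwptes):

    /* P1 spans 0x40000000-0x7fffffff = 2^30 bytes = 0x40000 Linux pages,
     * hence the initial lr of 0x40000: every pte_t still unmapped.
     * In hardware terms that is 2^30 / 2^9 = 0x200000 pagelet PTEs,
     * at 4 bytes each = 0x800000 bytes of page table. p1br is therefore
     * biased back by 0x800000 so that it points at the (would-be) PTE
     * for address 0x40000000 itself. */
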
+
+void pgd_clear(pgd_t * pgdp)
+{
+ /* wipe a pgd structure carefully -- this is probably overkill */
+ pgdp->pmd=0;
+ pgdp->pmd2=0;
+
+ if (pgdp->segment) {
+ /* p1br points at what would be page mapping 0x40000000 */
+ pgdp->br = pgdp->slot+ (P1PTE_OFFSET) - 0x800000 ;
+ /* This is the unmapped number of PTEs */
+ pgdp->lr = 0x40000;
+ } else {
+ pgdp->br = pgdp->slot+ (P0PTE_OFFSET); /* skip the PMD */
+ pgdp->lr = 0x0;
+ }
+}
+
+/* bit of a null op - grab a page off the list - pmd_alloc does the real work */
+pmd_t *get_pmd_slow(void)
+{
+ return (pmd_t *) __get_free_page(GFP_KERNEL);
+}
+
+/* allocate a 'pmd'. In fact we will set it here too, to avoid confusion */
+pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
+{
+ /* we want to allocate two pages and remap them into the
+ * appropriate pmd slot in the taskslot. */
+
+ unsigned int is_p1;
+ pmd_t *pmdpage;
+ pmd_t *s0addr;
+ unsigned long adjaddr;
+
+ is_p1 = pgd->segment;
+ adjaddr = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+
+ /* sanity check */
+ /* FIXME: is this pgd_none? */
+ if (pgd->pmd) {
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM: Calling pmd_alloc on already allocated page (pgd=%8p,pmd=%8lx)\n",pgd,pgd->pmd);
+#endif
+ return (pmd_t *)pgd->pmd+adjaddr;
+ }
+
+ /* grab the first page */
+ pmdpage = get_pmd_fast();
+
+ if (!pmdpage) {
+ /* didn't work */
+ pmdpage = get_pmd_slow();
+ }
+ if (!pmdpage) {
+ /* didn't work again - give up */
+ printk(KERN_ERR "VAXMM: unable to allocate a pmd for pgd (%8p)\n",pgd );
+ return NULL;
+ }
+ /* calculate which bit of the page table area this page fits into */
+
+ s0addr = (pmd_t *)pgd->slot; /* base of the slot */
+
+ s0addr += (is_p1) ? (P1PMD_OFFSET/sizeof(pmd_t)): (P0PMD_OFFSET/sizeof(pmd_t));
+
+ /* remap and clear this page */
+ remap_and_clear_pte_page(s0addr, (pte_t *)pmdpage);
+
+ /* this is the first page in our pmd table. */
+ pgd->pmd=(unsigned long)s0addr;
+
+ /* now, do the same for the second */
+ pmdpage = get_pmd_fast();
+
+ if (!pmdpage) {
+ pmdpage = get_pmd_slow();
+ }
+ if (!pmdpage) {
+ printk(KERN_ERR "VAXMM: unable to allocate a pmd for pgd (%8p)\n",pgd );
+ free_pmd_fast(get_pageaddr_from_pte(pgd->pmd));
+ remap_pte_invalidate(pgd->pmd);
+ return NULL;
+ }
+
+ s0addr += (PAGE_SIZE/sizeof(pmd_t));
-/* misc comments FIXME: sort and discard */
+ remap_and_clear_pte_page(s0addr, (pte_t *)pmdpage);
+ /* and the second page in our pmd table. */
+ pgd->pmd2=(unsigned long)s0addr;
+
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM: pmd_alloc: pgd %8p, pgd->br %8lx, pgd->lr %8lx, \n\tpgd->pmd %8lx, pgd->pmd2 %8lx\n",pgd,pgd->br, pgd->lr, pgd->pmd,pgd->pmd2);
+#endif
+ /* pages allocated; now store the backpointer we need in pte_alloc
+ * in the last slot of the pmd block. Comfortably beyond where
+ * we expect to really be allocating memory. */
+ pmdpage = (pmd_t *)pgd->pmd;
+
+ /* FIXME: I _really_ don't like this flag. */
+ pmd_val(pmdpage[PGD_SPECIAL]) = (unsigned long)pgd | 0x1;
+
+ return (pmd_t *) pgd->pmd+adjaddr;
+}
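
The PGD_SPECIAL backpointer planted at the end of pmd_alloc is recovered
later in pte_alloc():

    /* mask off the low 'present' bit to get back to the owning pgd */
    pgdp = (pgd_t *)(pmd_val(pmd_basep[PGD_SPECIAL]) & ~0x1);
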
+
+/* This inverts the remapping done in remap_and_clear */
+unsigned long get_pageaddr_from_pte(pte_t *ptep)
+{
+ unsigned long addr;
+ pte_t *s0pte;
+
+ s0pte = GET_SPTE_VIRT(ptep);
+
+ addr = (unsigned long)(((pte_val(*s0pte)&PAGELET_PFN_MASK)<<PAGELET_SHIFT)|PAGE_OFFSET);
+// printk("get_pageaddr: ptep %p, spte %8lx, *spte %8lx, addr %8lx\n",ptep,s0pte,pte_val(*s0pte),addr);
+ return addr;
+}
+
+/* free a 'pmd'. */
+void pmd_free(pmd_t *pmd)
+{
+ pmd_t *pmdp;
+ pmdp = pmd+(PAGE_SIZE/4);
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM:pmd_free: freeing pmd %p, pmd2 %p\n",pmd,pmdp);
+#endif
+ free_pmd_slow(get_pageaddr_from_pte(pmdp));
+ free_pmd_slow(get_pageaddr_from_pte(pmd));
+ /* invalidate the S0 ptes that map this */
+ remap_pte_invalidate(pmd);
+ remap_pte_invalidate(pmdp);
+}
+
+/* remap a given page to be part of a contiguous page table for p0/1 space
+ *
+ * This is like remap_pte_range in memory.c but VAX specific
+ *
+ * s0addr is the address in S0 space that we need to remap the page
+ * pointed to by pte_page to. We also clear the page pointed at by pte_page.
+ */
+void remap_and_clear_pte_page(pmd_t *s0addr, pte_t *pte_page)
+{
+
+ pte_t *s0pte;
+
+ /* sanity checks */
+ if (!s0addr) {
+ printk(KERN_ERR "VAXMM: null S0 address in remap_and_clear_pte_page!\n");
+ return;
+ }
+ if (!pte_page) {
+ printk(KERN_ERR "VAXMM: null pte_page in remap_and_clear_pte_page!\n");
+ return;
+ }
+
+ /* locate the S0 pte that describes the page pointed to by s0addr */
+
+ s0pte = GET_SPTE_VIRT(s0addr);
+
+ /* is it already pointing somewhere? */
+#ifdef VAX_MM_PGALLOC_DEBUG
+ if (pte_present(*s0pte)) {
+ printk(KERN_DEBUG "VAXMM: S0 pte %8p already valid in remap_and_clear_pte_page??\n",s0pte);
+ }
+#endif
+
+ page_clear(pte_page);
+
+ /* zap the map */
+ set_pte(s0pte,__mk_pte(pte_page,__pgprot(_PAGE_VALID|_PAGE_KW)));
+// print_pte(s0pte);
+ flush_tlb_all();
+
+}
+
+/* invalidate the S0 pte that was remapped to point at this page */
+void remap_pte_invalidate(pmd_t *s0addr)
+{
+ pte_t *s0pte;
+
+ /* sanity checks */
+ if (!s0addr) {
+ printk(KERN_ERR "VAXMM: null S0 address in remap_and_clear_pte_page!\n");
+ return;
+ }
+ /* locate the S0 pte that describes the page pointed to by s0addr */
+
+ s0pte = GET_SPTE_VIRT(s0addr);
+
+ set_pte(s0pte, pte_mkinvalid(*s0pte));
+// print_pte(s0pte);
+ /* FIXME: these flush_tlb_alls need replacing with flush_tlb_8 */
+ flush_tlb_all();
+
+// __flush_tlb_one(s0addr);
+}
-/* It is called from mm/vmalloc.c in alloc_area_pmd()
+/*
+ * Notes on pte_alloc_kernel()
+ *
+ * It is called from mm/vmalloc.c in alloc_area_pmd()
*
- * It needs to be physically contiguous with the rest of the
+ * Any extension to the SPT needs to be physically contiguous with the rest of the
* system page table.
* Things to consider:
- * If you want to allocate a pagetable to hold a pte for a given
+ * If you want to allocate a page to hold a pte for a
* new S0 address (where this address is higher than SBR+SLR), then the
* new page table page must be allocated at the exact physical page
* that maps that S0 address. I.e.
@@ -27,17 +298,22 @@
* page containing the address
*
* PFN = ( (X-PAGE_OFFSET) >> PAGE_SHIFT)
+ *
* PTE address (physical memory) = (PFN*4)+SBR
* Physical page address = (PTE address) & ~(PAGE_MASK)
* SLR = ((Physical page address + (1<<PAGE_SHIFT)) - SBR) / 4.
*
- *
* If that physical page is already occupied, the contents must
* be ejected. This takes time, and can lead to deadlock, particularly
* if a dirty page needs to be written to disk/swap.
* Also, any physical pages that are in between the previous end of the
* system page table, and the new end (SBR+SLR) will need to be cleared,
* otherwise random rubbish will end up in the system page table.
+ *
+ * This requirement for a contiguous range of physical pages at a precise
+ * address is hard to meet on a system that has been running for any
+ * length of time.
+ *
* One way to do this is by "locking up the machine", moving the contents
* of the physical pages needed to pages on the freelist, rewriting the PTE's
* to point at the new physical pages, and then allocating and expanding
@@ -53,119 +329,210 @@
*
* - we still need to implement this ... linux still calls it ..
* - D.A. May 2001.
+ *
+ * - Indeed, however, the implementation is still not obvious to me.
+ * atp July 2001.
*/
+
+
pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
pgd_t *pgdptr = (pgd_t *)pmd;
/* note the lr in the system pgd is in PAGELETS.... shift it down to
give page view */
+//printk("pte_allock: pmd, %p, address %8lx\n",pmd,address);
if ((address >> PAGE_SHIFT) < (pgdptr->lr>>3))
return pte_offset(pmd, address);
else
return NULL;
}
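
Plugging illustrative numbers into the formulas from the comment block above
(PAGE_OFFSET 0x80000000 for S0, PAGE_SHIFT 12; the SBR value is made up, and
the 4-bytes-per-page PTE view follows the comment rather than the 32-byte
pte_t view):

    /* X (an S0 address)      = 0x80400000, SBR = 0x01000000
     * PFN                    = (X - PAGE_OFFSET) >> PAGE_SHIFT
     *                        = 0x400000 >> 12          = 0x400
     * PTE address (physical) = (PFN * 4) + SBR         = 0x01001000
     * Physical page address  = PTE address & PAGE_MASK = 0x01001000
     * new SLR                = ((0x01001000 + 0x1000) - SBR) / 4 = 0x800 */
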
+
/*
- * allocate a page, to hold page table entries.
- * for a user process.
+ * Allocate a page to hold page table entries for a user process.
+ *
* We grab a random page. The only catch is that it must be virtually
* contiguous within the P0 or P1 page tables, which are held in S0
* space. So, we remap the page table area in S0 space too.
+ *
+ * The idea here is that a given task has an area in kernel
+ * address space that is TASK_WSMAX+TASK_STKSIZE in size (plus a few other bits).
+ * This space is initially unmapped. If the process needs to expand its page table
+ * (by mapping a page beyond the end of the relevant process page table)
+ * It can as long as it doesnt go beyond TASK_WSMAX in P0 and TASK_STKSIZE in P1.
+ * See asm-vax/mm/task.h for details.
+ *
+ * We make use of the knowledge that the pmd is a single block, to work back
+ * to the pgd, which is where the base and length register values are held.
+ *
+ * pmd is a pointer to the slot in our bogus pmd table we want to use.
*/
+
+
pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
- unsigned long pte_number, pte_page, pte_page_offset;
- pgd_t *pgdptr = (pgd_t *)pmd;
- pte_t *newpte= NULL;
-
- printk("pte_alloc: address %ld\n", address);
- /* Deal with P0 vs P1 spaces */
- /* need to handle error cases */
- if (address < 0x40000000)
- {
- pte_number = (address >> PAGE_SHIFT);
- pte_page = (pte_number >> SIZEOF_PTE_LOG2);
- pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
- if ((pte_number) < (pgdptr->lr)) {
- newpte = pte_offset(pmd, address);
+ pgd_t *pgdp;
+ pmd_t *pmd_basep, *s0addr;
+ unsigned long int current_last_page,is_p1,target_page,npages,pte_number,adjusted_address;
+ pte_t *pte_page;
+ pmd_t *pmdi;
+ long int direction,ii;
+
+ pmd_basep = (pmd_t *)((unsigned long)pmd & PTE_TASK_MASK); /* base of the pmd */
+
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM:pte_alloc: pmd_basep %8lx, pmd %8lx, pmd_val %8lx, address %8lx, pmd_index %8lx\n",pmd_basep,pmd,pmd_val(*pmd),address,pmd_index(address));
+#endif
+ pgdp = (pgd_t *)(pmd_val(pmd_basep[PGD_SPECIAL]) & ~0x1);
+
+
+ /* FIXME: should test pgdp. this is pointless otherwise */
+ if ((!pgdp)||(pgd_none(*pgdp))) {
+ printk(KERN_ERR "VAXMM: Bad PGD (%8p, from pmd %8p) in pte_alloc\n",pgdp,pmd_basep);
+ return NULL;
+ }
+ if (pgdp->pmd != (unsigned long)pmd_basep) {
+ printk(KERN_ERR "VAXMM: Mismatched PGD (%8p, has pmd %8lx from pmd %8p) in pte_alloc\n",pgdp,pgdp->pmd,pmd_basep);
+ return NULL;
+ }
+
+ is_p1=pgdp->segment;
+
+// printk(KERN_DEBUG "ptealloc:pgd %8p, pgd->segment %ld, pgd->br %8lx, pgd->lr %lx, pgd->slot %8lx\n",pgdp,pgdp->segment,pgdp->br,pgdp->lr,pgdp->slot);
+
+ /* make an adjusted address + calculate linear page table entry */
+
+ adjusted_address = (((pmd-pmd_basep))<<(PAGE_SHIFT+7))+ (address&~PMD_MASK);
+ if (is_p1){
+ adjusted_address |= 0x40000000;
+ pte_number = (adjusted_address - 0x40000000) >> PAGE_SHIFT;
+ } else {
+ pte_number = (adjusted_address>>PAGE_SHIFT);
+ }
+
+ /* FIXME: check against WSMAX */
+
+ /* check that the pte we want isnt already allocated */
+ if (is_p1) {
+ if ((pte_number) > (pgdp->lr)) {
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM: pte_alloc called on already allocated page (pte %8lx, lr %8lx)\n",pte_number,pgdp->lr);
+#endif
+ return pte_offset(pmd, adjusted_address);
}
- }
- else
- {
- address-=0x40000000;
- pte_number = (address>>PAGE_SHIFT);
- pte_page = (pte_number >> SIZEOF_PTE_LOG2);
- pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
- if ((pte_number) > (pgdptr->lr)) {
- newpte = pte_offset(pmd, address);
+ } else {
+ if ((pte_number) < (pgdp->lr)) {
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM: pte_alloc called on already allocated page (pte %8lx, lr %8lx)\n",pte_number,pgdp->lr);
+#endif
+ return pte_offset(pmd, adjusted_address);
+ }
+ }
+
+ /* find the current last page in the page table */
+ current_last_page = (pgdp->lr >> 7) - 1; /* 128 PTE's per page */
+ target_page = pmd_index(adjusted_address);
+
+ if (is_p1) {
+ npages = current_last_page - target_page + 1;
+ /* The s0 address of the current end page in the page table is
+ * current_last_page * 128 ptes/page * 32 bytes/pte_t + base reg */
+
+ s0addr = (((current_last_page)<<7)*BYTES_PER_PTE_T)+pgdp->br;
+ direction = -1;
+ pmdi = pmd_basep+(current_last_page);
+ } else {
+ npages = target_page - current_last_page;
+ s0addr = (((current_last_page + 1)<<7)*BYTES_PER_PTE_T)+pgdp->br;
+ direction = 1;
+ pmdi = pmd_basep+(current_last_page + 1);
+ }
+ for (ii=0; ii<npages; ii++) {
+ if (!(pte_page=pte_alloc_one(pmdi))) {
+ printk(KERN_ERR "VAXMM: Unable to expand process page table (pgd=%8p)\n",pgdp);
+ return NULL;
}
+
+ /* remap and clear this page */
+ remap_and_clear_pte_page(s0addr, pte_page);
+
+ /* set the pmd */
+ pmd_val(*pmdi) = (unsigned long) s0addr;
+
+ /* increment/decrement length register. */
+ pgdp->lr += (direction*128);
+ s0addr += (direction * (PAGE_SIZE>>2));
+ pmdi += direction;
+
+ }
+
+ /* if task == current, the hw registers need to be set */
+ if (is_p1) {
+ if (current->thread.pcb.p1br == pgdp->br) {
+ current->thread.pcb.p1lr = pgdp->lr * 8;
+ set_vaxmm_regs_p1(pgdp);
+
+ }
+ } else {
+ if (current->thread.pcb.p0br == pgdp->br) {
+ current->thread.pcb.p0lr = pgdp->lr * 8;
+ set_vaxmm_regs_p0(pgdp);
+ }
+ }
+ /* we flush the tlb anyway, as we have touched the S0 page tables */
+ flush_tlb_all();
+ return pte_offset(pmd, adjusted_address);
+
+} /* pte_alloc */
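
To unpack the address arithmetic in pte_alloc: one pte_t is 8 hwptes = 32
bytes (BYTES_PER_PTE_T), so a 4kB page table page holds 128 pte_t entries
and maps 128 << PAGE_SHIFT = 512kB of address space, which is where the
<< (PAGE_SHIFT+7) shift and the lr >> 7 come from. A worked P0 example with
made-up numbers:

    /* pmd - pmd_basep (slot index) = 3
     * address & ~PMD_MASK          = 0x1000
     * adjusted_address             = (3 << 19) + 0x1000 = 0x181000
     * pte_number                   = 0x181000 >> 12     = 0x181
     * target_page = pmd_index      = 3
     * current_last_page            = (lr >> 7) - 1  (128 pte_t's/page) */
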
- }
+/* allocate a page for the page table */
+pte_t * pte_alloc_one(pmd_t *pmd)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *page = get_pte_fast();
+
+ if (!page) return get_pte_slow();
+ return page;
+ }
+ return (pte_t *) pmd_val(*pmd);
+}
- if (newpte)
- {
- remap_and_clear_pte_page((pgd_t *)pmd, newpte, pte_page);
- /* make sure a page in S0 space is mapped */
+/* free the page after recovering the original address */
+void pte_free(pte_t *pte)
+{
+ free_pte_fast(get_pageaddr_from_pte(pte));
+ /* invalidate the S0 pte that maps this */
+ remap_pte_invalidate(pte);
+}
- }
- return newpte;
+/* Find an entry in the third-level page table.. */
+pte_t * pte_offset(pmd_t * dir, unsigned long address)
+{
+ return (pte_t *)(pmd_val(*dir)+(((address>>PAGE_SHIFT)&(PTRS_PER_PTE-1))<<SIZEOF_PTE_LOG2));
+}
- /* old 2.2 code commented out for now .. in case it is of any use
- to anyone later - D.A. May 2001 */
-#if 0
- /* calculate the offset of the requested pte in this pagetable page */
- unsigned long pte_number, pte_page, pte_page_offset;
- pgd_t *pgdptr = (pgd_t *)pmd;
- unsigned long t2;
- pte_t *page;
-
- pte_number = (address >> PAGE_SHIFT);
- pte_page = (pte_number >> SIZEOF_PTE_LOG2);
- pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
-
- /* do we have a pgd base and length set ? */
- /* The p0br and p1br should be setup at process initialisation. */
- if (pmd_none(*pmd)) {
- printk("Got pmd_none\n");
- return NULL;
- }
-
- /* do we need to allocate another page(s) */
-
-
- /* this is already inside the page table region, and allocated */
- /* return the virtual address of the pte. (base registers for p0 and p1 */
- /* refer to virtual addresses in S0 space) so no _va() is needed */
- if (pte_number < (pgdptr->lr)) {
- return (pte_t *) (pgdptr->br + pte_number*BYTES_PER_PTE_T);
- }
-
- /* The address lies outside the current page table - by how much?*/
- /* FIXME: Maximum task size, defined by max p0 pagetable size */
-
- /* number of pages to allocate */
- t2 = ((pte_number - pgdptr->lr) >> SIZEOF_PTE_LOG2) + 1 ;
-
- while (t2--) {
- /* grab a page off the quicklist */
- page = get_pte_fast();
- /* or allocate a new one if none left */
- if (!page) page = get_pte_slow(pmd, address);
-
- /* run out of pages - out of memory */
- /* FIXME: is there anything else we need to do to signal failure?*/
- if (!page) {
- printk("%s:%d: run out of free pages building page table at pte %08lx.\n", __FILE__, __LINE__, pgdptr->lr);
- return NULL;
- }
- /* map this page into the S0 page table at the right point */
- remap_and_clear_pte_page((pgd_t *)pmd, page, pte_page);
-
- /* add this page of PTEs to the length register */
- /* FIXME: handle reverse P1 region... */
- pgdptr->lr += PTRS_PER_PTE;
- }
- return (pte_t *)( pgdptr->br + pte_number*BYTES_PER_PTE_T);
-#endif /* if 0 */
+/* get_pte_kernel_slow. allocate a page of PTEs for the S0 pagetable.
+ * See comments in include/asm/mm/pgalloc.h for get_pte_kernel.
+ */
+pte_t *get_pte_kernel_slow(pmd_t * pmd, unsigned long address)
+{
+ return (pte_t *) NULL;
}
+
+/* just want a page here - quite simple */
+/* bit of a null op - grab a page off the list - pte_alloc does the real work */
+pte_t *get_pte_slow(void)
+{
+ unsigned long pte;
+
+ pte = (unsigned long) __get_free_page(GFP_KERNEL);
+
+ if (pte) {
+ return (pte_t *) pte;
+ } else {
+ return NULL;
+ }
+ return NULL;
+}
+
Index: pgtable.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/pgtable.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- pgtable.c 2001/05/19 12:01:02 1.5
+++ pgtable.c 2001/07/31 17:28:26 1.6
@@ -1,13 +1,15 @@
/*
* $Id$
*
- * handle bits of VAX memory management
+ * handle bits of VAX memory management
+ * atp 2000
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
/* Note the factor of 8 in the length registers */
void set_page_dir(struct task_struct * tsk, pgd_t * pgdir)
@@ -22,10 +24,7 @@
/* This doesn't sound like a great idea... perhaps setipl(31) would
be a good idea here */
if (tsk == current) {
- __mtpr(tsk->thread.pcb.p0br, PR_P0BR );
- __mtpr(tsk->thread.pcb.p0lr, PR_P0LR );
- __mtpr(tsk->thread.pcb.p1br, PR_P1BR );
- __mtpr(tsk->thread.pcb.p1lr, PR_P1LR );
+ set_vaxmm_regs(pgdir);
flush_tlb_all();
}
}
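
set_vaxmm_regs() comes from asm/mmu_context.h and wraps the four __mtpr
calls it replaces here. Presumably something along these lines, with the
factor of 8 converting the pgd's Linux-page PTE counts into the pagelet PTE
counts the hardware length registers expect (a sketch based on the removed
lines and on pte_alloc above, not the actual header):

    static inline void set_vaxmm_regs(pgd_t *pgd)
    {
            __mtpr(pgd[0].br, PR_P0BR);
            __mtpr(pgd[0].lr * 8, PR_P0LR); /* 8 pagelet hwptes per page */
            __mtpr(pgd[1].br, PR_P1BR);
            __mtpr(pgd[1].lr * 8, PR_P1LR);
    }
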