From: Kenn H. <ke...@us...> - 2002-12-02 01:08:16
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/mm
In directory sc8-pr-cvs1:/tmp/cvs-serv19236/arch/vax/mm

Modified Files:
	pgalloc.c
Log Message:
2.5.5 uses struct page * pointers to refer to pages in process page
tables (to allow PTEs in HIGHMEM on x86). It also does away with the
pgtable_cache quicklists. Dealing with this triggered quite a few
changes in the VAX MM code.

While working on this, I found that our existing page table allocation
and management code wasn't very clear, so I've taken the opportunity to
clean things up. (A rough sketch of the reworked data structures
follows this list.)

o pgd_val(), __pgd(), pmd_val() and __pmd() are now gone. The MM core
  doesn't use them, and there is no reason why VAX-specific code should
  use them. After all, we have to be intimately familiar with the
  actual contents of pgd_t and pmd_t. pmd_val() and friends just
  obscure rather than clarify, and are very type-unsafe. The pmd_t
  structure now contains a pte_t * instead of an unsigned long. This
  allows us to remove a lot of ugly casts. (Look at pte_offset() for
  example.)

o PMDs are no longer allocated via a quicklist. They are allocated and
  freed directly via alloc_pages() and __free_pages().

o The job of invalidating the SPTE for a page in a process page table
  is now done by pmd_clear(). We used to do this in pte_free(), but we
  can't any more (and we shouldn't anyway), since pte_free() only gets
  a struct page * now, so we can't tell where in the process page
  table this page lies. pmd_clear() gets the address of the PMD entry,
  which then points to the PTE page that we need.

o PTE pages are no longer allocated via a quicklist, but via
  alloc_page() and __free_page().

o The old quicklist structure is now pgd_free_list, since this better
  describes its current function (holding previously allocated PGDs,
  which makes balance slot allocation simpler). pgd_t gets a .next
  member to simplify free list insertion and removal.

o pmd_populate() is now much simpler. We don't try to fill holes in
  page tables any more, so each call to pmd_populate() only has to
  deal with one page (which the core already allocates for us).

o Checks against WSMAX and STKMAX are now done in pte_alloc_one(),
  since the user address that we need is handed directly to
  pte_alloc_one(), whereas pmd_populate() would have to calculate it.
  The MM core can deal with pte_alloc_one() returning NULL, so calls
  to brk() or mmap() that would go beyond WSMAX will get ENOMEM rather
  than a SEGV.

o pmd_populate_S0() is gone, since it's a bug if anything tries to
  expand our system page table.
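Since the log message leans heavily on the new types, here is a minimal
sketch of the reworked structures as they can be inferred from the diff
below. Field types and ordering are assumptions (the authoritative
definitions live in the VAX asm headers), so treat this as an
illustration only:

	/* Sketch only: shapes inferred from the diff below, not the
	 * real header definitions. */
	typedef struct { unsigned long pte; } pte_t;	/* hw PTE (assumed) */

	typedef struct { pte_t *pte_page; } pmd_t;	/* a PMD entry now holds
							   a pointer to one page
							   of PTEs, not an
							   unsigned long */

	typedef struct pgd {
		pmd_t *pmd;		/* page middle directory for this region */
		unsigned long br;	/* P0BR/P1BR value for this region */
		unsigned long lr;	/* P0LR/P1LR value for this region */
		unsigned long slot;	/* base address of the task's balance slot */
		unsigned long segment;	/* 0 = P0, 1 = P1 */
		struct pgd *next;	/* free-list linkage (replaces the quicklist) */
	} pgd_t;

	/* Replacement for the old pgd quicklist: a singly-linked list of
	 * previously allocated PGDs plus the balance-slot high-water mark. */
	static struct {
		pgd_t *head;			/* first free PGD, or NULL */
		unsigned long size;		/* number of PGDs on the list */
		unsigned long slots_used;	/* balance slots handed out so far */
	} pgd_free_list;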
Index: pgalloc.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/mm/pgalloc.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- pgalloc.c	31 May 2002 01:58:38 -0000	1.5
+++ pgalloc.c	2 Dec 2002 01:08:08 -0000	1.6
@@ -13,8 +13,8 @@
  * License: GNU GPL
  */
 
-#include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -29,69 +29,75 @@
  * maintaining a bit map as we put pgds that are finished with
  * on our quicklists pool
  */
-pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
 {
-	unsigned long *ret;
+	pgd_t *pgd;
 
-	if ((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgd_cache_size--;
+	if ((pgd = pgd_free_list.head) != NULL) {
+		pgd_free_list.head = pgd->next;
+		pgd->next = NULL;
+		pgd_free_list.size--;
 	}
-	return (pgd_t *)ret;
+	return pgd;
 }
 
 /* allocate a pgd */
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	/* this is rather wasteful, as only a few longwords are
+	/* this is rather wasteful, as only a few longwords are
 	 * used in the entire 4kb page. Perhaps we can do something
 	 * smarter here by using the quicklists to pack the pgds into
 	 * a single page. */
-	pgd_t *ret;
-	unsigned long taskslot;
+	pgd_t *pgd;
+	unsigned long taskslot;
 
-	/* grab a pgd off the cache */
-	ret = get_pgd_fast();
+	/* grab a pgd off the cache */
+	pgd = get_pgd_fast();
 
-	if (!ret) {
-		/* check if we have run out of balance slots */
-		if (pgd_slots_used >= TASK_MAXUPRC) return (pgd_t *)NULL;
-		ret = (pgd_t *)__get_free_page(GFP_KERNEL);
-		taskslot = GET_TASKSLOT(pgd_slots_used);
-		/* one more slot used */
-		pgd_slots_used++;
-		ret[0].pmd = 0; /* These are blank */
-		ret[1].pmd = 0;
-
-	} else {
-		/* pgd_clear keeps this */
-		taskslot=ret->slot;
-	}
-
-	if (ret) {
-
-		/* set the values of the base + length registers */
-		ret[0].br = taskslot+ (P0PTE_OFFSET); /* skip the PMD */
-		ret[0].lr = 0x0;
-		/* this comes in handy later */
-		ret[0].slot = taskslot;
-		/* p1br points at what would be page mapping 0x40000000 (i.e. the _end_ of the slot)*/
-		ret[1].br = taskslot+ (P1PTE_OFFSET) - 0x800000 ;
-		/* This is the unmapped number of PTEs */
-		ret[1].lr = 0x40000;
-		ret[1].slot = taskslot;
+	if (!pgd) {
+		/* check if we have run out of balance slots */
+		if (pgd_free_list.slots_used >= TASK_MAXUPRC) return NULL;
+
+		pgd = kmalloc(sizeof(pgd_t) * PTRS_PER_PGD, GFP_KERNEL);
+		if (!pgd) return NULL;
+
+		memset(pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
+
+		taskslot = GET_TASKSLOT(pgd_free_list.slots_used);
+		/* one more slot used */
+		pgd_free_list.slots_used++;
+
+		pgd[0].pmd = 0; /* These are blank */
+		pgd[1].pmd = 0;
 
-		ret[0].segment = 0;
-		ret[1].segment = 1;
+	} else {
+		/* pgd_clear keeps this */
+		taskslot=pgd->slot;
+	}
 
-#ifdef VAX_MM_PGALLOC_DEBUG
-	printk(KERN_DEBUG "VAXMM:pgd_alloc: p0: %8lX, %8lX, p1: %8lX, %8lx, slot %ld, taskslot %8lx\n", ret[0].br, ret[0].lr, ret[1].br, ret[1].lr, pgd_slots_used-1, ret[0].slot);
+	if (pgd) {
+
+		/* set the values of the base + length registers */
+		pgd[0].br = taskslot+ (P0PTE_OFFSET); /* skip the PMD */
+		pgd[0].lr = 0x0;
+		/* this comes in handy later */
+		pgd[0].slot = taskslot;
+		/* p1br points at what would be page mapping 0x40000000 (i.e. the _end_ of the slot)*/
+		pgd[1].br = taskslot+ (P1PTE_OFFSET) - 0x800000 ;
+		/* This is the unmapped number of PTEs */
+		pgd[1].lr = 0x40000;
+		pgd[1].slot = taskslot;
+
+		pgd[0].segment = 0;
+		pgd[1].segment = 1;
+
+#ifdef VAX_MM_PGALLOC_DEBUG
+		printk(KERN_DEBUG "VAXMM:pgd_alloc: p0: %8lX, %8lX, p1: %8lX, %8lx, slot %ld, taskslot %8lx\n", pgd[0].br, pgd[0].lr, pgd[1].br, pgd[1].lr, pgd_free_list.slots_used-1, pgd[0].slot);
 #endif
-		/* set the s0 region, from the master copy in swapper_pg_dir */
-		memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+		/* set the s0 region, from the master copy in swapper_pg_dir */
+		memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 	}
-	return ret;
+	return pgd;
 }
 
 void pgd_clear(pgd_t * pgdp)
@@ -111,6 +117,79 @@
 }
 
+/* remap a given page to be part of a contiguous page table for p0/1 space
+ *
+ * This is like remap_pte_range in memory.c but VAX specific. It's called
+ * when we're creating part of a process page table. A new, blank page
+ * has just been allocated and we want to use this page to back part of
+ * the process page table. This will result in this new page being
+ * double-mapped. One mapping will be its 'identity' mapping where
+ * VIRT = (PHYS + PAGE_OFFSET). The other mapping will be in the middle
+ * of the process page table.
+ *
+ * s0addr is the address in S0 space that we need to remap the page
+ * pointed at by pte_page to.
+ *
+ * This is also called to remap the two pages in our page middle directory.
+ *
+ */
+static void remap_pgtable_page(void *s0addr, struct page *page)
+{
+	pte_t *s0pte;
+
+	/* sanity checks */
+	if (!s0addr) {
+		vaxpanic("VAXMM: null S0 address in remap_pgtable_page!\n");
+		return;
+	}
+	if (!page) {
+		vaxpanic("VAXMM: null pte_page in remap_pgtable_page!\n");
+		return;
+	}
+
+	/* locate the S0 pte that describes the page pointed to by s0addr */
+
+	s0pte = GET_SPTE_VIRT(s0addr);
+
+	/* is it already pointing somewhere? */
+#ifdef VAX_MM_PGALLOC_DEBUG
+	if (pte_present(*s0pte)) {
+		printk(KERN_DEBUG "VAXMM: S0 pte %8p already valid in remap_pgtable_page??\n",s0pte);
+	}
+	printk(KERN_DEBUG "VAXMM: mapping PTE page %p at %p\n", page, s0addr);
+#endif
+
+	/* zap the map */
+	set_pte(s0pte,mk_pte(page, __pgprot(_PAGE_VALID|_PAGE_KW)));
+
+	flush_tlb_all();
+}
+
+/* invalidate the S0 pte that was remapped to point at this page in the
+   process page table or the page middle directory */
+static void unmap_pgtable_page(void *page)
+{
+	pte_t *s0pte;
+
+	/* sanity checks */
+	if (!page) {
+		vaxpanic(KERN_ERR "VAXMM: null S0 address in unmap_pgtable_page!\n");
+		return;
+	}
+	/* locate the S0 pte that describes the page pointed to by pte_page */
+
+	s0pte = GET_SPTE_VIRT(page);
+
+#ifdef VAX_MM_PGALLOC_DEBUG
+	printk("unmap_pgtable_page: s0addr %p, s0pte %p\n", page, s0pte);
+#endif
+
+	set_pte(s0pte, pte_mkinvalid(*s0pte));
+	/* FIXME: these flush_tlb_alls need replacing with flush_tlb_8 */
+	flush_tlb_all();
+//	__flush_tlb_one(s0addr);
+}
+
 /* we used to call this routine pmd_alloc. At vn 2.4.3 pmd_alloc got removed
  * to include/linux/mm.h, and we have now pgd_populate and pmd_populate.
 * this is pgd_populate */
@@ -138,12 +217,12 @@
 	is_p1 = pgd->segment;
 
 #ifdef VAX_MM_PGALLOC_DEBUG
-	printk(KERN_DEBUG "VAXMM: Calling pgd_populate with (mm=%8p, pgd=%8p, pmd=%8lx\n",mm,pgd,pgd->pmd);
+	printk(KERN_DEBUG "VAXMM: Calling pgd_populate with (mm=%8p, pgd=%8p, pmd=%8p\n",mm,pgd,pgd->pmd);
 #endif
 	/* sanity check */
 	if (pgd->pmd) {
 #ifdef VAX_MM_PGALLOC_DEBUG
-		printk(KERN_DEBUG "VAXMM: Calling pmd_alloc on already allocated page (pgd=%8p,pmd=%8lx)\n",pgd,pgd->pmd);
+		printk(KERN_DEBUG "VAXMM: Calling pmd_alloc on already allocated page (pgd=%8p,pmd=%8p)\n",pgd,pgd->pmd);
 #endif
 		return;
 	}
@@ -153,7 +232,8 @@
 	s0addr += (is_p1) ? (P1PMD_OFFSET/sizeof(pmd_t)): (P0PMD_OFFSET/sizeof(pmd_t));
 
 	/* remap and clear the first page */
-	remap_and_clear_pte_page(s0addr, (pte_t *)pmd);
+	clear_page(pmd);
+	remap_pgtable_page(s0addr, virt_to_page(pmd));
 
 	/* this is the pointer to our pmd table. */
 	pgd->pmd=s0addr;
@@ -162,386 +242,152 @@
 	s0addr += (PAGE_SIZE/sizeof(pmd_t));
 	pmd += (PAGE_SIZE/sizeof(pmd_t));
 
-	remap_and_clear_pte_page(s0addr, (pte_t *)pmd);
+	clear_page(pmd);
+	remap_pgtable_page(s0addr, virt_to_page(pmd));
 
 #ifdef VAX_MM_PGALLOC_DEBUG
-	printk(KERN_DEBUG "VAXMM: pmd_alloc: pgd %8p, pgd->br %8lx, pgd->lr %8lx, \n\tpgd->pmd %8lx\n",pgd,pgd->br, pgd->lr, pgd->pmd);
+	printk(KERN_DEBUG "VAXMM: pmd_alloc: pgd %8p, pgd->br %8lx, pgd->lr %8lx, \n\tpgd->pmd %8p\n",pgd,pgd->br, pgd->lr, pgd->pmd);
 #endif
 
 	return;
 }
 
-/* This inverts the remapping done in remap_and_clear */
-pte_t *get_pageaddr_from_pte(pte_t *ptep)
-{
-	pte_t *addr;
-	pte_t *s0pte;
-
-	s0pte = GET_SPTE_VIRT(ptep);
-
-	addr = (pte_t *)(((pte_val(*s0pte)&PAGELET_PFN_MASK)<<PAGELET_SHIFT)|PAGE_OFFSET);
-	return addr;
-}
-
-/* free a 'pmd'. */
-void pmd_free(pmd_t *pmd)
-{
-	pmd_t *pmdp;
-	pmdp = pmd+(PAGE_SIZE/sizeof(pmd_t));
-#ifdef VAX_MM_PGALLOC_DEBUG
-	printk(KERN_DEBUG "VAXMM:pmd_free: freeing pmd %p\n",pmd);
-#endif
-	/* This is a double page block */
-	free_pmd_fast((pmd_t *)get_pageaddr_from_pte((pte_t *)pmd));
-	/* invalidate the S0 ptes that map this, one per page */
-	remap_pte_invalidate(pmd);
-	remap_pte_invalidate(pmdp);
-}
-
-/* remap a given page to be part of a contiguous page table for p0/1 space
- *
- * This is like remap_pte_range in memory.c but VAX specific
- *
- * s0addr is the address in S0 space that we need to remap the page
- * pointed at by pte_page to. We also clear the page pointed at by pte_page
- */
-void remap_and_clear_pte_page(pmd_t *s0addr, pte_t *pte_page)
-{
-
-	pte_t *s0pte;
-
-	/* sanity checks */
-	if (!s0addr) {
-		vaxpanic("VAXMM: null S0 address in remap_and_clear_pte_page!\n");
-		return;
-	}
-	if (!pte_page) {
-		vaxpanic("VAXMM: null pte_page in remap_and_clear_pte_page!\n");
-		return;
-	}
-
-	/* locate the S0 pte that describes the page pointed to by s0addr */
-
-	s0pte = GET_SPTE_VIRT(s0addr);
-
-	/* is it already pointing somewhere? */
-#ifdef VAX_MM_PGALLOC_DEBUG
-	if (pte_present(*s0pte)) {
-		printk(KERN_DEBUG "VAXMM: S0 pte %8p already valid in remap_and_clear_pte_page??\n",s0pte);
-	}
-#endif
-
-	clear_page(pte_page);
-
-	/* zap the map */
-	set_pte(s0pte,__mk_pte((unsigned long int)pte_page,__pgprot(_PAGE_VALID|_PAGE_KW)));
-//	print_pte(s0pte);
-	flush_tlb_all();
-
-}
-
-/* invalidate the S0 pte that was remapped to point at this page */
-void remap_pte_invalidate(pmd_t *s0addr)
-{
-	pte_t *s0pte;
-
-	/* sanity checks */
-	if (!s0addr) {
-		vaxpanic(KERN_ERR "VAXMM: null S0 address in remap_and_clear_pte_page!\n");
-		return;
-	}
-	/* locate the S0 pte that describes the page pointed to by s0addr */
-
-	s0pte = GET_SPTE_VIRT(s0addr);
-	set_pte(s0pte, pte_mkinvalid(*s0pte));
-	/* FIXME: these flush_tlb_alls need replacing with flush_tlb_8 */
-	flush_tlb_all();
-//	__flush_tlb_one(s0addr);
-}
-
 /*
- * Allocate a page, to hold page table entries for a user process.
+ * pmd_populate is called when the MM core wants to make a page in
+ * a process page table valid. The core has already allocated a
+ * page for this, and it now wants for us to use this page to
+ * hold PTEs for the range corresponding to the PMD entry pointed
+ * to by the pmd parameter.
  *
- * We grab a random page. The only catch is that it must be virtually
- * contiguous within the P0 or P1 page tables, which are held in S0
- * space. So, we remap the page table area in S0 space too.
+ * It's made a bit trickier by the fact that we need to work out if
+ * it's a P0 or P1 page table being populated. And then we also
+ * need to watch for this new page of PTEs being beyond the current
+ * P0LR or P1LR and extending P0/1LR as necessary.
  *
- * The idea here is that a given task has an area in kernel
- * address space that is TASK_WSMAX+TASK_STKSIZE in size (plus a few other bits).
- * This space is initially unmapped. If the process needs to expand its page table
- * (by mapping a page beyond the end of the relevant process page table)
- * It can as long as it doesnt go beyond TASK_WSMAX in P0 and TASK_STKSIZE in P1.
- * See asm-vax/mm/task.h for details.
+ * We used to check against WSMAX and STKMAX here, but we now do this
+ * check in pte_alloc_one(), where it's easier to check (since pte_alloc_one()
+ * is handed the user address).
 *
 * We make use of the knowledge that the pmd is a single block, to work back
 * to the pgd, which is where the base and length register values are held.
 *
- * pmd is a pointer to the slot in our bogus pmd table we want to use.
- *
- * free_pte_fast:
- * We may have to allocate many pages to hold ptes, as our page table is
- * not sparse. So, we just pop the pte we have been given by the upper
- * layers on the pte cache, and reallocate it as needed. Its not exactly
- * in tune with all the page table locking done in pte_alloc, but this is
- * square peg in a decidedly round hole, and the page table locking is one
- * of the corners.
- * We used to have our own pte_alloc_one. This is now called vax_pte_alloc_one.
- *
- * pte_alloc_kernel:
- * If we get handed a request to map something into S0 or S1 space, then
- * we dont do it. S0 page tables are fixed by the need to be contiguous
- * in PHYSICAL memory. On a running system, expansion of or copying of the
- * system page tables are almost impossible (its the "find me a couple of
- * megabytes of continuous physical ram" problem).
- *
 * FIXMES: page table locking.
 */
-void pmd_populate(struct mm_struct *mm, pmd_t * pmd, pte_t *pte)
-{
-	pgd_t *pgdp;
-	pmd_t *pmd_basep, *s0addr;
-	unsigned long int current_last_page,pgd_segment,target_page;
-	unsigned long int npages,pte_number,adjusted_address, is_p1;
-	pte_t *pte_page;
-	pmd_t *pmdi;
-	long int direction,ii;
-
-	pmd_basep = (pmd_t *)((unsigned long)pmd & PTE_TASK_MASK); /* base of the pmd */
-	/* see note above */
-	free_pte_fast(pte);
-
-#ifdef VAX_MM_PGALLOC_DEBUG
-	printk(KERN_DEBUG "VAXMM:pmd_populate: mm %8p, pgd %8lx, pmd %8lx, pmd_basep %8lx, pmd_val %8lx,\n",mm,mm->pgd,pmd,pmd_basep,pmd_val(*pmd));
-#endif
-	/*
-	 * This is the base of the pgd array. We need to check which pgd
-	 * entry we need. This is a bit clunky, but better than what was here
-	 * before.
-	 */
-	pgdp = mm->pgd;
+/* This function could be simpler if we used system page table
+   entries as PMD entries. */
+void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte_page)
+{
+	pmd_t *pmd_base;
+	unsigned long pmd_index;
+	unsigned int pspace;
+	pte_t *pte_addr;
+	unsigned long page_index;
+	pgd_t *pgd_entry;
+
+	/* Find the start of the page middle directory containing this PMD entry */
+	pmd_base = (pmd_t *)((unsigned long)pmd & PTE_TASK_MASK); /* base of the pmd */
+
+	/* The process page table page that we want to remap is at offset
+	   pmd_index into the relevant page middle directory */
+	pmd_index = pmd - pmd_base;
+
+	/* But, is it a P0 or a P1 PMD? Assume P0 until proven otherwise */
+	pspace = 0;
+
+	if (pmd_base == mm->pgd[0].pmd) {
+		pspace = 0;
+	} else if (pmd_base == mm->pgd[1].pmd) {
+		pspace = 1;
+	} else {
+		BUG();
+	}
 
-	if (!pgdp) {
-		printk(KERN_ERR "VAXMM: null pgd ptr in task mm struct %8p\n",mm);
-		goto give_segv;
-	}
-
-	/* decide on the segment we are in */
-	pgd_segment=0;
-	while ((pgdp[pgd_segment].pmd != pmd_basep)&&(pgd_segment<4)) {
-		pgd_segment++;
-	}
-
-	switch(pgd_segment) {
-	case 0:
-	case 1:
-		/* user segments */
-		is_p1=pgd_segment;
-		pgdp = &pgdp[pgd_segment];
-		break;
-	case 2:
-		/* pte_alloc_kernel?, should we free the pte here? */
-#ifdef VAX_MM_PGALLOC_DEBUG
-		printk(KERN_DEBUG "VAXMM:pmd_populate: kernel S0 segment pmd, %p, pte %8lx\n",pmd,pte);
-#endif
-		pgdp = &pgdp[2]; /* swapper_pg_dir */
-		pmd_populate_S0(pgdp, pmd);
-		return;
-	case 3:
-	default:
-		/* no match - something has gone very wrong. free ptes? send segv? */
-#ifdef VAX_MM_PGALLOC_DEBUG
-		printk(KERN_DEBUG "VAXMM:pmd_populate: kernel S1 segment pmd, %p, pte %8lx\n",pmd,pte);
-#endif
-		printk( KERN_ERR "VAXMM: pmd_populate: Attempting to set S1 pte. pmd, %p, pte %p\n",pmd,pte);
-		goto give_segv;
-		return;
-	}
-
-	/* make an adjusted address + calculate linear page table entry */
-
-	adjusted_address = (((pmd-pmd_basep))<<(PAGE_SHIFT+7));
-
-	/* enforce wsmax memory limits */
-	if (is_p1){
-		adjusted_address |= 0x40000000;
-		if (adjusted_address <= (PAGE_OFFSET-TASK_STKMAX)) {
-			printk(KERN_NOTICE "VAXMM: process %p exceeded TASK_STKMAX (%dMB) addr %8lx\n",current,(TASK_STKMAX>>20),adjusted_address);
-			goto give_segv;
-		}
-		pte_number = (adjusted_address - 0x40000000) >> PAGE_SHIFT;
-	} else {
-		if (adjusted_address >= (TASK_WSMAX)) {
-			printk(KERN_NOTICE "VAXMM: process %p exceeded TASK_WSMAX (%dMB) addr %8lx\n",current,(TASK_WSMAX>>20),adjusted_address);
-			goto give_segv;
-		}
-		pte_number = (adjusted_address>>PAGE_SHIFT);
-	}
+	pgd_entry = mm->pgd + pspace;
+
+	/* Now we can work out the system virtual address of the relevant
+	   page in the process page table */
+	pte_addr = (pte_t *)(pgd_entry->br + (pmd_index << PAGE_SHIFT));
 
-
-	/* check that the pte we want isnt already allocated */
-	if (is_p1) {
-		if ((pte_number) > (pgdp->lr)) {
-#ifdef VAX_MM_PGALLOC_DEBUG
-			printk(KERN_DEBUG "VAXMM: pte_alloc called on already allocated page (pte %8lx, lr %8lx)\n",pte_number,pgdp->lr);
-#endif
-			return;
-		}
-	} else {
-		if ((pte_number) < (pgdp->lr)) {
 #ifdef VAX_MM_PGALLOC_DEBUG
-			printk(KERN_DEBUG "VAXMM: pte_alloc called on already allocated page (pte %8lx, lr %8lx)\n",pte_number,pgdp->lr);
+	printk(KERN_DEBUG "VAXMM: pmd_populate: mm %p br %08lx lr %04lx pmd %p page %p pte_addr %p reg %d index %04lx\n",
+		mm, pgd_entry->br, pgd_entry->lr, pmd, pte_page, pte_addr, pspace, pmd_index);
 #endif
-			return;
-		}
-	}
-
-	/* find the current last page in the page table */
-	current_last_page = (pgdp->lr >> 7) - 1; /* 128 PTE's per page */
-	target_page = pmd_index(adjusted_address);
-
-	if (is_p1) {
-		npages = current_last_page - target_page + 1;
-		/* The s0 address of the current end page in the page table is
-		 * current_last_page * 128 ptes/page * 32 bytes/pte_t + base reg */
-
-		s0addr = (pmd_t *) ((((current_last_page)<<7)*BYTES_PER_PTE_T)+pgdp->br);
-		direction = -1;
-		pmdi = pmd_basep+(current_last_page);
-	} else {
-		npages = target_page - current_last_page;
-		s0addr = (pmd_t *) ((((current_last_page + 1)<<7)*BYTES_PER_PTE_T)+pgdp->br);
-		direction = 1;
-		pmdi = pmd_basep+(current_last_page + 1);
-	}
-
-	for (ii=0; ii<npages; ii++) {
-		if (!(pte_page=vax_pte_alloc_one(pmdi))) {
-			printk(KERN_ERR "VAXMM: Unable to expand process page table (pgd=%8p)\n",pgdp);
-			goto give_segv;
-		}
-
-		/* remap and clear this page */
-		remap_and_clear_pte_page(s0addr, pte_page);
-
-		/* set the pmd */
-		pmd_val(*pmdi) = (unsigned long) s0addr;
-
-		/* increment/decrement length register. */
-		pgdp->lr += (direction*128);
-		s0addr += (direction * (PAGE_SIZE>>2));
-		pmdi += direction;
-
-	}
-
-	/* if task == current, the hw registers need to be set */
-	if (is_p1) {
-		if (current->thread.pcb.p1br == pgdp->br) {
-			current->thread.pcb.p1lr = pgdp->lr * 8;
-			set_vaxmm_regs_p1(pgdp);
-
-		}
-	} else {
-		if (current->thread.pcb.p0br == pgdp->br) {
-			current->thread.pcb.p0lr = pgdp->lr * 8;
-			set_vaxmm_regs_p0(pgdp);
-		}
-	}
-
-	/* we flush tlb anways as we have touched S0 page tables */
-	flush_tlb_all();
-	return;
+	/* Double-map the newly-allocated page to this S0 address */
+	remap_pgtable_page(pte_addr, pte_page);
 
-give_segv:
-	printk(KERN_NOTICE "VAXMM pmd_populate: sending SIGSEGV to process %p\n",current);
-	force_sig(SIGSEGV,current);
-	return;
-
-} /* pte_alloc */
-
-/*
- * Special case of a system page table pmd entry in the S0 region.
- * These are never actually allocated, we just enter the existing
- * allocated page into the system pmd table. Or die horribly if its outside
- * the existing limits.
- */
-void pmd_populate_S0(pgd_t *pgd, pmd_t *pmd)
-{
-	pmd_t *pmd_base;
-	unsigned long int page_address, pte_number;
-	pte_t *spte;
-
-	pmd_base = (pmd_t *)pgd->pmd;
-	/* get physical page address */
-	page_address = (((pmd-pmd_base))<<(PAGE_SHIFT+7));
-	/* The length register for S0 is in pagelets */
-	pte_number = (page_address>>PAGELET_SHIFT);
-	/* convert to a virtual address */
-	page_address |= PAGE_OFFSET;
-
-	if (pte_number >= pgd->lr) {
-		printk(KERN_ERR "VAXMM: attempting to access out of bounds S0 page table entry (address %8lx, pte=%8lx, limit=%8lx)\n",page_address,pte_number, pgd->lr);
-		vaxpanic("VAXMM: bugcheck!\n");
-		return;
-	}
-
-	/* calculate the appropriate system page table entry */
-	spte = GET_SPTE_VIRT(page_address);
-	/* and enter it into the page table */
-	*pmd = __pmd(spte);
-
-	return;
-}
-/* allocate a page for the page table */
-/* This used to be called pte_alloc_one, until that name was used in the
- * arch independent code. See notes above pmd_populate for why this is here
- */
-pte_t * vax_pte_alloc_one(pmd_t *pmd)
-{
-	if (pmd_none(*pmd)) {
-		pte_t *page = pte_alloc_one_fast(NULL, 0);
-
-		if (!page) return pte_alloc_one(NULL, 0);
-
-		return page;
-	}
-	/* notreached */
-	return NULL;
-}
+	/* And point the PMD entry to this new mapping */
+	pmd->pte_page = pte_addr;
+
+	/* Now adjust the P0LR or P1LR if we we've mapped a new
+	   page at the end of the region */
+	/* Calculate how far into the region the newly-added page lives */
+	if (pspace == 0) {
+		/* For P0 space, we want to consider the top end of the new
+		   page of PTEs */
+		page_index = (pte_addr + PTRS_PER_PTE) - (pte_t *)pgd_entry->br;
+
+		if (pgd_entry->lr < page_index) {
+			pgd_entry->lr = page_index;
+		}
+	} else {
+		/* For P1 space, we want to consider the bottom end of the new
+		   page of PTEs */
+		page_index = pte_addr - (pte_t *)pgd_entry->br;
+
+		if (pgd_entry->lr > page_index) {
+			pgd_entry->lr = page_index;
+		}
+	}
 
-/* free the page after recovering the original address */
-void pte_free(pte_t *pte)
-{
-	free_pte_fast(get_pageaddr_from_pte(pte));
-	/* invalidate the S0 pte that maps this */
-	remap_pte_invalidate((pmd_t *)pte);
-}
+#ifdef VAX_MM_PGALLOC_DEBUG
+	printk(KERN_DEBUG "VAXMM: pmd_populate: new lr %04lx\n", pgd_entry->lr);
+#endif
 
-/* This is only ever called from do_pgt_cache, all the unmapping have been done
- * before the page has been placed on the pgt cache */
-void free_pte_slow(pte_t *pte)
-{
-	free_page((unsigned long int)pte);
+	/* If all this work is for the current process, then we need to
+	   update the hardware registers */
+	if (pspace == 0) {
+
+		if (current->thread.pcb.p0br == pgd_entry->br) {
+#ifdef VAX_MM_PGALLOC_DEBUG
+			printk(KERN_DEBUG "VAXMM: pmd_populate: updating hardware regs\n");
+#endif
+			current->thread.pcb.p0lr = pgd_entry->lr * 8;
+			set_vaxmm_regs_p0(pgd_entry);
+		}
+
+	} else {
+		if (current->thread.pcb.p1br == pgd_entry->br) {
+#ifdef VAX_MM_PGALLOC_DEBUG
+			printk(KERN_DEBUG "VAXMM: pmd_populate: updating hardware regs\n");
+#endif
+			current->thread.pcb.p1lr = pgd_entry->lr * 8;
+			set_vaxmm_regs_p1(pgd_entry);
+		}
+	}
 }
 
-/* Find an entry in the third-level page table.. */
-#ifdef VAX_MM_PGALLOC_DEBUG
-pte_t * pte_offset(pmd_t * dir, unsigned long address)
+/* The pmd argument points to a single PMD entry (which corresponds to
+   a single page in a process page table). We should invalidate the
+   mapping of this page in the process page table and then clear out
+   the PMD entry itself */
+void pmd_clear(pmd_t *pmd)
 {
-	unsigned long int offset;
-	offset = (pmd_val(*dir)+(((address>>PAGE_SHIFT)&(PTRS_PER_PTE-1))<<SIZEOF_PTE_LOG2));
-//	printk(KERN_DEBUG "VAXMM:pte_offset: pmd %8p, address %8lx, pte_offset %8lx\n",dir, address, offset);
-	return offset;
+	unmap_pgtable_page(pmd->pte_page);
+	pmd->pte_page = NULL;
 }
-#else
-pte_t * pte_offset(pmd_t * dir, unsigned long address)
+
+/* Find an entry in the third-level page table.. */
+pte_t * pte_offset(pmd_t *pmd, unsigned long address)
 {
-	return (pte_t *)(pmd_val(*dir)+(((address>>PAGE_SHIFT)&(PTRS_PER_PTE-1))<<SIZEOF_PTE_LOG2));
-}
+	pte_t *pte;
+	pte = pmd->pte_page + ((address>>PAGE_SHIFT) & (PTRS_PER_PTE-1));
+#ifdef VAX_MM_PGALLOC_DEBUG
+	printk(KERN_DEBUG "VAXMM:pte_offset: pmd %p, address %8lx, pte_pte %p\n", pmd, address, pte);
 #endif
-
+	return pte;
+}
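One piece the hunks above don't show is pte_alloc_one() itself, where
the WSMAX/STKMAX checks now live. As a rough idea of the shape of that
check, here is a sketch reconstructed from the limit tests in the old
pmd_populate() code — the function body below is an assumption, not the
committed source:

	/* Hypothetical sketch, not from this diff: enforce the per-task
	 * limits when the core asks for a new PTE page. Returning NULL
	 * here lets the MM core fail the brk()/mmap() with ENOMEM
	 * instead of a SEGV. */
	struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
	{
		if (address & 0x40000000) {
			/* P1 (stack) space grows down towards 0x40000000 */
			if (address <= (PAGE_OFFSET - TASK_STKMAX))
				return NULL;
		} else {
			/* P0 space grows up from 0 */
			if (address >= TASK_WSMAX)
				return NULL;
		}
		return alloc_page(GFP_KERNEL);
	}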