From: Andy P. <at...@us...> - 2002-04-09 15:08:21
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv9090/vax/mm
Modified Files:
fault.c init.c pgalloc.c pgtable.c
Log Message:
synch 2.4.15 commit 14
Index: fault.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/fault.c,v
retrieving revision 1.13
retrieving revision 1.14
diff -u -r1.13 -r1.14
--- fault.c 11 Sep 2001 19:25:15 -0000 1.13
+++ fault.c 9 Apr 2002 13:50:55 -0000 1.14
@@ -47,7 +47,7 @@
#define REASON_WRITE (1<<2)
#undef VAX_MM_DEBUG
-#undef VAX_MM_DEBUG_USER_FAULTS
+#define VAX_MM_DEBUG_USER_FAULTS
static void
do_page_fault(struct accvio_info *info, struct pt_regs *regs)
@@ -74,7 +74,7 @@
if (in_interrupt() || !mm)
goto no_context;
- down (&mm->mmap_sem);
+ down_read (&mm->mmap_sem);
vma = find_vma(mm, address);
@@ -115,7 +115,7 @@
goto out_of_memory;
}
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
return;
/*
@@ -124,7 +124,7 @@
*/
bad_area:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
if (user_mode(regs)) {
#ifdef VAX_MM_DEBUG_USER_FAULTS
@@ -136,7 +136,7 @@
show_regs(regs);
show_cpu_regs();
printk("\nStack dump\n");
- hex_dump((void *)((regs->fp&~0xf), 512);
+ hex_dump( (void *)(regs->fp&~0xf), 512);
printk("do_page_fault: sending SIGSEGV\n");
#endif
@@ -180,7 +180,7 @@
schedule();
goto survive;
}
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
if (user_mode(regs))
{
printk("VM: killing process %s\n", current->comm);
@@ -189,7 +189,7 @@
goto no_context;
do_sigbus:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
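
The fault.c hunks above track the 2.4-series switch of mm->mmap_sem from a
plain semaphore to a reader-writer semaphore: the fault handler only reads
the VMA list, so every path (normal exit, bad_area, out_of_memory, do_sigbus)
now takes the lock shared. A minimal sketch of the resulting shape, using the
generic 2.4 MM calls rather than the literal VAX handler:

#include <linux/mm.h>
#include <linux/sched.h>

static void fault_locking_sketch(struct mm_struct *mm, unsigned long address)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);	/* shared: we only read the VMA list */
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		up_read(&mm->mmap_sem);	/* the bad_area exit above */
		return;			/* ...then signal delivery */
	}
	/* ...handle_mm_fault(mm, vma, address, write_access)... */
	up_read(&mm->mmap_sem);		/* the normal exit above */
}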
Index: init.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/init.c,v
retrieving revision 1.18
retrieving revision 1.19
diff -u -r1.18 -r1.19
--- init.c 11 Sep 2001 19:25:15 -0000 1.18
+++ init.c 9 Apr 2002 13:50:55 -0000 1.19
@@ -36,7 +36,7 @@
context in task 0. */
pgd_t swapper_pg_dir[PTRS_PER_PGD];
-pmd_t swapper_pm_dir[2048]; /* two pages for the kernel pmd */
+pmd_t swapper_pm_dir[2048] __attribute__ ((__aligned__(8192))); /* two pages for the kernel S0 pmd */
/*
* In other architectures, paging_init sets up the kernel's page tables.
@@ -54,16 +54,14 @@
/* set up pmd */
swapper_pg_dir[2].pmd = (unsigned int)swapper_pm_dir;
- swapper_pg_dir[2].pmd2 = (unsigned int)&swapper_pm_dir[1024];
- /* FIXME: I _really_ dont like this flag. */
- pmd_val(swapper_pm_dir[PGD_SPECIAL]) = (unsigned long)swapper_pg_dir | 0x1;
-
+
/* FIXME: This is where the VMALLOC stuff from head.S should go */
printk("VAXMM: Initialising mm layer for %d tasks of size %dMB\n",TASK_MAXUPRC,(TASK_WSMAX>>20));
- /* Size the process page table slots. See asm/mm/task.h for details
+ /*
+ * Size the process page table slots. See asm/mm/task.h for details
* The _START and _END macros are from pgtable.h
- * This is all in PAGELETS and HWPTES, hence no set_pte
+ * This is all in PAGELETS and HWPTES, hence no set_pte
*/
pte = (hwpte_t *)GET_SPTE_VIRT(VMALLOC_END);
lastpte = (hwpte_t *)GET_SPTE_VIRT(TASKPTE_START);
@@ -205,3 +203,30 @@
val->mem_unit = PAGE_SIZE;
return;
}
+
+/*
+ * atp Mar 2002.
+ * The pmd cache is now separate, as each pmd is a two-page block of
+ * memory, while ptes are one page each. I'd like to separate the pmd
+ * cache from pgtable_cache_size. Later.
+ */
+int do_check_pgt_cache(int low, int high)
+{
+ int freed = 0;
+
+ if(pgtable_cache_size > high) {
+ do {
+// if(pmd_quicklist) {
+// free_pmd_slow(pmd_alloc_one_fast(NULL, 0));
+// freed++;
+// freed++; /* a two page block */
+// }
+ if(pte_quicklist){
+ free_pte_slow(pte_alloc_one_fast(NULL,0));
+ freed++;
+ }
+ } while(pgtable_cache_size > low);
+ }
+ return freed;
+}
+
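
do_check_pgt_cache() has moved here from pgtable.c (see the last hunk below)
and now trims only the pte quicklist; the pmd half stays commented out until
the two-page pmd blocks get a cache of their own. For context, this is the
hook that kswapd drives between its low/high water marks in the generic 2.4
mm/vmscan.c, roughly as follows (a sketch of the caller, where
pgt_cache_water[] is a { low, high } pair private to vmscan.c):

	if (pgtable_cache_size > pgt_cache_water[0])
		do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);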
Index: pgalloc.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/pgalloc.c,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -r1.8 -r1.9
--- pgalloc.c 2 Sep 2001 20:50:16 -0000 1.8
+++ pgalloc.c 9 Apr 2002 13:50:55 -0000 1.9
@@ -42,7 +42,7 @@
}
/* allocate a pgd */
-pgd_t *pgd_alloc(void)
+pgd_t *pgd_alloc(struct mm_struct *mm)
{
/* this is rather wasteful, as only a few longwords are
* used in the entire 4kb page. Perhaps we can do something
@@ -98,7 +98,6 @@
{
/* wipe a pgd structure carefully -- this is probably overkill */
pgdp->pmd=0;
- pgdp->pmd2=0;
if (pgdp->segment) {
/* p1br points at what would be page mapping 0x40000000 */
@@ -111,109 +110,64 @@
}
}
-/* bit of a null op - grab a page off the list - pmd_alloc does the real work */
-pmd_t *get_pmd_slow(void)
-{
- return (pmd_t *) __get_free_page(GFP_KERNEL);
-}
-/* allocate a 'pmd'. In fact we will set it here too, to avoid confusion */
-pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
-{
- /* we want to allocate two pages and remap them into the
- * appropriate pmd slot in the taskslot. */
-
+/* We used to call this routine pmd_alloc. As of 2.4.3, pmd_alloc moved
+ * into include/linux/mm.h, and we now have pgd_populate and pmd_populate.
+ * This is pgd_populate. */
+void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ /*
+ * we have a two page block of memory, allocated via pmd_alloc by
+ * pmd_alloc_one. This needs to be remapped into the appropriate pmd
+ * section in the taskslot in S0 space.
+ * recap: The taskslot holds all the ptes in a contiguous section of
+ * S0 address space. The amounts of virtual address space are
+ * mapped out at boot time, from the constants in asm-vax/mm/task.h
+ * The first four pages of this region are "pmd" pages, used as
+ * the bookkeeping information, which is normally done by the pgd
+ * page on 32bit processors. But we have hijacked the pgds to
+ * represent the four VAX memory segments, and to hold all the
+ * base/length register information and other related stuff.
+ *
+ * updated atp Mar 2002. pgd_populate, remove PGD_SPECIAL botch.
+ */
unsigned int is_p1;
- pmd_t *pmdpage;
pmd_t *s0addr;
- unsigned long adjaddr;
+
is_p1 = pgd->segment;
- adjaddr = pmd_index(address);
+
#ifdef VAX_MM_PGALLOC_DEBUG
- printk(KERN_DEBUG "VAXMM: Calling pmd_alloc with (pgd=%8p,pmd=%8lx,address %8lx, adjaddr %8lx)\n",pgd,pgd->pmd,address,adjaddr);
+ printk(KERN_DEBUG "VAXMM: Calling pgd_populate with (mm=%8p, pgd=%8p, pmd=%8lx\n",mm,pgd,pgd->pmd);
#endif
-
/* sanity check */
- /* FIXME: is this pgd_none? */
if (pgd->pmd) {
#ifdef VAX_MM_PGALLOC_DEBUG
printk(KERN_DEBUG "VAXMM: Calling pmd_alloc on already allocated page (pgd=%8p,pmd=%8lx)\n",pgd,pgd->pmd);
#endif
- return (pmd_t *)pgd->pmd+adjaddr;
- }
-
- /* grab the first page */
- pmdpage = get_pmd_fast();
-
- if (!pmdpage) {
- /* didnt work */
- pmdpage = get_pmd_slow();
+ return;
}
- if (!pmdpage) {
- /* didnt work again - give up */
- printk(KERN_ERR "VAXMM: unable to allocate a pmd for pgd (%8p)\n",pgd );
- return NULL;
- }
- /* calculate which bit of the page table area this page fits into */
- s0addr = (pmd_t *)pgd->slot; /* base of the slot */
-
+ /* calculate which bit of the page table area this page fits into */
+ s0addr = (pmd_t *)pgd->slot; /* base of the slot */
s0addr += (is_p1) ? (P1PMD_OFFSET/sizeof(pmd_t)): (P0PMD_OFFSET/sizeof(pmd_t));
- /* remap and clear this page */
- remap_and_clear_pte_page(s0addr, (pte_t *)pmdpage);
+ /* remap and clear the first page */
+ remap_and_clear_pte_page(s0addr, (pte_t *)pmd);
- /* this is the first page in our pmd table. */
+ /* this is the pointer to our pmd table. */
pgd->pmd=(unsigned long)s0addr;
- /* now, do the same for the second */
- pmdpage = get_pmd_fast();
-
- if (!pmdpage) {
- pmdpage = get_pmd_slow();
- }
- if (!pmdpage) {
- printk(KERN_ERR "VAXMM: unable to allocate a pmd for pgd (%8p)\n",pgd );
- free_pmd_fast((pmd_t *)get_pageaddr_from_pte((pte_t *)pgd->pmd));
- remap_pte_invalidate((pmd_t *)pgd->pmd);
- return NULL;
- }
-
+ /* this is a two page block of memory */
s0addr += (PAGE_SIZE/sizeof(pmd_t));
+ pmd += (PAGE_SIZE/sizeof(pmd_t));
- remap_and_clear_pte_page(s0addr, (pte_t *)pmdpage);
- /* and the second page in our pmd table. */
- pgd->pmd2=(unsigned long)s0addr;
+ remap_and_clear_pte_page(s0addr, (pte_t *)pmd);
#ifdef VAX_MM_PGALLOC_DEBUG
- printk(KERN_DEBUG "VAXMM: pmd_alloc: pgd %8p, pgd->br %8lx, pgd->lr %8lx, \n\tpgd->pmd %8lx, pgd->pmd2 %8lx\n",pgd,pgd->br, pgd->lr, pgd->pmd,pgd->pmd2);
+ printk(KERN_DEBUG "VAXMM: pmd_alloc: pgd %8p, pgd->br %8lx, pgd->lr %8lx, \n\tpgd->pmd %8lx\n",pgd,pgd->br, pgd->lr, pgd->pmd);
#endif
- /* pages allocated, now store the backpointer we need in pte_alloc
- * in the last slot in the address slot. Comfortably beyond where
- * we expect to really be allocating memory. */
- pmdpage = (pmd_t *)pgd->pmd;
-
- /* FIXME: I _really_ dont like this flag. */
- pmd_val(pmdpage[PGD_SPECIAL]) = (unsigned long)pgd | 0x1;
-
- return (pmd_t *) pgd->pmd+adjaddr;
-}
-
-/* the kernel pmd is in mm/init.c */
-extern pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
-{
- /* we rely on init.c to set up the pmd pointers in the pgd for us.
- * This leaves us just a pmd_offset sort of job */
-#ifdef VAX_MM_PGALLOC_DEBUG
- printk(KERN_DEBUG "VAXMM: pmd_alloc_kernel: pgd %8p, pgd->pmd %8lx, address %8lx\n",pgd,pgd->pmd,address);
-#endif
- return pmd_offset(pgd, address);
-}
-
-extern void pmd_free_kernel(pmd_t *pmd)
-{
- return;
+ return;
}
/* This inverts the remapping done in remap_and_clear */
@@ -232,13 +186,13 @@
void pmd_free(pmd_t *pmd)
{
pmd_t *pmdp;
- pmdp = pmd+(PAGE_SIZE/4);
+ pmdp = pmd+(PAGE_SIZE/sizeof(pmd_t));
#ifdef VAX_MM_PGALLOC_DEBUG
- printk(KERN_DEBUG "VAXMM:pmd_free: freeing pmd %p, pmd2 %p\n",pmd,pmdp);
+ printk(KERN_DEBUG "VAXMM:pmd_free: freeing pmd %p\n",pmd);
#endif
- free_pmd_fast((pmd_t *)get_pageaddr_from_pte((pte_t *)pmdp));
+ /* This is a double page block */
free_pmd_fast((pmd_t *)get_pageaddr_from_pte((pte_t *)pmd));
- /* invalidate the S0 ptes that map this */
+ /* invalidate the S0 ptes that map this, one per page */
remap_pte_invalidate(pmd);
remap_pte_invalidate(pmdp);
}
@@ -306,93 +260,6 @@
}
/*
- * Notes on pte_alloc_kernel()
- *
- * It is called from mm/vmalloc.c in alloc_area_pmd()
- *
- * Any extension to the SPT needs to be physically contiguous with the rest of the
- * system page table.
- * Things to consider:
- * If you want to allocate a page to hold a pte for a
- * new S0 address, (where this address is higher than SBR+SLR) then that
- * new page table page must be allocated at the exact physical page
- * That maps that S0 address. I.e.
- * To map a virtual address X you need to allocate the physical
- * page containing the address
- *
- * PFN = ( (X-PAGE_OFFSET) >> PAGE_SHIFT)
- *
- * PTE address (physical memory) = (PFN*4)+SBR
- * Physical page address = (PTE address) & ~(PAGE_MASK)
- * SLR = ((Physical page address + (1<<PAGE_SHIFT)) - SBR) / 4.
- *
- * If that physical page is already occupied, the contents must
- * be ejected. This takes time, and can lead to deadlock, particularly
- * if a dirty page needs to be written to disk/swap.
- * Also, any physical pages that are in between the previous end of the
- * system page table, and the new end (SBR+SLR) will need to be cleared,
- * otherwise random rubbish will end up in the system page table.
- *
- * This requirement of a contiguous range of physical pages, at a precise
- * address range is hard to meet on a system that has been running for any
- * length of time.
- *
- * One way to do this by "locking up the machine", moving the contents
- * of the physical pages needed to pages on the freelist, rewriting the PTE's
- * to point at the new physical pages, and then allocating and expanding
- * the system page table. No scheduling allowed. Also how you locate all
- * of the references to a given physical page so that you can rewrite them
- * without conducting a thorough search of all page tables (possibly
- * incurring page faults for those P0 page tables that have been swapped out)
- * is not clear.
- *
- *
- * - At present I'm ducking this. We fix the S0 page table size at
- * boot time, and disallow dynamic expansion. atp Feb 2001.
- *
- * - we still need to implement this ... linux still calls it ..
- * - D.A. May 2001.
- *
- * - Indeed, however, the implementation is still not obvious to me.
- * atp July 2001.
- * - let me qualify that. pte_alloc_kernel is called infrequently.
- * Mostly by the VMALLOC stuff, which already has a VMALLOC_END check.
- * so the only reason for calling this is if we are in the middle of
- * some operation, outside of the vmalloc family, mapping system space.
- * Hence the current implementation suffices, and I cant see a reason
- * to implement an expandable s0 page table.
- */
-
-
-pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
-{
- pgd_t *pgdptr = (pgd_t *)&swapper_pg_dir[2];
- unsigned long int adjusted_address;
- unsigned long int spt_entry;
- pte_t *spte;
-
- adjusted_address = ((pmd - (pmd_t *)(pgdptr->pmd))<<(PAGE_SHIFT+7)) + (address&~PMD_MASK);
-#ifdef VAX_MM_PGALLOC_DEBUG
- printk(KERN_DEBUG "VAXMM:pte_alloc_kernel: pmd, %p, address %8lx, adjaddr %8lx\n",pmd,address,adjusted_address);
-#endif
-
- /* note the lr in the system pgd is in PAGELETS */
- if (((adjusted_address) >> PAGELET_SHIFT) < (pgdptr->lr)) {
- /* fill in any bits missing. Perhaps we should do this when we set up the
- * SPT in init.c just to be consistent */
- if (pmd_val(*pmd)==0) {
- spt_entry = (pmd - (pmd_t *)(pgdptr->pmd))<< (PAGE_SHIFT+7) | PAGE_OFFSET;
- spte = GET_SPTE_VIRT(spt_entry);
- pmd_val(*pmd) = (unsigned long int)spte;
- }
- return pte_offset(pmd, adjusted_address);
- }
- else
- return NULL;
-}
-
-
-/*
* Allocate a page, to hold page table entries for a user process.
*
* We grab a random page. The only catch is that it must be virtually
@@ -410,43 +277,92 @@
* to the pgd, which is where the base and length register values are held.
*
* pmd is a pointer to the slot in our bogus pmd table we want to use.
+ *
+ * free_pte_fast:
+ * We may have to allocate many pages to hold ptes, as our page table is
+ * not sparse. So, we just pop the pte we have been given by the upper
+ * layers on the pte cache, and reallocate it as needed. Its not exactly
+ * in tune with all the page table locking done in pte_alloc, but this is
+ * square peg in a decidedly round hole, and the page table locking is one
+ * of the corners.
+ * We used to have our own pte_alloc_one. This is now called vax_pte_alloc_one.
+ *
+ * pte_alloc_kernel:
+ * If we get handed a request to map something into S0 or S1 space, then
+ * we don't do it. S0 page tables are fixed by the need to be contiguous
+ * in PHYSICAL memory. On a running system, expanding or copying the
+ * system page tables is almost impossible (it's the "find me a couple of
+ * megabytes of contiguous physical RAM" problem).
+ *
+ * FIXMES: page table locking.
*/
-
-pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
+void pmd_populate(struct mm_struct *mm, pmd_t * pmd, pte_t *pte)
{
pgd_t *pgdp;
pmd_t *pmd_basep, *s0addr;
- unsigned long int current_last_page,is_p1,target_page,npages,pte_number,adjusted_address;
+ unsigned long int current_last_page,pgd_segment,target_page;
+ unsigned long int npages,pte_number,adjusted_address, is_p1;
pte_t *pte_page;
pmd_t *pmdi;
long int direction,ii;
pmd_basep = (pmd_t *)((unsigned long)pmd & PTE_TASK_MASK); /* base of the pmd */
+ /* see note above */
+ free_pte_fast(pte);
+
#ifdef VAX_MM_PGALLOC_DEBUG
- printk(KERN_DEBUG "VAXMM:pte_alloc: pmd_basep %8lx, pmd %8lx, pmd_val %8lx, address %8lx, pmd_index %8lx\n",pmd_basep,pmd,pmd_val(*pmd),address,pmd_index(address));
+ printk(KERN_DEBUG "VAXMM:pmd_populate: mm %8p, pgd %8lx, pmd %8lx, pmd_basep %8lx, pmd_val %8lx,\n",mm,mm->pgd,pmd,pmd_basep,pmd_val(*pmd));
#endif
- pgdp = (pgd_t *)(pmd_val(pmd_basep[PGD_SPECIAL]) & ~0x1);
-
-
- /* FIXME: should test pgdp. this is pointless otherwise */
- if ((!pgdp)||(pgd_none(*pgdp))) {
- printk(KERN_ERR "VAXMM: Bad PGD (%8p, from pmd %8p) in pte_alloc\n",pgdp,pmd_basep);
+ /*
+ * This is the base of the pgd array. We need to work out which pgd
+ * entry this pmd belongs to. This is a bit clunky, but better than what was here
+ * before.
+ */
+ pgdp = mm->pgd;
+
+ if (!pgdp) {
+ printk(KERN_ERR "VAXMM: null pgd ptr in task mm struct %8p\n",mm);
goto give_segv;
}
- if (pgdp->pmd != (unsigned long)pmd_basep) {
- printk(KERN_ERR "VAXMM: Mismatched PGD (%8p, has pmd %8lx from pmd %8p) in pte_alloc\n",pgdp,pgdp->pmd,pmd_basep);
- goto give_segv;
+
+ /* decide on the segment we are in */
+ pgd_segment=0;
+ while ((pgd_segment < 4) && (pgdp[pgd_segment].pmd != (unsigned long)pmd_basep)) {
+ pgd_segment++;
}
- is_p1=pgdp->segment;
+ switch(pgd_segment) {
+ case 0:
+ case 1:
+ /* user segments */
+ is_p1=pgd_segment;
+ pgdp = &pgdp[pgd_segment];
+ break;
+ case 2:
+ /* pte_alloc_kernel? Should we free the pte here? */
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM:pmd_populate: kernel S0 segment pmd, %p, pte %8lx\n",pmd,pte);
+#endif
+ pgdp = &pgdp[2]; /* swapper_pg_dir */
+ pmd_populate_S0(pgdp, pmd);
+ return;
+ case 3:
+ default:
+ /* no match - something has gone very wrong. free ptes? send segv? */
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM:pmd_populate: kernel S1 segment pmd, %p, pte %8lx\n",pmd,pte);
+#endif
+ printk( KERN_ERR "VAXMM: pmd_populate: Attempting to set S1 pte. pmd, %p, pte %p\n",pmd,pte);
+ goto give_segv;
+ return;
+ }
/* make an adjusted address + calculate linear page table entry */
- adjusted_address = (((pmd-pmd_basep))<<(PAGE_SHIFT+7))+ (address&~PMD_MASK);
-
-
+ adjusted_address = (((pmd-pmd_basep))<<(PAGE_SHIFT+7));
+
/* enforce wsmax memory limits */
if (is_p1){
adjusted_address |= 0x40000000;
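
The free_pte_fast() call at the top of pmd_populate() is the trick described
in the long comment above: the page handed down by the generic pte_alloc()
goes straight back onto the pte quicklist, and vax_pte_alloc_one() pops pages
off it again as the contiguous table is grown. Under the usual 2.4 quicklist
convention (next pointer kept in the page's first word), the push/pop pair
looks roughly like this; a sketch of the convention, not the VAX port's
literal helpers:

static inline void free_pte_fast_sketch(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long)pte_quicklist;
	pte_quicklist = (unsigned long *)pte;
	pgtable_cache_size++;
}

static inline pte_t *pte_alloc_one_fast_sketch(void)
{
	unsigned long *ret = pte_quicklist;

	if (ret) {
		pte_quicklist = (unsigned long *)*ret;
		ret[0] = 0;		/* wipe the list link before reuse */
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}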
@@ -470,14 +386,14 @@
#ifdef VAX_MM_PGALLOC_DEBUG
printk(KERN_DEBUG "VAXMM: pte_alloc called on already allocated page (pte %8lx, lr %8lx)\n",pte_number,pgdp->lr);
#endif
- return pte_offset(pmd, adjusted_address);
+ return;
}
} else {
if ((pte_number) < (pgdp->lr)) {
#ifdef VAX_MM_PGALLOC_DEBUG
printk(KERN_DEBUG "VAXMM: pte_alloc called on already allocated page (pte %8lx, lr %8lx)\n",pte_number,pgdp->lr);
#endif
- return pte_offset(pmd, adjusted_address);
+ return;
}
}
@@ -499,8 +415,9 @@
direction = 1;
pmdi = pmd_basep+(current_last_page + 1);
}
+
for (ii=0; ii<npages; ii++) {
- if (!(pte_page=pte_alloc_one(pmdi))) {
+ if (!(pte_page=vax_pte_alloc_one(pmdi))) {
printk(KERN_ERR "VAXMM: Unable to expand process page table (pgd=%8p)\n",pgdp);
goto give_segv;
}
@@ -531,29 +448,70 @@
set_vaxmm_regs_p0(pgdp);
}
}
+
/* we flush tlb anways as we have touched S0 page tables */
flush_tlb_all();
- return pte_offset(pmd, adjusted_address);
+ return;
give_segv:
- printk(KERN_NOTICE "VAXMM pte_alloc: sending SIGSEGV to process %p\n",current);
+ printk(KERN_NOTICE "VAXMM pmd_populate: sending SIGSEGV to process %p\n",current);
force_sig(SIGSEGV,current);
- return NULL;
+ return;
} /* pte_alloc */
+/*
+ * Special case of a system page table pmd entry in the S0 region.
+ * These are never actually allocated; we just enter the existing
+ * allocated page into the system pmd table. Or die horribly if it's outside
+ * the existing limits.
+ */
+void pmd_populate_S0(pgd_t *pgd, pmd_t *pmd)
+{
+ pmd_t *pmd_base;
+ unsigned long int page_address, pte_number;
+ pte_t *spte;
+
+ pmd_base = (pmd_t *)pgd->pmd;
+ /* get physical page address */
+ page_address = (((pmd-pmd_base))<<(PAGE_SHIFT+7));
+ /* The length register for S0 is in pagelets */
+ pte_number = (page_address>>PAGELET_SHIFT);
+ /* convert to a virtual address */
+ page_address |= PAGE_OFFSET;
+
+ if (pte_number >= pgd->lr) {
+ printk(KERN_ERR "VAXMM: attempting to access out of bounds S0 page table entry (address %8lx, pte=%8lx, limit=%8lx)\n",page_address,pte_number, pgd->lr);
+ vaxpanic("VAXMM: bugcheck!\n");
+ return;
+ }
+
+ /* calculate the appropriate system page table entry */
+ spte = GET_SPTE_VIRT(page_address);
+ /* and enter it into the page table */
+ *pmd = __pmd((unsigned long)spte);
+
+ return;
+}
/* allocate a page for the page table */
-pte_t * pte_alloc_one(pmd_t *pmd)
+/* This used to be called pte_alloc_one, until that name was taken by the
+ * arch-independent code. See the notes above pmd_populate for why this is here.
+ */
+pte_t * vax_pte_alloc_one(pmd_t *pmd)
{
- if (pmd_none(*pmd)) {
- pte_t *page = get_pte_fast();
-
- if (!page) return get_pte_slow();
- return page;
- }
- return (pte_t *) pmd_val(*pmd);
+ if (pmd_none(*pmd)) {
+ pte_t *page = pte_alloc_one_fast(NULL, 0);
+
+ if (!page) return pte_alloc_one(NULL, 0);
+
+ return page;
+ }
+ /* notreached */
+ return NULL;
}
+
+
/* free the page after recovering the original address */
void pte_free(pte_t *pte)
{
@@ -569,14 +527,6 @@
free_page((unsigned long int)pte);
}
-void pte_free_kernel(pte_t *pte)
-{
-#ifdef VAX_MM_PGALLOC_DEBUG
- printk(KERN_DEBUG "VAXMM: pte_free_kernel called on pte %8p\n",pte);
-#endif
- printk(KERN_DEBUG "VAXMM: pte_free_kernel called on pte %8p\n",pte);
- free_pte_fast(pte);
-}
/* Find an entry in the third-level page table.. */
#ifdef VAX_MM_PGALLOC_DEBUG
@@ -584,7 +534,7 @@
{
unsigned long int offset;
offset = (pmd_val(*dir)+(((address>>PAGE_SHIFT)&(PTRS_PER_PTE-1))<<SIZEOF_PTE_LOG2));
- printk(KERN_DEBUG "VAXMM:pte_offset: pmd %8p, address %8lx, pte_offset %8lx\n",dir, address, offset);
+// printk(KERN_DEBUG "VAXMM:pte_offset: pmd %8p, address %8lx, pte_offset %8lx\n",dir, address, offset);
return offset;
}
#else
@@ -593,27 +543,5 @@
return (pte_t *)(pmd_val(*dir)+(((address>>PAGE_SHIFT)&(PTRS_PER_PTE-1))<<SIZEOF_PTE_LOG2));
}
#endif
-/* get_pte_kernel_slow. allocate a page of PTEs for the S0 pagetable.
- * See comments in include/asm/mm/pgalloc.h for get_pte_kernel.
- */
-pte_t *get_pte_kernel_slow(pmd_t * pmd, unsigned long address)
-{
- return (pte_t *) NULL;
-}
-
-/* just want a page here - quite simple */
-/* bit of a null op - grab a page off the list - pte_alloc does the real work */
-pte_t *get_pte_slow(void)
-{
- unsigned long pte;
- pte = (unsigned long) __get_free_page(GFP_KERNEL);
-
- if (pte) {
- return (pte_t *) pte;
- } else {
- return NULL;
- }
- return NULL;
-}
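
For reference, the reshuffle in this file mirrors the generic allocation path
that appeared around 2.4.3: pte_alloc() in the core kernel now allocates the
pte page itself and hands it to the architecture through pmd_populate(),
which is why the VAX code above recycles that page and grows its contiguous
table instead. A simplified sketch of the generic caller, ignoring the
page_table_lock handling done in the real thing:

pte_t *pte_alloc_sketch(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	if (pmd_none(*pmd)) {
		pte_t *new = pte_alloc_one(mm, address);
		if (!new)
			return NULL;
		pmd_populate(mm, pmd, new);	/* the arch hook defined above */
	}
	return pte_offset(pmd, address);
}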
Index: pgtable.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/pgtable.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -r1.7 -r1.8
--- pgtable.c 23 Aug 2001 09:25:13 -0000 1.7
+++ pgtable.c 9 Apr 2002 13:50:56 -0000 1.8
@@ -37,20 +37,3 @@
flush_tlb_all();
}
-int do_check_pgt_cache(int low, int high)
-{
- /* implemented like everyone else has - D.A. */
- int freed = 0;
- if(pgtable_cache_size > high) {
- do {
-// if(pgd_quicklist)
-// free_pgd_slow(get_pgd_fast()), freed++;
-// if(pmd_quicklist)
-// free_pmd_slow(get_pmd_fast()), freed++;
- if(pte_quicklist)
- free_pte_slow(get_pte_fast()), freed++;
- } while(pgtable_cache_size > low);
- }
- return freed;
-}
-