|
From: Dave A. <ai...@us...> - 2001-05-19 12:05:54
|
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv28407/include/asm-vax/mm
Modified Files:
pgalloc.h
Log Message:
DA: extensive changes to pgalloc.h
Allocates the P0 and P1 page tables at the correct place in the system for 2.4.
Implements pte_alloc_kernel and pte_alloc in a much simplified fashion.
Fixed the process size — it still needs to be #defined properly.
Index: pgalloc.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pgalloc.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- pgalloc.h 2001/02/15 01:17:23 1.3
+++ pgalloc.h 2001/05/19 12:05:51 1.4
@@ -13,12 +13,15 @@
* pgds into a single page, and add pages as needed. The quicklists
* structure can be hijacked for this. Or at least one per pagelet...
*/
-
+/*
+ * (c) Copyright Dave Airlie 2001 - ai...@li...
+ * -- re-write for fixed sized processes
+ */
#include <asm/processor.h>
#include <linux/threads.h>
#include <asm/mm/virtmap.h>
+#include <linux/vmalloc.h>
-
#ifndef CONFIG_SMP
extern struct pgtable_cache_struct {
unsigned long *pgd_cache;
@@ -31,7 +34,6 @@
#define quicklists cpu_data[smp_processor_id()]
#endif
-
#define pgd_quicklist (quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (quicklists.pte_cache)
@@ -63,7 +65,9 @@
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
-
+#if 0
+extern pgd_t *get_pgd_slow(void);
+#else
extern __inline__ pgd_t *get_pgd_slow(void)
{
/*
@@ -75,13 +79,22 @@
pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
if (ret) {
- /* set p0 and p1 regions to empty values */
- memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- /* set the s0 region, from the master copy in swapper_pg_dir */
- memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ /* Allocate space for the p0/p1 page tables */
+ /* allocate 192 pages at 4096 bytes each for page tables? */
+ ret[0].br = (unsigned long)vmalloc(192 * PAGE_SIZE);
+ ret[0].lr = ((160*PAGE_SIZE)>>SIZEOF_PTE_LOG2);
+ /* the p1br needs to be set back from the end of the p1 ptes */
+ ret[1].br = (ret[0].br - 0x800000) + (192*PAGE_SIZE);
+ ret[1].lr = 0x40000-((32*PAGE_SIZE)>>SIZEOF_PTE_LOG2);
+
+ printk("get_pgd: p0: %8lX, %8lX, p1: %8lX, %8lx\n", ret[0].br, ret[0].lr, ret[1].br, ret[1].lr);
+
+ /* set the s0 region, from the master copy in swapper_pg_dir */
+ memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return ret;
}
+#endif
extern __inline__ pgd_t *get_pgd_fast(void)
{
@@ -93,6 +106,7 @@
pgtable_cache_size--;
} else
ret = (unsigned long *)get_pgd_slow();
+
return (pgd_t *)ret;
}
@@ -179,12 +193,21 @@
*
* - At present I'm ducking this. We fix the S0 page table size at
* boot time, and disallow dynamic expansion. atp Feb 2001.
+ *
+ * - we still need to implement this ... linux still calls it ..
+ * - D.A. May 2001.
*/
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
- return NULL;
-}
+ pgd_t *pgdptr = (pgd_t *)pmd;
+ /* note the lr in the system pgd is in PAGELETS.... shift it down to
+ give page view */
+ if ((address >> PAGE_SHIFT) < (pgdptr->lr>>3))
+ return pte_offset(pmd, address);
+ else
+ return NULL;
+}
/*
* allocate a page, to hold page table entries.
@@ -195,7 +218,45 @@
*/
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
-
+ unsigned long pte_number, pte_page, pte_page_offset;
+ pgd_t *pgdptr = (pgd_t *)pmd;
+ pte_t *newpte= NULL;
+
+ /* Deal with P0 vs P1 spaces */
+ /* need to handle error cases */
+ if (address < 0x40000000)
+ {
+ pte_number = (address >> PAGE_SHIFT);
+ pte_page = (pte_number >> SIZEOF_PTE_LOG2);
+ pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
+ if ((pte_number) < (pgdptr->lr)) {
+ newpte = pte_offset(pmd, address);
+ }
+ }
+ else
+ {
+ address-=0x40000000;
+ pte_number = (address>>PAGE_SHIFT);
+ pte_page = (pte_number >> SIZEOF_PTE_LOG2);
+ pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
+ if ((pte_number) > (pgdptr->lr)) {
+ newpte = pte_offset(pmd, address);
+ }
+
+ }
+
+ if (newpte)
+ {
+ remap_and_clear_pte_page((pgd_t *)pmd, newpte, pte_page);
+ pte_clear(newpte);
+ /* make sure a page in S0 space is mapped */
+
+ }
+ return newpte;
+
+ /* old 2.2 code commented out for now .. in case it is of any use
+ to anyone later - D.A. May 2001 */
+#if 0
/* calculate the offset of the requested pte in this pagetable page */
unsigned long pte_number, pte_page, pte_page_offset;
pgd_t *pgdptr = (pgd_t *)pmd;
@@ -209,6 +270,7 @@
/* do we have a pgd base and length set ? */
/* The p0br and p1br should be setup at process initialisation. */
if (pmd_none(*pmd)) {
+ printk("Got pmd_none\n");
return NULL;
}
@@ -248,6 +310,7 @@
pgdptr->lr += PTRS_PER_PTE;
}
return (pte_t *)( pgdptr->br + pte_number*BYTES_PER_PTE_T);
+#endif /* if 0 */
}
|