From: Andy P. <at...@us...> - 2001-06-27 09:17:45
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv21707
Added Files:
pgalloc.c
Log Message:
missing pgalloc.c from memory reorganisation
--- NEW FILE ---
/* $Id: pgalloc.c,v 1.1 2001/06/27 09:17:41 atp Exp $
*
* pgalloc.c Routines from include/asm-vax/mm/pgalloc.h
* Allocation of page table entries and so forth.
*
* Copyright atp Jun 2001
* GNU GPL
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>
/* misc comments FIXME: sort and discard */
/* pte_alloc_kernel() is called from mm/vmalloc.c in alloc_area_pmd().
*
* Any page table page allocated here needs to be physically contiguous
* with the rest of the system page table.
* Things to consider:
* If you want to allocate a page table page to hold a PTE for a given
* new S0 address (where this address is higher than SBR+SLR), then that
* new page table page must be allocated at the exact physical page
* that maps that S0 address.  I.e. to map a virtual address X you need
* to allocate the physical page containing the address:
*
*   PFN = (X - PAGE_OFFSET) >> PAGE_SHIFT
*   PTE address (physical memory) = (PFN * 4) + SBR
*   Physical page address = (PTE address) & PAGE_MASK
*   SLR = ((Physical page address + (1 << PAGE_SHIFT)) - SBR) / 4
*
*
* If that physical page is already occupied, the contents must
* be ejected. This takes time, and can lead to deadlock, particularly
* if a dirty page needs to be written to disk/swap.
* Also, any physical pages between the previous end of the system
* page table and the new end (SBR+SLR) will need to be cleared,
* otherwise random rubbish will end up in the system page table.
* One way to do this is by "locking up the machine": move the contents
* of the physical pages needed to pages on the freelist, rewrite the
* PTEs to point at the new physical pages, and then allocate and expand
* the system page table.  No scheduling allowed.  Also, how you locate
* all of the references to a given physical page, so that you can
* rewrite them without conducting a thorough search of all page tables
* (possibly incurring page faults for those P0 page tables that have
* been swapped out), is not clear.
*
*
* - At present I'm ducking this. We fix the S0 page table size at
* boot time, and disallow dynamic expansion. atp Feb 2001.
*
* - we still need to implement this ... linux still calls it ..
* - D.A. May 2001.
*/
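/* A minimal sketch (not compiled in) of the arithmetic described in the
 * comment above: given an S0 virtual address X, compute the physical
 * page that must hold its PTE and the SLR value that would cover it.
 * The function name is hypothetical; only the formulas come from the
 * comment above.
 */
#if 0
static void s0_pte_arith_example(unsigned long x, unsigned long sbr)
{
	unsigned long pfn, pte_phys, pte_page_phys, new_slr;

	pfn = (x - PAGE_OFFSET) >> PAGE_SHIFT;	/* PFN mapping X */
	pte_phys = (pfn * 4) + sbr;		/* 4 bytes per PTE */
	pte_page_phys = pte_phys & PAGE_MASK;	/* page holding that PTE */
	new_slr = ((pte_page_phys + (1 << PAGE_SHIFT)) - sbr) / 4;

	printk("S0 addr %08lx: pte at %08lx, page %08lx, slr %08lx\n",
	       x, pte_phys, pte_page_phys, new_slr);
}
#endif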
pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pgd_t *pgdptr = (pgd_t *)pmd;

	/* Note: the lr in the system pgd is counted in PAGELETS (512-byte
	 * VAX hardware pages); shift it down by 3 to get the Linux page
	 * view (8 pagelets per 4096-byte Linux page). */
	if ((address >> PAGE_SHIFT) < (pgdptr->lr >> 3))
		return pte_offset(pmd, address);
	else
		return NULL;
}
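/* Illustrative helper (not used above, just making the >>3 in
 * pte_alloc_kernel() explicit): the VAX hardware page ("pagelet") is
 * 512 bytes, so one 4096-byte Linux page covers 8 pagelets.  The
 * helper name is hypothetical.
 */
#if 0
static inline unsigned long pagelets_to_pages(unsigned long pagelets)
{
	return pagelets >> 3;	/* 8 x 512-byte pagelets = 1 Linux page */
}
#endif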
/*
* allocate a page, to hold page table entries.
* for a user process.
* We grab a random page. The only catch is that it must be virtually
* contiguous within the P0 or P1 page tables, which are held in S0
* space. So, we remap the page table area in S0 space too.
*/
pte_t *pte_alloc(pmd_t *pmd, unsigned long address)
{
	unsigned long pte_number, pte_page, pte_page_offset;
	pgd_t *pgdptr = (pgd_t *)pmd;
	pte_t *newpte = NULL;

	printk("pte_alloc: address 0x%lx\n", address);

	/* Deal with P0 (0x00000000-0x3fffffff) vs P1 (0x40000000-0x7fffffff)
	 * process spaces */
	/* FIXME: need to handle error cases */
	if (address < 0x40000000) {
		pte_number = (address >> PAGE_SHIFT);
		pte_page = (pte_number >> SIZEOF_PTE_LOG2);
		pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
		/* P0 grows upward: only PTEs below lr are valid */
		if (pte_number < pgdptr->lr)
			newpte = pte_offset(pmd, address);
	} else {
		/* make the address P1-relative */
		address -= 0x40000000;
		pte_number = (address >> PAGE_SHIFT);
		pte_page = (pte_number >> SIZEOF_PTE_LOG2);
		pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
		/* P1 grows downward: only PTEs above lr are valid */
		if (pte_number > pgdptr->lr)
			newpte = pte_offset(pmd, address);
	}

	if (newpte) {
		/* make sure a page in S0 space is mapped for this
		 * page-table page */
		remap_and_clear_pte_page((pgd_t *)pmd, newpte, pte_page);
	}
	return newpte;

/* old 2.2 code commented out for now .. in case it is of any use
 * to anyone later - D.A. May 2001 */
#if 0
	/* calculate the offset of the requested pte in this pagetable page */
	unsigned long pte_number, pte_page, pte_page_offset;
	pgd_t *pgdptr = (pgd_t *)pmd;
	unsigned long t2;
	pte_t *page;

	pte_number = (address >> PAGE_SHIFT);
	pte_page = (pte_number >> SIZEOF_PTE_LOG2);
	pte_page_offset = pte_number & (PTRS_PER_PTE - 1);

	/* Do we have a pgd base and length set?  The p0br and p1br
	 * should be set up at process initialisation. */
	if (pmd_none(*pmd)) {
		printk("Got pmd_none\n");
		return NULL;
	}

	/* Do we need to allocate more pages?  If the requested PTE is
	 * already inside the allocated page table region, return its
	 * virtual address.  (The base registers for P0 and P1 hold
	 * virtual addresses in S0 space, so no __va() is needed.) */
	if (pte_number < (pgdptr->lr)) {
		return (pte_t *)(pgdptr->br + pte_number * BYTES_PER_PTE_T);
	}

	/* The address lies outside the current page table - by how much? */
	/* FIXME: Maximum task size, defined by max p0 pagetable size */
	/* number of pages to allocate */
	t2 = ((pte_number - pgdptr->lr) >> SIZEOF_PTE_LOG2) + 1;
	while (t2--) {
		/* grab a page off the quicklist ... */
		page = get_pte_fast();
		/* ... or allocate a new one if none are left */
		if (!page)
			page = get_pte_slow(pmd, address);
		/* ran out of pages - out of memory */
		/* FIXME: is there anything else we need to do to signal failure? */
		if (!page) {
			printk("%s:%d: ran out of free pages building page table at pte %08lx.\n",
			       __FILE__, __LINE__, pgdptr->lr);
			return NULL;
		}
		/* map this page into the S0 page table at the right point */
		remap_and_clear_pte_page((pgd_t *)pmd, page, pte_page);
		/* add this page of PTEs to the length register */
		/* FIXME: handle reverse P1 region... */
		pgdptr->lr += PTRS_PER_PTE;
	}
	return (pte_t *)(pgdptr->br + pte_number * BYTES_PER_PTE_T);
#endif /* if 0 */
}
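/* Hedged usage sketch (not compiled in): roughly how the generic 2.4
 * mm code reaches pte_alloc() for a user address.  On VAX the pmd
 * level is folded into the pgd, hence the casts above.  The helper
 * name is hypothetical, and the two-argument pmd_alloc() matches the
 * early-2.4 interface that this file's pte_alloc() is written against.
 */
#if 0
static pte_t *example_get_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* top-level entry */
	pmd_t *pmd = pmd_alloc(pgd, address);	/* folded on VAX */

	if (!pmd)
		return NULL;
	return pte_alloc(pmd, address);
}
#endif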