From: Andy P. <at...@us...> - 2001-02-15 01:26:18
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv31452/mm

Modified Files:
	init.c ioremap.c pgtable.c 
Log Message:
pagelet updates, MM updates. shot 9

Index: init.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/init.c,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- init.c	2001/02/05 00:06:11	1.4
+++ init.c	2001/02/15 01:26:58	1.5
@@ -16,7 +16,9 @@
 
 static unsigned long totalram_pages;
 
-unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)] __attribute__ ((__aligned__ (PAGE_SIZE)));
+unsigned long empty_zero_page[PAGE_SIZE /
+			      sizeof(unsigned long)]
+	__attribute__ ((__aligned__(PAGE_SIZE)));
 
 #ifndef CONFIG_SMP
 struct pgtable_cache_struct quicklists;
@@ -35,81 +37,126 @@
  * In other architectures, paging_init sets up the kernel's page tables.
  * In Linux/VAX, this is already done by the early boot code.
  */
-void __init
-paging_init()
+void __init paging_init()
 {
-        unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0};
+	unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };
 
-        zones_size[ZONE_DMA] = max_pfn;
-        free_area_init(zones_size);
+	zones_size[ZONE_DMA] = max_pfn;
+	free_area_init(zones_size);
 }
 
 #if DEBUG_POISON
-static void
-kill_page(unsigned long pg)
+static void kill_page(unsigned long pg)
 {
-        unsigned long *p = (unsigned long *)pg;
-        unsigned long i = PAGE_SIZE, v = 0xdeadbeefdeadbeef;
-        do {
-                p[0] = v;
-                p[1] = v;
-                p[2] = v;
-                p[3] = v;
-                p[4] = v;
-                p[5] = v;
-                p[6] = v;
-                p[7] = v;
-                i -= 64;
-                p += 8;
-        } while (i != 0);
+	unsigned long *p = (unsigned long *) pg;
+	unsigned long i = PAGE_SIZE, v = 0xdeadbeefdeadbeef;
+	do {
+		p[0] = v;
+		p[1] = v;
+		p[2] = v;
+		p[3] = v;
+		p[4] = v;
+		p[5] = v;
+		p[6] = v;
+		p[7] = v;
+		i -= 64;
+		p += 8;
+	} while (i != 0);
 }
 #else
 #define kill_page(pg)
 #endif
 
-void
-mem_init(void)
+void mem_init(void)
 {
-        max_mapnr = num_physpages = max_low_pfn;
+	max_mapnr = num_physpages = max_low_pfn;
 
-        /* this will put all low memory onto the freelists */
-        totalram_pages += free_all_bootmem();
+	/* this will put all low memory onto the freelists */
+	totalram_pages += free_all_bootmem();
 
-        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
-        printk("Memory: %luk/%luk available\n",
-                (unsigned long) nr_free_pages() / 2,
-                max_mapnr / 2
-                );
+	printk("Memory: %luk/%luk available\n",
+	       (unsigned long) nr_free_pages() / 2, max_mapnr / 2);
 
-        return;
+	return;
 }
 
-void
-free_initmem (void)
+void free_initmem(void)
 {
-        extern char __init_begin, __init_end;
-        unsigned long addr;
+	extern char __init_begin, __init_end;
+	unsigned long addr;
 
-        addr = (unsigned long)(&__init_begin);
-        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-                mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
-                atomic_set(&mem_map[MAP_NR(addr)].count, 1);
-                kill_page(addr);
-                free_page(addr);
-        }
-        printk ("Freeing unused kernel memory: %dk freed\n",
-                (&__init_end - &__init_begin) >> 10);
+	addr = (unsigned long) (&__init_begin);
+	for (; addr < (unsigned long) (&__init_end); addr += PAGE_SIZE) {
+		mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
+		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
+		kill_page(addr);
+		free_page(addr);
+	}
+	printk("Freeing unused kernel memory: %dk freed\n",
+	       (&__init_end - &__init_begin) >> 10);
 }
 
 void si_meminfo(struct sysinfo *val)
 {
-        val->totalram = totalram_pages;
-        val->sharedram = 0;
-        val->freeram = nr_free_pages();
-        val->bufferram = atomic_read(&buffermem_pages);
-        val->totalhigh = 0;
-        val->freehigh = 0;
-        val->mem_unit = PAGE_SIZE;
-        return;
+	val->totalram = totalram_pages;
+	val->sharedram = 0;
+	val->freeram = nr_free_pages();
+	val->bufferram = atomic_read(&buffermem_pages);
+	val->totalhigh = 0;
+	val->freehigh = 0;
+	val->mem_unit = PAGE_SIZE;
+	return;
 }
+
+/* page table stuff */
+
+/* get_pte_kernel_slow. allocate a page of PTEs for the S0 pagetable.
+ * See comments in include/asm/mm/pgalloc.h for get_pte_kernel.
+ */
+pte_t *get_pte_kernel_slow(pmd_t * pmd, unsigned long address)
+{
+	return (pte_t *) NULL;
+}
+
+/* just want a page here - quite simple */
+pte_t *get_pte_slow(pmd_t * pmd, unsigned long address)
+{
+	unsigned long pte;
+
+	pte = (unsigned long) __get_free_page(GFP_KERNEL);
+
+	if (pte) {
+		return (pte_t *) pte;
+	} else {
+		return NULL;
+	}
+	return NULL;
+}
+
+/* remap a given page to be part of a contiguous page table for p0/1 space */
+void remap_and_clear_pte_page(pgd_t *pagetable, pte_t *page, unsigned long pte_page)
+{
+	unsigned long page_physical_address, page_virtual_address;
+	pte_t *S0pte;
+	pte_t tpte;
+
+	/* zero out these pte's */
+	clear_page((void *) page);
+
+	/* page addresses */
+	page_physical_address = __pa(page);
+	page_virtual_address = (pte_page * PAGE_SIZE) + pagetable->br;
+
+	/* S0 pte entry for this virtual address */
+	S0pte = ((page_virtual_address - PAGE_OFFSET) >> PAGE_SHIFT)+swapper_pg_dir;
+
+	/* FIXME: what if the page is already mapped? (TASK_SIZE) */
+	pte_clear(S0pte);
+	tpte = mk_pte_phys((void*)page_physical_address,(pgprot_t)PAGE_KERNEL);
+	set_pte(S0pte, tpte);
+	return;
+}
+
+

Index: ioremap.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/ioremap.c,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- ioremap.c	2001/01/26 00:27:00	1.2
+++ ioremap.c	2001/02/15 01:26:58	1.3
@@ -71,12 +71,12 @@
 	pte_t *p;
 
 	/* Page align the physical addresses */
-	phys_start = VPAGE_ALIGN_PREV(phys_addr);
+	phys_start = PAGE_ALIGN_PREV(phys_addr);
 	offset = phys_addr - phys_start;
 
-	phys_end = VPAGE_ALIGN(phys_addr + size);
+	phys_end = PAGE_ALIGN(phys_addr + size);
 
-	num_ptes = (phys_end - phys_start) >> VPAGE_SHIFT;
+	num_ptes = (phys_end - phys_start) >> PAGE_SHIFT;
 
 	start_pte = NULL;
 	p = iomap_base;
@@ -114,10 +114,8 @@
 	virt_start = SPTE_TO_VIRT(start_pte);
 
 	for (i = 0; i < num_ptes; i++) {
-		pte_val(start_pte[i]) = _PAGE_VALID | _PAGE_KW |
-			(((phys_start>>VPAGE_SHIFT) + i) & _PFN_MASK);
-
-		__flush_tlb_one(virt_start + (i<<VPAGE_SHIFT));
+		set_pte( (pte_t *)start_pte++, mk_pte((phys_start+(i*PAGE_SHIFT)),__pgprot(_PAGE_VALID | _PAGE_KW)));
+		__flush_tlb_one(virt_start + (i<<PAGELET_SHIFT));
 	}
 
 	printk("IO mapped phys addr 0x%08lx, 0x%04x pages at virt 0x%08lx (IOMAP PTE index 0x%04x)\n",
@@ -137,7 +135,7 @@
 		return;
 	}
 
-	p = GET_SPTE_VIRT(addr);
+	p = GET_HWSPTE_VIRT(addr);
 
 	if ((p < iomap_base) && (p >= (iomap_base + SPT_ENTRIES_IOMAP))) {
 		printk("iounmap: virtual addr 0x%08lx not in IOMAP region\n",
@@ -163,7 +161,7 @@
 		p++;
 
 		__flush_tlb_one(addr);
-		addr += VPAGE_SIZE;
+		addr += PAGELET_SIZE;
 	}
 }

Index: pgtable.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/pgtable.c,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- pgtable.c	2001/01/26 00:27:00	1.2
+++ pgtable.c	2001/02/15 01:26:58	1.3
@@ -9,13 +9,14 @@
 
 #include <asm/pgalloc.h>
 
+/* Note the factor of 8 in the length registers */
 void set_page_dir(struct task_struct * tsk, pgd_t * pgdir)
 {
 	/* P0BR and P1BR are virtual addresses */
-	tsk->thread.pcb.p0br = pgd_val(pgdir[0]);
-	tsk->thread.pcb.p0lr = pgd_val(pgdir[1]);
-	tsk->thread.pcb.p1br = pgd_val(pgdir[2]);
-	tsk->thread.pcb.p1lr = pgd_val(pgdir[3]);
+	tsk->thread.pcb.p0br = (pgdir[0]).br;
+	tsk->thread.pcb.p0lr = (pgdir[0]).lr * 8;
+	tsk->thread.pcb.p1br = (pgdir[1]).br;
+	tsk->thread.pcb.p1lr = (pgdir[1]).lr * 8;
 
 	/* now if this is the currently running task, up date the registers */
 	/* This doesnt sound like a great idea... perhaps setipl(31) would
@@ -27,6 +28,14 @@
 		__mtpr(tsk->thread.pcb.p1lr, PR_P1LR );
 		flush_tlb_all();
 	}
+}
+
+/* Note the factor of 8 in the length registers */
+void set_page_dir_kernel(pgd_t * pgdir)
+{
+	__mtpr( (pgdir[3]).br, PR_SBR);
+	__mtpr( (pgdir[3]).lr, PR_SLR);
+	flush_tlb_all();
 }
 
 int do_check_pgt_cache(int low_water, int high_water)
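
A note on the "factor of 8" comments in the pgtable.c hunk above: the VAX
hardware page (a "pagelet") is 512 bytes, while Linux/VAX uses a 4096-byte
PAGE_SIZE, so each Linux page corresponds to 8 hardware PTEs. The length
registers (P0LR/P1LR/SLR) count hardware PTEs, hence the multiply by 8 when
they are loaded from a length kept in Linux pages. The sketch below only
illustrates that arithmetic in user-space C; the macro and function names are
made up for the example and are not the kernel's.

/* Illustrative only -- not part of the patch above.  Shows the
 * pagelet/page arithmetic behind the "* 8" in set_page_dir().
 */
#include <stdio.h>

#define HW_PAGELET_SHIFT  9			/* 512-byte VAX hardware page */
#define HW_PAGELET_SIZE   (1UL << HW_PAGELET_SHIFT)
#define LINUX_PAGE_SHIFT  12			/* 4096-byte Linux/VAX page   */
#define LINUX_PAGE_SIZE   (1UL << LINUX_PAGE_SHIFT)
#define PTES_PER_PAGE     (LINUX_PAGE_SIZE / HW_PAGELET_SIZE)	/* == 8 */

/* Length registers count hardware PTEs, so a length kept in Linux
 * pages has to be scaled up before being written to P0LR/P1LR/SLR.
 */
static unsigned long pages_to_length_register(unsigned long linux_pages)
{
	return linux_pages * PTES_PER_PAGE;
}

int main(void)
{
	/* A P0 region of 64 Linux pages needs a P0LR of 512 hardware PTEs. */
	printf("P0LR for 64 Linux pages: %lu\n", pages_to_length_register(64));
	return 0;
}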