From: Andy P. <at...@us...> - 2001-01-18 15:52:27
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv5429/include/asm-vax/mm
Modified Files:
page.h pgcompat.h pgtable.h
Added Files:
mmu_context.h
Log Message:
Updates to adapt the code base from the 2.2 to the 2.4 tree. Sync up to the atp 2.4 tree.
--- NEW FILE ---
#ifndef _ASM_VAX_MMU_CONTEXT_H
#define _ASM_VAX_MMU_CONTEXT_H

/* MMU contexts are part of the process control block, so there is no
 * separate per-mm hardware context to set up or tear down. */
#define init_new_context(tsk,mm)	0
#define destroy_context(mm)		flush_tlb_mm(mm)

/* Nothing to do when a kernel thread borrows an mm lazily. */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk, unsigned cpu)
{
}

/* Switching mms only needs to track which CPUs are using each mm. */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk, unsigned cpu)
{
	clear_bit(cpu, &prev->cpu_vm_mask);
	set_bit(cpu, &next->cpu_vm_mask);
}

extern inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current, smp_processor_id());
}

#endif /* _ASM_VAX_MMU_CONTEXT_H */
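For context, here is a hand-written sketch (not part of this commit) of roughly how the 2.4 core drives these hooks at context-switch time. The surrounding function and its name are illustrative only; it assumes the stock 2.4 task_struct mm/active_mm fields and that this header is reachable as <asm/mm/mmu_context.h>.

/* Illustrative sketch only -- not from the commit.  Shows the call
 * pattern around switch_mm()/enter_lazy_tlb() for a user task versus
 * a kernel thread borrowing an mm. */
#include <linux/sched.h>		/* struct task_struct, struct mm_struct */
#include <linux/smp.h>			/* smp_processor_id() */
#include <asm/mm/mmu_context.h>		/* assumed install path of this header */

static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	struct mm_struct *oldmm = prev->active_mm;
	unsigned int cpu = smp_processor_id();

	if (next->mm) {
		/* Ordinary user task: move this CPU's bit in cpu_vm_mask
		 * from the old mm to the new one. */
		switch_mm(oldmm, next->mm, next, cpu);
	} else {
		/* Kernel thread: borrow the previous mm lazily. */
		next->active_mm = oldmm;
		enter_lazy_tlb(oldmm, next, cpu);
	}
}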
Index: page.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/page.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -C2 -r1.2 -r1.3
*** page.h 2001/01/17 19:37:29 1.2
--- page.h 2001/01/18 15:52:28 1.3
***************
*** 13,16 ****
--- 13,39 ----
#define STRICT_MM_TYPECHECKS
+ #define BUG() do { \
+ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ __asm__ __volatile__(".word 0x0000"); \
+ } while (0)
+
+ #define PAGE_BUG(page) do { \
+ BUG(); \
+ } while (0)
+
+ /* Pure 2^n version of get_order */
+ extern __inline__ int get_order(unsigned long size)
+ {
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+ }
+
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
***************
*** 83,128 ****
#define PAGE_ALIGN_PREV(addr) ((addr)&PAGE_MASK)
- /* This handles the memory map. (i hope)
- * taking the lead from the alpha port the VAX PAGE_OFFSET is
- * identified as being the start of kernel S0 (KSEG) space */
- #define __PAGE_OFFSET (0x80000000)
#ifndef __ASSEMBLY__
extern int console_loglevel;
- /*
- * Tell the user there is some problem. Beep too, so we can
- * see^H^H^Hhear bugs in early bootup as well!
- */
- #define BUG() do { \
- printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
- } while (0)
-
- #define PAGE_BUG(page) do { \
- BUG(); \
- } while (0)
-
- /* Pure 2^n version of get_order */
- extern __inline__ int get_order(unsigned long size)
- {
- int order;
-
- size = (size-1) >> (PAGE_SHIFT-1);
- order = -1;
- do {
- size >>= 1;
- order++;
- } while (size);
- return order;
- }
-
#endif /* __ASSEMBLY__ */
! #define PAGE_OFFSET __PAGE_OFFSET
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
#endif /* __KERNEL__ */
--- 106,127 ----
#define PAGE_ALIGN_PREV(addr) ((addr)&PAGE_MASK)
#ifndef __ASSEMBLY__
extern int console_loglevel;
#endif /* __ASSEMBLY__ */
! /* This handles the memory map. (i hope)
! * taking the lead from the alpha port the VAX PAGE_OFFSET is
! * identified as being the start of kernel S0 (KSEG) space */
+ #define __PAGE_OFFSET (0x80000000)
+ #define PAGE_OFFSET __PAGE_OFFSET
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
+ #define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
+ #define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#endif /* __KERNEL__ */
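A quick hand-written usage sketch of the helpers moved or added above (not part of the commit): get_order() rounds a byte count up to a power-of-two number of pages for the buddy allocator, __pa()/__va() subtract and add PAGE_OFFSET (0x80000000, the base of S0 space), and virt_to_page()/VALID_PAGE() map a kernel virtual address to its mem_map entry. The function below and its name are illustrative; __get_free_pages() and free_pages() are the standard 2.4 allocator entry points.

/* Illustrative sketch only -- not from the commit. */
#include <linux/mm.h>		/* __get_free_pages(), free_pages(), mem_map */
#include <asm/page.h>		/* get_order(), __pa(), __va(), virt_to_page() */

static void page_helpers_example(void)
{
	/* Three pages round up to the next power of two, so order == 2
	 * and the allocator hands back 4 contiguous pages. */
	int order = get_order(3 * PAGE_SIZE);
	unsigned long vaddr = __get_free_pages(GFP_KERNEL, order);

	if (vaddr) {
		unsigned long paddr = __pa(vaddr);	/* vaddr - PAGE_OFFSET */
		struct page *page = virt_to_page(vaddr);

		/* Pages handed out by the buddy allocator always have a
		 * valid mem_map entry. */
		if (!VALID_PAGE(page))
			BUG();

		free_pages((unsigned long)__va(paddr), order);	/* __va() round-trips */
	}
}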
Index: pgcompat.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pgcompat.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -C2 -r1.1 -r1.2
*** pgcompat.h 2001/01/17 16:18:52 1.1
--- pgcompat.h 2001/01/18 15:52:28 1.2
***************
*** 26,32 ****
*/
/* page size definitions */
! #include <asm/vpage.h>
! #include <asm/page.h> /* for pte_t */
#ifndef __ASSEMBLY__
--- 26,32 ----
*/
/* page size definitions */
! #include <asm/mm/vpage.h>
! #include <asm/mm/page.h> /* for pte_t */
#ifndef __ASSEMBLY__
Index: pgtable.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pgtable.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -C2 -r1.1 -r1.2
*** pgtable.h 2001/01/17 16:18:52 1.1
--- pgtable.h 2001/01/18 15:52:28 1.2
***************
*** 11,14 ****
--- 11,15 ----
#include <asm/mm/virt_trans.h>
+
/*
* The requirement for contiguous pages for page tables means that
***************
*** 73,76 ****
--- 74,78 ----
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 128
+ /* fix this FIXME: */
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
/*
***************
*** 82,85 ****
--- 84,97 ----
#define __USER_PGD_PTRS ((__PAGE_OFFSET >> PGDIR_SHIFT) & 0x3ff)
#define __KERNEL_PGD_PTRS (PTRS_PER_PGD-__USER_PGD_PTRS)
+ #define FIRST_USER_PGD_NR 0
+
+
+ #define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+ #define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+ #define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
/*
***************
*** 334,354 ****
extern pgd_t swapper_pg_dir[1024];
- extern inline void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
- {
- }
! #define SWP_TYPE(entry) (((entry) >> 2) & 0x3f)
! #define SWP_OFFSET(entry) ((entry) >> 8)
! #define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 8))
!
! #define module_map vmalloc
! #define module_unmap vfree
!
! /* kernel/fork.c expects these definitions to be here. They might be
! important on i386, but we don't need it. */
! #define copy_segments(nr, tsk, mm) do { } while (0)
! #define release_segments(mm) do { } while (0)
! #define forget_segments() do { } while (0)
#endif /* !__ASSEMBLY__ */
--- 346,351 ----
extern pgd_t swapper_pg_dir[1024];
! /* copy_segments and friends now in system.h */
#endif /* !__ASSEMBLY__ */
***************
*** 361,365 ****
{
unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
! unsigned long *ptr = pmdp->pmd;
short i = 16;
while (--i >= 0) {
--- 358,362 ----
{
unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
! unsigned long *ptr = (unsigned long *)pmdp->pmd;
short i = 16;
while (--i >= 0) {
***************
*** 369,372 ****
--- 366,387 ----
}
+ /*
+ * The VAX doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+ extern inline void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t pte)
+ {
+ }
+
+
+ #define SWP_TYPE(entry) (((entry).val >> 2) & 0x3f)
+ #define SWP_OFFSET(entry) ((entry).val >> 8)
+ #define SWP_ENTRY(type,offset) ((swp_entry_t) {((type) << 2) | ((offset) << 8)})
+
+ #define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+ #include <asm-generic/pgtable.h>
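The swap macros above follow the 2.4 convention of wrapping swap entries in swp_entry_t (a struct holding an unsigned long val) rather than passing bare longs as in 2.2. A hand-written round-trip sketch follows (not part of the commit; the include locations and function name are assumptions):

/* Illustrative sketch only -- not from the commit. */
#include <linux/mm.h>			/* pulls in the paging headers */
#include <linux/swap.h>			/* swp_entry_t (assumed location) */

static void swap_entry_example(pte_t swapped_out_pte)
{
	/* Pack swap type 3, offset 0x1234 into an entry, then unpack it. */
	swp_entry_t entry = SWP_ENTRY(3, 0x1234);
	unsigned long type = SWP_TYPE(entry);		/* == 3 */
	unsigned long offset = SWP_OFFSET(entry);	/* == 0x1234 */

	/* A not-present pte and a swap entry carry the same bit pattern,
	 * so the conversions are plain copies of the value. */
	swp_entry_t from_pte = pte_to_swp_entry(swapped_out_pte);
	pte_t back = swp_entry_to_pte(from_pte);

	(void)type; (void)offset; (void)back;
}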