You can subscribe to this list here.
| 2000 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
(6) |
Sep
(2) |
Oct
(43) |
Nov
(4) |
Dec
(12) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2001 |
Jan
(78) |
Feb
(97) |
Mar
(29) |
Apr
(2) |
May
(22) |
Jun
(38) |
Jul
(11) |
Aug
(27) |
Sep
(40) |
Oct
(2) |
Nov
(17) |
Dec
(8) |
| 2002 |
Jan
|
Feb
(2) |
Mar
(1) |
Apr
(480) |
May
(456) |
Jun
(12) |
Jul
|
Aug
(1) |
Sep
|
Oct
(18) |
Nov
(3) |
Dec
(6) |
| 2003 |
Jan
|
Feb
(18) |
Mar
(1) |
Apr
|
May
(6) |
Jun
(147) |
Jul
(7) |
Aug
(3) |
Sep
(235) |
Oct
(10) |
Nov
(2) |
Dec
(1) |
| 2004 |
Jan
|
Feb
|
Mar
|
Apr
|
May
(1) |
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
|
From: Dave A. <ai...@us...> - 2001-08-15 22:23:37
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel In directory usw-pr-cvs1:/tmp/cvs-serv4962/arch/vax/kernel Modified Files: entry.S Log Message: DA: umount is implemented on my box :-).... needed to get busybox to umount ... Index: entry.S =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/entry.S,v retrieving revision 1.5 retrieving revision 1.6 diff -u -r1.5 -r1.6 --- entry.S 2001/06/26 18:59:00 1.5 +++ entry.S 2001/08/15 22:23:32 1.6 @@ -274,7 +274,7 @@ .long sys_lseek .long sys_getpid /* 20 */ .long sys_mount - .long sys_ni_syscall /* old umount syscall holder */ + .long sys_umount /* old umount syscall holder */ .long sys_setuid .long sys_getuid .long sys_stime /* 25 */ |
|
From: Dave A. <ai...@us...> - 2001-08-14 21:25:47
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax In directory usw-pr-cvs1:/tmp/cvs-serv16198/arch/vax Modified Files: config.in Log Message: DA: hackage for 4000/60 testing see what happens .. will be removed later Index: config.in =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/config.in,v retrieving revision 1.5 retrieving revision 1.6 diff -u -r1.5 -r1.6 --- config.in 2001/06/16 19:42:11 1.5 +++ config.in 2001/08/14 21:25:43 1.6 @@ -74,6 +74,9 @@ bool 'Support for Unibus' CONFIG_UNIBUS bool 'Support for VAXBI' CONFIG_VAXBI bool 'Support for Vax Station BUS (??)' CONFIG_VSBUS + if [ "$CONFIG_VSBUS" != "n" ]; then + bool 'Hardcode 4000/60 (temporary for now - DA)' CONFIG_VAX_4000HC + fi endmenu mainmenu_option next_comment |
|
From: Dave A. <ai...@us...> - 2001-08-14 21:25:47
|
Update of /cvsroot/linux-vax/kernel-2.4/drivers/net
In directory usw-pr-cvs1:/tmp/cvs-serv16198/drivers/net
Modified Files:
vaxlance.c
Log Message:
DA: hacks for 4000/60 testing; see what happens. Will be removed
later.
Index: vaxlance.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/drivers/net/vaxlance.c,v
retrieving revision 1.13
retrieving revision 1.14
diff -u -r1.13 -r1.14
--- vaxlance.c 2001/07/31 17:47:26 1.13
+++ vaxlance.c 2001/08/14 21:25:43 1.14
@@ -1016,6 +1016,7 @@
// lp->vsbus_int=5;
#ifdef CONFIG_VSBUS
+#ifndef CONFIG_VAX_4000HC
{
int num, irq;
autoirq_setup(0);
@@ -1029,10 +1030,14 @@
if (num)
lp->vsbus_int=num;
}
+#else
+ lp->vsbus_int=1;
#endif
+
+#endif
#ifdef VAX_LANCE_AUTOPROBE_IRQ
-
+#ifndef CONFIG_VAX_4000HC
printk("Autoprobing LANCE interrupt vector...");
@@ -1058,6 +1063,9 @@
/* maybe we should stop the LANCE here? */
vsbus_clear_int(lp->vsbus_int);
+#else
+ dev->irq=254;
+#endif
if (dev->irq)
printk(" probed IRQ %d, vsbus %d\n", dev->irq, lp->vsbus_int);
else
@@ -1066,6 +1074,7 @@
printk(" failed to detect IRQ line - assuming 0x94.\n");
}
/* Fill the dev fields */
+
#else
dev->irq=0x94;
printk("Using LANCE interrupt vector %d", dev->irq);
|
|
From: Dave A. <ai...@us...> - 2001-08-14 21:25:47
|
Update of /cvsroot/linux-vax/kernel-2.4/drivers/char
In directory usw-pr-cvs1:/tmp/cvs-serv16198/drivers/char
Modified Files:
dz.c
Log Message:
DA: hacks for 4000/60 testing; see what happens. Will be removed
later.
Index: dz.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/drivers/char/dz.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- dz.c 2001/06/17 11:43:44 1.6
+++ dz.c 2001/08/14 21:25:43 1.7
@@ -1455,6 +1455,7 @@
it is unwise. */
restore_flags(flags);
#if CONFIG_VAX
+#ifndef CONFIG_VAX_4000HC
{
short i;
unsigned char num;
@@ -1489,7 +1490,15 @@
vsbus_disable_int(num);
irq=autoirq_report(100);
}
+#else
+ dz_vsbus_tx_int=4;
+ dz_vsbus_rx_int=5;
+ irq=149;
+
+#endif
printk("dz.c: using irq rx %d, irq tx %d\n", irq-1, irq);
+
+
if (request_irq (irq, dz_interrupt_tx, SA_INTERRUPT, "DZ", lines[0]))
panic ("Unable to register DZ interrupt\n");
if (request_irq (irq-1, dz_interrupt_rx, SA_INTERRUPT, "DZ", lines[0]))
|
|
From: Andy P. <at...@us...> - 2001-07-31 17:54:08
|
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax In directory usw-pr-cvs1:/tmp/cvs-serv6688 Removed Files: old.bitops.h old.init.h old.spinlock.h old.system.h Log Message: trim old files --- old.bitops.h DELETED --- --- old.init.h DELETED --- --- old.spinlock.h DELETED --- --- old.system.h DELETED --- |
|
From: Andy P. <at...@us...> - 2001-07-31 17:50:29
|
Update of /cvsroot/linux-vax/kernel-2.4 In directory usw-pr-cvs1:/tmp/cvs-serv5817 Modified Files: Makefile Log Message: fixes. Index: Makefile =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.4/Makefile,v retrieving revision 1.11 retrieving revision 1.12 diff -u -r1.11 -r1.12 --- Makefile 2001/03/07 02:08:40 1.11 +++ Makefile 2001/07/31 17:50:26 1.12 @@ -40,6 +40,7 @@ MODFLAGS = -DMODULE CFLAGS_KERNEL = PERL = perl +MAKE = make -j3 export VERSION PATCHLEVEL SUBLEVEL EXTRAVERSION KERNELRELEASE ARCH \ CONFIG_SHELL TOPDIR HPATH HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \ |
|
From: Andy P. <at...@us...> - 2001-07-31 17:47:29
|
Update of /cvsroot/linux-vax/kernel-2.4/drivers/net
In directory usw-pr-cvs1:/tmp/cvs-serv5014
Modified Files:
vaxlance.c
Log Message:
Made the diagmem handling a bit cleaner; it now calls a routine in arch/vax/kernel/cpu_ka43.c.
Index: vaxlance.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/drivers/net/vaxlance.c,v
retrieving revision 1.12
retrieving revision 1.13
diff -u -r1.12 -r1.13
--- vaxlance.c 2001/06/17 11:43:45 1.12
+++ vaxlance.c 2001/07/31 17:47:26 1.13
@@ -953,26 +953,9 @@
*/
- /* KA43 only.
- *
- * The KA43 seems to be nicely fscked up... All physical memory
- * is accessible from 0x00000000 up (as normal) and also from
- * 0x28000000 (KA43_DIAGMEM) in IO space. In order to reliably
- * share memory with the LANCE, we _must_ read and write to this
- * shared memory via the DIAGMEM region. Maybe this bypasses
- * caches or something... If you don't do this you get evil
- * "memory read parity error" machine checks.
- *
- */
-
- /* You MUST remember to clear the DIAGMEM bits in these PTEs
- before giving the pages back to free_pages() */
-
- pte_t *p = GET_SPTE_VIRT(dev->mem_start);
- for (i=0; i<(65536>>PAGE_SHIFT); i++, p++) {
- set_pte(p, __pte(pte_val(*p) | (KA43_DIAGMEM >> PAGELET_SHIFT)));
- __flush_tlb_one(dev->mem_start + i * PAGE_SIZE);
- }
+ /* KA43 only. */
+ ka43_diagmem_remap(dev->mem_start, 65536);
+
}
@@ -1087,7 +1070,11 @@
dev->irq=0x94;
printk("Using LANCE interrupt vector %d", dev->irq);
#endif
- dev->open = &lance_open;
+ /* tmp atp*/
+ dev->irq=0x94;
+ printk("Using LANCE interrupt vector %d", dev->irq);
+
+ dev->open = &lance_open;
dev->stop = &lance_close;
dev->hard_start_xmit = &lance_start_xmit;
dev->tx_timeout = &lance_tx_timeout;
|
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv1125/mm
Modified Files:
mmu_context.h pagelet.h pagelet_pgd.h pagelet_pmd.h
pagelet_pte.h pgalloc.h pgtable.h tlb.h
Added Files:
task.h
Log Message:
New mm layer + start of signal handling + misc fixes
--- NEW FILE ---
#ifndef __VAX_MM_TASK_H
#define __VAX_MM_TASK_H
/* task.h - task memory map defines */
/* atp July 2001. */
/* These are all used to size the relevant structures in the system
* page table, in paging_init (arch/vax/mm/init.c)
*/
/* currently allocate 32mb of virtual memory */
/* These defines cover the process memory map, and are in bytes */
/* Please remember to make them a multiple of PAGE_SIZE, or it's going to
 * get weird here */
/* TASK_WSMAX is the max virtual address space in P0 */
/* TASK_WSMAX must not be larger than 768MB. In the unlikely event that
* you really want to allocate that much to a process, change PGD_SPECIAL below */
#define TASK_WSMAX (40*1024*1024)
/* TASK_STKMAX is the max space for the stack in P1 */
/* Like WSMAX above, the upper limit for this is set by PGD_SPECIAL below. If this
* is above 256MB change PGD_SPECIAL
*/
#define TASK_STKMAX (8*1024*1024)
/* TASK_MMAPMAX is the max space in P0 for the mmap() function ,
contiguous with TASK_WSMAX */
#define TASK_MMAPMAX (8*1024*1024)
/* TASK_MAXUPRC is the maximum number of user processes on the system
* Think of this like balsetcnt on VMS.
* -- this should also set/be set by the linux max task variable
*/
#define TASK_MAXUPRC (32)
/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE TASK_WSMAX
/* calculations based on the above for the SPT */
/* NPTE_TASK = the number of HWPTE's needed to map a process */
#define N_HWPTE_TASK_P0 ((TASK_WSMAX+TASK_MMAPMAX)>>PAGELET_SHIFT)
#define N_HWPTE_TASK_P1 ((TASK_STKMAX)>>PAGELET_SHIFT)
/* There are 4 4096 byte pages in the pmd. = 4x1024 hwptes. */
#define N_HWPTE_TASK_PMD ((4*1024))
#define N_HWPTE_TASK (N_HWPTE_TASK_P0+N_HWPTE_TASK_P1+N_HWPTE_TASK_PMD)
/* The alignment we want - at present double page for pte_alloc/offset to work ok */
#define PTE_TASK_MASK (~(8191))
#define PTE_TASK_ALIGN(x) (((x)+8191)&PTE_TASK_MASK)
/* size in bytes of an aligned task pte region */
#define PTE_TASK_SLOTSIZE PTE_TASK_ALIGN(N_HWPTE_TASK<<2)
/* The number of pagelets, or SPTEs needed to hold this number of HWPTEs */
#define SPTE_MAX_TASKPTE ((N_HWPTE_TASK>>(PAGELET_SHIFT-2))+1)
/* The offsets into page table area from the start of this slot, in bytes */
#define P0PTE_OFFSET (N_HWPTE_TASK_PMD<<2)
#define P1PTE_OFFSET ((N_HWPTE_TASK_P0+N_HWPTE_TASK_PMD)<<2)
#define P0PMD_OFFSET (0)
#define P1PMD_OFFSET (PAGE_SIZE*2)
/*
* This is a special index into the pmd. This stores a back pointer to the
* pgd in the pmd. The default value of 1536 allows 768 MB for WSMAX and 256
* MB for stack. If you want to change that allocation, bear in mind that you
* have to trade WSMAX for STKMAX. Unless I think of a cleverer way of doing this.
*/
#define PGD_SPECIAL 1536
/*
* User space process size: 2GB (default).
* This is a bit bogus - a linux thing.
*/
#define TASK_SIZE (PAGE_OFFSET)
#endif /* __VAX_MM_TASK_H */
Index: mmu_context.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/mmu_context.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- mmu_context.h 2001/06/09 18:00:25 1.3
+++ mmu_context.h 2001/07/31 17:33:26 1.4
@@ -1,8 +1,6 @@
-/* Copyright (C) May 2001 - Dave Airlie - Vax project - ai...@li...
- derived I'm sure from somewhere ... */
-
#ifndef _ASM_VAX_MMU_CONTEXT_H
#define _ASM_VAX_MMU_CONTEXT_H
+/* atp Jan 2001 */
#include <asm/mm/tlb.h>
@@ -12,6 +10,26 @@
#define destroy_context(mm) flush_tlb_mm(mm)
+static inline void set_vaxmm_regs_p0(pgd_t *pgdp)
+{
+ __mtpr(pgdp->br, PR_P0BR);
+ __mtpr( (pgdp->lr * 8), PR_P0LR);
+}
+
+static inline void set_vaxmm_regs_p1(pgd_t *pgdp)
+{
+ __mtpr(pgdp->br, PR_P1BR);
+ __mtpr( (pgdp->lr * 8), PR_P1LR);
+}
+
+static inline void set_vaxmm_regs(pgd_t *pgdp)
+{
+ __mtpr((pgdp[0]).br, PR_P0BR);
+ __mtpr( ((pgdp[0]).lr * 8), PR_P0LR);
+ __mtpr((pgdp[1]).br, PR_P1BR);
+ __mtpr( ((pgdp[1]).lr * 8), PR_P1LR);
+}
+
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk, unsigned cpu)
{
@@ -30,15 +48,12 @@
clear_bit(cpu, &prev->cpu_vm_mask);
tsk->thread.pcb.p0br = (next->pgd[0]).br;
- tsk->thread.pcb.p0lr = (next->pgd[0]).lr * 8 | 0x04000000;
+ tsk->thread.pcb.p0lr = (next->pgd[0]).lr * 8 /*| 0x04000000*/;
tsk->thread.pcb.p1br = (next->pgd[1]).br;
tsk->thread.pcb.p1lr = (next->pgd[1]).lr * 8;
- __mtpr(next->pgd[0].br, PR_P0BR);
- __mtpr((next->pgd[0].lr * 8), PR_P0LR);
- __mtpr(next->pgd[1].br, PR_P1BR);
- __mtpr((next->pgd[1].lr * 8), PR_P1LR);
-
+ set_vaxmm_regs(next->pgd);
+
flush_tlb_all();
}
set_bit(cpu, &next->cpu_vm_mask);
Index: pagelet.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pagelet.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- pagelet.h 2001/02/15 16:50:54 1.2
+++ pagelet.h 2001/07/31 17:33:26 1.3
@@ -73,11 +73,18 @@
struct vax_pgd_descriptor {
unsigned long br;
unsigned long lr;
+ unsigned long pmd; /* first four pages of the task PTE slot are the pmds
+ * There are two pmd's one for p0 and one for p1 */
+ unsigned long pmd2; /* This is just a place holder, as we pretend that
+ * our pmds hold 2048 entries and are 2 pages long */
+ unsigned long slot; /* the base address of this slot */
+ unsigned long segment; /* The segment index - used in pgd_clear */
};
/* pgd_t definitions */
typedef struct vax_pgd_descriptor pgd_t;
-#define pgd_val(x) ((x).br)
+/* the .pmd is not a typo */
+#define pgd_val(x) ((x).pmd)
#define __pgd(x) ((pgd_t) { (x) } )
/* definition of pmd_t */
@@ -90,6 +97,11 @@
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
+
+/* hwpte_t */
+typedef struct { unsigned long hwpte; } hwpte_t;
+#define hwpte_val(x) ((x).hwpte)
+#define __hwpte(x) ((hwpte_t) { (x) } )
/* and pgprot_t */
typedef struct { unsigned long pgprot; } pgprot_t;
Index: pagelet_pgd.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pagelet_pgd.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- pagelet_pgd.h 2001/02/15 01:17:23 1.1
+++ pagelet_pgd.h 2001/07/31 17:33:26 1.2
@@ -45,10 +45,11 @@
* into the pgd entry)
* All the actual stuff is done by the pmd_xxx functions
*/
-extern inline int pgd_none(pgd_t pgd) { return 0; }
-extern inline int pgd_bad(pgd_t pgd) { return 0; }
-extern inline int pgd_present(pgd_t pgd) { return 1; }
-extern inline void pgd_clear(pgd_t * pgdp) { }
+extern inline int pgd_none(pgd_t pgd) { return !(pgd).pmd; }
+extern inline int pgd_bad(pgd_t pgd) { return !(pgd).br; }
+extern inline int pgd_present(pgd_t pgd) { return ((pgd).pmd != 0); }
+
+extern void pgd_clear(pgd_t * pgdp);
/* to set the page-dir (p0br/p0lr) (p1br/p1lr) see arch/vax/mm/pgtable.c */
extern void set_page_dir(struct task_struct *task, pgd_t *pgdir);
Index: pagelet_pmd.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pagelet_pmd.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- pagelet_pmd.h 2001/06/17 12:34:05 1.2
+++ pagelet_pmd.h 2001/07/31 17:33:26 1.3
@@ -1,61 +1,65 @@
/*
* pagelet_pmd.h
*
- * Defines the page mid level directory in our fake 2 level paging scheme.
- * As for all the 2 level schemes, this is folded away by the compiler.
+ * Defines the page mid level directory in our fake 3 level paging scheme.
*
* Copyright atp Jan 2001.
+ * atp Jul 2001. Go to a fake 3 level.
*/
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
-/* 128 * 512. 128 ptes/page */
-#define PMD_SHIFT 30
+/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
+/* 1 page of ptes maps 128x4096 bytes = 512kb.
+ * Each "pmd" here is infact a 2 page = 8kb region at the start of the
+ * process page table region. It makes the accounting a lot easier.
+ */
+#define PMD_SHIFT 19
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
/*
* entries per page directory level: the VAX is single level, so
* we don't really have any PMD directory physically, or real pgd for
- * that matter.
+ * that matter. Its just an 8kb region.
*/
-#define PTRS_PER_PMD 1
+#define PTRS_PER_PMD 2048
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
/* pmd_xxx functions */
-/* These are really operating on the pgd_t */
-/* These are just testing the br in each pgd_t for the presence/absence of info */
-
-/* set_pmd: for the moment, I'm not going to use this. Each pgd_t in the
- * pgd should be set by hand at process initialisation. It doesnt need to
- * ever change, except for the length register, which is handled in pte_alloc */
-#define set_pmd(pmdptr, pmdval)
+/* These are really operating on the first two pages of a balance slot */
+/*
+ * we dont want linux mucking about with our pmd pages. It will get it
+ * wrong. pmd_alloc and pmd_free do the business there.
+ */
+#define set_pmd(pmdptr, pmdval)
/* Fixme:, check the length as well as the base register. */
-extern inline int pmd_none(pmd_t pmd) { return (pmd_val(pmd) == 0); }
+extern inline int pmd_none(pmd_t pmd)
+{
+ if (pmd_val(pmd) & 0x1) return 1;
+ return (pmd_val(pmd) == 0);
+}
extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) == 0); }
extern inline int pmd_present(pmd_t pmd) { return (pmd_val(pmd) != 0); }
-/* This is just zeroing out the base and length registers */
-/* FIXME: or validate code - I removed the zero'ing of the pmd,
- pmd are parts of pgds, and if we clear the br/lr of the P0 pmd,
- the zeroth member of pgd, we lose the vmalloc address so can't
- do vfree. - D.A. June 2001
-*/
-extern inline void pmd_clear(pmd_t * pmdp) {
- /* pmd_val(pmdp[0]) = 0;
- pmd_val(pmdp[1]) = 0;*/
- }
+/* clear the pmd entry */
+extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }
-
/* Find an entry in the second-level page table.. */
+#define pmd_index(address) ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
- return (pmd_t *) dir;
+ pmd_t *ptr;
+ ptr = (pmd_t *)pmd_val(*dir) + pmd_index(address);
+ /* locate the pmd entry according to address */
+// printk("pmd_offset: pgd %8p, pmd_val %8lx, address %8lx, index %8lx, offset %8p\n",dir,pmd_val(*dir),address,pmd_index(address),ptr);
+ return ptr;
}
Index: pagelet_pte.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pagelet_pte.h,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- pagelet_pte.h 2001/06/16 14:26:36 1.5
+++ pagelet_pte.h 2001/07/31 17:33:26 1.6
@@ -33,17 +33,17 @@
* Note that the first hwpte is the one that linux sees.
* The first hwpte is used for all tests except
* the dirty test, which has to be applied to all */
-typedef unsigned long hwpte_t;
+/*typedef unsigned long hwpte_t;*/
typedef struct pagelet_pagecluster {
- hwpte_t pte;
- hwpte_t pte1;
- hwpte_t pte2;
- hwpte_t pte3;
- hwpte_t pte4;
- hwpte_t pte5;
- hwpte_t pte6;
- hwpte_t pte7;
+ unsigned long pte;
+ unsigned long pte1;
+ unsigned long pte2;
+ unsigned long pte3;
+ unsigned long pte4;
+ unsigned long pte5;
+ unsigned long pte6;
+ unsigned long pte7;
} pagecluster_t;
/* each ptr is 32 bytes in size */
@@ -70,7 +70,7 @@
/* to find an entry in a page-table */
#define PAGE_PTR(address) \
-((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTE_LOG2)&PTE_MASK&~PAGE_MASK)
+((unsigned long)(((address)>>PAGE_SHIFT)<<SIZEOF_PTE_LOG2)&PTE_MASK&~PAGE_MASK)
/* Certain architectures need to do special things when PTEs
@@ -101,6 +101,11 @@
ptep->pte6 = pte_val(pte)+6;
ptep->pte7 = pte_val(pte)+7;
}
+
+static inline void print_pte(pte_t *ptep)
+{
+ printk(KERN_DEBUG "%8p: %8lx %8lx %8lx %8lx %8lx %8lx %8lx %8lx\n", ptep, ptep->pte,ptep->pte1,ptep->pte2,ptep->pte3,ptep->pte4,ptep->pte5,ptep->pte6,ptep->pte7);
+}
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -112,21 +117,24 @@
*
* See asm-i386/pgtable-3level.h for background.
*/
-/* D.A. May 2001 - FIXME: this needs cleaning up, hacked in a mk_pte and __mk_pte... will clean up later.. our mk_pte is being use incorrectly in some VAX code so I needed __mk_pte.
+/* D.A. May 2001 - FIXME: this needs cleaning up, hacked in a mk_pte and
+ __mk_pte... will clean up later.. our mk_pte is being use incorrectly
+ in some VAX code so I needed __mk_pte.
*/
-static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
+
+static inline pte_t __mk_pte(unsigned long int page, pgprot_t pgprot)
{
pte_t pte;
- pte.pte = (__pa(page) >> PAGELET_SHIFT) | pgprot_val(pgprot);
+ pte_val(pte) = (__pa(page) >> PAGELET_SHIFT) | pgprot_val(pgprot);
return pte;
}
#define mk_pte(page, pgprot) __mk_pte(((page)-mem_map)<<PAGE_SHIFT,(pgprot))
/* This takes a physical page address that is used by the remapping functions */
-static inline pte_t mk_pte_phys(void *physpage, pgprot_t pgprot)
+static inline pte_t mk_pte_phys(unsigned long int physpage, pgprot_t pgprot)
{
pte_t pte;
- pte.pte = ((unsigned long)(physpage) >> PAGELET_SHIFT) | pgprot_val(pgprot);
+ pte_val(pte) = ((unsigned long)(physpage) >> PAGELET_SHIFT) | pgprot_val(pgprot);
return pte;
}
@@ -196,17 +204,16 @@
/* who needs that
-extern inline int pte_read(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); }
-extern inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); }
-extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
-extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
-extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; }
-extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; }
-*/
+ * extern inline int pte_read(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); }
+ * extern inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); }
+ * extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
+ * extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
+ * extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; }
+ * extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; }
+ */
/*
* these manipulate various bits in each hwpte.
- *
*/
static inline pte_t pte_wrprotect(pte_t pte)
{
@@ -235,6 +242,12 @@
return pte;
}
+static inline pte_t pte_mkinvalid(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_VALID;
+ return pte;
+}
+
/* software only - only bother with first pagelet pte in the pagecluster */
static inline pte_t pte_mkold(pte_t pte)
{
@@ -261,9 +274,31 @@
static inline int pte_none(pte_t pte) { return (!pte_val(pte)); }
static inline int pte_present(pte_t pte) { return (pte_val(pte) & _PAGE_VALID); }
+
+extern pte_t * pte_offset(pmd_t * dir, unsigned long address);
+
+/* items to manipulate a hwpte (for the S0 tables ) */
+
+static inline void set_hwpte(hwpte_t *ptep, hwpte_t pte)
+{
+ *ptep = pte;
+}
+
+static inline hwpte_t mk_hwpte(void *page, pgprot_t pgprot)
+{
+ hwpte_t hwpte;
+ hwpte_val(hwpte) = (__pa(page) >> PAGELET_SHIFT) | pgprot_val(pgprot);
+ return hwpte;
+}
+
+static inline int hwpte_none(hwpte_t pte) { return !hwpte_val(pte); }
+static inline int hwpte_present(hwpte_t pte) { return hwpte_val(pte) & _PAGE_VALID; }
+
+static inline hwpte_t hwpte_mkinvalid(hwpte_t pte)
+{
+ hwpte_val(pte) &= ~_PAGE_VALID;
+ return pte;
+}
-/* find an entry in a pagetable */
-#define pte_offset(pmd, address) \
-((pte_t *) ( ((pgd_t *)(pmd))->br + ((address & 0x3fffffff)>> PAGE_SHIFT)*BYTES_PER_PTE_T))
#endif
Index: pgalloc.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pgalloc.h,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -r1.8 -r1.9
--- pgalloc.h 2001/06/26 19:01:11 1.8
+++ pgalloc.h 2001/07/31 17:33:26 1.9
@@ -1,7 +1,7 @@
#ifndef __ASM_VAX_MM_PGALLOC_H
#define __ASM_VAX_MM_PGALLOC_H
-/* atp 2001. pgalloc.h for VAX architecture. */
+/* Copyright atp 1998-2001. pgalloc.h for VAX architecture. */
/*
* Fixmes:
* 1) the pte_alloc/freeing stuff. Check Constraints here
@@ -16,6 +16,14 @@
/*
* (c) Copyright Dave Airlie 2001 - ai...@li...
* -- re-write for fixed sized processes
+ *
+ * atp Jun 2001 remove fixed size processes, use 3 level page table and pte slots.
+ * atp Jun-Jul 2001 - complete rewrite.
+ *
+ * each 'pgd' spans an address range of 0x40000000 bytes.
+ * each page of 'ptes' spans an address range of 0x80000 bytes
+ * So, there are 0x800 pages of 'ptes' per pgd. Keeping track of which page
+ * is mapped where, requires a pmd with 0x800 entries.
*/
#include <asm/processor.h>
#include <linux/threads.h>
@@ -25,6 +33,7 @@
#ifndef CONFIG_SMP
extern struct pgtable_cache_struct {
unsigned long *pgd_cache;
+ unsigned long pgd_slots_used;
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
} quicklists;
@@ -38,102 +47,66 @@
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (quicklists.pte_cache)
#define pgtable_cache_size (quicklists.pgtable_cache_sz)
+#define pgd_slots_used (quicklists.pgd_slots_used)
-
-/*
- * traditional two-level paging, page table allocation routines:
- */
-
-extern __inline__ pmd_t *get_pmd_fast(void)
-{
- return (pmd_t *)0;
-}
-
-extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
-extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
-
-extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
-{
- if (!pgd)
- BUG();
- return (pmd_t *) pgd;
-}
-
-
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
-#if 0
-extern pgd_t *get_pgd_slow(void);
-#else
-extern __inline__ pgd_t *get_pgd_slow(void)
+
+extern pgd_t *pgd_alloc(void);
+extern pgd_t *get_pgd_fast(void);
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
- /*
- * this is rather wasteful, as only 6 longwords are
- * used in the entire 4kb page. Perhaps we can do something
- * smarter here by using the quicklists to pack the pgds into
- * a single page.
- */
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
-
- if (ret) {
- /* Allocate space for the p0/p1 page tables */
- /* allocate 192 pages at 4096 bytes each for page tables? */
- ret[0].br = (unsigned long)vmalloc(192 * PAGE_SIZE);
- if (ret[0].br==0)
- {
- printk("page_tables:vmalloc failed to allocate a page directory\n");
- BUG();
- return NULL;
- }
- memset((void *)ret[0].br, 0, 192*PAGE_SIZE);
- ret[0].lr = ((160*PAGE_SIZE)>>SIZEOF_PTE_LOG2);
- /* the p1br needs to be set back from the end of the p1 ptes */
- ret[1].br = (ret[0].br - 0x800000) + (192*PAGE_SIZE);
- ret[1].lr = 0x40000-((32*PAGE_SIZE)>>SIZEOF_PTE_LOG2);
-
- printk("get_pgd: p0: %8lX, %8lX, p1: %8lX, %8lx\n", ret[0].br, ret[0].lr, ret[1].br, ret[1].lr);
-
- /* set the s0 region, from the master copy in swapper_pg_dir */
- memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
- return ret;
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
}
-#endif
-extern __inline__ pgd_t *get_pgd_fast(void)
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
- unsigned long *ret;
+ /* we dont do this at present */
+}
- if ((ret = pgd_quicklist) != NULL) {
- pgd_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
+extern pmd_t *get_pmd_slow(void);
- return (pgd_t *)ret;
+/* Page Mid level directory handling routines. */
+static inline pmd_t *get_pmd_fast(void)
+{
+ unsigned long *ret;
+
+ if ((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pmd_t *)ret;
}
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
+static inline void free_pmd_fast(pmd_t *pmd)
{
- *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
- pgd_quicklist = (unsigned long *) pgd;
- pgtable_cache_size++;
+ *(unsigned long *)pmd = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pmd;
+ pgtable_cache_size++;
}
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline void free_pmd_slow(pmd_t *pmd)
{
- vfree((void *)pgd[0].br);
- free_page((unsigned long)pgd);
+ free_page((unsigned long)pmd);
}
+/* in arch/vax/mm/pgalloc.c */
+extern pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address);
+extern void pmd_free(pmd_t *pmd);
+extern void pte_free(pte_t *pte);
+extern unsigned long get_pageaddr_from_pte(pte_t *ptep);
-extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset);
+extern pte_t *get_pte_slow(void);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset);
-extern void remap_and_clear_pte_page(pgd_t *pagetable, pte_t *page, unsigned long pte_page);
+extern void remap_and_clear_pte_page(pmd_t *s0addr, pte_t *page);
+extern void remap_pte_invalidate(pmd_t *s0addr);
extern __inline__ pte_t *get_pte_fast(void)
{
@@ -158,49 +131,30 @@
extern __inline__ void free_pte_slow(pte_t *pte)
{
pte_clear(pte);
-/* free_page((unsigned long)pte);*/
+ free_page((unsigned long)pte);
}
-#define pte_free_kernel(pte) free_pte_slow(pte)
-#define pte_free(pte) free_pte_slow(pte)
-#define pgd_free(pgd) free_pgd_slow(pgd)
-#define pgd_alloc() get_pgd_fast()
+extern __inline__ void page_clear(pte_t *pte) {memset(pte, 0, PAGE_SIZE);}
-/* atp jun 01, moved these to arch/vax/mm/pgalloc.c */
-/* Allocate a new page for a page table for the kernel */
-extern pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address);
-extern pte_t *pte_alloc(pmd_t *pmd, unsigned long address);
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
-#define pmd_free(pmd) free_pmd_slow(pmd)
-
#define pmd_free_kernel pmd_free
#define pmd_alloc_kernel pmd_alloc
-extern int do_check_pgt_cache(int, int);
+/* atp jun 01, moved these to arch/vax/mm/pgalloc.c */
+/* Allocate a new page for a page table for the kernel */
+extern pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+extern pte_t *pte_alloc(pmd_t *pmd, unsigned long address);
+extern pte_t * pte_alloc_one(pmd_t *pmd);
-/* I cant find a reference to this in the generic or arch specific code
- * -- it used to be called from linux/mm/vmalloc.c, but is no longer */
-/* extern inline void set_pgdir(unsigned long address, pgd_t entry)
- * {
- * struct task_struct * p;
- * pgd_t *pgd;
- *
- * read_lock(&tasklist_lock);
- * for_each_task(p) {
- * if (!p->mm)
- * continue;
- * *pgd_offset(p->mm,address) = entry;
- * }
- * read_unlock(&tasklist_lock);
- * for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- * pgd[address >> PGDIR_SHIFT] = entry;
- * }
- */
+extern int do_check_pgt_cache(int, int);
/* tlb routines */
Index: pgtable.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pgtable.h,v
retrieving revision 1.13
retrieving revision 1.14
diff -u -r1.13 -r1.14
--- pgtable.h 2001/07/21 11:58:51 1.13
+++ pgtable.h 2001/07/31 17:33:26 1.14
@@ -1,4 +1,7 @@
/* (c) 2001 Vax Porting Project, atp, kenn, airlied */
+
+/* FIXME: this is a mess its confusing and badly documented
+ * - needs cleaning up atp jul 2001 */
#ifndef _VAX_MM_PGTABLE_H
#define _VAX_MM_PGTABLE_H
@@ -20,6 +23,9 @@
/* the pagelet stuff */
#include <asm/mm/pgtable_pagelet.h>
+/* TASK address space sizing, for sizing SPT and so forth */
+#include <asm/mm/task.h>
+
/*
* See Documentation/vax/memory.txt
* for up to date memory layout
@@ -42,23 +48,29 @@
/* entries is (1024 * 1024) >> PAGELET_SIZE */
#define SPT_HWPTES_IOMAP (SPT_MAX_IOMAP<<1)
#define SPT_PTES_IOMAP (SPT_MAX_IOMAP >> 2)
- /*>> (PAGE_SHIFT-10)) */
+ /*/>> (PAGE_SHIFT-10)) */
/* FIXME: (PAGE_SHIFT-10) is hardwired here to 2. asm bug in head.S */
#define SPT_HWPTES_VMALLOC (SPT_MAX_VMALLOC << 1)
#define SPT_PTES_VMALLOC (SPT_MAX_VMALLOC >> 2)
#define SPT_BASE ((unsigned long)( (swapper_pg_dir[2]).br ))
-/* Length register is in words.. shift left 2 to get bytes */
-#define SPT_SIZE ((unsigned long)( (swapper_pg_dir[2]).lr ) << 2)
+/* SPT_LEN can be an lvalue, and is the length in longwords */
+#define SPT_LEN ((unsigned long)( (swapper_pg_dir[2]).lr ))
+/* SPT_SIZE is the size in BYTES */
+#define SPT_SIZE ((unsigned long)( (swapper_pg_dir[2]).lr ) << 2)
-/* I'm not sure these are ok. I've only tested the results of
- * These in the interrupt guard page routine in arch/vax/kernel/interrupt.c
- * if they are 4k ptes then set_pte needs to be used on the
- * results,
+/*
+ * Macros to get page table addresses + offsets.
+ *
+ * if they are 4k ptes then set_pte needs to be used on the results,
*/
+
/* macro to get linear page table entry for a physical address */
-#define GET_HWSPTE_PHYS(x) ((hwpte_t *)(SPT_BASE + ((x) >> (PAGELET_SHIFT-SIZEOF_PTR_LOG2))))
-#define GET_SPTE_PHYS(x) ((pte_t *)(SPT_BASE + ((x) >> (PAGE_SHIFT-SIZEOF_PTE_LOG2))))
+#define GET_HWSPTE_PHYS(x) ((hwpte_t *)(SPT_BASE + ( ((x) >> PAGELET_SHIFT) << SIZEOF_PTR_LOG2) ))
+
+/* this is like it is for a reason - we need to wipe out the lower bits, the old
+ * calculation using page_shift-sizeof_pte_log2 gave the wrong answer sometimes */
+#define GET_SPTE_PHYS(x) ((pte_t *)(SPT_BASE + ( ((x) >> PAGE_SHIFT) << SIZEOF_PTE_LOG2)))
/* macro to get linear page table entry for a virtual address
(only works for addresses in S0 space) */
@@ -76,23 +88,39 @@
space waste precious SPTEs.
*/
- /* the previous definition of VMALLOC START relied on the
- * VAX phy memory being an exact 4k multiple,
- * my VAX has 7f1f hw-pages so isn't aligned on 4K
- * workout the VMALLOC_START from the vmallocmap_base and the
- * system base register.-
- */
+/* the previous definition of VMALLOC START relied on the
+ * VAX phy memory being an exact 4k multiple,
+ * my VAX has 7f1f hw-pages so isn't aligned on 4K
+ * workout the VMALLOC_START from the vmallocmap_base and the
+ * system base register.-
+ */
+
+/* VMALLOC_OFFSET is the gap between the end of mapping of physical
+ * ram and the start of VMALLOC ?? */
#define VMALLOC_OFFSET (SPT_MAX_IOMAP * 1024)
- /*#define VMALLOC_START ((unsigned long) high_memory + VMALLOC_OFFSET)*/
#define VMALLOC_START (PAGE_OFFSET+((vmallocmap_base-swapper_pg_dir[2].br)<<(PAGELET_SHIFT-2)))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END (VMALLOC_START + (SPT_MAX_VMALLOC * 1024))
+/* Start of task page table area - the variables this is based on
+ * are defined in asm-vax/mm/task.h */
+
+/* address in S0 space of where the process page table area starts and ends.*/
+#define TASKPTE_START PTE_TASK_ALIGN(VMALLOC_END)
+#define TASKPTE_END (TASKPTE_START+(PTE_TASK_SLOTSIZE * TASK_MAXUPRC))
+/* the number of hwptes to map this space */
+#define SPT_HWPTES_TASKPTE (((PTE_TASK_SLOTSIZE)>>PAGELET_SHIFT)*TASK_MAXUPRC)
+#define SPT_PTES_TASKPTE (SPT_HWPTES_TASKPTE >> 3)
+
+/* find a slot in the pagetable area for pgd (x), x is 0->TASK_MAXUPRC-1 */
+#define GET_TASKSLOT(x) (TASKPTE_START+((x) * PTE_TASK_SLOTSIZE))
+
+
/* page table for 0-4MB for everybody */
/* This is a c reference to the start of the system page table
* (see arch/vax/boot/head.S). The spt is initialised to cover physical
* memory by early boot code, based on VMB supplied information. Further
- * expansion happens later in the boot sequence */
+ * expansion happens later in the boot sequence in paging_init */
extern pte_t *pg0;
/* Number of SPTEs in system page table */
@@ -135,7 +163,7 @@
#define SWP_TYPE(x) (((x).val >> 1) & 0x3f)
#define SWP_OFFSET(x) ((x).val >> 8)
#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define pte_to_swp_entry(x) ((swp_entry_t) { (x).pte })
+#define pte_to_swp_entry(x) ((swp_entry_t) { pte_val(x) })
#define swp_entry_to_pte(x) ((pte_t) { (x).val })
/* Memory sizing. You'll need to #include <asm/rpb.h> to get
Index: tlb.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/tlb.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- tlb.h 2001/02/15 01:17:23 1.1
+++ tlb.h 2001/07/31 17:33:26 1.2
@@ -16,7 +16,6 @@
* VAX hw ref manual pg 216. can use mtpr to either invalidate single
* (TBIS) or all (TBIA) TLB entries. In addition LDPCTX will
* invalidate all process virtual address translations.
- * FIXME: adopting sledgehammer (trust me i know what I'm doing) approach
*/
#define __flush_tlb() \
|
|
From: Andy P. <at...@us...> - 2001-07-31 17:33:29
|
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax
In directory usw-pr-cvs1:/tmp/cvs-serv1125
Modified Files:
irq.h ka43.h processor.h ptrace.h sigcontext.h vsa.h
Added Files:
ucontext.h
Log Message:
New mm layer + start of signal handling + misc fixes
--- NEW FILE ---
#ifndef _ASM_VAX_UCONTEXT_H
#define _ASM_VAX_UCONTEXT_H
/* atp Jul 2001, taken from other ports */
struct ucontext {
unsigned long uc_flags;
struct ucontext *uc_link;
stack_t uc_stack;
struct sigcontext uc_mcontext;
sigset_t uc_sigmask; /* mask last for extensibility */
};
#endif /* _ASM_VAX_UCONTEXT_H */
Index: irq.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/irq.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- irq.h 2001/02/16 00:54:26 1.2
+++ irq.h 2001/07/31 17:33:26 1.3
@@ -18,7 +18,8 @@
#define NO_IRQ 1023
#endif
-#define INT_STACK_SIZE 8192
+/* atp Jul 01 increased this to three pages - my M38 needs it for some reason */
+#define INT_STACK_SIZE 12288
#ifndef __ASSEMBLY__
Index: ka43.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/ka43.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- ka43.h 2001/06/26 19:01:11 1.3
+++ ka43.h 2001/07/31 17:33:26 1.4
@@ -158,5 +158,7 @@
#endif /* __ASSEMBLY */
+extern void ka43_diagmem_remap(unsigned long int address, unsigned long int size);
+
#endif /* __VAX_KA43_H */
Index: processor.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/processor.h,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -r1.8 -r1.9
--- processor.h 2001/06/10 22:40:09 1.8
+++ processor.h 2001/07/31 17:33:26 1.9
@@ -5,7 +5,7 @@
#include <asm/pcb.h> /* process control block definition */
#include <asm/page.h>
#include <asm/vaxcpu.h> /* CPU type definitions */
-
+#include <asm/mm/task.h> /* task memory space defines */
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
@@ -25,18 +25,6 @@
} vax_cpu;
-/*
- * * User space process size: 2GB (default).
- * */
-#define TASK_SIZE (PAGE_OFFSET)
-
-/* This decides where the kernel will search for a free chunk of vm
- * * space during mmap's.
- * */
-/*#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)*/
-/* Put the MMAP base at 32MB into the process memory space...
- we currently allocate 48MB per process */
-#define TASK_UNMAPPED_BASE (32*1024*1024)
/* from alpha port */
Index: ptrace.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/ptrace.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- ptrace.h 2001/01/23 16:51:27 1.2
+++ ptrace.h 2001/07/31 17:33:26 1.3
@@ -1,3 +1,4 @@
+/* ptrace.h linux vax porting team 1998-2001 */
#ifndef _VAX_PTRACE_H
#define _VAX_PTRACE_H
Index: sigcontext.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/sigcontext.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- sigcontext.h 2001/01/17 16:18:52 1.1
+++ sigcontext.h 2001/07/31 17:33:26 1.2
@@ -5,12 +5,27 @@
#include <asm/ptrace.h>
-struct sigcontext_struct {
- unsigned long _unused[4];
- int signal;
- unsigned long handler;
- unsigned long oldmask;
- struct pt_regs *regs;
+//struct sigcontext_struct {
+// unsigned long _unused[4];
+// int signal;
+// unsigned long handler;
+// unsigned long oldmask;
+// struct pt_regs *regs;
+//};
+
+/* This struct is saved by setup_frame in signal.c, to keep the current context while
+ a signal handler is executed. It's restored by sys_sigreturn.
+
+ To keep things simple, we use pt_regs here even though normally you just specify
+ the list of regs to save. Then we can use copy_from_user on the entire regs instead
+ of a bunch of get_user's as well...
+
+*/
+
+struct sigcontext {
+ struct pt_regs regs; /* needs to be first */
+ unsigned long oldmask;
+ unsigned long usp; /* usp before stacking this gunk on it */
};
#endif
Index: vsa.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/vsa.h,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- vsa.h 2001/06/17 11:43:45 1.4
+++ vsa.h 2001/07/31 17:33:26 1.5
@@ -30,6 +30,9 @@
extern int vsbus_disable_int(int bit_nr);
extern int vsbus_clear_int(int bit_nr);
extern int vsbus_probe_irq(void);
+extern int vsbus_probe_irq_on(void);
+extern unsigned char vsbus_probe_irq_report(void);
+
#define VSA_BASE_REGS 0x20080000
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel
In directory usw-pr-cvs1:/tmp/cvs-serv32129/kernel
Modified Files:
cpu_ka42.c cpu_ka43.c cpu_ka46.c interrupt.c process.c
reboot.c setup.c signal.c syscall.c
Log Message:
New mm layer, start of signals implementation + other misc fixes
Index: cpu_ka42.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka42.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- cpu_ka42.c 2001/06/26 18:59:00 1.5
+++ cpu_ka42.c 2001/07/31 17:28:26 1.6
@@ -28,6 +28,7 @@
unsigned int sidex;
};
+
struct ka42_machine_vector mv_ka42 = {
{
ka42_pre_vm_init,
Index: cpu_ka43.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka43.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- cpu_ka43.c 2001/06/26 18:59:00 1.6
+++ cpu_ka43.c 2001/07/31 17:28:26 1.7
@@ -10,6 +10,7 @@
* Fixed the cache initializing, added the functions
* ka43_cache_disbale/enable/clear and moved some stuff around.
* atp jun 2001 - machine check implementation
+ * atp Jul 2001 - diagmem remap functions
*/
#include <linux/types.h> /* For NULL */
@@ -22,6 +23,7 @@
#include <asm/mv.h>
#include <asm/vaxcpu.h>
#include <asm/vsa.h>
+#include <asm/mm/tlb.h>
#include <asm/ka43.h>
void ka43_pre_vm_init(void);
@@ -188,12 +190,12 @@
/* tell us all about it */
printk("KA43: machine check %d (0x%x)\n", ka43frame->mc43_code, ka43frame->mc43_code);
printk("KA43: reason: %s\n", ka43_mctype[ka43frame->mc43_code & 0xff]);
-
+ printk("KA43: at addr %x, pc %x, psl %x\n",ka43frame->mc43_addr,ka43frame->mc43_pc,ka43frame->mc43_psl);
/* fixme check restart and first part done flags */
if ((ka43frame->mc43_code & KA43_MC_RESTART) ||
(ka43frame->mc43_psl & KA43_PSL_FPDONE)) {
- printk("ka43_mchk: recovering from machine-check.\n");
+ printk("KA43: recovering from machine-check.\n");
ka43_cache_reset(); /* reset caches */
return; /* go on; */
}
@@ -202,4 +204,32 @@
printk("KA43: Machine Check - unknown error state - halting\n");
machine_halt();
+}
+
+/* slap the KA43_DIAGMEM bit on an area of S0 memory - used by drivers */
+/* size is the size of the region in bytes */
+void ka43_diagmem_remap(unsigned long int address, unsigned long int size)
+{
+ /*
+ * The KA43 seems to be nicely fscked up... All physical memory
+ * is accessible from 0x00000000 up (as normal) and also from
+ * 0x28000000 (KA43_DIAGMEM) in IO space. In order to reliably
+ * share memory with the LANCE, we _must_ read and write to this
+ * shared memory via the DIAGMEM region. Maybe this bypasses
+ * caches or something... If you don't do this you get evil
+ * "memory read parity error" machine checks.
+ */
+
+ /* You MUST remember to clear the DIAGMEM bits in these PTEs
+ before giving the pages back to free_pages() */
+
+ int i;
+ pte_t *p = GET_SPTE_VIRT(address);
+
+ printk(KERN_DEBUG "KA43: enabling KA43_DIAGMEM for memory from (%8lx) to (%8lx)\n",address, address+size);
+
+ for (i=0; i<(size>>PAGE_SHIFT); i++, p++) {
+ set_pte(p, __pte(pte_val(*p) | (KA43_DIAGMEM >> PAGELET_SHIFT)));
+ __flush_tlb_one(address + i * PAGE_SIZE);
+ }
}
Index: cpu_ka46.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka46.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- cpu_ka46.c 2001/06/26 18:59:00 1.6
+++ cpu_ka46.c 2001/07/31 17:28:26 1.7
@@ -16,6 +16,7 @@
#include <asm/mtpr.h>
#include <asm/mv.h>
#include <asm/vaxcpu.h>
+#include <asm/vsa.h>
#include <asm/ka46.h>
void ka46_pre_vm_init(void);
Index: interrupt.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/interrupt.c,v
retrieving revision 1.12
retrieving revision 1.13
diff -u -r1.12 -r1.13
--- interrupt.c 2001/06/26 18:59:00 1.12
+++ interrupt.c 2001/07/31 17:28:26 1.13
@@ -75,7 +75,7 @@
/* asm("movl %0, r2\n"
"movl %1 ,r3\n"
"movl %2 ,r4\n"
- "halt" : : "g"(q), "g"(t), "g"(s) ); */
+ "halt" : : "g"(q), "g"(t), "g"(s) );*/
set_pte(q, p);
__flush_tlb_one(interrupt_stack[smp_processor_id()]);
Index: process.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/process.c,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -r1.9 -r1.10
--- process.c 2001/05/27 16:17:42 1.9
+++ process.c 2001/07/31 17:28:26 1.10
@@ -193,12 +193,12 @@
newsp = regs->sp;
}
-/* printk("sys_clone: calling do_fork(0x%08lx, 0x%08lx, 0x%p)\n",
- clone_flags, newsp, regs); */
+ printk("sys_clone: calling do_fork(0x%08lx, 0x%08lx, 0x%p)\n",
+ clone_flags, newsp, regs);
retval = do_fork(clone_flags, newsp, regs, 0);
-/* printk("sys_clone: do_fork() returned %d\n", retval); */
+ printk("sys_clone: do_fork() returned %d\n", retval);
return retval;
}
Index: reboot.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/reboot.c,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- reboot.c 2001/06/26 18:59:00 1.2
+++ reboot.c 2001/07/31 17:28:26 1.3
@@ -62,6 +62,7 @@
{
if (mv->mcheck == NULL) {
printk("machine check - CPU specific handler not implemented - halting\n");
+ show_cpu_regs();
machine_halt();
}
mv->mcheck(stkframe);
Index: setup.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/setup.c,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -r1.9 -r1.10
--- setup.c 2001/06/26 18:59:00 1.9
+++ setup.c 2001/07/31 17:28:26 1.10
@@ -28,6 +28,7 @@
/* Defined in arch/vax/mm/init.c */
extern void paging_init(void);
+unsigned long max_pfn; /* number of 4k pfns */
/* Linker will put this at the end of the kernel image */
extern char _end;
@@ -56,6 +57,8 @@
void __init setup_arch(char **cmdline_p)
{
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };
+ unsigned int max_dma;
unsigned long bootmap_size;
unsigned long region_start;
unsigned long region_len;
@@ -74,8 +77,13 @@
/* Get the SID */
vax_cpu.sid = __mfpr(PR_SID);
- /* Initialize bootmem */
+ /* We expand the system page table in paging_init, so
+ * it comes before the bootmem allocator. */
+ paging_init();
+
+ /* Initialize bootmem */
+
/* We don't have any holes in our physical memory layout,
so we throw everything into the bootmem allocator.
Eventually, we will get smarter and use the bad page lists
@@ -96,15 +104,26 @@
printk("calling free_bootmem(start=%08lx, len=%08lx)\n",
region_start, region_len);
free_bootmem(region_start, region_len);
+
- region_start = __pa(SPT_BASE + SPT_SIZE);
+ region_start = __pa(SPT_BASE + SPT_SIZE);
region_len = (max_pfn << PAGE_SHIFT) - __pa(SPT_BASE + SPT_SIZE);
printk("calling free_bootmem(start=%08lx, len=%08lx)\n",
region_start, region_len);
free_bootmem(region_start, region_len);
+
+ max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+
+ /* max_pfn is the number of 4k ptes */
+ if (max_pfn < max_dma) {
+ zones_size[ZONE_DMA] = max_pfn;
+ } else {
+ zones_size[ZONE_DMA] = max_dma;
+ zones_size[ZONE_NORMAL] = max_pfn - max_dma;
+ }
- paging_init();
+ free_area_init(zones_size);
/* Set up the initial PCB. We can refer to current because head.S
has already set us up on the kernel stack of task 0. */
Index: signal.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/signal.c,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- signal.c 2001/05/27 16:35:13 1.2
+++ signal.c 2001/07/31 17:28:26 1.3
@@ -3,10 +3,13 @@
This file contains the standard functions that the arch-independent
kernel expects for signal handling
+
+ Copyright, 1998-2001 atp, kenn, airlied.
+
+ atp Jul 2001, signal handling, based on S390/Intel version.
*/
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
@@ -17,23 +20,157 @@
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
-
-#include <asm/bitops.h>
-#include <asm/pgalloc.h>
+#include <linux/stddef.h>
+#include <asm/ucontext.h>
#include <asm/uaccess.h>
-/* FIXME: obviously, these need to be filled in... */
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-int sys_sigaction(void)
-{
- return -ENOSYS;
-}
+asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
-int sys_sigsuspend(void)
+int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
{
+ if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
+ return -EFAULT;
+ if (from->si_code < 0)
+ return __copy_to_user(to, from, sizeof(siginfo_t));
+ else {
+ int err;
+
+ /* If you change siginfo_t structure, please be sure
+ this code is fixed accordingly.
+ It should never copy any pad contained in the structure
+ to avoid security leaks, but must copy the generic
+ 3 ints plus the relevant union member. */
+ err = __put_user(from->si_signo, &to->si_signo);
+ err |= __put_user(from->si_errno, &to->si_errno);
+ err |= __put_user((short)from->si_code, &to->si_code);
+ /* First 32bits of unions are always present. */
+ err |= __put_user(from->si_pid, &to->si_pid);
+ switch (from->si_code >> 16) {
+ case __SI_FAULT >> 16:
+ break;
+ case __SI_CHLD >> 16:
+ err |= __put_user(from->si_utime, &to->si_utime);
+ err |= __put_user(from->si_stime, &to->si_stime);
+ err |= __put_user(from->si_status, &to->si_status);
+ default:
+ err |= __put_user(from->si_uid, &to->si_uid);
+ break;
+ /* case __SI_RT: This is not generated by the kernel as of now. */
+ }
+ return err;
+ }
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(struct pt_regs * regs,int history0, int history1, old_sigset_t mask)
+{
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ regs->r2 = -EINTR;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(struct pt_regs * regs,sigset_t *unewset, size_t sigsetsize)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ regs->r2 = -EINTR;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+asmlinkage int
+sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs)
+{
+ return do_sigaltstack(uss, uoss, regs->sp); /* is ->sp the right one FIXME: check */
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+struct sigframe {
+ struct sigcontext sc;
+ unsigned long extramask[_NSIG_WORDS-1];
+ unsigned char retcode[8]; /* trampoline code */
+};
+
+struct rt_sigframe {
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned char retcode[8]; /* trampoline code */
+};
- return -ENOSYS;
-}
int sys_sigreturn(void)
{
@@ -45,48 +182,5 @@
return -ENOSYS;
}
-int sys_rt_sigsuspend(void)
-{
- return -ENOSYS;
-}
-int sys_sigaltstack(void)
-{
- return -ENOSYS;
-}
-int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
-{
- if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
- return -EFAULT;
- if (from->si_code < 0)
- return __copy_to_user(to, from, sizeof(siginfo_t));
- else {
- int err;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user((short)from->si_code, &to->si_code);
- /* First 32bits of unions are always present. */
- err |= __put_user(from->si_pid, &to->si_pid);
- switch (from->si_code >> 16) {
- case __SI_FAULT >> 16:
- break;
- case __SI_CHLD >> 16:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- default:
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- /* case __SI_RT: This is not generated by the kernel as of now. */
- }
- return err;
- }
-
-}
Index: syscall.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/syscall.c,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- syscall.c 2001/01/29 00:58:54 1.3
+++ syscall.c 2001/07/31 17:28:26 1.4
@@ -78,7 +78,7 @@
nr_args = *user_ap;
}
-/* printk("Dispatching syscall %d with %d args\n", chmk_arg, nr_args); */
+/* printk("Dispatching syscall %d with %d args\n", chmk_arg, nr_args);*/
/* We pass all the user-supplied args plus the pointer to the
regs to the syscall function. If the syscall is implemented
@@ -115,7 +115,7 @@
"g"(-EFAULT)
: "r0","r1","r2","r3","r4","r5" /* clobbers*/ );
-/* printk("syscall %d returned %ld (0x%08lx)\n", chmk_arg, regs->r0, regs->r0); */
+/* printk("syscall %d returned %ld (0x%08lx)\n", chmk_arg, regs->r0, regs->r0);*/
return;
}
|
|
From: Andy P. <at...@us...> - 2001-07-31 17:28:30
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv32129/mm
Modified Files:
fault.c init.c pgalloc.c pgtable.c
Log Message:
New mm layer, start of signals implementation + other misc fixes
Index: fault.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/fault.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- fault.c 2001/06/26 18:59:00 1.6
+++ fault.c 2001/07/31 17:28:26 1.7
@@ -35,6 +35,7 @@
* it off to handle_mm_fault().
*
* reason:
+ * reason == 0 means kernel translation not valid fault in SPT.
* bit 0 = length violation
* bit 1 = fault during PPTE reference
* bit 2 = fault-on-read if 0, fault-on-write if 1
@@ -57,8 +58,9 @@
struct mm_struct *mm = NULL;
unsigned fixup;
-#if VAX_MM_DEBUG
- printk("mmfault: fault at %8X\n", address);
+#ifdef VAX_MM_DEBUG
+ printk("mmfault: fault at %8x, pc %8x, psl %8x, reason %8x\n",info->addr, info->pc, info->psl, info->reason);
+ printk("mmfault:p0br %8lx, p0lr %8lx, p1br %8lx, p1lr %8lx\n",Xmfpr(PR_P0BR),Xmfpr(PR_P0LR),Xmfpr(PR_P1BR),Xmfpr(PR_P1LR));
#endif
/* This check, and the mm != NULL checks later, will be removed
later, once we actually have a 'current' properly defined */
@@ -72,20 +74,27 @@
goto no_context;
down (&mm->mmap_sem);
+
vma = find_vma(mm, address);
+
if (!vma)
goto bad_area;
+
if (vma->vm_start <= address)
goto good_area;
+
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
+
if (expand_stack(vma, address))
goto bad_area;
+
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
+
if (reason & REASON_WRITE) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
@@ -96,7 +105,8 @@
}
survive:
{
- int fault = handle_mm_fault(mm, vma, address, reason & REASON_WRITE);
+ int fault;
+ fault = handle_mm_fault(mm, vma, address, reason & REASON_WRITE);
if (!fault)
goto do_sigbus;
if (fault < 0)
@@ -111,10 +121,16 @@
*/
bad_area:
up(&mm->mmap_sem);
+ printk("\nStack dump\n");
+ hex_dump((void *)(regs->r1), 256);
+ show_regs(regs);
+ show_cpu_regs();
- if (user_mode(regs)) {
+ if (user_mode(regs)) {
printk("do_page_fault: sending SIGSEGV\n");
force_sig(SIGSEGV, current);
+ /* signals arent implemented yet */
+ machine_halt();
return;
}
@@ -212,13 +228,14 @@
struct accvio_info *info = (struct accvio_info *)excep_info;
static int active;
+
/* This active flag is just a temporary hack to help catch
accvios in the page fault handler. It will have to
go eventually as it's not SMP safe */
if (!active) {
/* active = 1;*/
do_page_fault(info, regs);
-#if VAX_MM_DEBUG
+#ifdef VAX_MM_DEBUG
printk("finished fault\n");
#endif
active = 0;
Index: init.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/init.c,v
retrieving revision 1.14
retrieving revision 1.15
diff -u -r1.14 -r1.15
--- init.c 2001/06/26 18:59:00 1.14
+++ init.c 2001/07/31 17:28:26 1.15
@@ -14,15 +14,16 @@
#include <asm/pgalloc.h>
#include <asm/rpb.h>
-#undef VAX_INIT_DEBUG
+#define VAX_INIT_DEBUG
static unsigned long totalram_pages;
-unsigned long max_pfn; /* number of 4k pfns */
unsigned long empty_zero_page[PAGE_SIZE /
sizeof(unsigned long)]
__attribute__ ((__aligned__(PAGE_SIZE)));
+pte_t *pg0;
+
#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif
@@ -38,23 +39,51 @@
/*
* In other architectures, paging_init sets up the kernel's page tables.
- * In Linux/VAX, this is already done by the early boot code.
+ * In Linux/VAX, this is already done by the early boot code. For the
+ * physical RAM. In this routine we initialise the remaining areas of
+ * the memory, and system page table.
*/
void __init paging_init()
{
- unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };
- unsigned int max_dma, max_norm;
+ hwpte_t *pte, *lastpte;
+ unsigned int ii;
- max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ /* sort out page table. */
+ pg0 = (pte_t *)SPT_BASE;
+
+ /* FIXME: This is where the VMALLOC stuff from head.S should go */
- /* max_pfn is the number of 4k ptes */
- if (max_pfn < max_dma) {
- zones_size[ZONE_DMA] = max_pfn;
- } else {
- zones_size[ZONE_DMA] = max_dma;
- zones_size[ZONE_NORMAL] = max_pfn - max_dma;
+ printk("VAXMM: Initialising mm layer for %d tasks of size %dMB\n",TASK_MAXUPRC,(TASK_WSMAX>>20));
+ /* Size the process page table slots. See asm/mm/task.h for details
+ * The _START and _END macros are from pgtable.h
+ * This is all in PAGELETS and HWPTES, hence no set_pte
+ */
+ pte = (hwpte_t *)GET_SPTE_VIRT(VMALLOC_END);
+ lastpte = (hwpte_t *)GET_SPTE_VIRT(TASKPTE_START);
+ ii=0;
+ /* clear this area */
+ while (pte<lastpte) {
+ *pte++ = __hwpte(0x00000000);
+ ii++;
+ }
+ /* this is stored in hwptes */
+ SPT_LEN += ii;
+
+ pte = (hwpte_t *)GET_SPTE_VIRT(TASKPTE_START);
+ lastpte = pte + SPT_HWPTES_TASKPTE;
+ /* clear this area */
+ while (pte<lastpte) {
+ *pte++ = __hwpte(0x00000000);
}
- free_area_init(zones_size);
+ /* this is stored in hwptes */
+ SPT_LEN += SPT_HWPTES_TASKPTE;
+ __mtpr(SPT_LEN, PR_SLR);
+ flush_tlb();
+
+ printk("VAXMM: system page table base %8lx, length (bytes) %8lx length (ptes) %8lx\n",SPT_BASE,SPT_SIZE,SPT_LEN);
+
+ /* clear the quicklists structure */
+ memset(&quicklists,0,sizeof(quicklists));
}
#if DEBUG_POISON
@@ -107,73 +136,6 @@
}
printk("Freeing unused kernel memory: %dk freed\n",
(&__init_end - &__init_begin) >> 10);
-}
-
-
-/* page table stuff */
-
-/* get_pte_kernel_slow. allocate a page of PTEs for the S0 pagetable.
- * See comments in include/asm/mm/pgalloc.h for get_pte_kernel.
- */
-pte_t *get_pte_kernel_slow(pmd_t * pmd, unsigned long address)
-{
- return (pte_t *) NULL;
-}
-
-/* just want a page here - quite simple */
-pte_t *get_pte_slow(pmd_t * pmd, unsigned long address)
-{
- unsigned long pte;
-
- pte = (unsigned long) __get_free_page(GFP_KERNEL);
-
- if (pte) {
- return (pte_t *) pte;
- } else {
- return NULL;
- }
- return NULL;
-}
-
-/* remap a given page to be part of a contiguous page table for p0/1 space */
-void remap_and_clear_pte_page(pgd_t *pagetable, pte_t *page, unsigned long pte_page)
-{
- unsigned long page_physical_address, page_virtual_address, page_s0_address;
- pte_t *S0pte;
- pte_t tpte;
- pte_t *newpage;
- pgd_t *sys_pgd = swapper_pg_dir+2;
- /* address in S0 space is page pointer */
- /* find the entry in the SPTE corresponding to this page */
-
- page_physical_address=__pa(page);
- S0pte = pte_offset(sys_pgd, page_physical_address);
-
- /* S0pte = (pte_t *)((__pa(page) >> PAGELET_SHIFT)+sys_pgd->br);*/
-
-#if VAX_INIT_DEBUG
- printk("remap: virt addr %p, pteval %8lX , S0pte %p, %8lX\n", page, pte_val(*page), S0pte, pte_val(*S0pte));
-#endif
- if (!pte_present(*S0pte))
- {
- unsigned long phy_addr;
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
- /* we have address in S0 of free page */
- /* need to remap this free page to the address in S0 where we are */
- /* Get the phy address of the page */
- //spte = pte_offset(sys_pgd, ((unsigned long)ret - PAGE_OFFSET));
- clear_page((void *)ret);
- pte_clear(S0pte);
- tpte = __mk_pte((void *)ret, (pgprot_t)PAGE_KERNEL);
-
- set_pte(S0pte, tpte);
- /* grab a free page */
- printk("S0 page invalid, %p %8lX\n", ret, pte_val(tpte));
- return;
- }
- /* zero out these pte's */
- // clear_page((void *) page);
- return;
}
void
Index: pgalloc.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/pgalloc.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- pgalloc.c 2001/06/27 09:17:41 1.1
+++ pgalloc.c 2001/07/31 17:28:26 1.2
@@ -3,23 +3,294 @@
* pgalloc.c Routines from include/asm-vax/mm/pgalloc.h
* Allocation of page table entries and so forth.
*
- * Copyright atp Jun 2001
+ * Copyright atp Jun 2001 - complete rewrite.
+ *
* GNU GPL
*/
#include <linux/sched.h>
#include <linux/mm.h>
-
+#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+
+#undef VAX_MM_PGALLOC_DEBUG
+
+/*
+ * allocate a pgd. We don't - at present, need to worry about
+ * maintaining a bit map as we put pgds that are finished with
+ * on our quicklists pool
+ */
+pgd_t *get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if ((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0;
+ pgtable_cache_size--;
+ }
+ return (pgd_t *)ret;
+}
+
+/* allocate a pgd */
+pgd_t *pgd_alloc(void)
+{
+ /* this is rather wasteful, as only a few longwords are
+ * used in the entire 4kb page. Perhaps we can do something
+ * smarter here by using the quicklists to pack the pgds into
+ * a single page. */
+ pgd_t *ret;
+ unsigned long taskslot;
+
+ /* grab a pgd off the cache */
+ ret = get_pgd_fast();
+
+ if (!ret) {
+ /* check if we have run out of balance slots */
+ if (pgd_slots_used >= TASK_MAXUPRC) return (pgd_t *)NULL;
+ ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+ taskslot = GET_TASKSLOT(pgd_slots_used);
+ /* one more slot used */
+ pgd_slots_used++;
+ ret[0].pmd = 0; /* These are blank */
+ ret[1].pmd = 0;
+
+ } else {
+ /* pgd_clear keeps this */
+ taskslot=ret->slot;
+ }
+
+ if (ret) {
+
+ /* set the values of the base + length registers */
+ ret[0].br = taskslot+ (P0PTE_OFFSET); /* skip the PMD */
+ ret[0].lr = 0x0;
+ /* this comes in handy later */
+ ret[0].slot = taskslot;
+ /* p1br points at what would be page mapping 0x40000000 */
+ ret[1].br = taskslot+ (P1PTE_OFFSET) - 0x800000 ;
+ /* This is the unmapped number of PTEs */
+ ret[1].lr = 0x40000;
+ ret[1].slot = taskslot;
+
+ ret[0].segment = 0;
+ ret[1].segment = 1;
+
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM:pgd_alloc: p0: %8lX, %8lX, p1: %8lX, %8lx, slot %ld, taskslot %8lx\n", ret[0].br, ret[0].lr, ret[1].br, ret[1].lr, pgd_slots_used-1, ret[0].slot);
+#endif
+ /* set the s0 region, from the master copy in swapper_pg_dir */
+ memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return ret;
+}
+
+void pgd_clear(pgd_t * pgdp)
+{
+ /* wipe a pgd structure carefully -- this is probably overkill */
+ pgdp->pmd=0;
+ pgdp->pmd2=0;
+
+ if (pgdp->segment) {
+ /* p1br points at what would be page mapping 0x40000000 */
+ pgdp->br = pgdp->slot+ (P1PTE_OFFSET) - 0x800000 ;
+ /* This is the unmapped number of PTEs */
+ pgdp->lr = 0x40000;
+ } else {
+ pgdp->br = pgdp->slot+ (P0PTE_OFFSET); /* skip the PMD */
+ pgdp->lr = 0x0;
+ }
+}
+
+/* bit of a null op - grab a page off the list - pmd_alloc does the real work */
+pmd_t *get_pmd_slow(void)
+{
+ return (pmd_t *) __get_free_page(GFP_KERNEL);
+}
+
+/* allocate a 'pmd'. In fact we will set it here too, to avoid confusion */
+pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
+{
+ /* we want to allocate two pages and remap them into the
+ * appropriate pmd slot in the taskslot. */
+
+ unsigned int is_p1;
+ pmd_t *pmdpage;
+ pmd_t *s0addr;
+ unsigned long adjaddr;
+
+ is_p1 = pgd->segment;
+ adjaddr = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+
+ /* sanity check */
+ /* FIXME: is this pgd_none? */
+ if (pgd->pmd) {
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM: Calling pmd_alloc on already allocated page (pgd=%8p,pmd=%8lx)\n",pgd,pgd->pmd);
+#endif
+ return (pmd_t *)pgd->pmd+adjaddr;
+ }
+
+ /* grab the first page */
+ pmdpage = get_pmd_fast();
+
+ if (!pmdpage) {
+ /* didnt work */
+ pmdpage = get_pmd_slow();
+ }
+ if (!pmdpage) {
+ /* didnt work again - give up */
+ printk(KERN_ERR "VAXMM: unable to allocate a pmd for pgd (%8p)\n",pgd );
+ return NULL;
+ }
+ /* calculate which bit of the page table area this page fits into */
+
+ s0addr = (pmd_t *)pgd->slot; /* base of the slot */
+
+ s0addr += (is_p1) ? (P1PMD_OFFSET/sizeof(pmd_t)): (P0PMD_OFFSET/sizeof(pmd_t));
+
+ /* remap and clear this page */
+ remap_and_clear_pte_page(s0addr, (pte_t *)pmdpage);
+
+ /* this is the first page in our pmd table. */
+ pgd->pmd=(unsigned long)s0addr;
+
+ /* now, do the same for the second */
+ pmdpage = get_pmd_fast();
+
+ if (!pmdpage) {
+ pmdpage = get_pmd_slow();
+ }
+ if (!pmdpage) {
+ printk(KERN_ERR "VAXMM: unable to allocate a pmd for pgd (%8p)\n",pgd );
+ free_pmd_fast(get_pageaddr_from_pte(pgd->pmd));
+ remap_pte_invalidate(pgd->pmd);
+ return NULL;
+ }
+
+ s0addr += (PAGE_SIZE/sizeof(pmd_t));
-/* misc comments FIXME: sort and discard */
+ remap_and_clear_pte_page(s0addr, (pte_t *)pmdpage);
+ /* and the second page in our pmd table. */
+ pgd->pmd2=(unsigned long)s0addr;
+
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM: pmd_alloc: pgd %8p, pgd->br %8lx, pgd->lr %8lx, \n\tpgd->pmd %8lx, pgd->pmd2 %8lx\n",pgd,pgd->br, pgd->lr, pgd->pmd,pgd->pmd2);
+#endif
+ /* pages allocated, now store the backpointer we need in pte_alloc
+ * in the last slot in the address slot. Comfortably beyond where
+ * we expect to really be allocating memory. */
+ pmdpage = (pmd_t *)pgd->pmd;
+
+ /* FIXME: I _really_ dont like this flag. */
+ pmd_val(pmdpage[PGD_SPECIAL]) = (unsigned long)pgd | 0x1;
+
+ return (pmd_t *) pgd->pmd+adjaddr;
+}
+
+/* This inverts the remapping done in remap_and_clear */
+unsigned long get_pageaddr_from_pte(pte_t *ptep)
+{
+ unsigned long addr;
+ pte_t *s0pte;
+
+ s0pte = GET_SPTE_VIRT(ptep);
+
+ addr = (unsigned long)(((pte_val(*s0pte)&PAGELET_PFN_MASK)<<PAGELET_SHIFT)|PAGE_OFFSET);
+// printk("get_pageaddr: ptep %p, spte %8lx, *spte %8lx, addr %8lx\n",ptep,s0pte,pte_val(*s0pte),addr);
+ return addr;
+}
+
+/* free a 'pmd'. */
+void pmd_free(pmd_t *pmd)
+{
+ pmd_t *pmdp;
+ pmdp = pmd+(PAGE_SIZE/4);
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM:pmd_free: freeing pmd %p, pmd2 %p\n",pmd,pmdp);
+#endif
+ free_pmd_slow(get_pageaddr_from_pte(pmdp));
+ free_pmd_slow(get_pageaddr_from_pte(pmd));
+ /* invalidate the S0 ptes that map this */
+ remap_pte_invalidate(pmd);
+ remap_pte_invalidate(pmdp);
+}
+
+/* remap a given page to be part of a contiguous page table for p0/1 space
+ *
+ * This is like remap_pte_range in memory.c but VAX specific
+ *
+ * s0addr is the address in S0 space that we need to remap the page
+ * pointed to pte_page to. We also clear the page pointed at by pte_page
+ */
+void remap_and_clear_pte_page(pmd_t *s0addr, pte_t *pte_page)
+{
+
+ pte_t *s0pte;
+
+ /* sanity checks */
+ if (!s0addr) {
+ printk(KERN_ERR "VAXMM: null S0 address in remap_and_clear_pte_page!\n");
+ return;
+ }
+ if (!pte_page) {
+ printk(KERN_ERR "VAXMM: null pte_page in remap_and_clear_pte_page!\n");
+ return;
+ }
+
+ /* locate the S0 pte that describes the page pointed to by s0addr */
+
+ s0pte = GET_SPTE_VIRT(s0addr);
+
+ /* is it already pointing somewhere? */
+#ifdef VAX_MM_PGALLOC_DEBUG
+ if (pte_present(*s0pte)) {
+ printk(KERN_DEBUG "VAXMM: S0 pte %8p already valid in remap_and_clear_pte_page??\n",s0pte);
+ }
+#endif
+
+ page_clear(pte_page);
+
+ /* zap the map */
+ set_pte(s0pte,__mk_pte(pte_page,__pgprot(_PAGE_VALID|_PAGE_KW)));
+// print_pte(s0pte);
+ flush_tlb_all();
+
+}
+
+/* invalidate the S0 pte that was remapped to point at this page */
+void remap_pte_invalidate(pmd_t *s0addr)
+{
+ pte_t *s0pte;
+
+ /* sanity checks */
+ if (!s0addr) {
+ printk(KERN_ERR "VAXMM: null S0 address in remap_and_clear_pte_page!\n");
+ return;
+ }
+ /* locate the S0 pte that describes the page pointed to by s0addr */
+
+ s0pte = GET_SPTE_VIRT(s0addr);
+
+ set_pte(s0pte, pte_mkinvalid(*s0pte));
+// print_pte(s0pte);
+ /* FIXME: these flush_tlb_alls need replacing with flush_tlb_8 */
+ flush_tlb_all();
+
+// __flush_tlb_one(s0addr);
+}
-/* It is called from mm/vmalloc.c in alloc_area_pmd()
+/*
+ * Notes on pte_alloc_kernel()
+ *
+ * It is called from mm/vmalloc.c in alloc_area_pmd()
*
- * It needs to be physically contiguous with the rest of the
+ * Any extension to the SPT needs to be physically contiguous with the rest of the
* system page table.
* Things to consider:
- * If you want to allocate a pagetable to hold a pte for a given
+ * If you want to allocate a page to hold a pte for a
* new S0 address, (where this address is higher than SBR+SLR) then that
* new page table page must be allocated at the exact physical page
* That maps that S0 address. I.e.
@@ -27,17 +298,22 @@
* page containing the address
*
* PFN = ( (X-PAGE_OFFSET) >> PAGE_SHIFT)
+ *
* PTE address (physical memory) = (PFN*4)+SBR
* Physical page address = (PTE address) & ~(PAGE_MASK)
* SLR = ((Physical page address + (1<<PAGE_SHIFT)) - SBR) / 4.
*
- *
* If that physical page is already occupied, the contents must
* be ejected. This takes time, and can lead to deadlock, particularly
* if a dirty page needs to be written to disk/swap.
* Also, any physical pages that are in between the previous end of the
* system page table, and the new end (SBR+SLR) will need to be cleared,
* otherwise random rubbish will end up in the system page table.
+ *
+ * This requirement of a contiguous range of physical pages, at a precise
+ * address range is hard to meet on a system that has been running for any
+ * length of time.
+ *
* One way to do this by "locking up the machine", moving the contents
* of the physical pages needed to pages on the freelist, rewriting the PTE's
* to point at the new physical pages, and then allocating and expanding
@@ -53,119 +329,210 @@
*
* - we still need to implement this ... linux still calls it ..
* - D.A. May 2001.
+ *
+ * - Indeed, however, the implementation is still not obvious to me.
+ * atp July 2001.
*/
+
+
pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
pgd_t *pgdptr = (pgd_t *)pmd;
/* note the lr in the system pgd is in PAGELETS.... shift it down to
give page view */
+//printk("pte_allock: pmd, %p, address %8lx\n",pmd,address);
if ((address >> PAGE_SHIFT) < (pgdptr->lr>>3))
return pte_offset(pmd, address);
else
return NULL;
}
+
/*
- * allocate a page, to hold page table entries.
- * for a user process.
+ * Allocate a page, to hold page table entries for a user process.
+ *
* We grab a random page. The only catch is that it must be virtually
* contiguous within the P0 or P1 page tables, which are held in S0
* space. So, we remap the page table area in S0 space too.
+ *
+ * The idea here is that a given task has an area in kernel
+ * address space that is TASK_WSMAX+TASK_STKSIZE in size (plus a few other bits).
+ * This space is initially unmapped. If the process needs to expand its page table
+ * (by mapping a page beyond the end of the relevant process page table)
+ * It can as long as it doesnt go beyond TASK_WSMAX in P0 and TASK_STKSIZE in P1.
+ * See asm-vax/mm/task.h for details.
+ *
+ * We make use of the knowledge that the pmd is a single block, to work back
+ * to the pgd, which is where the base and length register values are held.
+ *
+ * pmd is a pointer to the slot in our bogus pmd table we want to use.
*/
+
+
pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
- unsigned long pte_number, pte_page, pte_page_offset;
- pgd_t *pgdptr = (pgd_t *)pmd;
- pte_t *newpte= NULL;
-
- printk("pte_alloc: address %ld\n", address);
- /* Deal with P0 vs P1 spaces */
- /* need to handle error cases */
- if (address < 0x40000000)
- {
- pte_number = (address >> PAGE_SHIFT);
- pte_page = (pte_number >> SIZEOF_PTE_LOG2);
- pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
- if ((pte_number) < (pgdptr->lr)) {
- newpte = pte_offset(pmd, address);
+ pgd_t *pgdp;
+ pmd_t *pmd_basep, *s0addr;
+ unsigned long int current_last_page,is_p1,target_page,npages,pte_number,adjusted_address;
+ pte_t *pte_page;
+ pmd_t *pmdi;
+ long int direction,ii;
+
+ pmd_basep = ((unsigned long)pmd & PTE_TASK_MASK); /* base of the pmd */
+
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM:pte_alloc: pmd_basep %8lx, pmd %8lx, pmd_val %8lx, address %8lx, pmd_index %8lx\n",pmd_basep,pmd,pmd_val(*pmd),address,pmd_index(address));
+#endif
+ pgdp = (pgd_t *)(pmd_val(pmd_basep[PGD_SPECIAL]) & ~0x1);
+
+
+ /* FIXME: should test pgdp. this is pointless otherwise */
+ if ((!pgdp)||(pgd_none(*pgdp))) {
+ printk(KERN_ERR "VAXMM: Bad PGD (%8p, from pmd %8p) in pte_alloc\n",pgdp,pmd_basep);
+ return NULL;
+ }
+ if (pgdp->pmd != (unsigned long)pmd_basep) {
+ printk(KERN_ERR "VAXMM: Mismatched PGD (%8p, has pmd %8lx from pmd %8p) in pte_alloc\n",pgdp,pgdp->pmd,pmd_basep);
+ return NULL;
+ }
+
+ is_p1=pgdp->segment;
+
+// printk(KERN_DEBUG "ptealloc:pgd %8p, pgd->segment %ld, pgd->br %8lx, pgd->lr %lx, pgd->slot %8lx\n",pgdp,pgdp->segment,pgdp->br,pgdp->lr,pgdp->slot);
+
+ /* make an adjusted address + calculate linear page table entry */
+
+ adjusted_address = (((pmd-pmd_basep))<<(PAGE_SHIFT+7))+ (address&~PMD_MASK);
+ if (is_p1){
+ adjusted_address |= 0x40000000;
+ pte_number = (adjusted_address - 0x40000000) >> PAGE_SHIFT;
+ } else {
+ pte_number = (adjusted_address>>PAGE_SHIFT);
+ }
+
+ /* FIXME: check against WSMAX */
+
+ /* check that the pte we want isnt already allocated */
+ if (is_p1) {
+ if ((pte_number) > (pgdp->lr)) {
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM: pte_alloc called on already allocated page (pte %8lx, lr %8lx)\n",pte_number,pgdp->lr);
+#endif
+ return pte_offset(pmd, adjusted_address);
}
- }
- else
- {
- address-=0x40000000;
- pte_number = (address>>PAGE_SHIFT);
- pte_page = (pte_number >> SIZEOF_PTE_LOG2);
- pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
- if ((pte_number) > (pgdptr->lr)) {
- newpte = pte_offset(pmd, address);
+ } else {
+ if ((pte_number) < (pgdp->lr)) {
+#ifdef VAX_MM_PGALLOC_DEBUG
+ printk(KERN_DEBUG "VAXMM: pte_alloc called on already allocated page (pte %8lx, lr %8lx)\n",pte_number,pgdp->lr);
+#endif
+ return pte_offset(pmd, adjusted_address);
+ }
+ }
+
+ /* find the current last page in the page table */
+ current_last_page = (pgdp->lr >> 7) - 1; /* 128 PTE's per page */
+ target_page = pmd_index(adjusted_address);
+
+ if (is_p1) {
+ npages = current_last_page - target_page + 1;
+ /* The s0 address of the current end page in the page table is
+ * current_last_page * 128 ptes/page * 32 bytes/pte_t + base reg */
+
+ s0addr = (((current_last_page)<<7)*BYTES_PER_PTE_T)+pgdp->br;
+ direction = -1;
+ pmdi = pmd_basep+(current_last_page);
+ } else {
+ npages = target_page - current_last_page;
+ s0addr = (((current_last_page + 1)<<7)*BYTES_PER_PTE_T)+pgdp->br;
+ direction = 1;
+ pmdi = pmd_basep+(current_last_page + 1);
+ }
+ for (ii=0; ii<npages; ii++) {
+ if (!(pte_page=pte_alloc_one(pmdi))) {
+ printk(KERN_ERR "VAXMM: Unable to expand process page table (pgd=%8p)\n",pgdp);
+ return NULL;
}
+
+ /* remap and clear this page */
+ remap_and_clear_pte_page(s0addr, pte_page);
+
+ /* set the pmd */
+ pmd_val(*pmdi) = (unsigned long) s0addr;
+
+ /* increment/decrement length register. */
+ pgdp->lr += (direction*128);
+ s0addr += (direction * (PAGE_SIZE>>2));
+ pmdi += direction;
+
+ }
+
+ /* if task == current, the hw registers need to be set */
+ if (is_p1) {
+ if (current->thread.pcb.p1br == pgdp->br) {
+ current->thread.pcb.p1lr = pgdp->lr * 8;
+ set_vaxmm_regs_p1(pgdp);
+
+ }
+ } else {
+ if (current->thread.pcb.p0br == pgdp->br) {
+ current->thread.pcb.p0lr = pgdp->lr * 8;
+ set_vaxmm_regs_p0(pgdp);
+ }
+ }
+ /* we flush tlb anways as we have touched S0 page tables */
+ flush_tlb_all();
+ return pte_offset(pmd, adjusted_address);
+
+} /* pte_alloc */
- }
+/* allocate a page for the page table */
+pte_t * pte_alloc_one(pmd_t *pmd)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *page = get_pte_fast();
+
+ if (!page) return get_pte_slow();
+ return page;
+ }
+ return (pte_t *) pmd_val(*pmd);
+}
- if (newpte)
- {
- remap_and_clear_pte_page((pgd_t *)pmd, newpte, pte_page);
- /* make sure a page in S0 space is mapped */
+/* free the page after recovering the original address */
+void pte_free(pte_t *pte)
+{
+ free_pte_fast(get_pageaddr_from_pte(pte));
+ /* invalidate the S0 pte that maps this */
+ remap_pte_invalidate(pte);
+}
- }
- return newpte;
+/* Find an entry in the third-level page table.. */
+pte_t * pte_offset(pmd_t * dir, unsigned long address)
+{
+ return (pmd_val(*dir)+(((address>>PAGE_SHIFT)&(PTRS_PER_PTE-1))<<SIZEOF_PTE_LOG2));
+}
- /* old 2.2 code commented out for now .. in case it is of any use
- to anyone later - D.A. May 2001 */
-#if 0
- /* calculate the offset of the requested pte in this pagetable page */
- unsigned long pte_number, pte_page, pte_page_offset;
- pgd_t *pgdptr = (pgd_t *)pmd;
- unsigned long t2;
- pte_t *page;
-
- pte_number = (address >> PAGE_SHIFT);
- pte_page = (pte_number >> SIZEOF_PTE_LOG2);
- pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
-
- /* do we have a pgd base and length set ? */
- /* The p0br and p1br should be setup at process initialisation. */
- if (pmd_none(*pmd)) {
- printk("Got pmd_none\n");
- return NULL;
- }
-
- /* do we need to allocate another page(s) */
-
-
- /* this is already inside the page table region, and allocated */
- /* return the virtual address of the pte. (base registers for p0 and p1 */
- /* refer to virtual addresses in S0 space) so no _va() is needed */
- if (pte_number < (pgdptr->lr)) {
- return (pte_t *) (pgdptr->br + pte_number*BYTES_PER_PTE_T);
- }
-
- /* The address lies outside the current page table - by how much?*/
- /* FIXME: Maximum task size, defined by max p0 pagetable size */
-
- /* number of pages to allocate */
- t2 = ((pte_number - pgdptr->lr) >> SIZEOF_PTE_LOG2) + 1 ;
-
- while (t2--) {
- /* grab a page off the quicklist */
- page = get_pte_fast();
- /* or allocate a new one if none left */
- if (!page) page = get_pte_slow(pmd, address);
-
- /* run out of pages - out of memory */
- /* FIXME: is there anything else we need to do to signal failure?*/
- if (!page) {
- printk("%s:%d: run out of free pages building page table at pte %08lx.\n", __FILE__, __LINE__, pgdptr->lr);
- return NULL;
- }
- /* map this page into the S0 page table at the right point */
- remap_and_clear_pte_page((pgd_t *)pmd, page, pte_page);
-
- /* add this page of PTEs to the length register */
- /* FIXME: handle reverse P1 region... */
- pgdptr->lr += PTRS_PER_PTE;
- }
- return (pte_t *)( pgdptr->br + pte_number*BYTES_PER_PTE_T);
-#endif /* if 0 */
+/* get_pte_kernel_slow. allocate a page of PTEs for the S0 pagetable.
+ * See comments in include/asm/mm/pgalloc.h for get_pte_kernel.
+ */
+pte_t *get_pte_kernel_slow(pmd_t * pmd, unsigned long address)
+{
+ return (pte_t *) NULL;
}
+
+/* just want a page here - quite simple */
+/* bit of a null op - grab a page off the list - pte_alloc does the real work */
+pte_t *get_pte_slow(void)
+{
+ unsigned long pte;
+
+ pte = (unsigned long) __get_free_page(GFP_KERNEL);
+
+ if (pte) {
+ return (pte_t *) pte;
+ } else {
+ return NULL;
+ }
+ return NULL;
+}
+
Index: pgtable.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/pgtable.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- pgtable.c 2001/05/19 12:01:02 1.5
+++ pgtable.c 2001/07/31 17:28:26 1.6
@@ -1,13 +1,15 @@
/*
* $Id$
*
- * handle bits of VAX memory management
+ * handle bits of VAX memory management
+ * atp 2000
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
/* Note the factor of 8 in the length registers */
void set_page_dir(struct task_struct * tsk, pgd_t * pgdir)
@@ -22,10 +24,7 @@
/* This doesnt sound like a great idea... perhaps setipl(31) would
be a good idea here */
if (tsk == current) {
- __mtpr(tsk->thread.pcb.p0br, PR_P0BR );
- __mtpr(tsk->thread.pcb.p0lr, PR_P0LR );
- __mtpr(tsk->thread.pcb.p1br, PR_P1BR );
- __mtpr(tsk->thread.pcb.p1lr, PR_P1LR );
+ set_vaxmm_regs(pgdir);
flush_tlb_all();
}
}
|
|
From: Andy P. <at...@us...> - 2001-07-31 17:28:30
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax In directory usw-pr-cvs1:/tmp/cvs-serv32129 Modified Files: Makefile defconfig Log Message: New mm layer, start of signals implementation + other misc fixes Index: Makefile =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/Makefile,v retrieving revision 1.8 retrieving revision 1.9 diff -u -r1.8 -r1.9 --- Makefile 2001/06/24 19:34:56 1.8 +++ Makefile 2001/07/31 17:28:26 1.9 @@ -63,9 +63,11 @@ dd if=/dev/zero of=header.bin bs=512 count=1 mv -f header.bin vmlinux.SYS cat vmlinux.bin >> vmlinux.SYS + # this is helpful for low level debuggery -# rm -f vmlinux.lst -# $(OBJDUMP) -D vmlinux > vmlinux.lst +listfile: + rm -f vmlinux.lst + $(OBJDUMP) -D vmlinux > vmlinux.lst # This target will only re-compile stuff that's changed in arch/vax mopbootx: linkonly Index: defconfig =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/defconfig,v retrieving revision 1.6 retrieving revision 1.7 diff -u -r1.6 -r1.7 --- defconfig 2001/02/22 22:15:41 1.6 +++ defconfig 2001/07/31 17:28:26 1.7 @@ -218,8 +218,8 @@ # # Kernel hacking -# -CONFIG_DEBUG_MALLOC=y +# This appears to be missing bits... +#CONFIG_DEBUG_MALLOC=n CONFIG_MAGIC_SYSRQ=y CONFIG_PROFILE=y CONFIG_PROFILE_SHIFT=2 |
|
From: Andy P. <at...@us...> - 2001-07-31 17:28:30
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/boot
In directory usw-pr-cvs1:/tmp/cvs-serv32129/boot
Modified Files:
head.S tmp_init.c
Log Message:
New mm layer, start of signals implementation + other misc fixes
Index: head.S
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/boot/head.S,v
retrieving revision 1.11
retrieving revision 1.12
diff -u -r1.11 -r1.12
--- head.S 2001/07/02 12:09:24 1.11
+++ head.S 2001/07/31 17:28:26 1.12
@@ -198,13 +198,17 @@
# PAGE_OFFSET here is 0x80000000 - the start of system space.
#
# swapper_pg_dir is actually a pgd_t. The spt is the third entry.
-# p0br/p0lr, p1br/p1lr, sbr/slr
+# see include/asm-vax/mm/pagelet.h for details of the pgd_t structure.
+#
# First find a suitable start position for the SPT. This must be
# longword aligned
- addl3 $4, r9, r5 # R9 holds kernel end
- bicl2 $3, r5 # R5 is R9 rounded up to longword aligned
+#
+# Correction, for 3100/85 it needs to be page aligned.
+
+ addl3 $0x200, r9, r5 # R9 holds kernel end
+ bicl2 $0x1ff, r5 # R5 is R9 rounded up to page aligned
moval swapper_pg_dir, r0
- movl r5, 16(r0) # save address of base of system page table
+ movl r5, 48(r0) # save address of base of system page table
# Fill in the main part of the SPT (the entries that map physical
# memory)
@@ -253,8 +257,8 @@
blssu sparefill2
# system page table is setup. Save SPT length and zap processor registers
moval swapper_pg_dir, r0
- movl r7, 20(r0)
- mtpr 16(r0), $PR_SBR # set SBR
+ movl r7, 52(r0)
+ mtpr 48(r0), $PR_SBR # set SBR
mtpr r7, $PR_SLR # set SLR
# PCBB
# set up the process control block. Some machines need a valid PCB for
@@ -273,7 +277,7 @@
calls $0, VAX_start_mm # do that ole black magic
# made it
moval swapper_pg_dir, r0
- addl2 $PAGE_OFFSET, 16(r0) # fix up our reference to the system page tbl.
+ addl2 $PAGE_OFFSET, 48(r0) # fix up our reference to the system page tbl.
addl2 $PAGE_OFFSET, iomap_base # ... and the IOMAP PTEs
addl2 $PAGE_OFFSET, vmallocmap_base # ... and the IOMAP PTEs
addl2 $PAGE_OFFSET, mv # fix up machine vector pointer
Index: tmp_init.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/boot/tmp_init.c,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -r1.9 -r1.10
--- tmp_init.c 2001/03/04 23:45:50 1.9
+++ tmp_init.c 2001/07/31 17:28:26 1.10
@@ -31,7 +31,7 @@
#include <asm/bugs.h>
-extern void init_IRQ(void);
+/* extern void init_IRQ(void);
extern void sched_init(void);
extern void time_init(void);
extern void calibrate_delay(void);
@@ -60,13 +60,15 @@
extern void fork_init(unsigned long);
extern void cpu_idle(void);
-
+*/
+
/* stuff that is declared in head.S */
extern unsigned long int phys_start; /* physical address of kernel*/
extern unsigned long int virt_start; /* virtual address of kernel */
extern unsigned long int boot_ap; /* argument pointer */
extern unsigned long int boot_r11; /* rpb pointer */
extern unsigned long int boot_scb; /* scb pointer */
+extern unsigned long int iomap_base;
/* head.S copies the RPB into this structure */
struct rpb_struct boot_rpb;
@@ -104,6 +106,9 @@
* bit of arch-specific C code before starting the main start_kernel
*/
+#define IOMAP_START (PAGE_OFFSET+((iomap_base-swapper_pg_dir[2].br)<<(PAGELET_SHIFT-2)))
+
+
void vax_start_kernel(void)
{
/* set the number of 4k pages */
@@ -141,168 +146,12 @@
max_hwpfn, max_pfn, max_hwpfn/2);
printk("CPU type: %s, SID: %08x\n", mv->cpu_type_str(), vax_cpu.sid);
-
+
+ printk("VM: mapped physical from %x to %x, iomap from %x\n", PAGE_OFFSET, PAGE_OFFSET+(max_hwpfn*512), IOMAP_START);
+ printk("VM: vmalloc from %x to %x\n", VMALLOC_START, VMALLOC_END);
+ printk("VM: ptemap from %x to %x for %d processes\n",TASKPTE_START, TASKPTE_END,TASK_MAXUPRC);
printk("calling start_kernel...\n");
start_kernel();
-}
-
-void tmp_start_kernel(void)
-{
- char * command_line;
- unsigned long mempages;
-
-/*
- * Interrupts are still disabled. Do necessary setups, then
- * enable them
- */
- /* set the number of 4k pages */
- max_pfn = max_hwpfn/8;
-
- /* Protect us from interrupt stack overflows */
- guard_int_stack();
-
- /* If it is possible to register a console for your
- machine at this point in the boot sequence, do so
- in post_vm_init(). Otherwise, implement mv->console_init()
- which will be called later. */
-
- mv->post_vm_init();
-
- #ifdef __SMP__
- static int boot_cpu = 1;
- /* "current" has been set up, we need to load it now */
- if (!boot_cpu)
- initialize_secondary();
- boot_cpu = 0;
- #endif
-
- /*
- * Interrupts are still disabled. Do necessary setups, then
- * enable them
- */
- lock_kernel();
- printk(linux_banner);
-
- printk("RPB info: l_pfncnt: %08x, .l_vmb_version: %08x .l_badpgs: %08x\n",
- boot_rpb.l_pfncnt, boot_rpb.l_vmb_version, boot_rpb.l_badpgs);
-
- printk("Physical memory: %08x HW pagelets, %08lx pages (%dKB)\n",
- max_hwpfn, max_pfn, max_hwpfn/2);
-
- setup_arch(&command_line);
-
- printk("CPU type: %s, SID: %08x\n", mv->cpu_type_str(), vax_cpu.sid);
-
- trap_init();
- init_IRQ();
- sched_init();
- time_init();
- softirq_init();
-
- console_init();
-
- kmem_cache_init();
-
- printk("Dropping IPL to 0 to allow clock interrupts\n");
-
- sti();
-
- calibrate_delay();
-
- /* mem_init finishes up memory init. It places all remaining
- memory on the free list */
-
- mem_init();
- kmem_cache_sizes_init();
-
-#ifdef CONFIG_PROC_FS
- proc_root_init();
-#endif
-
- mempages = num_physpages;
-
- fork_init(mempages);
- proc_caches_init();
- vfs_caches_init(mempages);
- buffer_init(mempages);
- page_cache_init(mempages);
- kiobuf_setup();
- signals_init();
- inode_init(mempages);
-
-#if defined(CONFIG_SYSVIPC)
- ipc_init();
-#endif
-#if defined(CONFIG_QUOTA)
- dquot_init_hash();
-#endif
-
- check_bugs();
- printk("POSIX conformance testing by UNIFIX\n");
-
- kernel_thread(tmp_init, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
-
- unlock_kernel();
-
- current->need_resched = 1;
- cpu_idle();
-
- printk("\nThis is as far as we've got... - halting\n");
- machine_halt();
-}
-
-static void __init do_initcalls(void)
-{
- initcall_t *call;
-
- call = &__initcall_start;
- do {
- (*call)();
- call++;
- } while (call < &__initcall_end);
-
- /* Make sure there is no pending stuff from the initcall sequence */
- flush_scheduled_tasks();
-}
-
-static void tmp_do_basic_setup(void)
-{
- /*
- * Tell the world that we're going to be the grim
- * reaper of innocent orphaned children.
- *
- * We don't want people to have to make incorrect
- * assumptions about where in the task array this
- * can be found.
- */
- child_reaper = current;
-
- if (mv->init_devices) {
- mv->init_devices();
- }
-
-#if defined(CONFIG_NET)
- sock_init();
-#endif
-
- start_context_thread();
- do_initcalls();
-
- /* .. filesystems .. */
- filesystem_setup();
-
- /* Mount the root filesystem.. */
- mount_root();
-
-}
-
-static int tmp_init(void * unused)
-{
- printk("tmp_init: in init thread\n");
-
- tmp_do_basic_setup();
-
- panic("No init found.");
}
|
|
From: Dave A. <ai...@us...> - 2001-07-21 11:58:56
|
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm In directory usw-pr-cvs1:/tmp/cvs-serv1555 Modified Files: pgtable.h Log Message: DA: SPT_SIZE define is wrong.. or we use it wrong in setup.c .. I fixed it here.. others might like to fix setup.c .. not really sure which is the right way... Index: pgtable.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pgtable.h,v retrieving revision 1.12 retrieving revision 1.13 diff -u -r1.12 -r1.13 --- pgtable.h 2001/06/10 10:20:42 1.12 +++ pgtable.h 2001/07/21 11:58:51 1.13 @@ -42,13 +42,14 @@ /* entries is (1024 * 1024) >> PAGELET_SIZE */ #define SPT_HWPTES_IOMAP (SPT_MAX_IOMAP<<1) #define SPT_PTES_IOMAP (SPT_MAX_IOMAP >> 2) - /*/>> (PAGE_SHIFT-10)) */ + /*>> (PAGE_SHIFT-10)) */ /* FIXME: (PAGE_SHIFT-10) is hardwired here to 2. asm bug in head.S */ #define SPT_HWPTES_VMALLOC (SPT_MAX_VMALLOC << 1) #define SPT_PTES_VMALLOC (SPT_MAX_VMALLOC >> 2) #define SPT_BASE ((unsigned long)( (swapper_pg_dir[2]).br )) -#define SPT_SIZE ((unsigned long)( (swapper_pg_dir[2]).lr )) +/* Length register is in words.. shift left 2 to get bytes */ +#define SPT_SIZE ((unsigned long)( (swapper_pg_dir[2]).lr ) << 2) /* I'm not sure these are ok. I've only tested the results of * These in the interrupt guard page routine in arch/vax/kernel/interrupt.c |
|
From: Andy P. <at...@us...> - 2001-07-02 12:09:27
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/boot
In directory usw-pr-cvs1:/tmp/cvs-serv28164
Modified Files:
head.S
Log Message:
PCB initialisation code prior to VM start.
Index: head.S
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/boot/head.S,v
retrieving revision 1.10
retrieving revision 1.11
diff -u -r1.10 -r1.11
--- head.S 2001/06/24 19:34:56 1.10
+++ head.S 2001/07/02 12:09:24 1.11
@@ -3,6 +3,10 @@
# start of boot. entry point
# this assumes vmb has does most of the hard work (ie uvax rom vmb)
# save useful registers. jump to c in boot.c
+#
+# TBD: Some of this stuff could do with being rewritten in C
+# Some of this stuff could be in .init sections and thrown away.
+#
#include <asm/mtpr.h> /* Processor register definitions */
#include <asm/mv.h> /* machine vector definitions */
@@ -252,11 +256,21 @@
movl r7, 20(r0)
mtpr 16(r0), $PR_SBR # set SBR
mtpr r7, $PR_SLR # set SLR
+# PCBB
+# set up the process control block. Some machines need a valid PCB for
+# mm to work properly.
+# We should use the pcb for the init task for this, but Since this bit
+# should be done in C, rather than hardwiring offsets, I have put a fake
+# PCB in a throwaway .init section below.
+ moval fake_pcb,r9
+ movl $PAGE_OFFSET,88(r9) # p1br
+ mtpr r9, $PR_PCBB
# no need to TBIA - memory mapping not enabled
+# ready to turn on VM
moval msg_starting_vm, r10
jsb printstr
jsb crlf
- calls $0, VAX_start_mm
+ calls $0, VAX_start_mm # do that ole black magic
# made it
moval swapper_pg_dir, r0
addl2 $PAGE_OFFSET, 16(r0) # fix up our reference to the system page tbl.
@@ -371,3 +385,8 @@
cpu_type: .int 0x00000000
prom_sidex: .int 0x00000000
+# our dummy pcb
+.section .data.init
+.globl fake_pcb
+fake_pcb: .fill 24,4,0x00000000
+
|
|
From: Andy P. <at...@us...> - 2001-06-27 09:17:45
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv21707
Added Files:
pgalloc.c
Log Message:
missing pgalloc.c from memory reorganisation
--- NEW FILE ---
/* $Id: pgalloc.c,v 1.1 2001/06/27 09:17:41 atp Exp $
*
* pgalloc.c Routines from include/asm-vax/mm/pgalloc.h
* Allocation of page table entries and so forth.
*
* Copyright atp Jun 2001
* GNU GPL
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>
/* misc comments FIXME: sort and discard */
/* It is called from mm/vmalloc.c in alloc_area_pmd()
*
* It needs to be physically contiguous with the rest of the
* system page table.
* Things to consider:
* If you want to allocate a pagetable to hold a pte for a given
* new S0 address, (where this address is higher than SBR+SLR) then that
* new page table page must be allocated at the exact physical page
* That maps that S0 address. I.e.
* To map a virtual address X you need to allocate the physical
* page containing the address
*
* PFN = ( (X-PAGE_OFFSET) >> PAGE_SHIFT)
* PTE address (physical memory) = (PFN*4)+SBR
* Physical page address = (PTE address) & ~(PAGE_MASK)
* SLR = ((Physical page address + (1<<PAGE_SHIFT)) - SBR) / 4.
*
*
* If that physical page is already occupied, the contents must
* be ejected. This takes time, and can lead to deadlock, particularly
* if a dirty page needs to be written to disk/swap.
* Also, any physical pages that are in between the previous end of the
* system page table, and the new end (SBR+SLR) will need to be cleared,
* otherwise random rubbish will end up in the system page table.
 * One way to do this is by "locking up the machine", moving the contents
* of the physical pages needed to pages on the freelist, rewriting the PTE's
* to point at the new physical pages, and then allocating and expanding
* the system page table. No scheduling allowed. Also how you locate all
* of the references to a given physical page so that you can rewrite them
* without conducting a thorough search of all page tables (possibly
* incurring page faults for those P0 page tables that have been swapped out)
* is not clear.
*
*
* - At present I'm ducking this. We fix the S0 page table size at
* boot time, and disallow dynamic expansion. atp Feb 2001.
*
* - we still need to implement this ... linux still calls it ..
* - D.A. May 2001.
*/
/*
 * Look up the kernel (S0) page table entry covering a system-space
 * virtual address.
 *
 * The S0 page table is fixed in size at boot (no dynamic expansion),
 * so this never allocates: it either returns the existing PTE slot or
 * NULL when the address lies beyond the current system page table.
 *
 * pmd:     really the system pgd in disguise (cast below)
 * address: S0 virtual address to translate
 * returns: pointer to the PTE, or NULL if outside the S0 table
 */
pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	pgd_t *sys_pgd = (pgd_t *)pmd;
	unsigned long vpn = address >> PAGE_SHIFT;

	/* The length register in the system pgd counts PAGELETS; shift
	   it down by 3 to compare in whole-page units. */
	if (vpn >= (sys_pgd->lr >> 3))
		return NULL;

	return pte_offset(pmd, address);
}
/*
* allocate a page, to hold page table entries.
* for a user process.
* We grab a random page. The only catch is that it must be virtually
* contiguous within the P0 or P1 page tables, which are held in S0
* space. So, we remap the page table area in S0 space too.
*/
/*
 * Allocate (or look up) a page of PTEs for a user process.
 *
 * We grab a random physical page; the only catch is that it must be
 * virtually contiguous within the P0 or P1 page tables, which live in
 * S0 space.  So the page table area is remapped in S0 space too.
 *
 * pmd:     really the process pgd (cast below)
 * address: P0 or P1 virtual address needing a PTE
 * returns: pointer to the PTE slot, or NULL on failure
 *
 * FIXME: error cases (address outside the current page table) still
 * need real handling — see the commentary at the top of this file.
 */
pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	unsigned long pte_number, pte_page;
	pgd_t *pgdptr = (pgd_t *)pmd;
	pte_t *newpte = NULL;

	/* address is unsigned long: %lx, not %ld (which mis-declared the
	   type and printed addresses as possibly-negative decimal) */
	printk("pte_alloc: address %lx\n", address);

	/* Deal with P0 vs P1 spaces: 0x40000000 is the P0/P1 boundary */
	if (address < 0x40000000)
	{
		/* P0 region */
		pte_number = (address >> PAGE_SHIFT);
		pte_page = (pte_number >> SIZEOF_PTE_LOG2);
		if ((pte_number) < (pgdptr->lr)) {
			newpte = pte_offset(pmd, address);
		}
	}
	else
	{
		/* P1 region: rebase to the start of P1.  NOTE(review): the
		   '>' test mirrors P1's downward growth — confirm against
		   the P1 length-register convention. */
		address -= 0x40000000;
		pte_number = (address >> PAGE_SHIFT);
		pte_page = (pte_number >> SIZEOF_PTE_LOG2);
		if ((pte_number) > (pgdptr->lr)) {
			newpte = pte_offset(pmd, address);
		}
	}

	if (newpte)
	{
		/* make sure a page in S0 space is mapped for this
		   page-table page */
		remap_and_clear_pte_page((pgd_t *)pmd, newpte, pte_page);
	}
	return newpte;

	/* old 2.2 code commented out for now .. in case it is of any use
	   to anyone later - D.A. May 2001 */
#if 0
	/* calculate the offset of the requested pte in this pagetable page */
	unsigned long pte_number, pte_page, pte_page_offset;
	pgd_t *pgdptr = (pgd_t *)pmd;
	unsigned long t2;
	pte_t *page;
	pte_number = (address >> PAGE_SHIFT);
	pte_page = (pte_number >> SIZEOF_PTE_LOG2);
	pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
	/* do we have a pgd base and length set ? */
	/* The p0br and p1br should be setup at process initialisation. */
	if (pmd_none(*pmd)) {
		printk("Got pmd_none\n");
		return NULL;
	}
	/* do we need to allocate another page(s) */
	/* this is already inside the page table region, and allocated */
	/* return the virtual address of the pte. (base registers for p0 and p1 */
	/* refer to virtual addresses in S0 space) so no _va() is needed */
	if (pte_number < (pgdptr->lr)) {
		return (pte_t *) (pgdptr->br + pte_number*BYTES_PER_PTE_T);
	}
	/* The address lies outside the current page table - by how much?*/
	/* FIXME: Maximum task size, defined by max p0 pagetable size */
	/* number of pages to allocate */
	t2 = ((pte_number - pgdptr->lr) >> SIZEOF_PTE_LOG2) + 1 ;
	while (t2--) {
		/* grab a page off the quicklist */
		page = get_pte_fast();
		/* or allocate a new one if none left */
		if (!page) page = get_pte_slow(pmd, address);
		/* run out of pages - out of memory */
		/* FIXME: is there anything else we need to do to signal failure?*/
		if (!page) {
			printk("%s:%d: run out of free pages building page table at pte %08lx.\n", __FILE__, __LINE__, pgdptr->lr);
			return NULL;
		}
		/* map this page into the S0 page table at the right point */
		remap_and_clear_pte_page((pgd_t *)pmd, page, pte_page);
		/* add this page of PTEs to the length register */
		/* FIXME: handle reverse P1 region... */
		pgdptr->lr += PTRS_PER_PTE;
	}
	return (pte_t *)( pgdptr->br + pte_number*BYTES_PER_PTE_T);
#endif /* if 0 */
}
|
|
From: Andy P. <at...@us...> - 2001-06-26 19:01:16
|
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax
In directory usw-pr-cvs1:/tmp/cvs-serv19188
Modified Files:
mv.h ka43.h scb.h
Log Message:
machine check implementation
Index: mv.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mv.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- mv.h 2001/01/17 16:18:52 1.1
+++ mv.h 2001/06/26 19:01:11 1.2
@@ -30,6 +30,8 @@
void (*reboot)(void); /* If implemented, these are called from */
void (*halt)(void); /* machine_restart/_halt */
+
+ void (*mcheck)(void *); /* machine check handler */
void (*init_devices)(void); /* Optional */
@@ -51,8 +53,9 @@
#define MV_CONSOLE_INIT 24
#define MV_REBOOT 28
#define MV_HALT 32
-#define MV_INIT_DEVICES 36
-#define MV_CPU_TYPE_STR 40
+#define MV_MCHECK 36
+#define MV_INIT_DEVICES 40
+#define MV_CPU_TYPE_STR 44
#ifndef __ASSEMBLY__
Index: ka43.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/ka43.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- ka43.h 2001/03/04 23:53:25 1.2
+++ ka43.h 2001/06/26 19:01:11 1.3
@@ -5,6 +5,8 @@
*
* Taken from NetBSD
*
+ * atp. jun 01 machine check stuff lifted from NetBSD. Thanks ragge!.
+ *
*/
#ifndef __VAX_KA43_H
@@ -114,6 +116,45 @@
unsigned short pad7;
unsigned short diagtme; /* Diagnostic time register */
};
+
+struct ka43_mcframe { /* Format of KA43 machine check frame: */
+ int mc43_bcnt; /* byte count, always 24 (0x18) */
+ int mc43_code; /* machine check type code and restart bit */
+ int mc43_addr; /* most recent (faulting?) virtual address */
+ int mc43_viba; /* contents of VIBA register */
+ int mc43_sisr; /* ICCS bit 6 and SISR bits 15:0 */
+ int mc43_istate; /* internal state */
+ int mc43_sc; /* shift count register */
+ int mc43_pc; /* trapped PC */
+ int mc43_psl; /* trapped PSL */
+};
+
+#define KA43_MC_RESTART 0x00008000 /* Restart possible*/
+#define KA43_PSL_FPDONE 0x00010000 /* First Part Done */
+
+static char *ka43_mctype[] = {
+ "no error (0)", /* Code 0: No error */
+ "FPA: protocol error", /* Code 1-5: FPA errors */
+ "FPA: illegal opcode",
+ "FPA: operand parity error",
+ "FPA: unknown status",
+ "FPA: result parity error",
+ "unused (6)", /* Code 6-7: Unused */
+ "unused (7)",
+ "MMU error (TLB miss)", /* Code 8-9: MMU errors */
+ "MMU error (TLB hit)",
+ "HW interrupt at unused IPL", /* Code 10: Interrupt error */
+ "MOVCx impossible state", /* Code 11-13: Microcode errors */
+ "undefined trap code (i-box)",
+ "undefined control store address",
+ "unused (14)", /* Code 14-15: Unused */
+ "unused (15)",
+ "PC tag or data parity error", /* Code 16: Cache error */
+ "data bus parity error", /* Code 17: Read error */
+ "data bus error (NXM)", /* Code 18: Write error */
+ "undefined data bus state", /* Code 19: Bus error */
+};
+#define MC43_MAX 19
#endif /* __ASSEMBLY */
Index: scb.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/scb.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- scb.h 2001/01/17 16:18:52 1.1
+++ scb.h 2001/06/26 19:01:11 1.2
@@ -74,6 +74,7 @@
#define SCB_CHMU 0x13
#define SCB_BUSERR1 0x14
#define SCB_BUSERR2 0x15
+#define SCB_MEMCORR 0x15 /* corrected read memory handler */
#define SCB_BUSERR3 0x16
#define SCB_BUSERR4 0x17
#define SCB_BUSERR5 0x18
|
|
From: Andy P. <at...@us...> - 2001-06-26 19:01:16
|
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv19188/mm
Modified Files:
pgalloc.h
Log Message:
machine check implementation
Index: pgalloc.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pgalloc.h,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -r1.7 -r1.8
--- pgalloc.h 2001/06/17 12:39:19 1.7
+++ pgalloc.h 2001/06/26 19:01:11 1.8
@@ -166,162 +166,10 @@
#define pgd_free(pgd) free_pgd_slow(pgd)
#define pgd_alloc() get_pgd_fast()
+/* atp jun 01, moved these to arch/vax/mm/pgalloc.c */
/* Allocate a new page for a page table for the kernel */
-/* It is called from mm/vmalloc.c in alloc_area_pmd()
- *
- * It needs to be physically contiguous with the rest of the
- * system page table.
- * Things to consider:
- * If you want to allocate a pagetable to hold a pte for a given
- * new S0 address, (where this address is higher than SBR+SLR) then that
- * new page table page must be allocated at the exact physical page
- * That maps that S0 address. I.e.
- * To map a virtual address X you need to allocate the physical
- * page containing the address
- *
- * PFN = ( (X-PAGE_OFFSET) >> PAGE_SHIFT)
- * PTE address (physical memory) = (PFN*4)+SBR
- * Physical page address = (PTE address) & ~(PAGE_MASK)
- * SLR = ((Physical page address + (1<<PAGE_SHIFT)) - SBR) / 4.
- *
- *
- * If that physical page is already occupied, the contents must
- * be ejected. This takes time, and can lead to deadlock, particularly
- * if a dirty page needs to be written to disk/swap.
- * Also, any physical pages that are in between the previous end of the
- * system page table, and the new end (SBR+SLR) will need to be cleared,
- * otherwise random rubbish will end up in the system page table.
- * One way to do this by "locking up the machine", moving the contents
- * of the physical pages needed to pages on the freelist, rewriting the PTE's
- * to point at the new physical pages, and then allocating and expanding
- * the system page table. No scheduling allowed. Also how you locate all
- * of the references to a given physical page so that you can rewrite them
- * without conducting a thorough search of all page tables (possibly
- * incurring page faults for those P0 page tables that have been swapped out)
- * is not clear.
- *
- *
- * - At present I'm ducking this. We fix the S0 page table size at
- * boot time, and disallow dynamic expansion. atp Feb 2001.
- *
- * - we still need to implement this ... linux still calls it ..
- * - D.A. May 2001.
- */
-extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
-{
- pgd_t *pgdptr = (pgd_t *)pmd;
-
- /* note the lr in the system pgd is in PAGELETS.... shift it down to
- give page view */
- if ((address >> PAGE_SHIFT) < (pgdptr->lr>>3))
- return pte_offset(pmd, address);
- else
- return NULL;
-}
-
-/*
- * allocate a page, to hold page table entries.
- * for a user process.
- * We grab a random page. The only catch is that it must be virtually
- * contiguous within the P0 or P1 page tables, which are held in S0
- * space. So, we remap the page table area in S0 space too.
- */
-extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
-{
- unsigned long pte_number, pte_page, pte_page_offset;
- pgd_t *pgdptr = (pgd_t *)pmd;
- pte_t *newpte= NULL;
-
- /* Deal with P0 vs P1 spaces */
- /* need to handle error cases */
- if (address < 0x40000000)
- {
- pte_number = (address >> PAGE_SHIFT);
- pte_page = (pte_number >> SIZEOF_PTE_LOG2);
- pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
- if ((pte_number) < (pgdptr->lr)) {
- newpte = pte_offset(pmd, address);
- }
- }
- else
- {
- address-=0x40000000;
- pte_number = (address>>PAGE_SHIFT);
- pte_page = (pte_number >> SIZEOF_PTE_LOG2);
- pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
- if ((pte_number) > (pgdptr->lr)) {
- newpte = pte_offset(pmd, address);
- }
-
- }
-
- if (newpte)
- {
- remap_and_clear_pte_page((pgd_t *)pmd, newpte, pte_page);
- /* make sure a page in S0 space is mapped */
-
- }
- return newpte;
-
- /* old 2.2 code commented out for now .. in case it is of any use
- to anyone later - D.A. May 2001 */
-#if 0
- /* calculate the offset of the requested pte in this pagetable page */
- unsigned long pte_number, pte_page, pte_page_offset;
- pgd_t *pgdptr = (pgd_t *)pmd;
- unsigned long t2;
- pte_t *page;
-
- pte_number = (address >> PAGE_SHIFT);
- pte_page = (pte_number >> SIZEOF_PTE_LOG2);
- pte_page_offset = pte_number & (PTRS_PER_PTE - 1);
-
- /* do we have a pgd base and length set ? */
- /* The p0br and p1br should be setup at process initialisation. */
- if (pmd_none(*pmd)) {
- printk("Got pmd_none\n");
- return NULL;
- }
-
- /* do we need to allocate another page(s) */
-
-
- /* this is already inside the page table region, and allocated */
- /* return the virtual address of the pte. (base registers for p0 and p1 */
- /* refer to virtual addresses in S0 space) so no _va() is needed */
- if (pte_number < (pgdptr->lr)) {
- return (pte_t *) (pgdptr->br + pte_number*BYTES_PER_PTE_T);
- }
-
- /* The address lies outside the current page table - by how much?*/
- /* FIXME: Maximum task size, defined by max p0 pagetable size */
-
- /* number of pages to allocate */
- t2 = ((pte_number - pgdptr->lr) >> SIZEOF_PTE_LOG2) + 1 ;
-
- while (t2--) {
- /* grab a page off the quicklist */
- page = get_pte_fast();
- /* or allocate a new one if none left */
- if (!page) page = get_pte_slow(pmd, address);
-
- /* run out of pages - out of memory */
- /* FIXME: is there anything else we need to do to signal failure?*/
- if (!page) {
- printk("%s:%d: run out of free pages building page table at pte %08lx.\n", __FILE__, __LINE__, pgdptr->lr);
- return NULL;
- }
- /* map this page into the S0 page table at the right point */
- remap_and_clear_pte_page((pgd_t *)pmd, page, pte_page);
-
- /* add this page of PTEs to the length register */
- /* FIXME: handle reverse P1 region... */
- pgdptr->lr += PTRS_PER_PTE;
- }
- return (pte_t *)( pgdptr->br + pte_number*BYTES_PER_PTE_T);
-#endif /* if 0 */
-}
-
+extern pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+extern pte_t *pte_alloc(pmd_t *pmd, unsigned long address);
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
|
|
From: Andy P. <at...@us...> - 2001-06-26 18:59:04
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv18447/mm
Modified Files:
Makefile fault.c init.c
Log Message:
machine check fixes. separate pgalloc.h routines into arch/vax/mm/pgalloc.c
Index: Makefile
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/Makefile,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- Makefile 2001/03/04 23:44:43 1.3
+++ Makefile 2001/06/26 18:59:00 1.4
@@ -12,7 +12,7 @@
all: mm.o
O_TARGET := mm.o
-obj-y := init.o pgtable.o ioremap.o extable.o fault.o
+obj-y := init.o pgtable.o pgalloc.o ioremap.o extable.o fault.o
OX_OBJS :=
MX_OBJS :=
Index: fault.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/fault.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- fault.c 2001/05/20 13:25:11 1.5
+++ fault.c 2001/06/26 18:59:00 1.6
@@ -45,6 +45,7 @@
#define REASON_PPTEREF (1<<1)
#define REASON_WRITE (1<<2)
+#undef VAX_MM_DEBUG
static void
do_page_fault(struct accvio_info *info, struct pt_regs *regs)
@@ -56,7 +57,9 @@
struct mm_struct *mm = NULL;
unsigned fixup;
+#if VAX_MM_DEBUG
printk("mmfault: fault at %8X\n", address);
+#endif
/* This check, and the mm != NULL checks later, will be removed
later, once we actually have a 'current' properly defined */
if (tsk != NULL) {
@@ -215,7 +218,9 @@
if (!active) {
/* active = 1;*/
do_page_fault(info, regs);
+#if VAX_MM_DEBUG
printk("finished fault\n");
+#endif
active = 0;
} else {
Index: init.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/mm/init.c,v
retrieving revision 1.13
retrieving revision 1.14
diff -u -r1.13 -r1.14
--- init.c 2001/06/16 19:41:44 1.13
+++ init.c 2001/06/26 18:59:00 1.14
@@ -14,6 +14,8 @@
#include <asm/pgalloc.h>
#include <asm/rpb.h>
+#undef VAX_INIT_DEBUG
+
static unsigned long totalram_pages;
unsigned long max_pfn; /* number of 4k pfns */
@@ -148,9 +150,10 @@
S0pte = pte_offset(sys_pgd, page_physical_address);
/* S0pte = (pte_t *)((__pa(page) >> PAGELET_SHIFT)+sys_pgd->br);*/
-
- printk("remap: virt addr %p, pteval %8lX , S0pte %p, %8lX\n", page, pte_val(*page), S0pte, pte_val(*S0pte));
+#if VAX_INIT_DEBUG
+ printk("remap: virt addr %p, pteval %8lX , S0pte %p, %8lX\n", page, pte_val(*page), S0pte, pte_val(*S0pte));
+#endif
if (!pte_present(*S0pte))
{
unsigned long phy_addr;
|
|
From: Andy P. <at...@us...> - 2001-06-26 18:59:04
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/boot In directory usw-pr-cvs1:/tmp/cvs-serv18447/boot Modified Files: mmstart.S Log Message: machine check fixes. separate pgalloc.h routines into arch/vax/mm/pgalloc.c Index: mmstart.S =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/boot/mmstart.S,v retrieving revision 1.1 retrieving revision 1.2 diff -u -r1.1 -r1.2 --- mmstart.S 2001/01/17 16:13:57 1.1 +++ mmstart.S 2001/06/26 18:59:00 1.2 @@ -68,6 +68,7 @@ mtpr $PAGE_OFFSET, $PR_P0BR # Clear out P0BR, P0LR - do not mtpr $0, $PR_P0LR # need them anymore + mtpr $0, $PR_TBIA # clear tlb after touching BRs. ret .globl PK_str1 |
|
From: Andy P. <at...@us...> - 2001-06-26 18:59:04
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel
In directory usw-pr-cvs1:/tmp/cvs-serv18447/kernel
Modified Files:
cpu_generic.c cpu_ka410.c cpu_ka42.c cpu_ka43.c cpu_ka46.c
cpu_ka55.c cpu_ka630.c cpu_ka640.c cpu_ka650.c cpu_ka660.c
entry.S interrupt.c interrupt.h reboot.c setup.c
Log Message:
machine check fixes. separate pgalloc.h routines into arch/vax/mm/pgalloc.c
Index: cpu_generic.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_generic.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- cpu_generic.c 2001/01/17 16:13:57 1.1
+++ cpu_generic.c 2001/06/26 18:59:00 1.2
@@ -11,6 +11,19 @@
* Stuff that is specific to a given CPU can be found in cpu_XXX.c
*/
+/*
+ * Generic reboot and halt functions are in reboot.c
+ * CPUs that need to do special stuff in their halt and reboot functions
+ * should point to their own functions in their machine vector,
+ * otherwise they can leave NULL in the machine vector slots for these
+ * functions
+ *
+ * atp. This holds for machine check functions too. Leave a NULL if you
+ * just want a halt instruction on receipt of a machine check.
+ * See VARM Chapter 5 for details on machine check frames.
+ */
+
+
#include <asm/dz11.h>
#include <asm/io.h> /* For ioremap() */
#include <asm/mtpr.h>
@@ -71,6 +84,7 @@
: "r2" );
}
+
int ka46_48_49_prom_getchar(void)
{
/* Not yet implemented */
@@ -102,6 +116,7 @@
only be used after VM is enabled and the DZ11 registers have been
mapped by map_dz11_regs(). */
+
/* This gets set to non-NULL once the I/O page has been mapped */
volatile struct dz11_regs *dz11_addr = NULL;
@@ -109,6 +124,7 @@
console. Normally it is line 3 */
static unsigned int dz11_line;
+
/* stuff a char out of a DZ11-compatible serial chip */
void dz11_putchar(int c)
{
@@ -148,6 +164,7 @@
while ( ((txcs = dz11_addr->csr ) & DZ11_CSR_TRDY) == 0) ;
}
+
int dz11_getchar(void)
{
/* Not yet implemented */
@@ -164,11 +181,5 @@
dz11_addr = ioremap(dz11_phys_addr, sizeof(*dz11_addr));
}
-
-/*
- * Generic reboot and halt functions. CPUs that need to do special stuff
- * here should point to their own functions in their machine vector,
- * otherwise they can leave NULL in the machine vector slots for these
- * functions
- */
+
Index: cpu_ka410.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka410.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- cpu_ka410.c 2001/01/17 16:13:57 1.1
+++ cpu_ka410.c 2001/06/26 18:59:00 1.2
@@ -38,6 +38,8 @@
NULL, /* reboot */
NULL, /* halt */
+
+ NULL, /* mcheck - machine check */
NULL, /* init_devices */
Index: cpu_ka42.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka42.c,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- cpu_ka42.c 2001/02/22 22:36:42 1.4
+++ cpu_ka42.c 2001/06/26 18:59:00 1.5
@@ -43,6 +43,7 @@
NULL, /* reboot */
NULL, /* halt */
+ NULL, /* mcheck - machine check */
ka42_init_devices, /* init_devices */
Index: cpu_ka43.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka43.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- cpu_ka43.c 2001/03/04 23:50:30 1.5
+++ cpu_ka43.c 2001/06/26 18:59:00 1.6
@@ -9,6 +9,7 @@
* 2000/04/01 Mattias Nordlund
* Fixed the cache initializing, added the functions
* ka43_cache_disbale/enable/clear and moved some stuff around.
+ * atp jun 2001 - machine check implementation
*/
#include <linux/types.h> /* For NULL */
@@ -28,6 +29,7 @@
void ka43_cache_disable(volatile unsigned int *creg_addr);
void ka43_cache_clear(volatile unsigned int *ctag_addr);
void ka43_cache_enable(volatile unsigned int *creg_addr);
+void ka43_mcheck(void *stkframe);
void ka43_init_devices(void);
@@ -35,6 +37,14 @@
/* Internal CPU register space */
static volatile struct ka43_cpu_regs *cpu_regs;
+/* We keep the cache page remaps handy incase we want to reset the cache
+ - see the machine check etc..
+ - perhaps we should bung this in the mv too.
+ atp jun 01
+ */
+static volatile unsigned int *ka43_ctag_addr;
+static volatile unsigned int *ka43_creg_addr;
+
struct ka43_machine_vector {
struct vax_mv mv;
@@ -56,7 +66,8 @@
NULL, /* reboot */
NULL, /* halt */
-
+ ka43_mcheck, /* mcheck - machine check */
+
ka43_init_devices,
ka43_cpu_type_str
@@ -73,18 +84,16 @@
mv_ka43.sidex = *(unsigned int *)RIGEL_SIDEX_ADDR;
}
+/* dont call ka43_cache_reset before this function (unlikely) */
void ka43_post_vm_init(void)
{
- volatile unsigned int *ctag_addr;
- volatile unsigned int *creg_addr;
-
init_dz11_console(0x200A0000, 3);
dz_serial_console_init(0, 0);
cpu_regs = ioremap(KA43_CPU_BASE, KA43_CPU_SIZE);
- creg_addr = ioremap(KA43_CH2_CREG, 1);
- ctag_addr = ioremap(KA43_CT2_BASE, KA43_CT2_SIZE);
+ ka43_creg_addr = ioremap(KA43_CH2_CREG, 1);
+ ka43_ctag_addr = ioremap(KA43_CT2_BASE, KA43_CT2_SIZE);
/* Disable parity on DMA and CPU memory accesses. Don't know what the
story is with this, but VMS seems do this too... */
@@ -93,13 +102,10 @@
/*
* Resetting the cache involves disabling it, then clear it and enable again.
*/
- ka43_cache_disable(creg_addr);
- ka43_cache_clear(ctag_addr);
- ka43_cache_enable(creg_addr);
-
- /* Don't need these mappings any more */
- iounmap((void *)ctag_addr);
- iounmap((void *)creg_addr);
+ ka43_cache_disable(ka43_creg_addr);
+ ka43_cache_clear(ka43_ctag_addr);
+ ka43_cache_enable(ka43_creg_addr);
+
}
void ka43_cache_disable(volatile unsigned int *creg_addr)
@@ -148,6 +154,16 @@
}
}
+void ka43_cache_reset(void)
+{
+ /*
+ * Resetting the cache involves disabling it, then clear it and enable again.
+ */
+ ka43_cache_disable(ka43_creg_addr);
+ ka43_cache_clear(ka43_ctag_addr);
+ ka43_cache_enable(ka43_creg_addr);
+}
+
void ka43_init_devices(void)
{
#ifdef CONFIG_VSBUS
@@ -161,4 +177,29 @@
return "KA43";
}
+/* if this seems very similar to the netbsd implementation, then
+ * it is. After all how many ways can you check a sequence of flags?
+ */
+void ka43_mcheck(void *stkframe)
+{
+ /* map the frame to the stack */
+ struct ka43_mcframe *ka43frame = (struct ka43_mcframe *)stkframe;
+
+ /* tell us all about it */
+ printk("KA43: machine check %d (0x%x)\n", ka43frame->mc43_code, ka43frame->mc43_code);
+ printk("KA43: reason: %s\n", ka43_mctype[ka43frame->mc43_code & 0xff]);
+
+ /* fixme check restart and first part done flags */
+
+ if ((ka43frame->mc43_code & KA43_MC_RESTART) ||
+ (ka43frame->mc43_psl & KA43_PSL_FPDONE)) {
+ printk("ka43_mchk: recovering from machine-check.\n");
+ ka43_cache_reset(); /* reset caches */
+ return; /* go on; */
+ }
+
+ /* Unknown error state, panic/halt the machine */
+ printk("KA43: Machine Check - unknown error state - halting\n");
+ machine_halt();
+}
Index: cpu_ka46.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka46.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- cpu_ka46.c 2001/02/22 22:36:42 1.5
+++ cpu_ka46.c 2001/06/26 18:59:00 1.6
@@ -48,7 +48,8 @@
NULL, /* reboot */
NULL, /* halt */
-
+ NULL, /* mcheck - machine check */
+
ka46_init_devices, /* init_devices */
ka46_cpu_type_str
Index: cpu_ka55.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka55.c,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- cpu_ka55.c 2001/02/11 23:47:25 1.3
+++ cpu_ka55.c 2001/06/26 18:59:00 1.4
@@ -46,7 +46,8 @@
NULL, /* halt */
NULL, /* init_devices */
-
+ NULL, /* mcheck - machine check */
+
ka55_cpu_type_str
},
0 /* System ID Extension from ROM */
Index: cpu_ka630.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka630.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- cpu_ka630.c 2001/01/17 16:13:57 1.1
+++ cpu_ka630.c 2001/06/26 18:59:00 1.2
@@ -39,7 +39,8 @@
NULL, /* reboot */
NULL, /* halt */
-
+ NULL, /* mcheck - machine check */
+
NULL, /* init_devices */
ka630_cpu_type_str
Index: cpu_ka640.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka640.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- cpu_ka640.c 2001/01/17 16:13:57 1.1
+++ cpu_ka640.c 2001/06/26 18:59:00 1.2
@@ -40,7 +40,8 @@
NULL, /* reboot */
NULL, /* halt */
-
+ NULL, /* mcheck - machine check */
+
NULL, /* init_devices */
ka640_cpu_type_str
Index: cpu_ka650.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka650.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- cpu_ka650.c 2001/01/17 16:13:57 1.1
+++ cpu_ka650.c 2001/06/26 18:59:00 1.2
@@ -40,7 +40,8 @@
NULL, /* reboot */
NULL, /* halt */
-
+ NULL, /* mcheck - machine check */
+
NULL, /* init_devices */
ka650_cpu_type_str
Index: cpu_ka660.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/cpu_ka660.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- cpu_ka660.c 2001/01/17 16:13:57 1.1
+++ cpu_ka660.c 2001/06/26 18:59:00 1.2
@@ -44,7 +44,8 @@
NULL, /* reboot */
NULL, /* halt */
-
+ NULL, /* mcheck - machine check */
+
NULL, /* init_devices */
ka660_cpu_type_str
Index: entry.S
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/entry.S,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- entry.S 2001/05/27 12:34:13 1.4
+++ entry.S 2001/06/26 18:59:00 1.5
@@ -5,6 +5,7 @@
Copyright Dec 1998 atp.
Copyright 2000, Kenn Humborg <ke...@li...>
+ 2001 atp. Additions for Machine check handling.
*/
#include <linux/sys.h>
@@ -12,7 +13,7 @@
/* irqvec_handler is the generic handler for all interrupts and
exceptions for which a driver (or other code) has registered
- a handler. We are responsible for
+ a handler. Except machine checks. We are responsible for
o saving all registers
@@ -28,6 +29,7 @@
See Documentation/vax/interrupts.txt for the gory details */
+
.globl irqvec_handler
irqvec_handler:
/* At this point stack looks like:
@@ -218,6 +220,34 @@
popr $0x3fff /* restore R0 to FP */
moval 4(sp), sp /* Remove handler_PC */
rei
+
+/* mcheck_handler is the handler for machine check exceptions.
+ This is here, because its easier to special case it, and deal with
+ the machine dependent number of longwords than warp the generic
+ registration methods to deal with it
+ atp jun 2001
+ */
+/* ensure longword alignment */
+.align 2
+.globl machine_check_handler
+machine_check_handler:
+/* Note this doesnt use the usual exception registration, as we dont
+ * know in advance how many longwords of exception info have been pushed.
+ *
+ * The interrupt stack at this point looks like this
+ * SP: longword count of exception info
+ * exception longwords
+ * :
+ * PC
+ * PSL
+ */
+ pushr $0x3f /* push all registers in case we can restart */
+ pushab 24(sp) /* address of stack slot which holds byte count */
+ calls $1, machine_check /* in reboot.c */
+ popr $0x3f /* spring them off */
+ addl2 (sp)+,sp /* get rid of the machine check frame */
+
+ rei /* dismiss */
.data
.globl sys_call_table
Index: interrupt.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/interrupt.c,v
retrieving revision 1.11
retrieving revision 1.12
diff -u -r1.11 -r1.12
--- interrupt.c 2001/05/27 13:25:29 1.11
+++ interrupt.c 2001/06/26 18:59:00 1.12
@@ -94,17 +94,27 @@
SCB_VECTOR(i) = &stray_handlers[i].inst_jsb;
}
+
flush_icache();
}
void trap_init(void)
{
- /* Initialize the SCB with the stray interrupt/exception
+ extern void machine_check_handler(void);
+ void *mhandler;
+
+ /* Initialize the SCB with the stray interrupt/exception
handlers. Some of these will be overridden later
as device drivers hook up to their interrupts. */
setup_scb();
+ /* Install the specific machine check handler in entry.S
+ * bits 0-1 must contain 1. machine check handler is longword aligned
+ */
+ mhandler = (void *)(machine_check_handler) + 1;
+ scb.scb.mcheck = mhandler;
+
/* And tell the hardware to use this SCB */
__mtpr(__pa(&scb), PR_SCBB);
@@ -137,10 +147,11 @@
}
/* Perhaps this should be done in CPU-specific code? */
- if (register_excep_handler(0x15, corrected_read_handler, 0, 0)) {
+ if (register_excep_handler(SCB_MEMCORR, corrected_read_handler, 0, 0)) {
printk("Panic: unable to register corrected read handler\n");
machine_halt();
}
+
}
void init_IRQ(void)
Index: interrupt.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/interrupt.h,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- interrupt.h 2001/01/29 00:56:19 1.4
+++ interrupt.h 2001/06/26 18:59:00 1.5
@@ -65,4 +65,3 @@
extern void reserved_instr_handler(struct pt_regs *regs, void *excep_info);
extern void corrected_read_handler(struct pt_regs *regs, void *excep_info);
extern void syscall_handler(struct pt_regs *regs, void *excep_info);
-
Index: reboot.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/reboot.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- reboot.c 2001/01/17 16:13:57 1.1
+++ reboot.c 2001/06/26 18:59:00 1.2
@@ -3,7 +3,9 @@
This file contains the standard functions that the arch-independent
kernel expects for halting, rebooting and powering off the machine.
-
+
+ It also contains the machine check dispatcher
+
The real work will be done by cpu-specific code via the machine
vector. Eventually...
@@ -49,6 +51,20 @@
mv->halt();
while (1);
+}
+
+/* This is called directly, from entry.S
+ * It checks for a cpu specific machine check handler and hands over to it.
+ * Otherwise it will just halt, as there is no way to recover without a
+ * sensible cpu specific routine
+ */
+void machine_check(void *stkframe)
+{
+ if (mv->mcheck == NULL) {
+ printk("machine check - CPU specific handler not implemented - halting\n");
+ machine_halt();
+ }
+ mv->mcheck(stkframe);
}
Index: setup.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/setup.c,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -r1.8 -r1.9
--- setup.c 2001/06/08 20:11:10 1.8
+++ setup.c 2001/06/26 18:59:00 1.9
@@ -65,7 +65,7 @@
* atp -- have a temporary one.
* Shouldn't we use strcpy here?
*/
- memcpy(command_line, "root=/dev/nfs nfsroot=/tftpboot/vaxroot\0",54);
+ memcpy(command_line, "root=/dev/nfs nfsroot=/tftpboot/vaxroot rw debug\0",63);
*cmdline_p = command_line;
/* Save unparsed command line copy for /proc/cmdline */
memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
|
|
From: Andy P. <at...@us...> - 2001-06-24 19:35:05
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax In directory usw-pr-cvs1:/tmp/cvs-serv2641 Modified Files: Makefile Log Message: Fix to machine vector overwrite bug. Index: Makefile =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/Makefile,v retrieving revision 1.7 retrieving revision 1.8 diff -u -r1.7 -r1.8 --- Makefile 2001/02/21 00:16:56 1.7 +++ Makefile 2001/06/24 19:34:56 1.8 @@ -63,6 +63,9 @@ dd if=/dev/zero of=header.bin bs=512 count=1 mv -f header.bin vmlinux.SYS cat vmlinux.bin >> vmlinux.SYS +# this is helpful for low level debuggery +# rm -f vmlinux.lst +# $(OBJDUMP) -D vmlinux > vmlinux.lst # This target will only re-compile stuff that's changed in arch/vax mopbootx: linkonly @@ -118,6 +121,7 @@ fastdep: archclean: + rm -f vmlinux.* vmlinux cd $(TOPDIR)/arch/vax ; rm -f *.out TEST.BIN TEST.SYS header.bin archmrproper: |
|
From: Andy P. <at...@us...> - 2001-06-24 19:35:04
|
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/boot
In directory usw-pr-cvs1:/tmp/cvs-serv2641/boot
Modified Files:
head.S
Log Message:
Fix to machine vector overwrite bug.
Index: head.S
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/boot/head.S,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -r1.9 -r1.10
--- head.S 2001/06/10 10:35:27 1.9
+++ head.S 2001/06/24 19:34:56 1.10
@@ -27,7 +27,6 @@
movl r11, boot_r11
mfpr $PR_SCBB, boot_scb
-
jsb identify_cpu
# now fix up the machine vector entries. (They currently contain
@@ -35,10 +34,9 @@
# the pointers to the functions we use before VM init to point
# into the newly-loaded kernel image.)
movl mv, r2
-
moval start, r3
- subl2 $PAGE_OFFSET+KERNEL_START_PHYS, r3
+ subl2 $PAGE_OFFSET+KERNEL_START_PHYS, r3
addl2 r3, MV_PRE_VM_PUTCHAR(r2)
addl2 r3, MV_PRE_VM_GETCHAR(r2)
addl2 r3, MV_CPU_TYPE_STR(r2)
@@ -47,9 +45,9 @@
subl2 $PAGE_OFFSET, MV_PRE_VM_INIT(r2)
# print the cpu type
- jsb crlf
+ jsb crlf
movab msg_cpu_type, r10
- jsb printstr
+ jsb printstr
movl mv, r10
calls $0, *MV_CPU_TYPE_STR(r10)
@@ -80,6 +78,15 @@
jsb printint
jsb crlf
+# Save off the current machine vector address in boot_mv, because it
+# lies in the .bss section and it will get clobbered real soon...
+# - atp. in fact it gets clobbered real quick, if your kernel is
+# larger than about 950k, as the relocation code clobbers it, along
+# with every thing else poking its head above $KERNEL_START_PHYS,
+# like the entire .bss section.
+
+ movl mv, boot_mv
+
# copy the loaded image higher up in physical RAM
#chunks of 65535B data are moved top-to-bottom while
#inside one chunk the data is moved bottom-to-top.
@@ -108,10 +115,6 @@
movc3 r6,(r1),(r3)
copy_done:
-# Save off the current machine vector address in R11, because it
-# lies in the .bss section and it will get clobbered real soon...
-
- movl mv, r11
#
# Next we have to fill the .bss section will zeros. We do it now
# instead of when we are preparing the loadable image because it
@@ -154,9 +157,9 @@
# made it, note that sp is still down there
# halt
-# fix up the machine vector pointer (by restoring it from R11 and
+# fix up the machine vector pointer (by restoring it from boot_mv and
# adding in the distance that the kernel was re-located)
- addl3 r2, r11, mv
+ addl3 r2, boot_mv, mv
movl mv, r3
#and the pre-vm i/o functions
@@ -359,6 +362,11 @@
boot_r11: .int 0x00000000
.globl boot_scb
boot_scb: .int 0x00000000
+#
+# This is here because we need a safe place to store it as we
+# relocate around in memory. It may be handy later.
+.globl boot_mv
+boot_mv: .int 0x00000000
cpu_type: .int 0x00000000
prom_sidex: .int 0x00000000
|
|
From: Dave A. <ai...@us...> - 2001-06-17 12:39:22
|
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv21038
Modified Files:
pgalloc.h
Log Message:
DA: free the vmalloc'ed area for the pgd, fix free_pte_slow to just clear
the pte..
Index: pgalloc.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pgalloc.h,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- pgalloc.h 2001/06/16 16:58:45 1.6
+++ pgalloc.h 2001/06/17 12:39:19 1.7
@@ -126,6 +126,7 @@
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
+ vfree((void *)pgd[0].br);
free_page((unsigned long)pgd);
}
@@ -153,9 +154,11 @@
pgtable_cache_size++;
}
+/* we don't allocate any space for pte, just clear the one passed to us */
extern __inline__ void free_pte_slow(pte_t *pte)
{
- free_page((unsigned long)pte);
+ pte_clear(pte);
+/* free_page((unsigned long)pte);*/
}
#define pte_free_kernel(pte) free_pte_slow(pte)
|
|
From: Dave A. <ai...@us...> - 2001-06-17 12:34:09
|
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm
In directory usw-pr-cvs1:/tmp/cvs-serv20152/include/asm-vax/mm
Modified Files:
pagelet_pmd.h
Log Message:
DA: remove clear from pmd_clear .. make it a nop...
reasons given in comment
Index: pagelet_pmd.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/mm/pagelet_pmd.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- pagelet_pmd.h 2001/02/15 01:17:23 1.1
+++ pagelet_pmd.h 2001/06/17 12:34:05 1.2
@@ -40,9 +40,14 @@
extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) == 0); }
extern inline int pmd_present(pmd_t pmd) { return (pmd_val(pmd) != 0); }
/* This is just zeroing out the base and length registers */
+/* FIXME: or validate code - I removed the zero'ing of the pmd,
+ pmd are parts of pgds, and if we clear the br/lr of the P0 pmd,
+ the zeroth member of pgd, we lose the vmalloc address so can't
+ do vfree. - D.A. June 2001
+*/
extern inline void pmd_clear(pmd_t * pmdp) {
- pmd_val(pmdp[0]) = 0;
- pmd_val(pmdp[1]) = 0;
+ /* pmd_val(pmdp[0]) = 0;
+ pmd_val(pmdp[1]) = 0;*/
}
|