From: Jan-Benedict G. <jb...@he...> - 2006-04-07 20:12:55
arch/vax/boot/head.S | 141 ++++++++++++++++++++++++------------------
arch/vax/kernel/asm-offsets.c | 4 -
2 files changed, 86 insertions(+), 59 deletions(-)
New commits:
commit b36dd710bc4699877e8222f92d59394dc62fceb6
Author: Jan-Benedict Glaw <jb...@d2...>
Date: Fri Apr 7 22:12:17 2006 +0200
[VAX] Zap another magic number from head.S
Signed-off-by: Jan-Benedict Glaw <jb...@lu...>
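
The magic number in question is presumably the hardcoded 48 in `pushl 48(%r11)' (the saved R5, i.e. the argument to the BOOT command, inside the RPB), which the head.S hunk further down replaces with RPB_BOOT_R5_OFFSET. As a rough illustration of where such a symbolic offset can come from, here is a minimal, purely hypothetical C sketch; the struct name and layout are invented for illustration only, and the real definition lives in the VAX headers and the asm-offsets machinery:

/*
 * Hypothetical sketch only: a cut-down "RPB" struct whose layout is
 * invented for illustration.  It merely shows how an offset like
 * RPB_BOOT_R5_OFFSET can be derived with offsetof() instead of being
 * hardcoded as 48 in head.S.
 */
#include <stddef.h>
#include <stdio.h>

struct rpb_sketch {                /* NOT the real VAX RPB layout */
        unsigned long rpb_base;    /* offset  0 */
        unsigned long restart_va;  /* offset  4 */
        unsigned long pad[10];     /* offset  8 .. 47 */
        unsigned long saved_r5;    /* offset 48: argument to the BOOT command */
};

int main(void)
{
        /* In the kernel this would be emitted by asm-offsets.c as a
         * #define usable from assembly; printing it is just for show. */
        printf("#define RPB_BOOT_R5_OFFSET %u\n",
               (unsigned int) offsetof(struct rpb_sketch, saved_r5));
        return 0;
}

With the constant generated at build time, head.S no longer has to remember the RPB layout by heart.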
commit 3e87c0b8b1492463b60d9646a57767fb3db6530c
Author: Jan-Benedict Glaw <jb...@d2...>
Date: Fri Apr 7 21:55:02 2006 +0200
[VAX] Updates for head.S
This patch adds some more comments into head.S and zaps one magic constant.
Signed-off-by: Jan-Benedict Glaw <jb...@lu...>
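
The magic constant zapped here is the 8192 that was added to init_thread_union to find the top of the initial kernel stack; head.S now uses THREAD_SIZE, which the asm-offsets.c hunk below exports to assembly. For anyone unfamiliar with that mechanism, a hedged sketch of how such a DEFINE() entry typically works (modelled on the common kernel asm-offsets pattern, not copied from the VAX tree):

/*
 * Assumed sketch of the usual asm-offsets mechanism: DEFINE() plants a
 * marker line in the compiler's .s output, and the build scripts turn
 * it into a #define that assembly code like head.S can include.
 */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define THREAD_SIZE (2 * 4096)  /* stand-in value for this sketch only */

void foo(void)
{
        /* Compiling this with "gcc -S" leaves a marker line carrying
         * the value 8192; a sed pass rewrites it into
         * "#define THREAD_SIZE 8192", so head.S can say
         * "addl2 $THREAD_SIZE, %r0" without a hardcoded 8192. */
        DEFINE(THREAD_SIZE, THREAD_SIZE);
}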
diff --git a/arch/vax/boot/head.S b/arch/vax/boot/head.S
index 3861e3a..bf59290 100644
--- a/arch/vax/boot/head.S
+++ b/arch/vax/boot/head.S
@@ -1,12 +1,12 @@
-# Copyright atp Nov 1998.
-# Changes for aligning IOMAP/VMALLOC - Copyright ai...@li... - June 2001
-# start of boot. entry point
-# this assumes vmb has does most of the hard work (ie uvax rom vmb)
-# save useful registers. jump to c in boot.c
-#
-# TBD: Some of this stuff could do with being rewritten in C
-# Some of this stuff could be in .init sections and thrown away.
-#
+/*
+ * Copyright atp Nov 1998.
+ * Changes for aligning IOMAP/VMALLOC - Copyright ai...@li... - June 2001
+ * start of boot. entry point
+ * this assumes vmb has done most of the hard work (i.e. uvax rom vmb)
+ * save useful registers. jump to c in boot.c
+ * TBD: Some of this stuff could do with being rewritten in C
+ * Some of this stuff could be in .init sections and thrown away.
+ */
#include <linux/init.h> /* For __INITDATA */
#include <asm/mtpr.h> /* Processor register definitions */
@@ -16,86 +16,108 @@
#include "boot_sections.h"
+/*
+ * __BOOT is the starting point of the kernel image, put at virtual address
+ * 0x80100000 (cf. arch/vax/kernel/vmlinux.lds.S) and physically at the address
+ * the firmware jumps to.
+ */
__BOOT
.globl _stext
_stext:
.globl start
start:
- jmp codestart # Word displacement.
+ /*
+ * Entry point, jumped to by the firmware. We jump further down
+ * into the image and keep some space to put the command line
+ * below this.
+ */
+ jmp codestart
.globl bootparam
bootparam:
- .fill 256,1,0 # The boot parameter block. Presently just the
- # command line.
-
+ /*
+ * 256 bytes to keep the boot command line. This can be modified
+ * by `setcmdline'.
+ */
+ .fill 256, 1, 0
codestart:
- # Disable memory mapping
+ /* Disable memory mapping, though it should be disabled already... */
mtpr $0, $PR_MAPEN
mtpr $31, $PR_IPL
- # Save r11, AP, SCBB and location of command line
- movl %ap, boot_ap
- movl %r11, boot_r11
- mfpr $PR_SCBB, boot_scb
- movab bootparam, %r5
- addl2 $PAGE_OFFSET, %r5 # we will only access this when MAPEN=1
- movl %r5, kernel_cmd_line
-
- # Put the sp somewhere safe, over our bootblock in fact
- moval start, %r5
- subl2 $0x200, %r5
- movl %r5,%sp
+ /* Save some important values */
+ movl %ap, boot_ap /* Save AP */
+ movl %r11, boot_r11 /* Save R11 */
+ mfpr $PR_SCBB, boot_scb /* Save System Control Block
+ from its CPU register */
+ movab bootparam, %r5 /* R5 = physical address of command line */
+ addl2 $PAGE_OFFSET, %r5 /* R5 += PAGE_OFFSET (now it's a virtual address) */
+ movl %r5, kernel_cmd_line /* Now kernel_cmd_line points to the right thing. */
+
+ /* Put the sp somewhere safe, over our bootblock in fact */
+ moval start, %r5 /* R5 = &start */
+ subl2 $0x200, %r5 /* R5 -= 0x200 */
+ movl %r5, %sp /* SP = R5 */
+#if 0
# Debug code:
# movzbl $0x42, %r2
# jsb 0x20040058
+#endif
+ /* Identify the CPU we're running on. */
pushal start
- calls $1, idcpu # Identify this CPU and...
- movl %r0, mv # ...put the returned mv ptr into mv.
-
- # Now fix up the machine vector entries. (They currently contain
- # pointers to virtual addresses in S0 space. We need to change
- # the pointers to the functions we use before VM init to point
- # into the newly-loaded kernel image.)
- movl mv, %r10
- moval start, %r8
+ calls $1, idcpu
+ movl %r0, mv /* mv = idcpu(&start) */
+ /*
+ * Prepare to fix some of the Machine Vector pointers to
+ * point to physical memory (instead of virtual addresses) because
+ * we haven't yet enabled mapping, but want to use the function
+ * pointers (e.g. to print something).
+ */
+ movl mv, %r10 /* R10 = mv (machine vector pointer) */
+ moval start, %r8 /* R8 = &start */
+
+ /*
+ * Fix machine vector addresses to point to physical instead of
+ * virtual addresses because we haven't yet enabled VM.
+ */
subl2 $PAGE_OFFSET+KERNEL_START_PHYS, %r8
addl2 %r8, MV_PRE_VM_PUTCHAR(%r10)
addl2 %r8, MV_PRE_VM_GETCHAR(%r10)
addl2 %r8, MV_CPU_TYPE_STR(%r10)
- calls $0, boot_crlf
- calls $0, boot_crlf
+ pushab msg_welcome /* Welcome to Linux on VAX */
+ calls $1, boot_printstr
- # print the cpu type
+ /* Print the cpu type */
calls $0, boot_print_cpu_id
- # print first line of debug diagnostics
- pushab msg_loaded # ascii string
+ /* Print first line of debug diagnostics */
+ pushab msg_loaded /* "head.s loaded at..." */
calls $1, boot_printstr
- pushal start # where we were loaded
+ pushal start /* Where we were loaded */
calls $1, boot_printint
calls $0, boot_crlf
- pushab msg_registers # ascii string
+ pushab msg_registers /* Print "rpb/r5/ap/sp" */
calls $1, boot_printstr
calls $0, boot_printspace
- movl boot_r11, %r11
- pushl %r11 # r11 (holds the rpb base address, usually 0x0)
+ movl boot_r11, %r11 /* Prepare access to RPB */
+ pushl %r11 /* RPB base address */
calls $1, boot_printint
calls $0, boot_printspace
- # FIXME: magic offset -> asmoffsets.h
- pushl 48(%r11) # saved r5 in RPB (argument to BOOT command)
+
+ pushl RPB_BOOT_R5_OFFSET(%r11) /* Saved R5 in RPB */
calls $1, boot_printint
calls $0, boot_printspace
- pushl %ap # argument pointer (struct arglist)
+ pushl %ap /* Argument pointer (struct arglist) */
calls $1, boot_printint
calls $0, boot_printspace
- pushl %sp # stack pointer
+ pushl %sp /* Stack pointer */
calls $1, boot_printint
calls $0, boot_crlf
@@ -107,8 +129,9 @@ codestart:
# like the entire .bss section.
movl mv, boot_mv
- # copy the loaded image higher up in physical RAM
-
+ /*
+ * Copy the loaded image higher up in physical RAM
+ */
movl $__bss_start, %r6
subl2 $start, %r6 # byte count to r6
pushl %r6
@@ -156,10 +179,9 @@ reloc:
addl2 %r2, MV_PRE_VM_GETCHAR(%r10)
addl2 %r2, MV_CPU_TYPE_STR(%r10)
- # Print 'relocated at phys addr xxxxx'
- pushab msg_relocated
+ pushab msg_relocated /* Print "relocated at phys addr "... */
calls $1, boot_printstr
- pushal reloc
+ pushal reloc /* ...and the address. */
calls $1, boot_printint
calls $0, boot_crlf
calls $0, boot_crlf
@@ -208,7 +230,7 @@ sysfill:
bisl3 $_PAGE_VALID + _PAGE_UW, %r6, (%r5)+
# set PFN, VALID bit and protection UW in PTE
incl %r6 # next PFN
- cmpl %r6, %r7 # one page of PTE Table -> 128 Pages of PTES
+ cmpl %r6, %r7 # one page of PTE Table -> 128 Pages of PTEs
blssu sysfill
# We need to align the IOMAP/VMALLOC tables (well at least the VMALLOC
@@ -225,7 +247,7 @@ sysfill:
zerofl:
movl $0x00000000, (%r5)+
incl %r6 # next PFN
- cmpl %r6, %r7 # one page of PTE Table -> 128 Pages of PTES
+ cmpl %r6, %r7 # one page of PTE Table -> 128 Pages of PTEs
blssu zerofl
nozerofl:
# Zero out the spare part of the SPT (the entries that will be used
@@ -235,7 +257,7 @@ nozerofl:
sparef1:
movl $0x00000000, (%r5)+
incl %r6 # next PFN
- cmpl %r6, %r7 # one page of PTE Table -> 128 Pages of PTES
+ cmpl %r6, %r7 # one page of PTE Table -> 128 Pages of PTEs
blssu sparef1
movl %r5, vmallocmap_base
@@ -295,8 +317,8 @@ sparefill2:
# and faking a saved PC/PSL on the interrupt stack which we then
# 'return' to
moval init_thread_union, %r0
- addl2 $8192, %r0 # FIXME: taken from <linux/sched.h>
- mtpr %r0,$PR_KSP
+ addl2 $THREAD_SIZE, %r0
+ mtpr %r0, $PR_KSP
pushl $0x001f0000 # IS=0, accmode=prevmode=K, IPL=31
pushab now_on_kstack
@@ -307,6 +329,9 @@ now_on_kstack:
calls $0, vax_start_kernel # should never return
halt
+msg_welcome:
+ .ascii "\r\n\r\n* ** Welcome to Linux on VAX ** *\r\n\r\n\0"
+ .align 1
msg_loaded:
.ascii "Boot Head.S loaded at address \0"
.align 1
diff --git a/arch/vax/kernel/asm-offsets.c b/arch/vax/kernel/asm-offsets.c
index 1801560..0d673f6 100644
--- a/arch/vax/kernel/asm-offsets.c
+++ b/arch/vax/kernel/asm-offsets.c
@@ -45,9 +45,11 @@ void foo (void)
BLANK ();
/*
- * PAGE_OFFSET
+ * Define some other important values. Getting at these directly
+ * would mean hacking the header files quite hard...
*/
DEFINE (PAGE_OFFSET, PAGE_OFFSET);
+ DEFINE (THREAD_SIZE, THREAD_SIZE);
BLANK ();
/*