From: <ai...@us...> - 2003-10-03 11:47:09
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel
In directory sc8-pr-cvs1:/tmp/cvs-serv26634

Modified Files:
	process.c entry.S 
Log Message:
backport Kenns ret_from_fork from 2.5

Index: process.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/process.c,v
retrieving revision 1.23
retrieving revision 1.24
diff -u -d -r1.23 -r1.24
--- process.c	27 Sep 2003 09:24:03 -0000	1.23
+++ process.c	3 Oct 2003 11:47:06 -0000	1.24
@@ -58,7 +58,13 @@
 
 	/* Get phys address of next process pcb */
 	pcbb = virt_to_phys(&next->thread.pcb);
-	
+
+	/* When 'next' starts running, R0 will hold the task pointer
+	   for the process we just switched away from. This will end
+	   up in R0 at ret_from_fork, for new processes and will be
+	   the return value from this function for existing processes */
+	next->thread.pcb.r0 = (unsigned long) prev;
+
 	/* svpctx should deal with writing the stuff into *prev */
 
 	asm("	movl %1, %%r11		\n"
@@ -102,7 +108,7 @@
 };
 
 /* Defined in entry.S */
-extern void ret_from_syscall(void);
+extern void ret_from_fork(void);
 
 int copy_thread(int unused1, unsigned long clone_flags,
 		unsigned long usp, unsigned long unused2,
@@ -111,7 +117,7 @@
 	struct new_thread_stack *child_stack;
 	struct pt_regs *child_regs;
 	void *stack_top;
-	
+
 	/* stack top is at the end of the task_union -
 	   take the pointer, add in the size of
 	   task_union, and then drop by a longword to keep it within the
@@ -124,7 +130,7 @@
 
 #ifdef VAX_PROCESS_DEBUG
 	printk("copy_thread: pid %d, task 0x%08lx, kstack_top 0x%8lx, usp 0x%08lx, ksp 0x%08lx\n",
-		p->pid, (unsigned long)p, stack_top, usp, child_stack);
+	       p->pid, (unsigned long)p, stack_top, usp, child_stack);
 #endif
 
 	child_regs = &child_stack->regs;
@@ -137,10 +143,17 @@
 
 	p->thread.pcb.ksp = (unsigned long)child_stack;
 	p->thread.pcb.usp = usp;
-	p->thread.pcb.pc = (unsigned long)ret_from_syscall;
+	p->thread.pcb.pc = (unsigned long)ret_from_fork;
 	p->thread.pcb.psl = __psl;
 
-	
+	/* New thread must start with IPL 31 to prevent any interrupts
+	   from occuring between the time it is first scheduled (in __switch_to
+	   above) and when ret_from_fork calls schedule_tail(). If an
+	   interrupt comes in during this time, schedule() might get called
+	   from do_irq_excep() before schedule_tail() has released the
+	   runqueue lock (in finish_task_switch) */
+	p->thread.pcb.psl.ipl = 31;
+
 	/* We could speed this up by loading the register values
 	   into the PCB and start the new thread just before the
 	   REI in entry.S, letting the regular context switching load the

Index: entry.S
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/entry.S,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -d -r1.9 -r1.10
--- entry.S	12 Feb 2003 02:37:33 -0000	1.9
+++ entry.S	3 Oct 2003 11:47:06 -0000	1.10
@@ -191,7 +191,16 @@
 	movl (%sp)+, %r0
 
 	rei
-	
+
+	.globl ret_from_fork
+ret_from_fork:
+	/* A newly-created thread starts here when it is first
+	   scheduled. R0 will contain the previous task (the one
+	   that we just scheduled away from on this CPU). */
+	pushl %r0
+	calls $1, schedule_tail
+	brb ret_from_syscall
+
 /* irqvec_stray is the generic handler for all exceptions and interrupts
    for which there is no registered handler. We just save all registers,
    and call unhandled_exception(), passing it the return address saved
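
For anyone reading this in the archive without the kernel tree handy, the sketch below is a minimal, self-contained C model of the handoff the patch implements: copy_thread() points the new thread at ret_from_fork and raises its saved IPL to 31, __switch_to() parks the outgoing task pointer in the new thread's saved R0, and ret_from_fork hands that pointer to schedule_tail() before falling into ret_from_syscall. A new thread never returns from __switch_to() the way an existing one does, so the saved register context is the only channel that can carry 'prev' to it. All names in the sketch (fake_pcb, fake_task, the *_model helpers) are invented stand-ins for illustration, not the real linux-vax types.

/*
 * Standalone model of the prev-task handoff in this commit.
 * Every type and function name here is a simplified stand-in,
 * not a real linux-vax structure or entry point.
 */
#include <stdio.h>

struct fake_task;

struct fake_pcb {
	unsigned long r0;                   /* saved R0: carries 'prev' into a new thread */
	void (*pc)(struct fake_task *self); /* saved PC: where the thread resumes */
	int ipl;                            /* interrupt priority level held in the saved PSL */
};

struct fake_task {
	const char *name;
	struct fake_pcb pcb;
};

/* Models schedule_tail(): the new thread's first job is to finish the
 * bookkeeping (and drop the runqueue lock) for the task it displaced. */
static void schedule_tail_model(struct fake_task *prev)
{
	printf("schedule_tail: finished switching away from %s\n", prev->name);
}

/* Models the new ret_from_fork: the saved R0 is the previous task. */
static void ret_from_fork_model(struct fake_task *self)
{
	struct fake_task *prev = (struct fake_task *)self->pcb.r0;

	schedule_tail_model(prev);
	self->pcb.ipl = 0;  /* in the real code the REI in ret_from_syscall drops the IPL */
	printf("%s: falling through to ret_from_syscall at IPL %d\n",
	       self->name, self->pcb.ipl);
}

/* Models copy_thread(): new threads resume at ret_from_fork and start at IPL 31. */
static void copy_thread_model(struct fake_task *child)
{
	child->pcb.pc = ret_from_fork_model;
	child->pcb.ipl = 31;
}

/* Models the lines added to __switch_to(): park 'prev' in the next task's
 * saved R0, then pretend LDPCTX/REI resumed 'next' at its saved PC. */
static void switch_to_model(struct fake_task *prev, struct fake_task *next)
{
	next->pcb.r0 = (unsigned long)prev;
	next->pcb.pc(next);
}

int main(void)
{
	struct fake_task parent = { .name = "parent" };
	struct fake_task child  = { .name = "child" };

	copy_thread_model(&child);
	switch_to_model(&parent, &child);
	return 0;
}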