Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel
In directory usw-pr-cvs1:/tmp/cvs-serv9090/vax/kernel
Modified Files:
init_task.c interrupt.c interrupt.h process.c ptrace.c
regdump.c semaphore.c setup.c syscall.c
Log Message:
synch 2.4.15 commit 14
Index: init_task.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/init_task.c,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- init_task.c 20 Jan 2001 22:00:20 -0000 1.2
+++ init_task.c 9 Apr 2002 13:50:55 -0000 1.3
@@ -1,5 +1,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
+#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
Index: interrupt.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/interrupt.c,v
retrieving revision 1.23
retrieving revision 1.24
diff -u -r1.23 -r1.24
--- interrupt.c 15 Dec 2001 12:23:58 -0000 1.23
+++ interrupt.c 9 Apr 2002 13:50:55 -0000 1.24
@@ -6,6 +6,10 @@
It also contains the interrupt stack. Eventually, there will
need to be a separate interrupt stack per-cpu, within the
per-cpu data structures.
+ *
+ *
+ * FIXME: We should use the new interrupt architecture. It looks like
+ * a closer match to the VAX SCB.
*/
#include <linux/types.h>
@@ -462,8 +466,9 @@
restore_flags(flags);
// if (status) {
- if (softirq_active(cpu)&softirq_mask(cpu)) {
- do_softirq();
+// if (softirq_active(cpu)&softirq_mask(cpu)) {
+ if (softirq_pending(cpu)) {
+ do_softirq();
// }
}
@@ -508,7 +513,8 @@
ret_from_sys_call:
cpu=smp_processor_id();
- if (softirq_active(cpu)&softirq_mask(cpu)) {
+ if (softirq_pending(cpu)) {
+// if (softirq_active(cpu)&softirq_mask(cpu)) {
do_softirq();
goto ret_from_intr;
}
@@ -516,7 +522,8 @@
ret_from_exception:
cpu=smp_processor_id();
- if (softirq_active(cpu)&softirq_mask(cpu)) {
+ if (softirq_pending(cpu)) {
+// if (softirq_active(cpu)&softirq_mask(cpu)) {
do_softirq();
}
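
[Note on the softirq change above: the separate per-CPU active/mask words were folded
into a single pending word in later 2.4 kernels, so the arch code now only needs to
test softirq_pending(cpu). Roughly what the core side provides (paraphrased from
include/linux/irq_cpustat.h and include/linux/interrupt.h in 2.4.15 -- not part of
this patch):

    /* per-CPU softirq state is kept by the core; the arch just tests it */
    #define softirq_pending(cpu)    __IRQ_STAT((cpu), __softirq_pending)

    /* caller pattern now used throughout interrupt.c: */
    cpu = smp_processor_id();
    if (softirq_pending(cpu))
            do_softirq();
]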
Index: interrupt.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/interrupt.h,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -r1.8 -r1.9
--- interrupt.h 9 Nov 2001 23:48:40 -0000 1.8
+++ interrupt.h 9 Apr 2002 13:50:55 -0000 1.9
@@ -7,7 +7,7 @@
have to move them to include/asm-vax instead.
*/
-#include <asm/irq.h>
+#include <linux/interrupt.h>
/* This is the max number of exception and interrupt handlers we can
handle. You can increase this as far as NR_IRQS if you need to. */
Index: process.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/process.c,v
retrieving revision 1.15
retrieving revision 1.16
diff -u -r1.15 -r1.16
--- process.c 28 Oct 2001 23:54:18 -0000 1.15
+++ process.c 9 Apr 2002 13:50:55 -0000 1.16
@@ -11,7 +11,7 @@
#include <linux/smp_lock.h>
#include <linux/fs.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/current.h>
#include <asm/processor.h>
@@ -24,6 +24,8 @@
#include <asm/elf.h>
+#undef VAX_PROCESS_DEBUG
+
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
@@ -47,7 +49,7 @@
unsigned long pcbb; /* physical address of new pcb */
struct task_struct *retval;
-#if 0
+#ifdef VAX_PROCESS_DEBUG
printk("vax_switch_to: switching %08lx -> %08lx\n",
(unsigned long)prev, (unsigned long)next);
#endif
@@ -119,9 +121,11 @@
stack_top -= 4;
child_stack = (struct new_thread_stack *)(stack_top) - 1;
-
-// printk("copy_thread: pid %d, task 0x%08lx, kstack_top 0x%8lx, usp 0x%08lx, ksp 0x%08lx\n",
-// p->pid, (unsigned long)p, stack_top, usp, child_stack);
+
+#ifdef VAX_PROCESS_DEBUG
+ printk("copy_thread: pid %d, task 0x%08lx, kstack_top 0x%8lx, usp 0x%08lx, ksp 0x%08lx\n",
+ p->pid, (unsigned long)p, stack_top, usp, child_stack);
+#endif
child_regs = &child_stack->regs;
*child_regs = *regs;
@@ -185,8 +189,10 @@
/* In child. At this point SP points to the very top of
our kernel stack, so we cannot pop anything off. That
means that we can never return from here. */
-/* printk("kernel_thread: calling thread function at %08lx\n",
- (unsigned long)fn);*/
+#ifdef VAX_PROCESS_DEBUG
+ printk("kernel_thread: calling thread function at %08lx\n",
+ (unsigned long)fn);
+#endif
kernel_thread_exit(fn(arg));
}
@@ -197,14 +203,15 @@
if (!newsp) {
newsp = regs->sp;
}
-
-/* printk("sys_clone: calling do_fork(0x%08lx, 0x%08lx, 0x%p)\n",
- clone_flags, newsp, regs); */
-
+#ifdef VAX_PROCESS_DEBUG
+ printk("sys_clone: calling do_fork(0x%08lx, 0x%08lx, 0x%p)\n",
+ clone_flags, newsp, regs);
+#endif
retval = do_fork(clone_flags, newsp, regs, 0);
-/* printk("sys_clone: do_fork() returned %d\n", retval);*/
-
+#ifdef VAX_PROCESS_DEBUG
+ printk("sys_clone: do_fork() returned %d\n", retval);
+#endif
return retval;
}
@@ -250,7 +257,9 @@
void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp)
{
-/* printk("starting thread %8lX %8lX %8lX\n", new_pc, new_sp, regs->sp);*/
+#ifdef VAX_PROCESS_DEBUG
+ printk("starting thread %8lX %8lX %8lX\n", new_pc, new_sp, regs->sp);
+#endif
set_fs(USER_DS);
regs->pc = new_pc;
regs->sp = new_sp;
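
[Note on the process.c changes above: <linux/malloc.h> is deprecated in later 2.4
kernels in favour of <linux/slab.h> (where kmalloc()/kfree() live), and the ad-hoc
commented-out trace printk()s are now gathered under one compile-time switch. To
turn the tracing back on, only the #undef near the top of the file needs to change,
for example:

    #define VAX_PROCESS_DEBUG       /* was: #undef VAX_PROCESS_DEBUG */

    #ifdef VAX_PROCESS_DEBUG
            printk("vax_switch_to: switching %08lx -> %08lx\n",
                   (unsigned long)prev, (unsigned long)next);
    #endif
]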
Index: ptrace.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/ptrace.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- ptrace.c 14 Nov 2001 09:13:23 -0000 1.6
+++ ptrace.c 9 Apr 2002 13:50:55 -0000 1.7
@@ -120,6 +120,17 @@
return retval;
}
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* make sure the single step bit is not set. */
+ /* FIXME: */
+}
+
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
@@ -159,7 +170,7 @@
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
@@ -307,18 +318,7 @@
break;
}
case PTRACE_DETACH: /* detach a process that was attached. */
- res = -EIO;
- if ((unsigned long) data > _NSIG)
- break;
- child->ptrace &= ~(PT_PTRACED|PT_TRACESYS);
- child->exit_code = data;
- write_lock_irq(&tasklist_lock);
- REMOVE_LINKS(child);
- child->p_pptr = child->p_opptr;
- SET_LINKS(child);
- write_unlock_irq(&tasklist_lock);
- wake_up_process(child);
- res = 0;
+ res = ptrace_detach(child, data);
break;
default:
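
[Note on the PTRACE_DETACH change above: detach is now handled by the generic
ptrace_detach() helper in kernel/ptrace.c, which absorbs essentially the code removed
above and additionally calls the new per-arch ptrace_disable() hook so the
architecture can clear any single-step state before the child is re-parented. A
rough sketch of that helper (paraphrased; consult kernel/ptrace.c in 2.4.15 for the
exact code):

    int ptrace_detach(struct task_struct *child, unsigned int data)
    {
            if ((unsigned long) data > _NSIG)
                    return -EIO;

            ptrace_disable(child);          /* new per-arch hook */

            /* the rest is essentially the code removed above */
            child->ptrace &= ~(PT_PTRACED|PT_TRACESYS);
            child->exit_code = data;
            write_lock_irq(&tasklist_lock);
            REMOVE_LINKS(child);
            child->p_pptr = child->p_opptr;
            SET_LINKS(child);
            write_unlock_irq(&tasklist_lock);
            wake_up_process(child);

            return 0;
    }

On VAX the hook is still an empty FIXME; once single-stepping is implemented it will
presumably clear the trace (T) bit in the child's saved PSL.]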
Index: regdump.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/regdump.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- regdump.c 11 Sep 2001 19:23:38 -0000 1.5
+++ regdump.c 9 Apr 2002 13:50:55 -0000 1.6
@@ -126,6 +126,17 @@
}
}
+/* FIXME: new as of 2.4.15. Not 100% sure what this is supposed to do,
+ * but I suspect it's the equivalent of our dump_stack() routine, on
+ * an arbitrary task's kernel stack */
+void show_trace_task(struct task_struct * tsk)
+{
+ unsigned long int ksp;
+ ksp = tsk->thread.pcb.ksp;
+ hex_dump((void *)ksp, 256);
+ return;
+}
+
void dump_stack(unsigned int frames)
{
unsigned int reg_count;
Index: semaphore.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/semaphore.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- semaphore.c 29 Jan 2001 01:09:18 -0000 1.1
+++ semaphore.c 9 Apr 2002 13:50:55 -0000 1.2
@@ -1,6 +1,8 @@
/*
* $Id$
*
+ * Updated for new rwsem.h 2.4.4, Mar 2002 atp.
+ *
* VAX version (based on S390 version)
* Copyright (C) 2001, Kenn Humborg
*
@@ -160,146 +162,5 @@
spin_unlock_irqrestore(&semaphore_lock, flags);
return 1;
-}
-
-void down_read_failed_biased(struct rw_semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */
-
- for (;;) {
- if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
- break;
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (!sem->read_bias_granted)
- schedule();
- }
-
- remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
-}
-
-void down_write_failed_biased(struct rw_semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */
-
- for (;;) {
- if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
- break;
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (!sem->write_bias_granted)
- schedule();
- }
-
- remove_wait_queue(&sem->write_bias_wait, &wait);
- tsk->state = TASK_RUNNING;
-
- /* if the lock is currently unbiased, awaken the sleepers
- * FIXME: this wakes up the readers early in a bit of a
- * stampede -> bad!
- */
- if (atomic_read(&sem->count) >= 0)
- wake_up(&sem->wait);
-}
-
-/* Wait for the lock to become unbiased. Readers
- * are non-exclusive. =)
- */
-void down_read_failed(struct rw_semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- up_read(sem); /* this takes care of granting the lock */
-
- add_wait_queue(&sem->wait, &wait);
-
- while (atomic_read(&sem->count) < 0) {
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&sem->count) >= 0)
- break;
- schedule();
- }
-
- remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
-}
-
-/* Wait for the lock to become unbiased. Since we're
- * a writer, we'll make ourselves exclusive.
- */
-void down_write_failed(struct rw_semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- up_write(sem); /* this takes care of granting the lock */
-
- add_wait_queue_exclusive(&sem->wait, &wait);
-
- while (atomic_read(&sem->count) < 0) {
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&sem->count) >= 0)
- break; /* we must attempt to acquire or bias the lock */
- schedule();
- }
-
- remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
-}
-
-/* Called when someone has done an up that transitioned from
- * negative to non-negative, meaning that the lock has been
- * granted to whomever owned the bias.
- */
-void rwsem_wake_readers(struct rw_semaphore *sem)
-{
- if (xchg(&sem->read_bias_granted, 1))
- BUG();
- wake_up(&sem->wait);
-}
-
-void rwsem_wake_writers(struct rw_semaphore *sem)
-{
- if (xchg(&sem->write_bias_granted, 1))
- BUG();
- wake_up(&sem->write_bias_wait);
-}
-
-void __down_read_failed(int count, struct rw_semaphore *sem)
-{
- do {
- if (count == -1) {
- down_read_failed_biased(sem);
- break;
- }
- down_read_failed(sem);
- count = atomic_dec_return(&sem->count);
- } while (count != 0);
-}
-
-void __down_write_failed(int count, struct rw_semaphore *sem)
-{
- do {
- if (count < 0 && count > -RW_LOCK_BIAS) {
- down_write_failed_biased(sem);
- break;
- }
- down_write_failed(sem);
- count = atomic_add_return(-RW_LOCK_BIAS, &sem->count);
- } while (count != 0);
-}
-
-void __rwsem_wake(int count, struct rw_semaphore *sem)
-{
- if (count == 0)
- rwsem_wake_readers(sem);
- else
- rwsem_wake_writers(sem);
}
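
[Note on the semaphore.c deletion above: from 2.4.4 onwards the rw_semaphore slow
paths live in the generic library (lib/rwsem-spinlock.c or lib/rwsem.c, selected by
CONFIG_RWSEM_GENERIC_SPINLOCK / CONFIG_RWSEM_XCHGADD_ALGORITHM), so a port like VAX
that takes the generic spinlock flavour no longer carries its own biased
reader/writer fallback. The caller-visible API is unchanged, e.g.:

    static DECLARE_RWSEM(example_sem);      /* hypothetical example lock */

    down_read(&example_sem);                /* shared access */
    up_read(&example_sem);

    down_write(&example_sem);               /* exclusive access */
    up_write(&example_sem);
]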
Index: setup.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/setup.c,v
retrieving revision 1.17
retrieving revision 1.18
diff -u -r1.17 -r1.18
--- setup.c 16 Sep 2001 15:24:31 -0000 1.17
+++ setup.c 9 Apr 2002 13:50:55 -0000 1.18
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
+#include <linux/seq_file.h>
#include <asm/rpb.h>
#include <asm/page.h>
@@ -35,13 +36,13 @@
extern char _end;
/*
- * BUFFER is PAGE_SIZE bytes long.
+ * Get CPU information for use by the procfs.
*/
-int get_cpuinfo(char *buffer)
+static int show_cpuinfo(struct seq_file *m, void *v)
{
int len;
- len = sprintf(buffer, "cpu\t\t\t: VAX\n"
+ seq_printf(m, "cpu\t\t\t: VAX\n"
"cpu type\t\t: %s\n"
"cpu sidex\t\t: %d\n"
"page size\t\t: %ld\n"
@@ -52,9 +53,27 @@
PAGE_SIZE,
loops_per_jiffy / (500000/HZ),
(loops_per_jiffy / (5000/HZ)) % 100);
- return len;
+ return 0;
}
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return (void*)(*pos == 0);
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+struct seq_operations cpuinfo_op = {
+ start: c_start,
+ next: c_next,
+ stop: c_stop,
+ show: show_cpuinfo,
+};
void __init setup_arch(char **cmdline_p)
{
@@ -114,8 +133,6 @@
printk("calling free_bootmem(start=%08lx, len=%08lx)\n",
region_start, region_len);
free_bootmem(region_start, region_len);
-
-
region_start = PAGEALIGNUP(__pa(SPT_BASE + SPT_SIZE));
region_len = PAGEALIGNDN((max_pfn << PAGE_SHIFT)) - region_start;
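
[Note on the /proc/cpuinfo conversion above: 2.4.15 switched /proc/cpuinfo to the
seq_file interface, so instead of the old get_cpuinfo(buffer) call with a single
PAGE_SIZE buffer, the arch just exports a struct seq_operations named cpuinfo_op and
the core binds it to the file. Roughly what the core side does (paraphrased from
fs/proc/proc_misc.c; shown only for context, not part of this patch):

    extern struct seq_operations cpuinfo_op;

    static int cpuinfo_open(struct inode *inode, struct file *file)
    {
            return seq_open(file, &cpuinfo_op);
    }

    static struct file_operations proc_cpuinfo_operations = {
            open:           cpuinfo_open,
            read:           seq_read,
            llseek:         seq_lseek,
            release:        seq_release,
    };

The c_start()/c_next() pair added above makes the iterator yield exactly one element
(a non-NULL cookie while *pos == 0), which is all a uniprocessor VAX needs; an SMP
version would iterate over the online CPUs instead.]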
Index: syscall.c
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/syscall.c,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -r1.8 -r1.9
--- syscall.c 14 Sep 2001 11:41:41 -0000 1.8
+++ syscall.c 9 Apr 2002 13:50:55 -0000 1.9
@@ -78,8 +78,10 @@
nr_args = *user_ap;
}
-// printk("Dispatching syscall %d with %d args, regs=%8lx\n", chmk_arg, nr_args,regs);
-
+// if (chmk_arg==106) {
+// printk("Dispatching syscall %d with %d args, regs=%8lx ap=%8lx\n", chmk_arg, nr_args,regs,user_ap);
+// printk(" -- arg0 %d, arg1 %8lx, arg2 %8lx\n",*user_ap,*(user_ap+1),*(user_ap+2));
+// }
/* We pass all the user-supplied args plus the pointer to the
regs to the syscall function. If the syscall is implemented
in the core kernel, then it will ignore the additional
@@ -260,9 +262,9 @@
goto out;
}
- down(&current->mm->mmap_sem);
+ down_write(&current->mm->mmap_sem);
error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up(&current->mm->mmap_sem);
+ up_write(&current->mm->mmap_sem);
if (file)
fput(file);
|