From: Kenn H. <ke...@us...> - 2003-02-16 00:13:21
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax
In directory sc8-pr-cvs1:/tmp/cvs-serv11691/include/asm-vax

Modified Files:
	bitops.h current.h delay.h hardirq.h tlbflush.h 
Log Message:
Remove SMP-related definitions.

Index: bitops.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/bitops.h,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -r1.7 -r1.8
--- bitops.h	11 Jan 2003 15:37:58 -0000	1.7
+++ bitops.h	16 Feb 2003 00:13:16 -0000	1.8
@@ -19,11 +19,7 @@
  * Updates for 2.4.3+ atp Mar 2002
  */
 
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
 #define LOCK_PREFIX ""
-#endif
 
 #define ADDR (*(volatile long *) addr)

Index: current.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/current.h,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- current.h	29 Oct 2002 00:52:51 -0000	1.4
+++ current.h	16 Feb 2003 00:13:16 -0000	1.5
@@ -5,9 +5,7 @@
 
 struct task_struct;
 
-#ifndef __SMP__
-
-/* Later, we will probably arrange for current to live in R11
+/* Later, we might arrange for current to live in R11
    permanently. For now, I'm going to compute it from the
    current kernel stack pointer. Note that we may be called
    while on the interrupt stack, so we need to use MFPR to
@@ -22,10 +20,5 @@
 }
 
 #define current get_current()
-
-
-#else
-#error SMP not supported
-#endif
 
 #endif /* _VAX_CURRENT_H */

Index: delay.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/delay.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- delay.h	20 May 2002 00:33:39 -0000	1.3
+++ delay.h	16 Feb 2003 00:13:16 -0000	1.4
@@ -46,12 +46,7 @@
 	__delay(prod.hi);
 }
 
-#ifdef __SMP__
-#define udelay(usecs) \
-	__udelay(usecs, cpu_data[smp_processor_id()].loops_per_jiffy)
-#else
 #define udelay(usecs) \
 	__udelay(usecs, loops_per_jiffy)
-#endif
 
 #endif /* _VAX_DELAY_H */

Index: hardirq.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/hardirq.h,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- hardirq.h	9 Feb 2003 01:49:23 -0000	1.4
+++ hardirq.h	16 Feb 2003 00:13:17 -0000	1.5
@@ -74,14 +74,9 @@
 
 #define irq_enter()	(preempt_count() += HARDIRQ_OFFSET)
 
-#ifndef CONFIG_SMP
 /* FIXME: implement irq_exit() properly when we do CONFIG_PREEMPT */
 #define irq_exit()	(preempt_count() -= HARDIRQ_OFFSET)
 
 #define synchronize_irq()	barrier();
-
-#else
-#error Nil on SMP
-#endif /* CONFIG_SMP */
 
 #endif /* _ASM_VAX_HARDIRQ_H */

Index: tlbflush.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/tlbflush.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- tlbflush.h	23 Jan 2003 23:18:53 -0000	1.2
+++ tlbflush.h	16 Feb 2003 00:13:17 -0000	1.3
@@ -3,9 +3,6 @@
 
 #include <linux/mm.h>
 
-/* FIXME: update to 2.4 - clever_smp_invalidate has gone.
- */
-
 /*
  * TLB flushing:
 *
@@ -26,8 +23,6 @@
 #define __flush_tlb_one(addr) \
 	__asm__ __volatile__("mtpr %0,%1" : :"m" (addr), "i" (PR_TBIS) )
 
-#ifndef __SMP__
-
 #define flush_tlb() __flush_tlb()
 #define flush_tlb_all() __flush_tlb()
 #define local_flush_tlb() __flush_tlb()
@@ -52,93 +47,6 @@
 	if (vma->vm_mm == current->mm)
 		__flush_tlb();
 }
-
-#else
-
-/* FIXME: SMP - another day perhaps */
-/*
- * We aren't very clever about this yet - SMP could certainly
- * avoid some global flushes..
- */
-
-
-#define local_flush_tlb() \
-	__flush_tlb()
-
-
-#define CLEVER_SMP_INVALIDATE
-#ifdef CLEVER_SMP_INVALIDATE
-
-/*
- * Smarter SMP flushing macros.
- * c/o Linus Torvalds.
- *
- * These mean you can really definitely utterly forget about
- * writing to user space from interrupts. (Its not allowed anyway).
- */
-
-static inline void flush_tlb_current_task(void)
-{
-	/* just one copy of this mm? */
-	if (atomic_read(&current->mm->count) == 1)
-		local_flush_tlb();	/* and that's us, so.. */
-	else
-		smp_flush_tlb();
-}
-
-#define flush_tlb() flush_tlb_current_task()
-
-#define flush_tlb_all() smp_flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct * mm)
-{
-	if (mm == current->mm && atomic_read(&mm->count) == 1)
-		local_flush_tlb();
-	else
-		smp_flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct * vma,
-	unsigned long va)
-{
-	if (vma->vm_mm == current->mm && atomic_read(&current->mm->count) == 1)
-		__flush_tlb_one(va);
-	else
-		smp_flush_tlb();
-}
-
-static inline void flush_tlb_range(struct mm_struct * mm,
-	unsigned long start, unsigned long end)
-{
-	flush_tlb_mm(mm);
-}
-
-
-#else
-
-#define flush_tlb() \
-	smp_flush_tlb()
-
-#define flush_tlb_all() flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-	unsigned long addr)
-{
-	flush_tlb();
-}
-
-static inline void flush_tlb_range(struct mm_struct *mm,
-	unsigned long start, unsigned long end)
-{
-	flush_tlb();
-}
-#endif /* clever_smp_invalidate */
-#endif /* smp */
 
 extern inline void flush_tlb_pgtables(struct mm_struct *mm,
 	unsigned long start, unsigned long end)
 {
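For reference, here is a minimal sketch of the kind of uniprocessor get_current()
the current.h comment above describes: computing "current" from the kernel stack
pointer, read via MFPR so it is still correct while running on the interrupt
stack. This is not the committed linux-vax code -- the thread_info-at-stack-base
layout, THREAD_SIZE and the exact PR_KSP usage are assumptions for illustration
only.

/*
 * Sketch only, not the committed implementation.  Derives "current"
 * from the per-process kernel stack pointer (internal processor
 * register PR_KSP), mirroring the MTPR/MFPR style used by
 * __flush_tlb_one() above.  Assumes a thread_info sits at the base
 * of the kernel stack, as in other 2.5 ports.
 */
static inline struct task_struct *get_current(void)
{
	unsigned long ksp;

	/* read the kernel stack pointer processor register */
	__asm__ __volatile__("mfpr %1,%0" : "=g" (ksp) : "i" (PR_KSP));

	/* thread_info assumed to live at the base of the kernel stack */
	return ((struct thread_info *)(ksp & ~(THREAD_SIZE - 1)))->task;
}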