From: Kenn H. <ke...@us...> - 2003-02-09 01:49:27
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax
In directory sc8-pr-cvs1:/tmp/cvs-serv31289/include/asm-vax

Modified Files:
	hardirq.h softirq.h system.h
Log Message:
2.5.28 removed the big IRQ lock. We also now require some of the
preemption definitions (even though we don't support preemption yet).

Index: hardirq.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/hardirq.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- hardirq.h	20 May 2002 00:33:39 -0000	1.3
+++ hardirq.h	9 Feb 2003 01:49:23 -0000	1.4
@@ -20,21 +20,63 @@
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
 /*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-30 are the hardirq count (max # of hardirqs: 256)
+ * - bit 31 is the PREEMPT_ACTIVE flag
  */
-#define in_interrupt() ({ const int __cpu = smp_processor_id(); \
-	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+#define PREEMPT_BITS	8
+#define SOFTIRQ_BITS	8
+#define HARDIRQ_BITS	15
 
-#ifndef CONFIG_SMP
+#define PREEMPT_SHIFT	0
+#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __MASK(x)	((1UL << (x))-1)
+
+#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK|SOFTIRQ_MASK))
+
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+
+/*
+ * The hardirq mask has to be large enough to have space
+ * for potentially all IRQ sources in the system nesting
+ * on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq()	hardirq_count()
+#define in_softirq()	softirq_count()
+#define in_interrupt()	irq_count()
 
-#define hardirq_trylock(cpu)	(local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu)	do { } while (0)
+#define hardirq_trylock()	(!in_interrupt())
+#define hardirq_endlock()	do { } while (0)
 
-#define irq_enter(cpu)	(local_irq_count(cpu)++)
-#define irq_exit(cpu)	(local_irq_count(cpu)--)
+#define irq_enter()	(preempt_count() += HARDIRQ_OFFSET)
+
+
+#ifndef CONFIG_SMP
+/* FIXME: implement irq_exit() properly when we do CONFIG_PREEMPT */
+#define irq_exit()	(preempt_count() -= HARDIRQ_OFFSET)
 
 #define synchronize_irq()	barrier();

Index: softirq.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/softirq.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- softirq.h	20 May 2002 00:33:39 -0000	1.3
+++ softirq.h	9 Feb 2003 01:49:23 -0000	1.4
@@ -1,20 +1,24 @@
 #ifndef _VAX_SOFTIRQ_H
 #define _VAX_SOFTIRQ_H
 
-#include <asm/atomic.h>
-#include <asm/hardirq.h>
+#include <linux/preempt.h>
 
-#define __cpu_bh_enable(cpu) \
-	do { barrier(); local_bh_count(cpu)--; } while (0)
-#define cpu_bh_disable(cpu) \
-	do { local_bh_count(cpu)++; barrier(); } while (0)
-
-#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
-#define local_bh_enable()	__cpu_bh_enable(smp_processor_id())
-#define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())
-
-#define in_softirq()	(local_bh_count(smp_processor_id()) != 0)
+#define local_bh_disable() \
+	do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
+#define __local_bh_enable() \
+	do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
+#define local_bh_enable()	__local_bh_enable()
+
+/* FIXME: implement this like i386 or arm */
+#if 0
+#define local_bh_enable() \
+	do { \
+		__local_bh_enable(); \
+		if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
+			__asm__("bl%? __do_softirq": : : "lr");/* out of line */\
+		preempt_check_resched(); \
+	} while (0)
+#endif
 
 #endif /* _VAX_SOFTIRQ_H */

Index: system.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/system.h,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- system.h	26 Jan 2003 17:06:29 -0000	1.6
+++ system.h	9 Feb 2003 01:49:23 -0000	1.7
@@ -10,9 +10,6 @@
 #define prepare_arch_schedule(prev)	do { } while(0)
 #define finish_arch_schedule(prev)	do { } while(0)
 
-#define prepare_arch_switch(rq)	do { } while(0)
-#define finish_arch_switch(rq)	spin_unlock_irq(&(rq)->lock)
-
 #include <asm/psl.h>
 
 
@@ -36,7 +33,10 @@
 
 struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
 
-extern void switch_to(struct task_struct*, struct task_struct*, struct task_struct *);
+/* This macro wrapper around switch_to() means that the compiler
+   doesn't need to push the unused 3rd argument on the stack */
+extern void __switch_to(struct task_struct*, struct task_struct*);
+#define switch_to(prev, next, last) __switch_to((prev), (next))
 
 /* mb is the alpha instruction to cause serialisation of memory
    operations.  According to a software note in section 5.5.4 of
@@ -74,8 +74,11 @@
 	__r0; \
 })
 
-#define __cli()	setipl(31)
-
+#define local_irq_save(flags)	((flags) = getipl())
+#define local_irq_save_off(flags)	((flags) = swpipl(31))
+#define local_irq_restore(flags)	setipl(flags)
+#define local_irq_disable()	setipl(31)
+
 /* If we're handling an interrupt (i.e. the IS bit is set in the PSL
    and we're on the interrupt stack), then we must not enable
    interrupts by dropping IPL all the way to 0.  If we do, and
@@ -85,7 +88,7 @@
    So, instead, we drop IPL to 1 if we're running on the interrupt
    stack, thus making sure that REI will be kept happy. */
 
-/* since 2.4.6, there is now a local_irq_enable -> __sti() in
+/* since 2.4.6, there is now a local_irq_enable in
 * the softirq/tasklet code - which is fine, except when we are
 * interrupted servicing a device interrupt with IPL=0x14 (say).
 * in this case, if we set IPL to 1, we return to the higher IPL
@@ -94,7 +97,7 @@
 */
 
 extern __inline__ void
-__sti(void)
+local_irq_enable(void)
 {
 	if (__psl.is) {
 		if (__psl.prevmode != PSL_MODE_KERNEL) {
@@ -106,39 +109,8 @@
 	}
 }
 
-#define __save_flags(flags)	((flags) = getipl())
-#define __save_and_cli(flags)	((flags) = swpipl(31))
-#define __restore_flags(flags)	setipl(flags)
-
-#define local_irq_save(flags)	__save_and_cli(flags)
-#define local_irq_restore(flags)	__restore_flags(flags)
-#define local_irq_disable()	__cli()
-#define local_irq_enable()	__sti()
-
 #ifdef __SMP__
-
-extern int global_irq_holder;
-
-#define save_and_cli(flags)	(save_flags(flags), cli())
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long flags);
-
-#define cli()	__global_cli()
-#define sti()	__global_sti()
-#define save_flags(flags)	((flags) = __global_save_flags())
-#define restore_flags(flags)	__global_restore_flags(flags)
-
-#else /* __SMP__ */
-
-#define cli()	__cli()
-#define sti()	__sti()
-#define save_flags(flags)	__save_flags(flags)
-#define save_and_cli(flags)	__save_and_cli(flags)
-#define restore_flags(flags)	__restore_flags(flags)
-
+#error SMP not supported
 #endif /* __SMP__ */
 
 #define nop() __asm__ __volatile__ ("nop")
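
Editor's note: for anyone reading this change without the rest of the tree handy,
the new hardirq.h packs all three nesting counters into the single preempt_count
word, as described by the comment in the patch. Below is a minimal, standalone C
sketch of how those masks and offsets behave; it is illustrative only (a plain
unsigned long stands in for the real per-task counter, and the main()/printf
decoding is mine, not kernel code):

    #include <stdio.h>

    #define PREEMPT_BITS   8
    #define SOFTIRQ_BITS   8
    #define HARDIRQ_BITS   15

    #define PREEMPT_SHIFT  0
    #define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
    #define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)

    #define __MASK(x)      ((1UL << (x)) - 1)
    #define SOFTIRQ_MASK   (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
    #define HARDIRQ_MASK   (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

    #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
    #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)

    static unsigned long preempt_count;  /* stand-in for the real per-task counter */

    int main(void)
    {
        preempt_count += HARDIRQ_OFFSET;  /* what irq_enter() does */
        preempt_count += SOFTIRQ_OFFSET;  /* what local_bh_disable() does */

        printf("hardirq nesting: %lu\n",
               (preempt_count & HARDIRQ_MASK) >> HARDIRQ_SHIFT);
        printf("softirq nesting: %lu\n",
               (preempt_count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT);
        printf("in_interrupt():  %lu\n",
               preempt_count & (HARDIRQ_MASK | SOFTIRQ_MASK));

        preempt_count -= SOFTIRQ_OFFSET;  /* __local_bh_enable() */
        preempt_count -= HARDIRQ_OFFSET;  /* irq_exit() in the UP case above */
        return 0;
    }

The irq_enter()/irq_exit() and local_bh_disable()/__local_bh_enable() macros in
the patch are exactly these += / -= operations on the real counter, which is why
in_interrupt() is now a simple mask test instead of the old per-CPU
local_irq_count/local_bh_count arrays.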
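The interrupt-disable primitives also change name: the old __cli()/__sti()/
__save_flags() family becomes the local_irq_* macros built on the getipl()/
setipl()/swpipl() inlines already present earlier in system.h. A sketch of the
intended usage pattern, as I read the macros (the surrounding code and the
critical-section body are placeholders, not code from the tree):

    /* somewhere in VAX-specific kernel code */
    unsigned long flags;

    local_irq_save_off(flags);  /* swpipl(31): raise IPL to 31, remember old IPL */
    /* ... touch data shared with interrupt handlers ... */
    local_irq_restore(flags);   /* setipl(flags): drop back to the saved IPL */

Worth noting: as committed, local_irq_save(flags) only records the current IPL
via getipl() and does not raise it; local_irq_save_off() is the variant that
corresponds to the old __save_and_cli().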
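Finally, the switch_to() change in system.h is a calling-convention cleanup
rather than a behavioural one: the generic scheduler still invokes
switch_to(prev, next, last), and the macro discards the unused third argument
before calling the two-argument VAX __switch_to(). Illustrative expansion only
(prev/next/last are the scheduler's usual names, not new code here):

    #define switch_to(prev, next, last) __switch_to((prev), (next))

    switch_to(prev, next, prev);   /* expands to __switch_to(prev, next) */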