From: Kenn H. <ke...@us...> - 2001-01-29 00:44:48
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax
In directory usw-pr-cvs1:/tmp/cvs-serv26062

Modified Files:
	semaphore.h
Log Message:
semaphore-helper.h

Index: semaphore.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/semaphore.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -r1.2 -r1.3
--- semaphore.h	2001/01/18 15:52:28	1.2
+++ semaphore.h	2001/01/29 00:44:39	1.3
@@ -1,37 +1,29 @@
-#ifndef _VAX_SEMAPHORE_H
-#define _VAX_SEMAPHORE_H
-
-
 /*
- * SMP- and interrupt-safe semaphores..
+ * $Id$
+ *
+ * VAX version based on S390 version
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *
- * (C) Copyright 1996 Linus Torvalds
- *
- * VAX port atp Nov 1998.
- *  - this makes my brain hurt.
- *  - updated for 2.4 Jan 2001. New wait_queue_head_t plus misc software rot.
- *    Lifted from s390 and parisc
+ *  Derived from "include/asm-i386/semaphore.h"
+ *    (C) Copyright 1996 Linus Torvalds
  */
+
+#ifndef _VAX_SEMAPHORE_H
+#define _VAX_SEMAPHORE_H
+
 #include <asm/system.h>
 #include <asm/atomic.h>
-#include <linux/wait.h>		/* wait_queue_head_t */
-#include <linux/linkage.h>
+#include <linux/wait.h>
 
 struct semaphore {
 	atomic_t count;
-	int waking;
+	int sleepers;
 	wait_queue_head_t wait;
-#if WAITQUEUE_DEBUG
-	long __magic;
-#endif
 };
 
-#if WAITQUEUE_DEBUG
-# define __SEM_DEBUG_INIT(name) \
-		, (long)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
+#define __SEM_DEBUG_INIT(name)
 
 #define __SEMAPHORE_INITIALIZER(name,count) \
 { ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
@@ -49,9 +41,6 @@
 extern inline void sema_init (struct semaphore *sem, int val)
 {
 	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-#if WAITQUEUE_DEBUG
-	sem->__magic = (long)&sem->__magic;
-#endif
 }
 
 static inline void init_MUTEX (struct semaphore *sem)
@@ -74,52 +63,32 @@
 asmlinkage int __down_trylock(struct semaphore * sem);
 asmlinkage void __up(struct semaphore * sem);
 
-extern spinlock_t semaphore_wake_lock;
-
-
-extern __inline__ void down(struct semaphore * sem)
+extern inline void down(struct semaphore * sem)
 {
-#if WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
 	if (atomic_dec_return(&sem->count) < 0)
 		__down(sem);
 }
 
-extern __inline__ int down_interruptible(struct semaphore * sem)
+extern inline int down_interruptible(struct semaphore * sem)
 {
 	int ret = 0;
-#if WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
 	if (atomic_dec_return(&sem->count) < 0)
 		ret = __down_interruptible(sem);
 	return ret;
 }
 
-extern __inline__ int down_trylock(struct semaphore * sem)
+extern inline int down_trylock(struct semaphore * sem)
 {
 	int ret = 0;
-#if WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
 	if (atomic_dec_return(&sem->count) < 0)
 		ret = __down_trylock(sem);
 	return ret;
}
 
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-extern __inline__ void up(struct semaphore * sem)
+extern inline void up(struct semaphore * sem)
 {
-#if WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
 	if (atomic_inc_return(&sem->count) <= 0)
 		__up(sem);
 }
@@ -143,38 +112,27 @@
  * flopping back and forth between readers and writers
  * under heavy use.
  *
- * -ben
+ * -ben
  */
 struct rw_semaphore {
-	atomic_t		count;
-	volatile unsigned char	write_bias_granted;
-	volatile unsigned char	read_bias_granted;
-	volatile unsigned char	pad1;
-	volatile unsigned char	pad2;
-	wait_queue_head_t	wait;
-	wait_queue_head_t	write_bias_wait;
-#if WAITQUEUE_DEBUG
-	long			__magic;
-	atomic_t		readers;
-	atomic_t		writers;
-#endif
+	atomic_t		count;
+	volatile unsigned int	write_bias_granted;
+	volatile unsigned int	read_bias_granted;
+	wait_queue_head_t	wait;
+	wait_queue_head_t	write_bias_wait;
 };
 
-#if WAITQUEUE_DEBUG
-#define __RWSEM_DEBUG_INIT	, ATOMIC_INIT(0), ATOMIC_INIT(0)
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
+#define RW_LOCK_BIAS		0x01000000
 
-#define RW_LOCK_BIAS		0x01000000
+#define __RWSEM_DEBUG_INIT	/* */
 
 #define __RWSEM_INITIALIZER(name,count) \
-{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
-	__WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
-	__SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
+{ ATOMIC_INIT(count), 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+	__WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
+	__SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
 
 #define __DECLARE_RWSEM_GENERIC(name,count) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
+	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
 
 #define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
 #define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
@@ -182,110 +140,55 @@
 
 extern inline void init_rwsem(struct rw_semaphore *sem)
 {
-	atomic_set(&sem->count, RW_LOCK_BIAS);
-	sem->read_bias_granted = 0;
-	sem->write_bias_granted = 0;
-	init_waitqueue_head(&sem->wait);
-	init_waitqueue_head(&sem->write_bias_wait);
-#if WAITQUEUE_DEBUG
-	sem->__magic = (long)&sem->__magic;
-	atomic_set(&sem->readers, 0);
-	atomic_set(&sem->writers, 0);
-#endif
-}
-
-#ifdef FIXME_WILLY_FIXME_FOR_REAL_THIS_TIME
-extern struct rw_semaphore *__build_read_lock(struct rw_semaphore *sem, const char *what);
-extern struct rw_semaphore *__build_write_lock(struct rw_semaphore *sem, const char *what);
-#endif
-
-/* we use FASTCALL convention for the helpers */
-extern struct rw_semaphore *FASTCALL(__down_read_failed(struct rw_semaphore *sem));
-extern struct rw_semaphore *FASTCALL(__down_write_failed(struct rw_semaphore *sem));
-extern struct rw_semaphore *FASTCALL(__rwsem_wake(struct rw_semaphore *sem));
+	atomic_set(&sem->count, RW_LOCK_BIAS);
+	sem->read_bias_granted = 0;
+	sem->write_bias_granted = 0;
+	init_waitqueue_head(&sem->wait);
+	init_waitqueue_head(&sem->write_bias_wait);
+}
+extern void __down_read_failed(int, struct rw_semaphore *);
+extern void __down_write_failed(int, struct rw_semaphore *);
+extern void __rwsem_wake(int, struct rw_semaphore *);
+
 extern inline void down_read(struct rw_semaphore *sem)
 {
-#if WAITQUEUE_DEBUG
-	if (sem->__magic != (long)&sem->__magic)
-		BUG();
-#endif
-#ifdef FIXME_WILLY_FIXME_FOR_REAL_THIS_TIME
-	__build_read_lock(sem, "__down_read_failed");
-#endif
-#if WAITQUEUE_DEBUG
-	if (sem->write_bias_granted)
-		BUG();
-	if (atomic_read(&sem->writers))
-		BUG();
-	atomic_inc(&sem->readers);
-#endif
+	int count;
+	count = atomic_dec_return(&sem->count);
+	if (count < 0)
+		__down_read_failed(count, sem);
 }
 
 extern inline void down_write(struct rw_semaphore *sem)
 {
-#if WAITQUEUE_DEBUG
-	if (sem->__magic != (long)&sem->__magic)
-		BUG();
-#endif
-#ifdef FIXME_WILLY_FIXME_FOR_REAL_THIS_TIME
-	__build_write_lock(sem, "__down_write_failed");
-#endif
-#if WAITQUEUE_DEBUG
-	if (atomic_read(&sem->writers))
-		BUG();
-	if (atomic_read(&sem->readers))
-		BUG();
-	if (sem->read_bias_granted)
-		BUG();
-	if (sem->write_bias_granted)
-		BUG();
-	atomic_inc(&sem->writers);
-#endif
+	int count;
+	count = atomic_add_return (-RW_LOCK_BIAS, &sem->count);
+	if (count < 0)
+		__down_write_failed(count, sem);
 }
 
 /* When a reader does a release, the only significant
  * case is when there was a writer waiting, and we've
  * bumped the count to 0: we must wake the writer up.
  */
-extern inline void __up_read(struct rw_semaphore *sem)
+extern inline void up_read(struct rw_semaphore *sem)
 {
+	int count;
+	count = atomic_inc_return(&sem->count);
+	if (count == 0)
+		__rwsem_wake(count, sem);
 }
 
 /* releasing the writer is easy -- just release it and
  * wake up any sleepers.
  */
-extern inline void __up_write(struct rw_semaphore *sem)
-{
-}
-
-extern inline void up_read(struct rw_semaphore *sem)
-{
-#if WAITQUEUE_DEBUG
-	if (sem->write_bias_granted)
-		BUG();
-	if (atomic_read(&sem->writers))
-		BUG();
-	atomic_dec(&sem->readers);
-#endif
-	__up_read(sem);
-}
-
 extern inline void up_write(struct rw_semaphore *sem)
 {
-#if WAITQUEUE_DEBUG
-	if (sem->read_bias_granted)
-		BUG();
-	if (sem->write_bias_granted)
-		BUG();
-	if (atomic_read(&sem->readers))
-		BUG();
-	if (atomic_read(&sem->writers) != 1)
-		BUG();
-	atomic_dec(&sem->writers);
-#endif
-	__up_write(sem);
+	int count;
+	count = atomic_add_return(RW_LOCK_BIAS, &sem->count);
+	if (count >= 0 && count < RW_LOCK_BIAS)
+		__rwsem_wake(count, sem);
 }
 
+#endif /* _VAX_SEMAPHORE_H */
 
-#endif /* !(_VAX_SEMAPHORE_H) */
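
For reference, the plain-semaphore fast paths above only drop into the out-of-line
helpers when the atomic result shows contention.  The fragment below is NOT part of
the patch; it is a minimal userspace model (plain ints instead of atomic_t, a counter
standing in for the wait queue) of that counting scheme, just to show when __down()
and __up() would fire:

    /* Hypothetical stand-alone model of the semaphore fast paths above.
     * Not kernel code: "atomic" ops are plain ints, sleeping is a counter. */
    #include <stdio.h>

    struct model_sem {
        int count;      /* >0: free, 0: held, <0: -count waiters        */
        int sleepers;   /* stands in for the real wait queue            */
    };

    static void model_down(struct model_sem *s)
    {
        if (--s->count < 0)             /* atomic_dec_return() < 0      */
            s->sleepers++;              /* would call __down() and wait */
    }

    static void model_up(struct model_sem *s)
    {
        if (++s->count <= 0)            /* atomic_inc_return() <= 0     */
            s->sleepers--;              /* would call __up() to wake one */
    }

    int main(void)
    {
        struct model_sem s = { 1, 0 };  /* like init_MUTEX()            */

        model_down(&s);                 /* acquires: count 1 -> 0       */
        model_down(&s);                 /* contended: count 0 -> -1     */
        printf("count=%d sleepers=%d\n", s.count, s.sleepers);
        model_up(&s);                   /* count -1 -> 0, wakes waiter  */
        model_up(&s);                   /* count 0 -> 1, nobody waiting */
        printf("count=%d sleepers=%d\n", s.count, s.sleepers);
        return 0;
    }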
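The read/write semaphore side is the less obvious part: a reader takes 1 from count,
a writer takes the whole RW_LOCK_BIAS, and the wake-up tests in up_read()/up_write()
fall out of that arithmetic.  Again a minimal userspace sketch, not the kernel code,
walking one reader/writer handoff under the simplifying assumption that a blocked
writer keeps its -RW_LOCK_BIAS while it waits:

    /* Hypothetical walk-through of the RW_LOCK_BIAS counting used by
     * down_read/down_write/up_read/up_write above.  Plain ints instead
     * of atomic_t; the "wake" decisions mirror the inline checks.      */
    #include <assert.h>
    #include <stdio.h>

    #define RW_LOCK_BIAS 0x01000000

    int main(void)
    {
        int count = RW_LOCK_BIAS;        /* init_rwsem(): unlocked      */

        count -= 1;                      /* down_read(): >= 0, fast path */
        assert(count >= 0);

        count -= RW_LOCK_BIAS;           /* down_write() with a reader  */
        assert(count < 0);               /* inside: writer would queue  */
                                         /* in __down_write_failed()    */

        count += 1;                      /* up_read() by the last reader */
        printf("wake writer? %s\n", count == 0 ? "yes" : "no");

        /* in this model the woken writer keeps its -RW_LOCK_BIAS, so it
         * now owns the lock with count == 0 */
        count += RW_LOCK_BIAS;           /* up_write()                   */
        printf("wake someone? %s\n",
               (count >= 0 && count < RW_LOCK_BIAS) ? "yes" : "no");

        assert(count == RW_LOCK_BIAS);   /* back to the unlocked value   */
        return 0;
    }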