From: Kenn H. <ke...@us...> - 2001-01-29 01:09:27
Update of /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel
In directory usw-pr-cvs1:/tmp/cvs-serv29867/arch/vax/kernel
Modified Files:
	Makefile
Added Files:
	semaphore.c
Log Message:
Semaphores added. SYSV IPC and proc filesystem compile and sort-of-work.
Init sequence now gets as far as tty init. EXTRAVERSION updated to
-20010129
--- NEW FILE ---
/*
 * $Id: semaphore.c,v 1.1 2001/01/29 01:09:18 kenn Exp $
 *
 * VAX version (based on S390 version)
 *   Copyright (C) 2001, Kenn Humborg
 *
 * S390 version
 *   Copyright (C) 1998 IBM Corporation
 *   Author(s): Martin Schwidefsky
 *
 * Derived from "linux/arch/i386/kernel/semaphore.c"
 *   Copyright (C) 1999, Linus Torvalds
 *
 */
#include <linux/sched.h>
#include <asm/semaphore.h>
/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleeping" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
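
/*
 * Purely illustrative sketch (not part of the original file) of what
 * those <asm/semaphore.h> fast paths typically look like, assuming
 * generic atomic_dec_return()/atomic_inc_return() primitives; the
 * real VAX inlines may differ.  Shown only to make clear how the
 * __down()/__up() slow paths below get called.
 */
#if 0
static inline void example_down(struct semaphore *sem)
{
	/* Uncontended fast path: only the counter is touched. */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);		/* contended: sleep below */
}

static inline void example_up(struct semaphore *sem)
{
	/* A sleeper needs waking only if the count was negative
	 * before the increment. */
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}
#endif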

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
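
/*
 * Called by up() only when the count was negative before the
 * increment, i.e. when at least one process is queued below in
 * __down() or __down_interruptible().
 */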
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
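
/* Serializes "sleepers" and orders the contention paths below. */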
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

void __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
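	/* We own the semaphore now; wake the next exclusive waiter
	 * so it can re-check the count in case more up()s came in
	 * while we were being woken. */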
	wake_up(&sem->wait);
}

int __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 */
int __down_trylock(struct semaphore * sem)
{
	unsigned long flags;
	int sleepers;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
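
	/* The fast path already failed; report failure (nonzero) to
	 * the inline down_trylock() caller. */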
	return 1;
}
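
/*
 * Read-write semaphores.  The count starts at RW_LOCK_BIAS when the
 * lock is free; a reader decrements it by 1, a writer by the full
 * RW_LOCK_BIAS.  The *_biased slow paths below sleep until an up()
 * explicitly grants them the lock through the read_bias_granted /
 * write_bias_granted flags.
 */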
void down_read_failed_biased(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue(&sem->wait, &wait);	/* put ourselves at the head of the list */

	for (;;) {
		if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
			break;
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!sem->read_bias_granted)
			schedule();
	}
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
}

void down_write_failed_biased(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue_exclusive(&sem->write_bias_wait, &wait);	/* put ourselves at the end of the list */

	for (;;) {
		if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
			break;
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!sem->write_bias_granted)
			schedule();
	}
	remove_wait_queue(&sem->write_bias_wait, &wait);
	tsk->state = TASK_RUNNING;

	/* if the lock is currently unbiased, awaken the sleepers
	 * FIXME: this wakes up the readers early in a bit of a
	 * stampede -> bad!
	 */
	if (atomic_read(&sem->count) >= 0)
		wake_up(&sem->wait);
}

/* Wait for the lock to become unbiased. Readers
 * are non-exclusive. =)
 */
void down_read_failed(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	up_read(sem);	/* this takes care of granting the lock */

	add_wait_queue(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&sem->count) >= 0)
			break;
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
}

/* Wait for the lock to become unbiased. Since we're
 * a writer, we'll make ourselves exclusive.
 */
void down_write_failed(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	up_write(sem);	/* this takes care of granting the lock */

	add_wait_queue_exclusive(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&sem->count) >= 0)
			break;	/* we must attempt to acquire or bias the lock */
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
}

/* Called when someone has done an up that transitioned from
 * negative to non-negative, meaning that the lock has been
 * granted to whomever owned the bias.
 */
void rwsem_wake_readers(struct rw_semaphore *sem)
{
	if (xchg(&sem->read_bias_granted, 1))
		BUG();
	wake_up(&sem->wait);
}

void rwsem_wake_writers(struct rw_semaphore *sem)
{
	if (xchg(&sem->write_bias_granted, 1))
		BUG();
	wake_up(&sem->write_bias_wait);
}
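
/*
 * Entry points for the inline down_read()/down_write() failure
 * paths, called with the count value seen after the failed atomic
 * operation.  A count of -1 (reader) or between -RW_LOCK_BIAS and 0
 * (writer) means the bias was granted to us, so we only wait for the
 * grant flag; otherwise we wait for the lock to become unbiased and
 * retry.
 */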
void __down_read_failed(int count, struct rw_semaphore *sem)
{
	do {
		if (count == -1) {
			down_read_failed_biased(sem);
			break;
		}
		down_read_failed(sem);
		count = atomic_dec_return(&sem->count);
	} while (count < 0);	/* non-negative: we hold the read lock */
}

void __down_write_failed(int count, struct rw_semaphore *sem)
{
	do {
		if (count < 0 && count > -RW_LOCK_BIAS) {
			down_write_failed_biased(sem);
			break;
		}
		down_write_failed(sem);
		count = atomic_add_return(-RW_LOCK_BIAS, &sem->count);
	} while (count != 0);
}

void __rwsem_wake(int count, struct rw_semaphore *sem)
{
	if (count == 0)
		rwsem_wake_readers(sem);
	else
		rwsem_wake_writers(sem);
}
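
For reference, a hypothetical caller of the plain-semaphore paths above
(the names example_sem and example_critical are illustrative, not part
of this commit):

	static DECLARE_MUTEX(example_sem);	/* semaphore with count 1 */

	void example_critical(void)
	{
		down(&example_sem);	/* sleeps in __down() on contention */
		/* ... critical section ... */
		up(&example_sem);	/* calls __up() if sleepers exist */
	}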
Index: Makefile
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/arch/vax/kernel/Makefile,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- Makefile 2001/01/26 00:27:00 1.3
+++ Makefile 2001/01/29 01:09:18 1.4
@@ -12,7 +12,7 @@
 O_TARGET := kernel.o
 
 obj-y := process.o setup.o regdump.o interrupt.o entry.o time.o \
-	syscall.o signal.o \
+	syscall.o signal.o semaphore.o \
 	init_task.o reboot.o cpu_generic.o \
 	cpu_ka630.o cpu_ka640.o cpu_ka650.o cpu_ka660.o \
 	cpu_ka410.o cpu_ka42.o cpu_ka43.o cpu_ka46.o