
semaphore.c

From the Linux kernel source code (distributed as a compressed archive); this is the source code accompanying the book <<Linux内核>> (Linux Kernel).

Language: C
/*
 *  linux/arch/S390/kernel/semaphore.c
 *
 *  S390 version
 *    Copyright (C) 1998 IBM Corporation
 *    Author(s): Martin Schwidefsky
 *
 *  Derived from "linux/arch/i386/kernel/semaphore.c
 *    Copyright (C) 1999, Linus Torvalds
 *
 */

#include <linux/sched.h>
#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation.
 *
 * "sleeping" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */

void __up(struct semaphore *sem)
{
        wake_up(&sem->wait);
}

static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

void __down(struct semaphore * sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        tsk->state = TASK_UNINTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                tsk->state = TASK_UNINTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;
        wake_up(&sem->wait);
}

int __down_interruptible(struct semaphore * sem)
{
        int retval = 0;
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        tsk->state = TASK_INTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * With signals pending, this turns into
                 * the trylock failure case - we won't be
                 * sleeping, and we can't get the lock as
                 * it has contention. Just correct the count
                 * and exit.
                 */
                if (signal_pending(current)) {
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
                        break;
                }

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock. The
                 * "-1" is because we're still hoping to get
                 * the lock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                tsk->state = TASK_INTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        tsk->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
        wake_up(&sem->wait);
        return retval;
}

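/*
 * An illustrative walkthrough of the count/sleepers protocol that
 * __down() and __down_interruptible() above implement (a sketch,
 * assuming a mutex-style semaphore whose count starts at 1):
 *
 *   1. Task A calls down(): the inline fast path drops count to 0,
 *      A owns the semaphore, and this file is never entered.
 *   2. Task B calls down(): count drops to -1, so the fast path
 *      calls __down(). B takes semaphore_lock, bumps sleepers to 1,
 *      and atomic_add_negative(sleepers - 1 = 0, &count) leaves
 *      count at -1, still negative, so B sets sleepers back to 1
 *      and sleeps in schedule().
 *   3. Task A calls up(): count goes from -1 to 0; since there were
 *      waiters, the fast path calls __up(), which wakes B.
 *   4. B loops once more: atomic_add_negative(0, &count) now leaves
 *      count at 0, which is non-negative, so B clears sleepers,
 *      breaks out of the loop and owns the semaphore.
 *
 * The trailing wake_up() in __down() lets any remaining waiters
 * re-check the count, so a wakeup consumed by the task that just
 * acquired the semaphore is not silently lost.
 */
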
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 */
int __down_trylock(struct semaphore * sem)
{
        unsigned long flags;
        int sleepers;

        spin_lock_irqsave(&semaphore_lock, flags);
        sleepers = sem->sleepers + 1;
        sem->sleepers = 0;

        /*
         * Add "everybody else" and us into it. They aren't
         * playing, because we own the spinlock.
         */
        if (!atomic_add_negative(sleepers, &sem->count))
                wake_up(&sem->wait);

        spin_unlock_irqrestore(&semaphore_lock, flags);
        return 1;
}

void down_read_failed_biased(struct rw_semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        add_wait_queue(&sem->wait, &wait);      /* put ourselves at the head of the list */

        for (;;) {
                if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!sem->read_bias_granted)
                        schedule();
        }

        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;
}

void down_write_failed_biased(struct rw_semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */

        for (;;) {
                if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!sem->write_bias_granted)
                        schedule();
        }

        remove_wait_queue(&sem->write_bias_wait, &wait);
        tsk->state = TASK_RUNNING;

        /* if the lock is currently unbiased, awaken the sleepers
         * FIXME: this wakes up the readers early in a bit of a
         * stampede -> bad!
         */
        if (atomic_read(&sem->count) >= 0)
                wake_up(&sem->wait);
}

/* Wait for the lock to become unbiased.  Readers
 * are non-exclusive. =)
 */
void down_read_failed(struct rw_semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        up_read(sem);   /* this takes care of granting the lock */

        add_wait_queue(&sem->wait, &wait);

        while (atomic_read(&sem->count) < 0) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (atomic_read(&sem->count) >= 0)
                        break;
                schedule();
        }

        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;
}

/* Wait for the lock to become unbiased. Since we're
 * a writer, we'll make ourselves exclusive.
 */
void down_write_failed(struct rw_semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        up_write(sem);  /* this takes care of granting the lock */

        add_wait_queue_exclusive(&sem->wait, &wait);

        while (atomic_read(&sem->count) < 0) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (atomic_read(&sem->count) >= 0)
                        break;  /* we must attempt to acquire or bias the lock */
                schedule();
        }

        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;
}

/* Called when someone has done an up that transitioned from
 * negative to non-negative, meaning that the lock has been
 * granted to whomever owned the bias.
 */
void rwsem_wake_readers(struct rw_semaphore *sem)
{
        if (xchg(&sem->read_bias_granted, 1))
                BUG();
        wake_up(&sem->wait);
}

void rwsem_wake_writers(struct rw_semaphore *sem)
{
        if (xchg(&sem->write_bias_granted, 1))
                BUG();
        wake_up(&sem->write_bias_wait);
}

void __down_read_failed(int count, struct rw_semaphore *sem)
{
        do {
                if (count == -1) {
                        down_read_failed_biased(sem);
                        break;
                }
                down_read_failed(sem);
                count = atomic_dec_return(&sem->count);
        } while (count != 0);
}

void __down_write_failed(int count, struct rw_semaphore *sem)
{
        do {
                if (count < 0 && count > -RW_LOCK_BIAS) {
                        down_write_failed_biased(sem);
                        break;
                }
                down_write_failed(sem);
                count = atomic_add_return(-RW_LOCK_BIAS, &sem->count);
        } while (count != 0);
}

void __rwsem_wake(int count, struct rw_semaphore *sem)
{
        if (count == 0)
                rwsem_wake_readers(sem);
        else
                rwsem_wake_writers(sem);
}
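
The functions above are only the contention slow paths; kernel code never calls them directly. The inline fast paths in <asm/semaphore.h> (down(), up(), down_interruptible(), down_trylock(), and the rw_semaphore operations down_read()/up_read() and down_write()/up_write()) drop into this file only when the count indicates contention. Below is a minimal usage sketch, assuming the usual 2.4-era semaphore API; the identifiers buf_sem, cfg_sem, example_init() and example_write() are hypothetical and exist only for illustration.

#include <linux/errno.h>
#include <linux/init.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(buf_sem);          /* counting semaphore initialized to 1 */
static struct rw_semaphore cfg_sem;     /* reader/writer semaphore */

static int __init example_init(void)
{
        init_rwsem(&cfg_sem);           /* rw_semaphores need explicit initialization */
        return 0;
}

int example_write(char *buf, int len)
{
        /* Sleep until buf_sem is free; a pending signal aborts with -EINTR.
         * Under contention this ends up in __down_interruptible() above. */
        if (down_interruptible(&buf_sem))
                return -EINTR;
        /* ... exclusive access to the buffer ... */
        up(&buf_sem);                   /* if there are waiters, this reaches __up() */

        /* Any number of readers may hold cfg_sem at once; writers are exclusive. */
        down_read(&cfg_sem);
        /* ... read shared configuration ... */
        up_read(&cfg_sem);

        return 0;
}

The sketch relies only on the public entry points; which slow-path routine in this file actually runs (the plain, biased, or unbiased variants) is decided by the inline fast paths based on the current count.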
