
rtmutex.c

Linux 2.6.19 kernel source code (before patching)

Language: C
	spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Clear the pi_blocked_on variable and enqueue a possible
	 * waiter into the pi_waiters list of the pending owner. This
	 * prevents that in case the pending owner gets unboosted a
	 * waiter with higher priority than pending-owner->normal_prio
	 * is blocked on the unboosted (pending) owner.
	 */
	spin_lock_irqsave(&pendowner->pi_lock, flags);

	WARN_ON(!pendowner->pi_blocked_on);
	WARN_ON(pendowner->pi_blocked_on != waiter);
	WARN_ON(pendowner->pi_blocked_on->lock != lock);

	pendowner->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock)) {
		struct rt_mutex_waiter *next;

		next = rt_mutex_top_waiter(lock);
		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
	}
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	wake_up_process(pendowner);
}

/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (first && owner != current) {

		spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_MODE_ABS);

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task)) {
				/*
				 * Reset the return value. We might
				 * have returned with -EDEADLK and the
				 * owner released the lock while we
				 * were walking the pi chain.
				 */
				ret = 0;
				continue;
			}
			if (unlikely(ret))
				break;
		}

		spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock: 		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0 		on success
 * -EINTR 	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
						 int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_lock_interruptible_ktime - lock a rt_mutex interruptible
 *				       the timeout structure is provided
 *				       by the caller
 *
 * @lock: 		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0 		on success
 * -EINTR 	when interrupted by a signal
 * -ETIMEOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/***
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock: 	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner, 0);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: 	the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL, 0);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}
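
Usage note: the exported functions above form the public rt_mutex API. On architectures with cmpxchg, the uncontended lock/unlock paths compile down to a single atomic compare-and-exchange of the owner field; only contention falls into the slowpaths shown in this file. What follows is a minimal, hypothetical usage sketch, not part of rtmutex.c, showing how a kernel-side caller would pair these calls. It assumes <linux/rtmutex.h> (which declares the API together with the DEFINE_RT_MUTEX() static initializer) and a kernel built with CONFIG_RT_MUTEXES=y; the lock and function names in the sketch are made up for illustration.

#include <linux/rtmutex.h>

/* Hypothetical example lock: DEFINE_RT_MUTEX() yields a statically
 * initialized, unlocked rt_mutex. */
static DEFINE_RT_MUTEX(example_lock);

static int example_critical_section(void)
{
	int ret;

	/* Uninterruptible acquire: fastpath cmpxchg when uncontended,
	 * rt_mutex_slowlock() with priority inheritance when we block. */
	rt_mutex_lock(&example_lock);
	/* ... access data protected by example_lock ... */
	rt_mutex_unlock(&example_lock);	/* wakes top waiter, undoes PI boost */

	/* Interruptible acquire with deadlock detection off (second
	 * argument 0): returns 0 on success or -EINTR on a signal. */
	ret = rt_mutex_lock_interruptible(&example_lock, 0);
	if (ret)
		return ret;
	/* ... critical section ... */
	rt_mutex_unlock(&example_lock);

	return 0;
}

Because both acquire paths go through rt_mutex_fastlock(), the sketch exercises the same slowpath logic listed above whenever another task already owns example_lock.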
