
📄 lockdep.c

📁 linux 2.6.19 kernel source code before patching
💻 C
📖 Page 1 of 5
	hlock->trylock = trylock;
	hlock->read = read;
	hlock->check = check;
	hlock->hardirqs_off = hardirqs_off;

	if (check != 2)
		goto out_calc_hash;
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * If non-trylock use in a hardirq or softirq context, then
	 * mark the lock as used in these contexts:
	 */
	if (!trylock) {
		if (read) {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_HARDIRQ_READ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_SOFTIRQ_READ))
					return 0;
		} else {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
					return 0;
		}
	}
	if (!hardirqs_off) {
		if (read) {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQS_READ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQS_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQS))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQS))
					return 0;
		}
	}
#endif
	/* mark it as used: */
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;
out_calc_hash:
	/*
	 * Calculate the chain hash: it's the combined hash of all the
	 * lock keys along the dependency chain. We save the hash value
	 * at every step so that we can get the current hash easily
	 * after unlock. The chain hash is then used to cache dependency
	 * results.
	 *
	 * The 'key ID' is the most compact key value to drive
	 * the hash, not class->key.
	 */
	id = class - lock_classes;
	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
		return 0;

	chain_key = curr->curr_chain_key;
	if (!depth) {
		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
			return 0;
		chain_head = 1;
	}

	hlock->prev_chain_key = chain_key;

#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Keep track of points where we cross into an interrupt context:
	 */
	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
				curr->softirq_context;
	if (depth) {
		struct held_lock *prev_hlock;

		prev_hlock = curr->held_locks + depth-1;
		/*
		 * If we cross into another context, reset the
		 * hash key (this also prevents the checking and the
		 * adding of the dependency to 'prev'):
		 */
		if (prev_hlock->irq_context != hlock->irq_context) {
			chain_key = 0;
			chain_head = 1;
		}
	}
#endif
	chain_key = iterate_chain_key(chain_key, id);
	curr->curr_chain_key = chain_key;

	/*
	 * Trylock needs to maintain the stack of held locks, but it
	 * does not add new dependencies, because trylock can be done
	 * in any order.
	 *
	 * We look up the chain_key and do the O(N^2) check and update of
	 * the dependencies only if this is a new dependency chain.
	 * (If lookup_chain_cache() returns with 1 it acquires
	 * graph_lock for us)
	 */
	if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
		/*
		 * Check whether last held lock:
		 *
		 * - is irq-safe, if this lock is irq-unsafe
		 * - is softirq-safe, if this lock is hardirq-unsafe
		 *
		 * And check whether the new lock's dependency graph
		 * could lead back to the previous lock.
		 *
		 * any of these scenarios could lead to a deadlock. If
		 * All validations
		 */
		int ret = check_deadlock(curr, hlock, lock, read);

		if (!ret)
			return 0;
		/*
		 * Mark recursive read, as we jump over it when
		 * building dependencies (just like we jump over
		 * trylock entries):
		 */
		if (ret == 2)
			hlock->read = 2;
		/*
		 * Add dependency only if this lock is not the head
		 * of the chain, and if it's not a secondary read-lock:
		 */
		if (!chain_head && ret != 2)
			if (!check_prevs_add(curr, hlock))
				return 0;
		graph_unlock();
	} else
		/* after lookup_chain_cache(): */
		if (unlikely(!debug_locks))
			return 0;

	curr->lockdep_depth++;
	check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
	if (unlikely(!debug_locks))
		return 0;
#endif
	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
		debug_locks_off();
		printk("BUG: MAX_LOCK_DEPTH too low!\n");
		printk("turning off the locking correctness validator.\n");
		return 0;
	}
	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
		max_lockdep_depth = curr->lockdep_depth;

	return 1;
}

static int
print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
			   unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	printk("\n=====================================\n");
	printk(  "[ BUG: bad unlock balance detected! ]\n");
	printk(  "-------------------------------------\n");
	printk("%s/%d is trying to release lock (",
		curr->comm, curr->pid);
	print_lockdep_cache(lock);
	printk(") at:\n");
	print_ip_sym(ip);
	printk("but there are no more locks to release!\n");
	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Common debugging checks for both nested and non-nested unlock:
 */
static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
			unsigned long ip)
{
	if (unlikely(!debug_locks))
		return 0;
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	if (curr->lockdep_depth <= 0)
		return print_unlock_inbalance_bug(curr, lock, ip);

	return 1;
}

/*
 * Remove the lock from the list of currently held locks in a
 * potentially non-nested (out of order) manner. This is a
 * relatively rare operation, as all the unlock APIs default
 * to nested mode (which uses lock_release()):
 */
static int
lock_release_non_nested(struct task_struct *curr,
			struct lockdep_map *lock, unsigned long ip)
{
	struct held_lock *hlock, *prev_hlock;
	unsigned int depth;
	int i;

	/*
	 * Check whether the lock exists in the current stack
	 * of held locks:
	 */
	depth = curr->lockdep_depth;
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (hlock->instance == lock)
			goto found_it;
		prev_hlock = hlock;
	}
	return print_unlock_inbalance_bug(curr, lock, ip);

found_it:
	/*
	 * We have the right lock to unlock, 'hlock' points to it.
	 * Now we remove it from the stack, and add back the other
	 * entries (if any), recalculating the hash along the way:
	 */
	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	for (i++; i < depth; i++) {
		hlock = curr->held_locks + i;
		if (!__lock_acquire(hlock->instance,
			hlock->class->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
				hlock->acquire_ip))
			return 0;
	}

	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
		return 0;
	return 1;
}

/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() (or on a failed
 * mutex_lock_interruptible()). This is done for unlocks that nest
 * perfectly. (i.e. the current top of the lock-stack is unlocked)
 */
static int lock_release_nested(struct task_struct *curr,
			       struct lockdep_map *lock, unsigned long ip)
{
	struct held_lock *hlock;
	unsigned int depth;

	/*
	 * Pop off the top of the lock stack:
	 */
	depth = curr->lockdep_depth - 1;
	hlock = curr->held_locks + depth;

	/*
	 * Is the unlock non-nested:
	 */
	if (hlock->instance != lock)
		return lock_release_non_nested(curr, lock, ip);
	curr->lockdep_depth--;

	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
		return 0;

	curr->curr_chain_key = hlock->prev_chain_key;

#ifdef CONFIG_DEBUG_LOCKDEP
	hlock->prev_chain_key = 0;
	hlock->class = NULL;
	hlock->acquire_ip = 0;
	hlock->irq_context = 0;
#endif
	return 1;
}

/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() (or on a failed
 * mutex_lock_interruptible()). This is done for unlocks that nest
 * perfectly. (i.e. the current top of the lock-stack is unlocked)
 */
static void
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
	struct task_struct *curr = current;

	if (!check_unlock(curr, lock, ip))
		return;

	if (nested) {
		if (!lock_release_nested(curr, lock, ip))
			return;
	} else {
		if (!lock_release_non_nested(curr, lock, ip))
			return;
	}

	check_chain_key(curr);
}

/*
 * Check whether we follow the irq-flags state precisely:
 */
static void check_flags(unsigned long flags)
{
#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
	if (!debug_locks)
		return;

	if (irqs_disabled_flags(flags))
		DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled);
	else
		DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled);

	/*
	 * We don't accurately track softirq state in e.g.
	 * hardirq contexts (such as on 4KSTACKS), so only
	 * check if not in hardirq contexts:
	 */
	if (!hardirq_count()) {
		if (softirq_count())
			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
		else
			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
	}

	if (!debug_locks)
		print_irqtrace_events(current);
#endif
}

/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
		  int trylock, int read, int check, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}

EXPORT_SYMBOL_GPL(lock_acquire);

void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	__lock_release(lock, nested, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}

EXPORT_SYMBOL_GPL(lock_release);

/*
 * Used by the testsuite, sanitize the validator state
 * after a simulated failure:
 */
void lockdep_reset(void)
{
	unsigned long flags;
	int i;

	raw_local_irq_save(flags);
	current->curr_chain_key = 0;
	current->lockdep_depth = 0;
	current->lockdep_recursion = 0;
	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
	nr_hardirq_chains = 0;
	nr_softirq_chains = 0;
	nr_process_chains = 0;
	debug_locks = 1;
	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_LIST_HEAD(chainhash_table + i);
	raw_local_irq_restore(flags);
}

static void zap_class(struct lock_class *class)
{
	int i;

	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
	for (i = 0; i < nr_list_entries; i++) {
		if (list_entries[i].class == class)
			list_del_rcu(&list_entries[i].entry);
	}
	/*
	 * Unhash the class and remove it from the all_lock_classes list:
	 */
	list_del_rcu(&class->hash_entry);
	list_del_rcu(&class->lock_entry);
}

static inline int within(void *addr, void *start, unsigned long size)
{
	return addr >= start && addr < start + size;
}

void lockdep_free_key_range(void *start, unsigned long size)
{
	struct lock_class *class, *next;
	struct list_head *head;
	unsigned long flags;
	int i;

	raw_local_irq_save(flags);
	graph_lock();

	/*
	 * Unhash all classes that were created by this module:
	 */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		if (list_empty(head))
			continue;
		list_for_each_entry_safe(class, next, head, hash_entry)
			if (within(class->key, start, size))
				zap_class(class);
	}

	graph_unlock();
	raw_local_irq_restore(flags);
}

void lockdep_reset_lock(struct lockdep_map *lock)
{
	struct lock_class *class, *next;
	struct list_head *head;
	unsigned long flags;
	int i, j;

	raw_local_irq_save(flags);

	/*
	 * Remove all classes this lock might have:
	 */
	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
		class = look_up_lock_class(lock, j);
		if (class)
			zap_class(class);
	}
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
	graph_lock();
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		if (list_empty(head))
			continue;
		list_for_each_entry_safe(class, next, head, hash_entry) {
			if (unlikely(class == lock->class_cache)) {
				if (debug_locks_off_graph_unlock())
					WARN_ON(1);
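The listing breaks off here, midway through lockdep_reset_lock(); the remainder of the file is on the following pages. For orientation, the sketch below shows how a locking primitive would typically drive the two entry points exported above, lock_acquire() and lock_release(). It is only an illustration under stated assumptions: struct my_lock, my_lock_lock()/my_lock_unlock() and the commented-out my_arch_*() helpers are hypothetical; only the two hook signatures and the meaning of their arguments (subclass, trylock, read, check, nested, ip) are taken from the code above.

/*
 * Illustrative sketch only -- not part of lockdep.c. The my_* names are
 * made up; the calling convention mirrors the hooks in the listing above.
 */
#include <linux/lockdep.h>

struct my_lock {
	/* the real (architecture-level) lock word would live here */
	struct lockdep_map dep_map;	/* assumed to be set up elsewhere,
					 * e.g. via lockdep_init_map() */
};

static void my_lock_lock(struct my_lock *l)
{
	/*
	 * Tell lockdep first, then take the real lock:
	 * subclass = 0, trylock = 0, read = 0, check = 2 (full validation;
	 * check != 2 skips the irq-state marking in __lock_acquire()),
	 * ip = caller address used in reports.
	 */
	lock_acquire(&l->dep_map, 0, 0, 0, 2,
		     (unsigned long)__builtin_return_address(0));
	/* my_arch_lock(l);  -- hypothetical low-level acquire */
}

static void my_lock_unlock(struct my_lock *l)
{
	/*
	 * nested = 1: we expect to release the most recently taken lock,
	 * so lock_release_nested() can simply pop the top of the held-lock
	 * stack; nested = 0 would go through lock_release_non_nested().
	 */
	lock_release(&l->dep_map, 1,
		     (unsigned long)__builtin_return_address(0));
	/* my_arch_unlock(l);  -- hypothetical low-level release */
}

Passing read = 1 (or 2 for recursive readers) exercises the *_READ states marked at the top of the listing, and a trylock wrapper typically reports the acquisition only after it has succeeded, with trylock = 1, which is why the code above skips dependency building for trylock entries.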
