
📄 lockdep.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
	unsigned long ret, flags;

	local_irq_save(flags);
	__raw_spin_lock(&lockdep_lock);
	ret = __lockdep_count_backward_deps(class, 0);
	__raw_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

/*
 * Prove that the dependency graph starting at <entry> can not
 * lead to <target>. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_class *source, unsigned int depth)
{
	struct lock_list *entry;

	if (lockdep_dependency_visit(source, depth))
		return 1;

	debug_atomic_inc(&nr_cyclic_check_recursions);
	if (depth > max_recursion_depth)
		max_recursion_depth = depth;
	if (depth >= RECURSION_LIMIT)
		return print_infinite_recursion_bug();
	/*
	 * Check this lock's dependency list:
	 */
	list_for_each_entry(entry, &source->locks_after, entry) {
		if (entry->class == hlock_class(check_target))
			return print_circular_bug_header(entry, depth+1);
		debug_atomic_inc(&nr_cyclic_checks);
		if (!check_noncircular(entry->class, depth+1))
			return print_circular_bug_entry(entry, depth+1);
	}
	return 1;
}

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */
static enum lock_usage_bit find_usage_bit;
static struct lock_class *forwards_match, *backwards_match;

/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at <source> that matches <find_usage_bit>.
 *
 * Return 2 if such a node exists in the subgraph, and put that node
 * into <forwards_match>.
 *
 * Return 1 otherwise and keep <forwards_match> unchanged.
 * Return 0 on error.
 */
static noinline int
find_usage_forwards(struct lock_class *source, unsigned int depth)
{
	struct lock_list *entry;
	int ret;

	if (lockdep_dependency_visit(source, depth))
		return 1;

	if (depth > max_recursion_depth)
		max_recursion_depth = depth;
	if (depth >= RECURSION_LIMIT)
		return print_infinite_recursion_bug();

	debug_atomic_inc(&nr_find_usage_forwards_checks);
	if (source->usage_mask & (1 << find_usage_bit)) {
		forwards_match = source;
		return 2;
	}

	/*
	 * Check this lock's dependency list:
	 */
	list_for_each_entry(entry, &source->locks_after, entry) {
		debug_atomic_inc(&nr_find_usage_forwards_recursions);
		ret = find_usage_forwards(entry->class, depth+1);
		if (ret == 2 || ret == 0)
			return ret;
	}
	return 1;
}
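
/*
 * Illustrative sketch, not part of lockdep.c: a minimal, self-contained
 * userspace model of the depth-limited DFS used by check_noncircular()
 * and find_usage_forwards() above, with the same return convention
 * (2 = match found and recorded, 1 = subgraph searched with no match,
 * 0 = error / recursion limit hit).  The types and names (struct
 * toy_class, TOY_RECURSION_LIMIT, ...) are hypothetical; the visited-class
 * bookkeeping (lockdep_dependency_visit) and the global match variables
 * are replaced by an explicit parameter for brevity.
 */
#include <stdio.h>

#define TOY_RECURSION_LIMIT	16
#define TOY_MAX_DEPS		4

struct toy_class {
	const char	*name;
	unsigned long	usage_mask;			/* bitmask of usage bits */
	struct toy_class *after[TOY_MAX_DEPS];		/* forward dependencies */
	int		nr_after;
};

/* depth-limited forward search for a class whose usage_mask has @bit set */
static int toy_find_usage_forwards(struct toy_class *source, int bit,
				   unsigned int depth,
				   struct toy_class **match)
{
	int i, ret;

	if (depth >= TOY_RECURSION_LIMIT)
		return 0;			/* error, like print_infinite_recursion_bug() */

	if (source->usage_mask & (1UL << bit)) {
		*match = source;
		return 2;			/* found a matching class */
	}

	for (i = 0; i < source->nr_after; i++) {
		ret = toy_find_usage_forwards(source->after[i], bit,
					      depth + 1, match);
		if (ret == 2 || ret == 0)
			return ret;		/* propagate match or error */
	}
	return 1;				/* nothing matched in this subgraph */
}

int main(void)
{
	struct toy_class c = { .name = "C", .usage_mask = 1UL << 3 };
	struct toy_class b = { .name = "B", .after = { &c }, .nr_after = 1 };
	struct toy_class a = { .name = "A", .after = { &b }, .nr_after = 1 };
	struct toy_class *match = NULL;

	if (toy_find_usage_forwards(&a, 3, 0, &match) == 2)
		printf("bit 3 reachable via %s\n", match->name);
	return 0;
}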
/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at <source> that matches <find_usage_bit>.
 *
 * Return 2 if such a node exists in the subgraph, and put that node
 * into <backwards_match>.
 *
 * Return 1 otherwise and keep <backwards_match> unchanged.
 * Return 0 on error.
 */
static noinline int
find_usage_backwards(struct lock_class *source, unsigned int depth)
{
	struct lock_list *entry;
	int ret;

	if (lockdep_dependency_visit(source, depth))
		return 1;

	if (!__raw_spin_is_locked(&lockdep_lock))
		return DEBUG_LOCKS_WARN_ON(1);

	if (depth > max_recursion_depth)
		max_recursion_depth = depth;
	if (depth >= RECURSION_LIMIT)
		return print_infinite_recursion_bug();

	debug_atomic_inc(&nr_find_usage_backwards_checks);
	if (source->usage_mask & (1 << find_usage_bit)) {
		backwards_match = source;
		return 2;
	}

	if (!source && debug_locks_off_graph_unlock()) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Check this lock's dependency list:
	 */
	list_for_each_entry(entry, &source->locks_before, entry) {
		debug_atomic_inc(&nr_find_usage_backwards_recursions);
		ret = find_usage_backwards(entry->class, depth+1);
		if (ret == 2 || ret == 0)
			return ret;
	}
	return 1;
}
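
/*
 * Illustrative sketch, not part of lockdep.c: why find_usage_backwards()
 * can simply walk ->locks_before.  Every dependency is recorded twice --
 * once on the forward list of the earlier class and once on the backward
 * list of the later class (check_prev_add() further below calls
 * add_lock_to_list() twice for exactly this reason) -- so both search
 * directions are plain list walks.  The toy types and fixed-size arrays
 * are hypothetical stand-ins for the kernel's lock_list entries.
 */
#include <stdio.h>

#define TOY_MAX_DEPS	8

struct toy_class {
	const char	*name;
	struct toy_class *locks_after[TOY_MAX_DEPS];	/* classes taken after this one */
	int		nr_after;
	struct toy_class *locks_before[TOY_MAX_DEPS];	/* classes taken before this one */
	int		nr_before;
};

/* record prev -> next on both adjacency lists */
static int toy_add_dependency(struct toy_class *prev, struct toy_class *next)
{
	if (prev->nr_after >= TOY_MAX_DEPS || next->nr_before >= TOY_MAX_DEPS)
		return 0;				/* out of space: error */
	prev->locks_after[prev->nr_after++] = next;
	next->locks_before[next->nr_before++] = prev;
	return 1;
}

int main(void)
{
	struct toy_class a = { .name = "A" }, b = { .name = "B" };

	toy_add_dependency(&a, &b);
	printf("forward:  %s -> %s\n", a.name, a.locks_after[0]->name);
	printf("backward: %s <- %s\n", b.name, b.locks_before[0]->name);
	return 0;
}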
static int
print_bad_irq_dependency(struct task_struct *curr,
			 struct held_lock *prev,
			 struct held_lock *next,
			 enum lock_usage_bit bit1,
			 enum lock_usage_bit bit2,
			 const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n======================================================\n");
	printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
		irqclass, irqclass);
	print_kernel_version();
	printk(  "------------------------------------------------------\n");
	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
		curr->comm, task_pid_nr(curr),
		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
		curr->hardirqs_enabled,
		curr->softirqs_enabled);
	print_lock(next);
	printk("\nand this task is already holding:\n");
	print_lock(prev);
	printk("which would create a new lock dependency:\n");
	print_lock_name(hlock_class(prev));
	printk(" ->");
	print_lock_name(hlock_class(next));
	printk("\n");

	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
		irqclass);
	print_lock_name(backwards_match);
	printk("\n... which became %s-irq-safe at:\n", irqclass);

	print_stack_trace(backwards_match->usage_traces + bit1, 1);

	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
	print_lock_name(forwards_match);
	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
	printk("...");

	print_stack_trace(forwards_match->usage_traces + bit2, 1);

	printk("\nother info that might help us debug this:\n\n");
	lockdep_print_held_locks(curr);

	printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
	print_lock_dependencies(backwards_match, 0);

	printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
	print_lock_dependencies(forwards_match, 0);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int
check_usage(struct task_struct *curr, struct held_lock *prev,
	    struct held_lock *next, enum lock_usage_bit bit_backwards,
	    enum lock_usage_bit bit_forwards, const char *irqclass)
{
	int ret;

	find_usage_bit = bit_backwards;
	/* fills in <backwards_match> */
	ret = find_usage_backwards(hlock_class(prev), 0);
	if (!ret || ret == 1)
		return ret;

	find_usage_bit = bit_forwards;
	ret = find_usage_forwards(hlock_class(next), 0);
	if (!ret || ret == 1)
		return ret;
	/* ret == 2 */
	return print_bad_irq_dependency(curr, prev, next,
			bit_backwards, bit_forwards, irqclass);
}

static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
	/*
	 * Prove that the new dependency does not connect a hardirq-safe
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
					LOCK_ENABLED_HARDIRQS, "hard"))
		return 0;

	/*
	 * Prove that the new dependency does not connect a hardirq-safe-read
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
					LOCK_ENABLED_HARDIRQS, "hard-read"))
		return 0;

	/*
	 * Prove that the new dependency does not connect a softirq-safe
	 * lock with a softirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
					LOCK_ENABLED_SOFTIRQS, "soft"))
		return 0;
	/*
	 * Prove that the new dependency does not connect a softirq-safe-read
	 * lock with a softirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
					LOCK_ENABLED_SOFTIRQS, "soft"))
		return 0;

	return 1;
}

static void inc_chains(void)
{
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
}

#else

static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
	return 1;
}

static inline void inc_chains(void)
{
	nr_process_chains++;
}

#endif
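
/*
 * Illustrative sketch, not part of lockdep.c: the inversion that
 * check_prev_add_irq() forbids, modelled in userspace.  Lock A is
 * irq-safe (it has been taken from hardirq context), lock B is
 * irq-unsafe (it has been taken with hardirqs enabled); adding a
 * dependency A -> B would let a hardirq that spins on A interrupt a
 * holder of B while A's holder waits on B.  The real check walks the
 * full backward subgraph of <prev> and forward subgraph of <next>;
 * this model compresses both searches into direct bit tests on the
 * two endpoint classes.  Names and bit values are hypothetical.
 */
#include <stdio.h>

#define TOY_USED_IN_HARDIRQ	(1UL << 0)	/* "hardirq-safe" */
#define TOY_ENABLED_HARDIRQS	(1UL << 1)	/* "hardirq-unsafe" */

struct toy_class {
	const char	*name;
	unsigned long	usage_mask;
};

/*
 * The equivalent of one check_usage() call: is there an irq-safe class
 * behind the new dependency and an irq-unsafe class ahead of it?
 * Returns 0 on a bad dependency (report printed), 1 if it is fine.
 */
static int toy_check_usage(const struct toy_class *prev,
			   const struct toy_class *next)
{
	if ((prev->usage_mask & TOY_USED_IN_HARDIRQ) &&
	    (next->usage_mask & TOY_ENABLED_HARDIRQS)) {
		printf("bad dependency: %s (hardirq-safe) -> %s (hardirq-unsafe)\n",
		       prev->name, next->name);
		return 0;
	}
	return 1;
}

int main(void)
{
	struct toy_class a = { "A", TOY_USED_IN_HARDIRQ };
	struct toy_class b = { "B", TOY_ENABLED_HARDIRQS };

	return toy_check_usage(&a, &b) ? 0 : 1;
}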
static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
		   struct held_lock *next)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n=============================================\n");
	printk(  "[ INFO: possible recursive locking detected ]\n");
	print_kernel_version();
	printk(  "---------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(next);
	printk("\nbut task is already holding lock:\n");
	print_lock(prev);
	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	struct held_lock *nest = NULL;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
			return 2;

		/*
		 * We're holding the nest_lock, which serializes this lock's
		 * nesting behaviour.
		 */
		if (nest)
			return 2;

		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
}
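
/*
 * Illustrative sketch, not part of lockdep.c: the same-class scan that
 * check_deadlock() above performs over the task's held-lock stack,
 * reduced to arrays and ints.  Class identity is modelled by an integer
 * id, read state uses the same 0/1/2 convention (2 = recursive read),
 * and the nest_lock handling is omitted; all names are hypothetical.
 */
#include <stdio.h>

struct toy_held_lock {
	int	class_id;	/* which lock class this entry belongs to */
	int	read;		/* 0 = write, 1 = read, 2 = recursive read */
};

/* 0 = deadlock, 1 = OK, 2 = allowed recursion (read-after-read) */
static int toy_check_deadlock(const struct toy_held_lock *held, int depth,
			      int next_class, int next_read)
{
	int i;

	for (i = 0; i < depth; i++) {
		if (held[i].class_id != next_class)
			continue;
		/* read_lock(lock) + read_lock(lock) is allowed */
		if (next_read == 2 && held[i].read)
			return 2;
		return 0;		/* same class held again: AA deadlock */
	}
	return 1;
}

int main(void)
{
	struct toy_held_lock held[] = { { .class_id = 1, .read = 0 } };

	printf("re-acquire class 1 for write: %d\n",
	       toy_check_deadlock(held, 1, 1, 0));	/* 0: deadlock */
	printf("acquire class 2:              %d\n",
	       toy_check_deadlock(held, 1, 2, 0));	/* 1: OK */
	return 0;
}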
/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We recursively validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next, int distance)
{
	struct lock_list *entry;
	int ret;

	/*
	 * Prove that the new <prev> -> <next> dependency would not
	 * create a circular dependency in the graph. (We do this by
	 * forward-recursing into the graph starting at <next>, and
	 * checking whether we can reach <prev>.)
	 *
	 * We are using global variables to control the recursion, to
	 * keep the stackframe size of the recursive functions low:
	 */
	check_source = next;
	check_target = prev;

	if (!(check_noncircular(hlock_class(next), 0)))
		return print_circular_bug_tail();

	if (!check_prev_add_irq(curr, prev, next))
		return 0;

	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we dont store read-triggered dependencies (only
	 * write-triggered dependencies). This ensures that only the
	 * write-side dependencies matter, and that if for example a
	 * write-lock never takes any other locks, then the reads are
	 * equivalent to a NOP.
	 */
	if (next->read == 2 || prev->read == 2)
		return 1;
	/*
	 * Is the <prev> -> <next> dependency already present?
	 *
	 * (this may occur even though this is a new chain: consider
	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
	 *  chains - the second one will be new, but L1 already has
	 *  L2 added to its dependency list, due to the first chain.)
	 */
	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
		if (entry->class == hlock_class(next)) {
			if (distance == 1)
				entry->distance = 1;
			return 2;
		}
	}

	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
			       &hlock_class(prev)->locks_after,
			       next->acquire_ip, distance);

	if (!ret)
		return 0;

	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
			       &hlock_class(next)->locks_before,
			       next->acquire_ip, distance);
	if (!ret)
		return 0;

	/*
	 * Debugging printouts:
	 */
	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
		graph_unlock();
		printk("\n new dependency: ");
		print_lock_name(hlock_class(prev));
		printk(" => ");
		print_lock_name(hlock_class(next));
		printk("\n");
		dump_stack();
		return graph_lock();
	}
	return 1;
}

/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	int depth = curr->lockdep_depth;
	struct held_lock *hlock;

	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth-1;
		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2) {
			if (!check_prev_add(curr, hlock, next, distance))
				return 0;
			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
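
/*
 * Illustrative sketch, not part of lockdep.c: the "is this dependency
 * already present?" scan that check_prev_add() above performs before
 * adding a new edge, using the same return convention (2 = already
 * present, 1 = newly added, 0 = error).  A fixed-size array stands in
 * for the kernel's locks_after list; the types are hypothetical.
 */
#include <stdio.h>

#define TOY_MAX_DEPS	8

struct toy_dep {
	int	target_class;	/* the <next> class of this edge */
	int	distance;	/* how far apart the two locks were held */
};

struct toy_class {
	struct toy_dep	locks_after[TOY_MAX_DEPS];
	int		nr_after;
};

static int toy_add_dep(struct toy_class *prev, int next_class, int distance)
{
	int i;

	/* already present? keep one edge, but record a direct (distance 1) hold */
	for (i = 0; i < prev->nr_after; i++) {
		if (prev->locks_after[i].target_class == next_class) {
			if (distance == 1)
				prev->locks_after[i].distance = 1;
			return 2;
		}
	}

	if (prev->nr_after >= TOY_MAX_DEPS)
		return 0;			/* table full: error */

	prev->locks_after[prev->nr_after].target_class = next_class;
	prev->locks_after[prev->nr_after].distance = distance;
	prev->nr_after++;
	return 1;
}

int main(void)
{
	struct toy_class prev = { .nr_after = 0 };

	printf("first add:  %d\n", toy_add_dep(&prev, 42, 2));		/* 1: new edge */
	printf("second add: %d\n", toy_add_dep(&prev, 42, 1));		/* 2: duplicate */
	printf("distance:   %d\n", prev.locks_after[0].distance);	/* tightened to 1 */
	return 0;
}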
