
📄 lockdep.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
				break;
		}
		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN_ON(1);

	return 0;
}

unsigned long nr_lock_chains;
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
	return lock_classes + chain_hlocks[chain->base + i];
}

/*
 * Look up a dependency chain. If the key is not present yet then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 * (On return with 1 graph_lock is held.)
 */
static inline int lookup_chain_cache(struct task_struct *curr,
				     struct held_lock *hlock,
				     u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
	struct list_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;
	struct held_lock *hlock_curr, *hlock_next;
	int i, j, n, cn;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;
	/*
	 * We can walk it lock-free, because entries only get added
	 * to the hash:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
cache_hit:
			debug_atomic_inc(&chain_lookup_hits);
			if (very_verbose(class))
				printk("\nhash chain already cached, key: "
					"%016Lx tail class: [%p] %s\n",
					(unsigned long long)chain_key,
					class->key, class->name);
			return 0;
		}
	}
	if (very_verbose(class))
		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
			(unsigned long long)chain_key, class->key, class->name);
	/*
	 * Allocate a new chain entry from the static array, and add
	 * it to the hash:
	 */
	if (!graph_lock())
		return 0;
	/*
	 * We have to walk the chain again locked - to avoid duplicates:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
			graph_unlock();
			goto cache_hit;
		}
	}
	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
		if (!debug_locks_off_graph_unlock())
			return 0;
		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
		printk("turning off the locking correctness validator.\n");
		return 0;
	}
	chain = lock_chains + nr_lock_chains++;
	chain->chain_key = chain_key;
	chain->irq_context = hlock->irq_context;
	/* Find the first held_lock of current chain */
	hlock_next = hlock;
	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
		hlock_curr = curr->held_locks + i;
		if (hlock_curr->irq_context != hlock_next->irq_context)
			break;
		hlock_next = hlock;
	}
	i++;
	chain->depth = curr->lockdep_depth + 1 - i;
	cn = nr_chain_hlocks;
	while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
		n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
		if (n == cn)
			break;
		cn = n;
	}
	if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
		chain->base = cn;
		for (j = 0; j < chain->depth - 1; j++, i++) {
			int lock_id = curr->held_locks[i].class_idx - 1;
			chain_hlocks[chain->base + j] = lock_id;
		}
		chain_hlocks[chain->base + j] = class - lock_classes;
	}
	list_add_tail_rcu(&chain->entry, hash_head);
	debug_atomic_inc(&chain_lookup_misses);
	inc_chains();

	return 1;
}

static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
		struct held_lock *hlock, int chain_head, u64 chain_key)
{
	/*
	 * Trylock needs to maintain the stack of held locks, but it
	 * does not add new dependencies, because trylock can be done
	 * in any order.
	 *
	 * We look up the chain_key and do the O(N^2) check and update of
	 * the dependencies only if this is a new dependency chain.
	 * (If lookup_chain_cache() returns with 1 it acquires
	 * graph_lock for us)
	 */
	if (!hlock->trylock && (hlock->check == 2) &&
	    lookup_chain_cache(curr, hlock, chain_key)) {
		/*
		 * Check whether last held lock:
		 *
		 * - is irq-safe, if this lock is irq-unsafe
		 * - is softirq-safe, if this lock is hardirq-unsafe
		 *
		 * And check whether the new lock's dependency graph
		 * could lead back to the previous lock.
		 *
		 * any of these scenarios could lead to a deadlock. If
		 * All validations
		 */
		int ret = check_deadlock(curr, hlock, lock, hlock->read);

		if (!ret)
			return 0;
		/*
		 * Mark recursive read, as we jump over it when
		 * building dependencies (just like we jump over
		 * trylock entries):
		 */
		if (ret == 2)
			hlock->read = 2;
		/*
		 * Add dependency only if this lock is not the head
		 * of the chain, and if it's not a secondary read-lock:
		 */
		if (!chain_head && ret != 2)
			if (!check_prevs_add(curr, hlock))
				return 0;
		graph_unlock();
	} else
		/* after lookup_chain_cache(): */
		if (unlikely(!debug_locks))
			return 0;

	return 1;
}
#else
static inline int validate_chain(struct task_struct *curr,
	       	struct lockdep_map *lock, struct held_lock *hlock,
		int chain_head, u64 chain_key)
{
	return 1;
}
#endif

/*
 * We are building curr_chain_key incrementally, so double-check
 * it from scratch, to make sure that it's done correctly:
 */
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	struct held_lock *hlock, *prev_hlock = NULL;
	unsigned int i, id;
	u64 chain_key = 0;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;
		if (chain_key != hlock->prev_chain_key) {
			debug_locks_off();
			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
				curr->lockdep_depth, i,
				(unsigned long long)chain_key,
				(unsigned long long)hlock->prev_chain_key);
			return;
		}
		id = hlock->class_idx - 1;
		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
			return;

		if (prev_hlock && (prev_hlock->irq_context !=
							hlock->irq_context))
			chain_key = 0;
		chain_key = iterate_chain_key(chain_key, id);
		prev_hlock = hlock;
	}
	if (chain_key != curr->curr_chain_key) {
		debug_locks_off();
		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
			curr->lockdep_depth, i,
			(unsigned long long)chain_key,
			(unsigned long long)curr->curr_chain_key);
	}
#endif
}

static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n=================================\n");
	printk(  "[ INFO: inconsistent lock state ]\n");
	print_kernel_version();
	printk(  "---------------------------------\n");
	printk("inconsistent {%s} -> {%s} usage.\n",
		usage_str[prev_bit], usage_str[new_bit]);
	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
		curr->comm, task_pid_nr(curr),
		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
		trace_hardirqs_enabled(curr),
		trace_softirqs_enabled(curr));
	print_lock(this);
	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);

	print_irqtrace_events(curr);
	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Print out an error if an invalid bit is set:
 */
static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
		return print_usage_bug(curr, this, bad_bit, new_bit);
	return 1;
}

static int mark_lock(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit new_bit);

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)

/*
 * print irq inversion bug:
 */
static int
print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
			struct held_lock *this, int forwards,
			const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n=========================================================\n");
	printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
	print_kernel_version();
	printk(  "---------------------------------------------------------\n");
	printk("%s/%d just changed the state of lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(this);
	if (forwards)
		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
	else
		printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
	print_lock_name(other);
	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");

	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nthe first lock's dependencies:\n");
	print_lock_dependencies(hlock_class(this), 0);

	printk("\nthe second lock's dependencies:\n");
	print_lock_dependencies(other, 0);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Prove that in the forwards-direction subgraph starting at <this>
 * there is no lock matching <mask>:
 */
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit bit, const char *irqclass)
{
	int ret;

	find_usage_bit = bit;
	/* fills in <forwards_match> */
	ret = find_usage_forwards(hlock_class(this), 0);
	if (!ret || ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
}

/*
 * Prove that in the backwards-direction subgraph starting at <this>
 * there is no lock matching <mask>:
 */
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
		      enum lock_usage_bit bit, const char *irqclass)
{
	int ret;

	find_usage_bit = bit;
	/* fills in <backwards_match> */
	ret = find_usage_backwards(hlock_class(this), 0);
	if (!ret || ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
}

void print_irqtrace_events(struct task_struct *curr)
{
	printk("irq event stamp: %u\n", curr->irq_events);
	printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
	print_ip_sym(curr->hardirq_enable_ip);
	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
	print_ip_sym(curr->hardirq_disable_ip);
	printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
	print_ip_sym(curr->softirq_enable_ip);
	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
	print_ip_sym(curr->softirq_disable_ip);
}

static int hardirq_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int softirq_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#define STRICT_READ_CHECKS	1

static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	int ret = 1;

	switch(new_bit) {
	case LOCK_USED_IN_HARDIRQ:
		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
			return 0;
		if (!valid_state(curr, this, new_bit,
				 LOCK_ENABLED_HARDIRQS_READ))
			return 0;
		/*
		 * just marked it hardirq-safe, check that this lock
		 * took no hardirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_HARDIRQS, "hard"))
			return 0;
#if STRICT_READ_CHECKS
		/*
		 * just marked it hardirq-safe, check that this lock
		 * took no hardirq-unsafe-read lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
				LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
			return 0;
#endif
		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_USED_IN_SOFTIRQ:
		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
			return 0;
		if (!valid_state(curr, this, new_bit,
				 LOCK_ENABLED_SOFTIRQS_READ))
			return 0;
		/*
		 * just marked it softirq-safe, check that this lock
		 * took no softirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_SOFTIRQS, "soft"))
			return 0;
#if STRICT_READ_CHECKS
		/*
		 * just marked it softirq-safe, check that this lock
		 * took no softirq-unsafe-read lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
				LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
			return 0;
#endif
		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_USED_IN_HARDIRQ_READ:
		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
			return 0;
		/*
		 * just marked it hardirq-read-safe, check that this lock
		 * took no hardirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_HARDIRQS, "hard"))
			return 0;
		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_USED_IN_SOFTIRQ_READ:
		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
			return 0;
		/*
		 * just marked it softirq-read-safe, check that this lock
		 * took no softirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_SOFTIRQS, "soft"))
			return 0;
		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_HARDIRQS:
		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
			return 0;
		if (!valid_state(curr, this, new_bit,
				 LOCK_USED_IN_HARDIRQ_READ))
			return 0;
		/*
		 * just marked it hardirq-unsafe, check that no hardirq-safe
		 * lock in the system ever took it in the past:
		 */
		if (!check_usage_backwards(curr, this,
					   LOCK_USED_IN_HARDIRQ, "hard"))
			return 0;
#if STRICT_READ_CHECKS
		/*
		 * just marked it hardirq-unsafe, check that no
		 * hardirq-safe-read lock in the system ever took
		 * it in the past:
		 */
		if (!check_usage_backwards(curr, this,
				   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
			return 0;
#endif
		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_SOFTIRQS:
		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
			return 0;
		if (!valid_state(curr, this, new_bit,
				 LOCK_USED_IN_SOFTIRQ_READ))
			return 0;
		/*
		 * just marked it softirq-unsafe, check that no softirq-safe
		 * lock in the system ever took it in the past:
		 */
		if (!check_usage_backwards(curr, this,
					   LOCK_USED_IN_SOFTIRQ, "soft"))
			return 0;
#if STRICT_READ_CHECKS
		/*
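A side note on this excerpt (page 1 ends part-way through mark_lock_irq(); the function continues on the following pages): the nr_chain_hlocks handling in lookup_chain_cache() above is a small lock-free reservation loop built on cmpxchg(). The sketch below reproduces that pattern as self-contained, user-space C11; the names reserve_hlock_slots, MAX_SLOTS and slots[] are made up for illustration and are not part of lockdep.c.

/*
 * Illustrative sketch only - not lockdep.c code. It mimics the way
 * lookup_chain_cache() bumps nr_chain_hlocks: try to advance a shared
 * cursor by `depth` slots with compare-and-swap, and only use the
 * reserved range if it still fits in the array.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_SLOTS 64			/* stands in for MAX_LOCKDEP_CHAIN_HLOCKS */

static _Atomic int next_slot;		/* stands in for nr_chain_hlocks */
static unsigned short slots[MAX_SLOTS];	/* stands in for chain_hlocks[] */

/* Reserve `depth` consecutive slots; return the base index, or -1 if full. */
static int reserve_hlock_slots(int depth)
{
	int cur = atomic_load(&next_slot);

	while (cur + depth <= MAX_SLOTS) {
		/* On success the range [cur, cur + depth) belongs to us. */
		if (atomic_compare_exchange_weak(&next_slot, &cur, cur + depth))
			return cur;
		/* On failure `cur` was reloaded with the current value; retry. */
	}
	return -1;			/* array exhausted, like the !likely() case above */
}

int main(void)
{
	int base = reserve_hlock_slots(3);

	if (base >= 0) {
		for (int j = 0; j < 3; j++)
			slots[base + j] = (unsigned short)(100 + j);
		printf("reserved slots %d..%d\n", base, base + 2);
	}
	return 0;
}

As in the kernel loop, the reservation itself needs no lock: whichever thread wins the compare-and-swap owns its slice of the array, and losers simply retry with the updated cursor until the array runs out.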
