lockdep.c

linux 2.6.19 kernel source code before patching
C
Page 1 of 5
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth-1;
		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2) {
			if (!check_prev_add(curr, hlock, next, distance))
				return 0;
			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}
		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN_ON(1);

	return 0;
}

/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;
#ifdef CONFIG_SMP
	int i;
#endif

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

#ifdef CONFIG_SMP
	/*
	 * percpu var?
	 */
	for_each_possible_cpu(i) {
		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
		end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
					+ per_cpu_offset(i);
		if ((addr >= start) && (addr < end))
			return 1;
	}
#endif

	/*
	 * module var?
	 */
	return is_module_address(addr);
}

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * If the architecture calls into lockdep before initializing
	 * the hashes then we'll warn about it later. (we cannot printk
	 * right now)
	 */
	if (unlikely(!lockdep_initialized)) {
		lockdep_init();
		lockdep_init_error = 1;
	}
#endif

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself:
	 */
	if (unlikely(!lock->key))
		lock->key = (void *)lock;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We can walk the hash lockfree, because the hash only
	 * grows, and we are careful when adding entries to the end:
	 */
	list_for_each_entry(class, hash_head, hash_entry)
		if (class->key == key)
			return class;

	return NULL;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;
	unsigned long flags;

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		return class;

	/*
	 * Debug-check: all keys must be persistent!
	 */
	if (!static_obj(lock->key)) {
		debug_locks_off();
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	raw_local_irq_save(flags);
	if (!graph_lock()) {
		raw_local_irq_restore(flags);
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	list_for_each_entry(class, hash_head, hash_entry)
		if (class->key == key)
			goto out_unlock_set;
	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
		raw_local_irq_restore(flags);

		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
		printk("turning off the locking correctness validator.\n");
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(&nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	list_add_tail_rcu(&class->hash_entry, hash_head);

	if (verbose(class)) {
		graph_unlock();
		raw_local_irq_restore(flags);

		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();

		raw_local_irq_save(flags);
		if (!graph_lock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();
	raw_local_irq_restore(flags);

	if (!subclass || force)
		lock->class_cache = class;

	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

/*
 * Look up a dependency chain. If the key is not present yet then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 * (On return with 1 graph_lock is held.)
 */
static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
{
	struct list_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;
	/*
	 * We can walk it lock-free, because entries only get added
	 * to the hash:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
cache_hit:
			debug_atomic_inc(&chain_lookup_hits);
			if (very_verbose(class))
				printk("\nhash chain already cached, key: "
					"%016Lx tail class: [%p] %s\n",
					(unsigned long long)chain_key,
					class->key, class->name);
			return 0;
		}
	}
	if (very_verbose(class))
		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
			(unsigned long long)chain_key, class->key, class->name);
	/*
	 * Allocate a new chain entry from the static array, and add
	 * it to the hash:
	 */
	if (!graph_lock())
		return 0;
	/*
	 * We have to walk the chain again locked - to avoid duplicates:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
			graph_unlock();
			goto cache_hit;
		}
	}
	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
		if (!debug_locks_off_graph_unlock())
			return 0;
		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
		printk("turning off the locking correctness validator.\n");
		return 0;
	}
	chain = lock_chains + nr_lock_chains++;
	chain->chain_key = chain_key;
	list_add_tail_rcu(&chain->entry, hash_head);
	debug_atomic_inc(&chain_lookup_misses);
#ifdef CONFIG_TRACE_IRQFLAGS
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
#else
	nr_process_chains++;
#endif

	return 1;
}

/*
 * We are building curr_chain_key incrementally, so double-check
 * it from scratch, to make sure that it's done correctly:
 */
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	struct held_lock *hlock, *prev_hlock = NULL;
	unsigned int i, id;
	u64 chain_key = 0;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;
		if (chain_key != hlock->prev_chain_key) {
			debug_locks_off();
			printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
				curr->lockdep_depth, i,
				(unsigned long long)chain_key,
				(unsigned long long)hlock->prev_chain_key);
			WARN_ON(1);
			return;
		}
		id = hlock->class - lock_classes;
		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
			return;

		if (prev_hlock && (prev_hlock->irq_context !=
							hlock->irq_context))
			chain_key = 0;
		chain_key = iterate_chain_key(chain_key, id);
		prev_hlock = hlock;
	}
	if (chain_key != curr->curr_chain_key) {
		debug_locks_off();
		printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
			curr->lockdep_depth, i,
			(unsigned long long)chain_key,
			(unsigned long long)curr->curr_chain_key);
		WARN_ON(1);
	}
#endif
}

#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * print irq inversion bug:
 */
static int
print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
			struct held_lock *this, int forwards,
			const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n=========================================================\n");
	printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
	print_kernel_version();
	printk(  "---------------------------------------------------------\n");
	printk("%s/%d just changed the state of lock:\n",
		curr->comm, curr->pid);
	print_lock(this);
	if (forwards)
		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
	else
		printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
	print_lock_name(other);
	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");

	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nthe first lock's dependencies:\n");
	print_lock_dependencies(this->class, 0);

	printk("\nthe second lock's dependencies:\n");
	print_lock_dependencies(other, 0);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Prove that in the forwards-direction subgraph starting at <this>
 * there is no lock matching <mask>:
 */
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit bit, const char *irqclass)
{
	int ret;

	find_usage_bit = bit;
	/* fills in <forwards_match> */
	ret = find_usage_forwards(this->class, 0);
	if (!ret || ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
}

/*
 * Prove that in the backwards-direction subgraph starting at <this>
 * there is no lock matching <mask>:
 */
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
		      enum lock_usage_bit bit, const char *irqclass)
{
	int ret;

	find_usage_bit = bit;
	/* fills in <backwards_match> */
	ret = find_usage_backwards(this->class, 0);
	if (!ret || ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
}

void print_irqtrace_events(struct task_struct *curr)
{
	printk("irq event stamp: %u\n", curr->irq_events);
	printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
	print_ip_sym(curr->hardirq_enable_ip);
	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
	print_ip_sym(curr->hardirq_disable_ip);
	printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
	print_ip_sym(curr->softirq_enable_ip);
	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
	print_ip_sym(curr->softirq_disable_ip);
}

#endif

static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n=================================\n");
	printk(  "[ INFO: inconsistent lock state ]\n");
	print_kernel_version();
	printk(  "---------------------------------\n");
	printk("inconsistent {%s} -> {%s} usage.\n",
		usage_str[prev_bit], usage_str[new_bit]);
	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
		curr->comm, curr->pid,
		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
		trace_hardirqs_enabled(curr),
		trace_softirqs_enabled(curr));
	print_lock(this);

	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
	print_stack_trace(this->class->usage_traces + prev_bit, 1);

	print_irqtrace_events(curr);
	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);
