⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 lockdep.c

📁 linux 2.6.19 kernel source code before patching
💻 C
📖 第 1 页 / 共 5 页
字号:
	printk("\nstack backtrace:\n");	dump_stack();	return 0;}/* * Print out an error if an invalid bit is set: */static inline intvalid_state(struct task_struct *curr, struct held_lock *this,	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit){	if (unlikely(this->class->usage_mask & (1 << bad_bit)))		return print_usage_bug(curr, this, bad_bit, new_bit);	return 1;}#define STRICT_READ_CHECKS	1/* * Mark a lock with a usage bit, and validate the state transition: */static int mark_lock(struct task_struct *curr, struct held_lock *this,		     enum lock_usage_bit new_bit){	unsigned int new_mask = 1 << new_bit, ret = 1;	/*	 * If already set then do not dirty the cacheline,	 * nor do any checks:	 */	if (likely(this->class->usage_mask & new_mask))		return 1;	if (!graph_lock())		return 0;	/*	 * Make sure we didnt race:	 */	if (unlikely(this->class->usage_mask & new_mask)) {		graph_unlock();		return 1;	}	this->class->usage_mask |= new_mask;	if (!save_trace(this->class->usage_traces + new_bit))		return 0;	switch (new_bit) {#ifdef CONFIG_TRACE_IRQFLAGS	case LOCK_USED_IN_HARDIRQ:		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))			return 0;		if (!valid_state(curr, this, new_bit,				 LOCK_ENABLED_HARDIRQS_READ))			return 0;		/*		 * just marked it hardirq-safe, check that this lock		 * took no hardirq-unsafe lock in the past:		 */		if (!check_usage_forwards(curr, this,					  LOCK_ENABLED_HARDIRQS, "hard"))			return 0;#if STRICT_READ_CHECKS		/*		 * just marked it hardirq-safe, check that this lock		 * took no hardirq-unsafe-read lock in the past:		 */		if (!check_usage_forwards(curr, this,				LOCK_ENABLED_HARDIRQS_READ, "hard-read"))			return 0;#endif		if (hardirq_verbose(this->class))			ret = 2;		break;	case LOCK_USED_IN_SOFTIRQ:		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))			return 0;		if (!valid_state(curr, this, new_bit,				 LOCK_ENABLED_SOFTIRQS_READ))			return 0;		/*		 * just marked it softirq-safe, check that this lock		 * took no softirq-unsafe lock in the past:		 */		if (!check_usage_forwards(curr, this,					  LOCK_ENABLED_SOFTIRQS, "soft"))			return 0;#if STRICT_READ_CHECKS		/*		 * just marked it softirq-safe, check that this lock		 * took no softirq-unsafe-read lock in the past:		 */		if (!check_usage_forwards(curr, this,				LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))			return 0;#endif		if (softirq_verbose(this->class))			ret = 2;		break;	case LOCK_USED_IN_HARDIRQ_READ:		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))			return 0;		/*		 * just marked it hardirq-read-safe, check that this lock		 * took no hardirq-unsafe lock in the past:		 */		if (!check_usage_forwards(curr, this,					  LOCK_ENABLED_HARDIRQS, "hard"))			return 0;		if (hardirq_verbose(this->class))			ret = 2;		break;	case LOCK_USED_IN_SOFTIRQ_READ:		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))			return 0;		/*		 * just marked it softirq-read-safe, check that this lock		 * took no softirq-unsafe lock in the past:		 */		if (!check_usage_forwards(curr, this,					  LOCK_ENABLED_SOFTIRQS, "soft"))			return 0;		if (softirq_verbose(this->class))			ret = 2;		break;	case LOCK_ENABLED_HARDIRQS:		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))			return 0;		if (!valid_state(curr, this, new_bit,				 LOCK_USED_IN_HARDIRQ_READ))			return 0;		/*		 * just marked it hardirq-unsafe, check that no hardirq-safe		 * lock in the system ever took it in the past:		 */		if (!check_usage_backwards(curr, this,					   LOCK_USED_IN_HARDIRQ, "hard"))			return 0;#if STRICT_READ_CHECKS		/*		 * just marked it hardirq-unsafe, check that no		 * hardirq-safe-read lock in the system ever took		 * it in the past:		 */		if (!check_usage_backwards(curr, this,				   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))			return 0;#endif		if (hardirq_verbose(this->class))			ret = 2;		break;	case LOCK_ENABLED_SOFTIRQS:		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))			return 0;		if (!valid_state(curr, this, new_bit,				 LOCK_USED_IN_SOFTIRQ_READ))			return 0;		/*		 * just marked it softirq-unsafe, check that no softirq-safe		 * lock in the system ever took it in the past:		 */		if (!check_usage_backwards(curr, this,					   LOCK_USED_IN_SOFTIRQ, "soft"))			return 0;#if STRICT_READ_CHECKS		/*		 * just marked it softirq-unsafe, check that no		 * softirq-safe-read lock in the system ever took		 * it in the past:		 */		if (!check_usage_backwards(curr, this,				   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))			return 0;#endif		if (softirq_verbose(this->class))			ret = 2;		break;	case LOCK_ENABLED_HARDIRQS_READ:		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))			return 0;#if STRICT_READ_CHECKS		/*		 * just marked it hardirq-read-unsafe, check that no		 * hardirq-safe lock in the system ever took it in the past:		 */		if (!check_usage_backwards(curr, this,					   LOCK_USED_IN_HARDIRQ, "hard"))			return 0;#endif		if (hardirq_verbose(this->class))			ret = 2;		break;	case LOCK_ENABLED_SOFTIRQS_READ:		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))			return 0;#if STRICT_READ_CHECKS		/*		 * just marked it softirq-read-unsafe, check that no		 * softirq-safe lock in the system ever took it in the past:		 */		if (!check_usage_backwards(curr, this,					   LOCK_USED_IN_SOFTIRQ, "soft"))			return 0;#endif		if (softirq_verbose(this->class))			ret = 2;		break;#endif	case LOCK_USED:		/*		 * Add it to the global list of classes:		 */		list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);		debug_atomic_dec(&nr_unused_locks);		break;	default:		if (!debug_locks_off_graph_unlock())			return 0;		WARN_ON(1);		return 0;	}	graph_unlock();	/*	 * We must printk outside of the graph_lock:	 */	if (ret == 2) {		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);		print_lock(this);		print_irqtrace_events(curr);		dump_stack();	}	return ret;}#ifdef CONFIG_TRACE_IRQFLAGS/* * Mark all held locks with a usage bit: */static intmark_held_locks(struct task_struct *curr, int hardirq){	enum lock_usage_bit usage_bit;	struct held_lock *hlock;	int i;	for (i = 0; i < curr->lockdep_depth; i++) {		hlock = curr->held_locks + i;		if (hardirq) {			if (hlock->read)				usage_bit = LOCK_ENABLED_HARDIRQS_READ;			else				usage_bit = LOCK_ENABLED_HARDIRQS;		} else {			if (hlock->read)				usage_bit = LOCK_ENABLED_SOFTIRQS_READ;			else				usage_bit = LOCK_ENABLED_SOFTIRQS;		}		if (!mark_lock(curr, hlock, usage_bit))			return 0;	}	return 1;}/* * Debugging helper: via this flag we know that we are in * 'early bootup code', and will warn about any invalid irqs-on event: */static int early_boot_irqs_enabled;void early_boot_irqs_off(void){	early_boot_irqs_enabled = 0;}void early_boot_irqs_on(void){	early_boot_irqs_enabled = 1;}/* * Hardirqs will be enabled: */void trace_hardirqs_on(void){	struct task_struct *curr = current;	unsigned long ip;	if (unlikely(!debug_locks || current->lockdep_recursion))		return;	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))		return;	if (unlikely(curr->hardirqs_enabled)) {		debug_atomic_inc(&redundant_hardirqs_on);		return;	}	/* we'll do an OFF -> ON transition: */	curr->hardirqs_enabled = 1;	ip = (unsigned long) __builtin_return_address(0);	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))		return;	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))		return;	/*	 * We are going to turn hardirqs on, so set the	 * usage bit for all held locks:	 */	if (!mark_held_locks(curr, 1))		return;	/*	 * If we have softirqs enabled, then set the usage	 * bit for all held locks. (disabled hardirqs prevented	 * this bit from being set before)	 */	if (curr->softirqs_enabled)		if (!mark_held_locks(curr, 0))			return;	curr->hardirq_enable_ip = ip;	curr->hardirq_enable_event = ++curr->irq_events;	debug_atomic_inc(&hardirqs_on_events);}EXPORT_SYMBOL(trace_hardirqs_on);/* * Hardirqs were disabled: */void trace_hardirqs_off(void){	struct task_struct *curr = current;	if (unlikely(!debug_locks || current->lockdep_recursion))		return;	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))		return;	if (curr->hardirqs_enabled) {		/*		 * We have done an ON -> OFF transition:		 */		curr->hardirqs_enabled = 0;		curr->hardirq_disable_ip = _RET_IP_;		curr->hardirq_disable_event = ++curr->irq_events;		debug_atomic_inc(&hardirqs_off_events);	} else		debug_atomic_inc(&redundant_hardirqs_off);}EXPORT_SYMBOL(trace_hardirqs_off);/* * Softirqs will be enabled: */void trace_softirqs_on(unsigned long ip){	struct task_struct *curr = current;	if (unlikely(!debug_locks))		return;	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))		return;	if (curr->softirqs_enabled) {		debug_atomic_inc(&redundant_softirqs_on);		return;	}	/*	 * We'll do an OFF -> ON transition:	 */	curr->softirqs_enabled = 1;	curr->softirq_enable_ip = ip;	curr->softirq_enable_event = ++curr->irq_events;	debug_atomic_inc(&softirqs_on_events);	/*	 * We are going to turn softirqs on, so set the	 * usage bit for all held locks, if hardirqs are	 * enabled too:	 */	if (curr->hardirqs_enabled)		mark_held_locks(curr, 0);}/* * Softirqs were disabled: */void trace_softirqs_off(unsigned long ip){	struct task_struct *curr = current;	if (unlikely(!debug_locks))		return;	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))		return;	if (curr->softirqs_enabled) {		/*		 * We have done an ON -> OFF transition:		 */		curr->softirqs_enabled = 0;		curr->softirq_disable_ip = ip;		curr->softirq_disable_event = ++curr->irq_events;		debug_atomic_inc(&softirqs_off_events);		DEBUG_LOCKS_WARN_ON(!softirq_count());	} else		debug_atomic_inc(&redundant_softirqs_off);}#endif/* * Initialize a lock instance's lock-class mapping info: */void lockdep_init_map(struct lockdep_map *lock, const char *name,		      struct lock_class_key *key, int subclass){	if (unlikely(!debug_locks))		return;	if (DEBUG_LOCKS_WARN_ON(!key))		return;	if (DEBUG_LOCKS_WARN_ON(!name))		return;	/*	 * Sanity check, the lock-class key must be persistent:	 */	if (!static_obj(key)) {		printk("BUG: key %p not in .data!\n", key);		DEBUG_LOCKS_WARN_ON(1);		return;	}	lock->name = name;	lock->key = key;	lock->class_cache = NULL;	if (subclass)		register_lock_class(lock, subclass, 1);}EXPORT_SYMBOL_GPL(lockdep_init_map);/* * This gets called for every mutex_lock*()/spin_lock*() operation. * We maintain the dependency maps and validate the locking attempt: */static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,			  int trylock, int read, int check, int hardirqs_off,			  unsigned long ip){	struct task_struct *curr = current;	struct lock_class *class = NULL;	struct held_lock *hlock;	unsigned int depth, id;	int chain_head = 0;	u64 chain_key;	if (unlikely(!debug_locks))		return 0;	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))		return 0;	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {		debug_locks_off();		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");		printk("turning off the locking correctness validator.\n");		return 0;	}	if (!subclass)		class = lock->class_cache;	/*	 * Not cached yet or subclass?	 */	if (unlikely(!class)) {		class = register_lock_class(lock, subclass, 0);		if (!class)			return 0;	}	debug_atomic_inc((atomic_t *)&class->ops);	if (very_verbose(class)) {		printk("\nacquire class [%p] %s", class->key, class->name);		if (class->name_version > 1)			printk("#%d", class->name_version);		printk("\n");		dump_stack();	}	/*	 * Add the lock to the list of currently held locks.	 * (we dont increase the depth just yet, up until the	 * dependency checks are done)	 */	depth = curr->lockdep_depth;	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))		return 0;	hlock = curr->held_locks + depth;	hlock->class = class;	hlock->acquire_ip = ip;	hlock->instance = lock;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -