
lockdep.c

Linux kernel source code
Language: C
Page 1 of 5
		 * just marked it softirq-unsafe, check that no
		 * softirq-safe-read lock in the system ever took
		 * it in the past:
		 */
		if (!check_usage_backwards(curr, this,
				   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
			return 0;
#endif
		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_HARDIRQS_READ:
		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
			return 0;
#if STRICT_READ_CHECKS
		/*
		 * just marked it hardirq-read-unsafe, check that no
		 * hardirq-safe lock in the system ever took it in the past:
		 */
		if (!check_usage_backwards(curr, this,
					   LOCK_USED_IN_HARDIRQ, "hard"))
			return 0;
#endif
		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_SOFTIRQS_READ:
		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
			return 0;
#if STRICT_READ_CHECKS
		/*
		 * just marked it softirq-read-unsafe, check that no
		 * softirq-safe lock in the system ever took it in the past:
		 */
		if (!check_usage_backwards(curr, this,
					   LOCK_USED_IN_SOFTIRQ, "soft"))
			return 0;
#endif
		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return ret;
}

/*
 * Mark all held locks with a usage bit:
 */
static int
mark_held_locks(struct task_struct *curr, int hardirq)
{
	enum lock_usage_bit usage_bit;
	struct held_lock *hlock;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (hardirq) {
			if (hlock->read)
				usage_bit = LOCK_ENABLED_HARDIRQS_READ;
			else
				usage_bit = LOCK_ENABLED_HARDIRQS;
		} else {
			if (hlock->read)
				usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
			else
				usage_bit = LOCK_ENABLED_SOFTIRQS;
		}
		if (!mark_lock(curr, hlock, usage_bit))
			return 0;
	}

	return 1;
}

/*
 * Debugging helper: via this flag we know that we are in
 * 'early bootup code', and will warn about any invalid irqs-on event:
 */
static int early_boot_irqs_enabled;

void early_boot_irqs_off(void)
{
	early_boot_irqs_enabled = 0;
}

void early_boot_irqs_on(void)
{
	early_boot_irqs_enabled = 1;
}

/*
 * Hardirqs will be enabled:
 */
void trace_hardirqs_on_caller(unsigned long a0)
{
	struct task_struct *curr = current;
	unsigned long ip;

	time_hardirqs_on(CALLER_ADDR0, a0);

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
		return;

	if (unlikely(curr->hardirqs_enabled)) {
		debug_atomic_inc(&redundant_hardirqs_on);
		return;
	}
	/* we'll do an OFF -> ON transition: */
	curr->hardirqs_enabled = 1;
	ip = (unsigned long) __builtin_return_address(0);

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;
	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
		return;
	/*
	 * We are going to turn hardirqs on, so set the
	 * usage bit for all held locks:
	 */
	if (!mark_held_locks(curr, 1))
		return;
	/*
	 * If we have softirqs enabled, then set the usage
	 * bit for all held locks. (disabled hardirqs prevented
	 * this bit from being set before)
	 */
	if (curr->softirqs_enabled)
		if (!mark_held_locks(curr, 0))
			return;

	curr->hardirq_enable_ip = ip;
	curr->hardirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(&hardirqs_on_events);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_on(void)
{
	trace_hardirqs_on_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);

/*
 * Hardirqs were disabled:
 */
void trace_hardirqs_off_caller(unsigned long a0)
{
	struct task_struct *curr = current;

	time_hardirqs_off(CALLER_ADDR0, a0);

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->hardirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->hardirqs_enabled = 0;
		curr->hardirq_disable_ip = _RET_IP_;
		curr->hardirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(&hardirqs_off_events);
	} else
		debug_atomic_inc(&redundant_hardirqs_off);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

void trace_hardirqs_off(void)
{
	trace_hardirqs_off_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off);

/*
 * Softirqs will be enabled:
 */
void trace_softirqs_on(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks))
		return;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		debug_atomic_inc(&redundant_softirqs_on);
		return;
	}

	/*
	 * We'll do an OFF -> ON transition:
	 */
	curr->softirqs_enabled = 1;
	curr->softirq_enable_ip = ip;
	curr->softirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(&softirqs_on_events);
	/*
	 * We are going to turn softirqs on, so set the
	 * usage bit for all held locks, if hardirqs are
	 * enabled too:
	 */
	if (curr->hardirqs_enabled)
		mark_held_locks(curr, 0);
}

/*
 * Softirqs were disabled:
 */
void trace_softirqs_off(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks))
		return;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->softirqs_enabled = 0;
		curr->softirq_disable_ip = ip;
		curr->softirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(&softirqs_off_events);
		DEBUG_LOCKS_WARN_ON(!softirq_count());
	} else
		debug_atomic_inc(&redundant_softirqs_off);
}

static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
	/*
	 * If non-trylock use in a hardirq or softirq context, then
	 * mark the lock as used in these contexts:
	 */
	if (!hlock->trylock) {
		if (hlock->read) {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_HARDIRQ_READ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_SOFTIRQ_READ))
					return 0;
		} else {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
					return 0;
		}
	}
	if (!hlock->hardirqs_off) {
		if (hlock->read) {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQS_READ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQS_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQS))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQS))
					return 0;
		}
	}

	return 1;
}

static int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	unsigned int depth = curr->lockdep_depth;

	/*
	 * Keep track of points where we cross into an interrupt context:
	 */
	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
				curr->softirq_context;
	if (depth) {
		struct held_lock *prev_hlock;

		prev_hlock = curr->held_locks + depth-1;
		/*
		 * If we cross into another context, reset the
		 * hash key (this also prevents the checking and the
		 * adding of the dependency to 'prev'):
		 */
		if (prev_hlock->irq_context != hlock->irq_context)
			return 1;
	}
	return 0;
}

#else

static inline
int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	WARN_ON(1);
	return 1;
}

static inline int mark_irqflags(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 1;
}

static inline int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 0;
}

#endif

/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
			     enum lock_usage_bit new_bit)
{
	unsigned int new_mask = 1 << new_bit, ret = 1;

	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
	if (likely(hlock_class(this)->usage_mask & new_mask))
		return 1;

	if (!graph_lock())
		return 0;
	/*
	 * Make sure we didnt race:
	 */
	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
		graph_unlock();
		return 1;
	}

	hlock_class(this)->usage_mask |= new_mask;

	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
		return 0;

	switch (new_bit) {
	case LOCK_USED_IN_HARDIRQ:
	case LOCK_USED_IN_SOFTIRQ:
	case LOCK_USED_IN_HARDIRQ_READ:
	case LOCK_USED_IN_SOFTIRQ_READ:
	case LOCK_ENABLED_HARDIRQS:
	case LOCK_ENABLED_SOFTIRQS:
	case LOCK_ENABLED_HARDIRQS_READ:
	case LOCK_ENABLED_SOFTIRQS_READ:
		ret = mark_lock_irq(curr, this, new_bit);
		if (!ret)
			return 0;
		break;
	case LOCK_USED:
		debug_atomic_dec(&nr_unused_locks);
		break;
	default:
		if (!debug_locks_off_graph_unlock())
			return 0;
		WARN_ON(1);
		return 0;
	}

	graph_unlock();

	/*
	 * We must printk outside of the graph_lock:
	 */
	if (ret == 2) {
		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
		print_lock(this);
		print_irqtrace_events(curr);
		dump_stack();
	}

	return ret;
}

/*
 * Initialize a lock instance's lock-class mapping info:
 */
void lockdep_init_map(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass)
{
	if (unlikely(!debug_locks))
		return;

	if (DEBUG_LOCKS_WARN_ON(!key))
		return;
	if (DEBUG_LOCKS_WARN_ON(!name))
		return;
	/*
	 * Sanity check, the lock-class key must be persistent:
	 */
	if (!static_obj(key)) {
		printk("BUG: key %p not in .data!\n", key);
		DEBUG_LOCKS_WARN_ON(1);
		return;
	}
	lock->name = name;
	lock->key = key;
	lock->class_cache = NULL;
#ifdef CONFIG_LOCK_STAT
	lock->cpu = raw_smp_processor_id();
#endif
	if (subclass)
		register_lock_class(lock, subclass, 1);
}
EXPORT_SYMBOL_GPL(lockdep_init_map);

/*
 * This gets called for every mutex_lock*()/spin_lock*() operation.
 * We maintain the dependency maps and validate the locking attempt:
 */
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check, int hardirqs_off,
			  struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct lock_class *class = NULL;
	struct held_lock *hlock;
	unsigned int depth, id;
	int chain_head = 0;
	u64 chain_key;

	if (!prove_locking)
		check = 1;

	if (unlikely(!debug_locks))
		return 0;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
		printk("turning off the locking correctness validator.\n");
		return 0;
	}

	if (!subclass)
		class = lock->class_cache;
	/*
	 * Not cached yet or subclass?
	 */
	if (unlikely(!class)) {
		class = register_lock_class(lock, subclass, 0);
		if (!class)
			return 0;
	}
	debug_atomic_inc((atomic_t *)&class->ops);
	if (very_verbose(class)) {
		printk("\nacquire class [%p] %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();
	}

	/*
	 * Add the lock to the list of currently held locks.
	 * (we dont increase the depth just yet, up until the
	 * dependency checks are done)
	 */
	depth = curr->lockdep_depth;
	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
		return 0;

	hlock = curr->held_locks + depth;
	if (DEBUG_LOCKS_WARN_ON(!class))
		return 0;
	hlock->class_idx = class - lock_classes + 1;
	hlock->acquire_ip = ip;
	hlock->instance = lock;
	hlock->nest_lock = nest_lock;
	hlock->trylock = trylock;
	hlock->read = read;
	hlock->check = check;
	hlock->hardirqs_off = !!hardirqs_off;
#ifdef CONFIG_LOCK_STAT
	hlock->waittime_stamp = 0;
	hlock->holdtime_stamp = sched_clock();
