
lockdep.c

linux 2.6.19 kernel source code before patching
Language: C
Page 1 of 5
/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies runtime.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really dont want the spinlock
 * code to recurse back into the lockdep code...
 */
static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	__raw_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we dont change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		__raw_spin_unlock(&lockdep_lock);
		return 0;
	}
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
		return DEBUG_LOCKS_WARN_ON(1);

	__raw_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	__raw_spin_unlock(&lockdep_lock);

	return ret;
}

static int lockdep_initialized;

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated, once during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock lock held.
 */
LIST_HEAD(all_lock_classes);
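
/*
 * Informally: a class is keyed by the static key of the lock's
 * initialization site, not by the lock instance. E.g. sixteen
 * spinlocks initialized by the same spin_lock_init() call inside a
 * loop all share one class, while locks initialized at different
 * call sites get distinct classes - which is why the arrays above
 * are bounded by MAX_LOCKDEP_KEYS rather than by the number of lock
 * instances in the system.
 */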

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define CLASSHASH_MASK		(CLASSHASH_SIZE - 1)
#define __classhashfn(key)	((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

unsigned long nr_lock_chains;
static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define CHAINHASH_MASK		(CHAINHASH_SIZE - 1)
#define __chainhashfn(chain) \
		(((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))

void lockdep_off(void)
{
	current->lockdep_recursion++;
}

EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}

EXPORT_SYMBOL(lockdep_on);

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#ifdef CONFIG_TRACE_IRQFLAGS

static int hardirq_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int softirq_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#endif

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
unsigned int max_recursion_depth;
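
/*
 * A sketch of how the chain key above accumulates (the ids are
 * hypothetical class identifiers): for a task acquiring A, then B,
 * then C, the per-task chain key evolves as
 *
 *	chain_key = iterate_chain_key(chain_key, id_A);
 *	chain_key = iterate_chain_key(chain_key, id_B);
 *	chain_key = iterate_chain_key(chain_key, id_C);
 *
 * so one 64-bit value identifies the whole held-lock sequence, and
 * chainhash_table lets us skip re-validating chains seen before.
 */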

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Not even early_printk()
 * might work. So we mark any initialization errors and printk
 * about it later on, in lockdep_info().
 */
static int lockdep_init_error;

/*
 * Various lockdep statistics:
 */
atomic_t chain_lookup_hits;
atomic_t chain_lookup_misses;
atomic_t hardirqs_on_events;
atomic_t hardirqs_off_events;
atomic_t redundant_hardirqs_on;
atomic_t redundant_hardirqs_off;
atomic_t softirqs_on_events;
atomic_t softirqs_off_events;
atomic_t redundant_softirqs_on;
atomic_t redundant_softirqs_off;
atomic_t nr_unused_locks;
atomic_t nr_cyclic_checks;
atomic_t nr_cyclic_check_recursions;
atomic_t nr_find_usage_forwards_checks;
atomic_t nr_find_usage_forwards_recursions;
atomic_t nr_find_usage_backwards_checks;
atomic_t nr_find_usage_backwards_recursions;
# define debug_atomic_inc(ptr)		atomic_inc(ptr)
# define debug_atomic_dec(ptr)		atomic_dec(ptr)
# define debug_atomic_read(ptr)		atomic_read(ptr)
#else
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif

/*
 * Locking printouts:
 */

static const char *usage_str[] =
{
	[LOCK_USED] =			"initial-use ",
	[LOCK_USED_IN_HARDIRQ] =	"in-hardirq-W",
	[LOCK_USED_IN_SOFTIRQ] =	"in-softirq-W",
	[LOCK_ENABLED_SOFTIRQS] =	"softirq-on-W",
	[LOCK_ENABLED_HARDIRQS] =	"hardirq-on-W",
	[LOCK_USED_IN_HARDIRQ_READ] =	"in-hardirq-R",
	[LOCK_USED_IN_SOFTIRQ_READ] =	"in-softirq-R",
	[LOCK_ENABLED_SOFTIRQS_READ] =	"softirq-on-R",
	[LOCK_ENABLED_HARDIRQS_READ] =	"hardirq-on-R",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

void
get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
{
	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';

	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
		*c1 = '+';
	else
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
			*c1 = '-';

	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
		*c2 = '+';
	else
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
			*c2 = '-';

	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
		*c3 = '-';
	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
		*c3 = '+';
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
			*c3 = '?';
	}

	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
		*c4 = '-';
	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
		*c4 = '+';
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
			*c4 = '?';
	}
}

static void print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
	const char *name;

	get_usage_chars(class, &c1, &c2, &c3, &c4);

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(" (%s", name);
	} else {
		printk(" (%s", name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		if (class->subclass)
			printk("/%d", class->subclass);
	}
	printk("){%c%c%c%c}", c1, c2, c3, c4);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN + 1];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	print_lock_name(hlock->class);
	printk(", at: ");
	print_ip_sym(hlock->acquire_ip);
}
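
/*
 * Reading the {%c%c%c%c} usage block printed above: c1/c2 describe
 * the hardirq/softirq write state and c3/c4 the corresponding read
 * state. '+' means the lock was used in that irq context, '-' means
 * it was held with that irq type enabled, '?' (read side) means
 * both, and '.' means neither - e.g. "{+...}" marks a lock that has
 * only ever been taken in hardirq context.
 */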
"s" : "", curr->comm, curr->pid);	for (i = 0; i < depth; i++) {		printk(" #%d: ", i);		print_lock(curr->held_locks + i);	}}static void print_lock_class_header(struct lock_class *class, int depth){	int bit;	printk("%*s->", depth, "");	print_lock_name(class);	printk(" ops: %lu", class->ops);	printk(" {\n");	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {		if (class->usage_mask & (1 << bit)) {			int len = depth;			len += printk("%*s   %s", depth, "", usage_str[bit]);			len += printk(" at:\n");			print_stack_trace(class->usage_traces + bit, len);		}	}	printk("%*s }\n", depth, "");	printk("%*s ... key      at: ",depth,"");	print_ip_sym((unsigned long)class->key);}/* * printk all lock dependencies starting at <entry>: */static void print_lock_dependencies(struct lock_class *class, int depth){	struct lock_list *entry;	if (DEBUG_LOCKS_WARN_ON(depth >= 20))		return;	print_lock_class_header(class, depth);	list_for_each_entry(entry, &class->locks_after, entry) {		if (DEBUG_LOCKS_WARN_ON(!entry->class))			return;		print_lock_dependencies(entry->class, depth + 1);		printk("%*s ... acquired at:\n",depth,"");		print_stack_trace(&entry->trace, 2);		printk("\n");	}}/* * Add a new dependency to the head of the list: */static int add_lock_to_list(struct lock_class *class, struct lock_class *this,			    struct list_head *head, unsigned long ip, int distance){	struct lock_list *entry;	/*	 * Lock not present yet - get a new dependency struct and	 * add it to the list:	 */	entry = alloc_list_entry();	if (!entry)		return 0;	entry->class = this;	entry->distance = distance;	if (!save_trace(&entry->trace))		return 0;	/*	 * Since we never remove from the dependency list, the list can	 * be walked lockless by other CPUs, it's only allocation	 * that must be protected by the spinlock. But this also means	 * we must make new entries visible only once writes to the	 * entry become visible - hence the RCU op:	 */	list_add_tail_rcu(&entry->entry, head);	return 1;}/*
