
📄 lockdep.c

📁 Linux kernel source code (kernel/lockdep.c)
💻 C
📖 Page 1 of 5
/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies runtime.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really dont want the spinlock
 * code to recurse back into the lockdep code...
 */
static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	__raw_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we dont change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		__raw_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
		return DEBUG_LOCKS_WARN_ON(1);

	current->lockdep_recursion--;
	__raw_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	__raw_spin_unlock(&lockdep_lock);

	return ret;
}

static int lockdep_initialized;

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated, once during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);

static int lock_contention_point(struct lock_class *class, unsigned long ip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
		if (class->contention_point[i] == 0) {
			class->contention_point[i] = ip;
			break;
		}
		if (class->contention_point[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, s64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->min)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	dst->min += src->min;
	dst->max += src->max;
	dst->total += src->total;
	dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &get_cpu_var(lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
	put_cpu_var(lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	s64 holdtime;

	if (!lock_stat)
		return;

	holdtime = sched_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
	put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock lock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))

void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
unsigned int max_recursion_depth;

static unsigned int lockdep_dependency_gen_id;

static bool lockdep_dependency_visit(struct lock_class *source,
				     unsigned int depth)
{
	if (!depth)
		lockdep_dependency_gen_id++;
	if (source->dep_gen_id == lockdep_dependency_gen_id)
		return true;
	source->dep_gen_id = lockdep_dependency_gen_id;
	return false;
}

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Not even early_printk()
 * might work. So we mark any initialization errors and printk
 * about it later on, in lockdep_info().
 */
static int lockdep_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
	.entries = lockdep_init_trace_data,
};

/*
 * Various lockdep statistics:
 */
atomic_t chain_lookup_hits;
atomic_t chain_lookup_misses;
atomic_t hardirqs_on_events;
atomic_t hardirqs_off_events;
atomic_t redundant_hardirqs_on;
atomic_t redundant_hardirqs_off;
atomic_t softirqs_on_events;
atomic_t softirqs_off_events;
atomic_t redundant_softirqs_on;
atomic_t redundant_softirqs_off;
atomic_t nr_unused_locks;
atomic_t nr_cyclic_checks;
atomic_t nr_cyclic_check_recursions;
atomic_t nr_find_usage_forwards_checks;
atomic_t nr_find_usage_forwards_recursions;
atomic_t nr_find_usage_backwards_checks;
atomic_t nr_find_usage_backwards_recursions;
# define debug_atomic_inc(ptr)		atomic_inc(ptr)
# define debug_atomic_dec(ptr)		atomic_dec(ptr)
# define debug_atomic_read(ptr)		atomic_read(ptr)
#else
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif

/*
 * Locking printouts:
 */

static const char *usage_str[] =
{
	[LOCK_USED] =			"initial-use ",
	[LOCK_USED_IN_HARDIRQ] =	"in-hardirq-W",
	[LOCK_USED_IN_SOFTIRQ] =	"in-softirq-W",
	[LOCK_ENABLED_SOFTIRQS] =	"softirq-on-W",
	[LOCK_ENABLED_HARDIRQS] =	"hardirq-on-W",
	[LOCK_USED_IN_HARDIRQ_READ] =	"in-hardirq-R",
	[LOCK_USED_IN_SOFTIRQ_READ] =	"in-softirq-R",
	[LOCK_ENABLED_SOFTIRQS_READ] =	"softirq-on-R",
	[LOCK_ENABLED_HARDIRQS_READ] =	"hardirq-on-R",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

void
get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
{
	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';

	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
		*c1 = '+';
	else
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
			*c1 = '-';

	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
		*c2 = '+';
	else
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
			*c2 = '-';

	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
		*c3 = '-';
	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
		*c3 = '+';
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
			*c3 = '?';
	}

	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
		*c4 = '-';
	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
		*c4 = '+';
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
			*c4 = '?';
	}
}

static void print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN], c1, c2, c3, c4;
	const char *name;

	get_usage_chars(class, &c1, &c2, &c3, &c4);

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(" (%s", name);
	} else {
		printk(" (%s", name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		if (class->subclass)
			printk("/%d", class->subclass);
	}
	printk("){%c%c%c%c}", c1, c2, c3, c4);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
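The listing above (continued on the following pages) cuts off inside print_lockdep_cache(). As context for the header comment's claim that lockdep warns about lock-order inversions even when the current run never actually deadlocks, here is a minimal sketch of the kind of bug it catches. This sketch is not part of lockdep.c; the lock names and functions (lock_a, lock_b, path_one(), path_two()) are made up for illustration and only use standard kernel spinlock API.

/* Hypothetical driver code, for illustration only. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void path_one(void)
{
	spin_lock(&lock_a);
	spin_lock(&lock_b);	/* lockdep records the dependency A -> B */
	/* ... */
	spin_unlock(&lock_b);
	spin_unlock(&lock_a);
}

static void path_two(void)
{
	spin_lock(&lock_b);
	spin_lock(&lock_a);	/* B -> A closes a cycle in the dependency graph */
	/* ... */
	spin_unlock(&lock_a);
	spin_unlock(&lock_b);
}

Even if path_one() and path_two() never race during a given boot, the second ordering completes a cycle in the dependency graph that lockdep maintains, and a circular locking dependency warning is printed, which is the behaviour the file's header comment describes.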
