⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 profile.c

📁 linux 2.6.19 kernel source code before patching
💻 C
📖 第 1 页 / 共 2 页
字号:
/* *  linux/kernel/profile.c *  Simple profiling. Manages a direct-mapped profile hit count buffer, *  with configurable resolution, support for restricting the cpus on *  which profiling is done, and switching between cpu time and *  schedule() calls via kernel command line parameters passed at boot. * *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar, *	Red Hat, July 2004 *  Consolidation of architecture support code for profiling, *	William Irwin, Oracle, July 2004 *  Amortized hit count accounting via per-cpu open-addressed hashtables *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004 */#include <linux/module.h>#include <linux/profile.h>#include <linux/bootmem.h>#include <linux/notifier.h>#include <linux/mm.h>#include <linux/cpumask.h>#include <linux/cpu.h>#include <linux/profile.h>#include <linux/highmem.h>#include <linux/mutex.h>#include <asm/sections.h>#include <asm/semaphore.h>#include <asm/irq_regs.h>#include <asm/ptrace.h>struct profile_hit {	u32 pc, hits;};#define PROFILE_GRPSHIFT	3#define PROFILE_GRPSZ		(1 << PROFILE_GRPSHIFT)#define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))#define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)/* Oprofile timer tick hook */int (*timer_hook)(struct pt_regs *) __read_mostly;static atomic_t *prof_buffer;static unsigned long prof_len, prof_shift;int prof_on __read_mostly;EXPORT_SYMBOL_GPL(prof_on);static cpumask_t prof_cpu_mask = CPU_MASK_ALL;#ifdef CONFIG_SMPstatic DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);static DEFINE_PER_CPU(int, cpu_profile_flip);static DEFINE_MUTEX(profile_flip_mutex);#endif /* CONFIG_SMP */static int __init profile_setup(char * str){	static char __initdata schedstr[] = "schedule";	static char __initdata sleepstr[] = "sleep";	static char __initdata kvmstr[] = "kvm";	int par;	if (!strncmp(str, sleepstr, strlen(sleepstr))) {		prof_on = SLEEP_PROFILING;		if (str[strlen(sleepstr)] == ',')			str += strlen(sleepstr) + 1;		if (get_option(&str, 
&par))			prof_shift = par;		printk(KERN_INFO			"kernel sleep profiling enabled (shift: %ld)\n",			prof_shift);	} else if (!strncmp(str, schedstr, strlen(schedstr))) {		prof_on = SCHED_PROFILING;		if (str[strlen(schedstr)] == ',')			str += strlen(schedstr) + 1;		if (get_option(&str, &par))			prof_shift = par;		printk(KERN_INFO			"kernel schedule profiling enabled (shift: %ld)\n",			prof_shift);	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {		prof_on = KVM_PROFILING;		if (str[strlen(kvmstr)] == ',')			str += strlen(kvmstr) + 1;		if (get_option(&str, &par))			prof_shift = par;		printk(KERN_INFO			"kernel KVM profiling enabled (shift: %ld)\n",			prof_shift);	} else if (get_option(&str, &par)) {		prof_shift = par;		prof_on = CPU_PROFILING;		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",			prof_shift);	}	return 1;}__setup("profile=", profile_setup);void __init profile_init(void){	if (!prof_on) 		return; 	/* only text is profiled */	prof_len = (_etext - _stext) >> prof_shift;	prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t));}/* Profile event notifications */ #ifdef CONFIG_PROFILING static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);static ATOMIC_NOTIFIER_HEAD(task_free_notifier);static BLOCKING_NOTIFIER_HEAD(munmap_notifier); void profile_task_exit(struct task_struct * task){	blocking_notifier_call_chain(&task_exit_notifier, 0, task);} int profile_handoff_task(struct task_struct * task){	int ret;	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);	return (ret == NOTIFY_OK) ? 
1 : 0;}void profile_munmap(unsigned long addr){	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);}int task_handoff_register(struct notifier_block * n){	return atomic_notifier_chain_register(&task_free_notifier, n);}int task_handoff_unregister(struct notifier_block * n){	return atomic_notifier_chain_unregister(&task_free_notifier, n);}int profile_event_register(enum profile_type type, struct notifier_block * n){	int err = -EINVAL; 	switch (type) {		case PROFILE_TASK_EXIT:			err = blocking_notifier_chain_register(					&task_exit_notifier, n);			break;		case PROFILE_MUNMAP:			err = blocking_notifier_chain_register(					&munmap_notifier, n);			break;	} 	return err;} int profile_event_unregister(enum profile_type type, struct notifier_block * n){	int err = -EINVAL; 	switch (type) {		case PROFILE_TASK_EXIT:			err = blocking_notifier_chain_unregister(					&task_exit_notifier, n);			break;		case PROFILE_MUNMAP:			err = blocking_notifier_chain_unregister(					&munmap_notifier, n);			break;	}	return err;}int register_timer_hook(int (*hook)(struct pt_regs *)){	if (timer_hook)		return -EBUSY;	timer_hook = hook;	return 0;}void unregister_timer_hook(int (*hook)(struct pt_regs *)){	WARN_ON(hook != timer_hook);	timer_hook = NULL;	/* make sure all CPUs see the NULL hook */	synchronize_sched();  /* Allow ongoing interrupts to complete. */}EXPORT_SYMBOL_GPL(register_timer_hook);EXPORT_SYMBOL_GPL(unregister_timer_hook);EXPORT_SYMBOL_GPL(task_handoff_register);EXPORT_SYMBOL_GPL(task_handoff_unregister);#endif /* CONFIG_PROFILING */EXPORT_SYMBOL_GPL(profile_event_register);EXPORT_SYMBOL_GPL(profile_event_unregister);#ifdef CONFIG_SMP/* * Each cpu has a pair of open-addressed hashtables for pending * profile hits. read_profile() IPI's all cpus to request them * to flip buffers and flushes their contents to prof_buffer itself. * Flip requests are serialized by the profile_flip_mutex. 
 * The sole use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */

/* IPI handler: toggle this cpu's active/inactive hashtable selector. */
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();
	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

/*
 * Swap active/inactive hashtables on all cpus, then drain every
 * now-inactive table into prof_buffer.  Serialized by
 * profile_flip_mutex; the drain races only with zeroing-out of
 * already-flushed entries, which is harmless.
 */
static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	/* j = selector currently active; after the IPI it is inactive */
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				/* hits==0 but pc set: stale key, clear it */
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

/*
 * Like profile_flip_buffers() but throws the pending hits away
 * instead of accounting them (used when the profile is reset).
 */
static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

/*
 * Account @nr_hits profile hits at text address @__pc for profiling
 * type @type into this cpu's active pending-hit hashtable.
 * pc is clamped into [0, prof_len-1]; primary/secondary are the two
 * probe sequences of the lightweight hash described above.
 */
void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	if (prof_on != type || !prof_buffer)
		return;
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		/* NOTE(review): SOURCE is truncated here (page 1 of 2);
		 * the remainder of profile_hits() is not visible. */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -