
📄 perfctr-watchdog.c

📁 Linux kernel source
💻 C
📖 Page 1 of 2
/* local apic based NMI watchdog for various CPUs.

   This file also handles reservation of performance counters for coordination
   with other users (like oprofile).

   Note that these events normally don't tick when the CPU idles. This means
   the frequency varies with CPU load.

   Original code for K7/P6 written by Keith Owens */

#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/apic.h>
#include <asm/intel_arch_perfmon.h>

struct nmi_watchdog_ctlblk {
        unsigned int cccr_msr;
        unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
        unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};

/* Interface defining a CPU specific perfctr watchdog */
struct wd_ops {
        int (*reserve)(void);
        void (*unreserve)(void);
        int (*setup)(unsigned nmi_hz);
        void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
        void (*stop)(void);
        unsigned perfctr;
        unsigned evntsel;
        u64 checkbit;
};

static const struct wd_ops *wd_ops;

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
 * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66

/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/ event selection may be reserved for
 *   different subsystems this reservation system just tries to coordinate
 *   things a little
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
        /* returns the bit offset of the performance counter register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                return (msr - MSR_K7_PERFCTR0);
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return (msr - MSR_ARCH_PERFMON_PERFCTR0);

                switch (boot_cpu_data.x86) {
                case 6:
                        return (msr - MSR_P6_PERFCTR0);
                case 15:
                        return (msr - MSR_P4_BPU_PERFCTR0);
                }
        }
        return 0;
}

/* converts an msr to an appropriate reservation bit */
/* returns the bit offset of the event selection register */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
        /* returns the bit offset of the event selection register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                return (msr - MSR_K7_EVNTSEL0);
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return (msr - MSR_ARCH_PERFMON_EVENTSEL0);

                switch (boot_cpu_data.x86) {
                case 6:
                        return (msr - MSR_P6_EVNTSEL0);
                case 15:
                        return (msr - MSR_P4_BSU_ESCR0);
                }
        }
        return 0;
}

/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        return (!test_bit(counter, perfctr_nmi_owner));
}

/* checks the an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        return (!test_bit(counter, perfctr_nmi_owner));
}

int reserve_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return 1;

        if (!test_and_set_bit(counter, perfctr_nmi_owner))
                return 1;
        return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return;

        clear_bit(counter, perfctr_nmi_owner);
}

int reserve_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return 1;

        if (!test_and_set_bit(counter, evntsel_nmi_owner))
                return 1;
        return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return;

        clear_bit(counter, evntsel_nmi_owner);
}

EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);

void disable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        if (atomic_read(&nmi_active) <= 0)
                return;

        on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
        wd_ops->unreserve();

        BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        /* are we already enabled */
        if (atomic_read(&nmi_active) != 0)
                return;

        /* are we lapic aware */
        if (!wd_ops)
                return;
        if (!wd_ops->reserve()) {
                printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
                return;
        }

        on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
        touch_nmi_watchdog();
}

/*
 * Activate the NMI watchdog via the local APIC.
 */

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
        u64 counter_val;
        unsigned int retval = hz;

        /*
         * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
         * are writable, with higher bits sign extending from bit 31.
         * So, we can only program the counter with 31 bit values and
         * 32nd bit should be 1, for 33.. to be 1.
         * Find the appropriate nmi_hz
         */
        counter_val = (u64)cpu_khz * 1000;
        do_div(counter_val, retval);
        if (counter_val > 0x7fffffffULL) {
                u64 count = (u64)cpu_khz * 1000;
                do_div(count, 0x7fffffffUL);
                retval = count + 1;
        }
        return retval;
}

static void
write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi_hz)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if(descr)
                Dprintk("setting %s to -0x%08Lx\n", descr, count);
        wrmsrl(perfctr_msr, 0 - count);
}

static void write_watchdog_counter32(unsigned int perfctr_msr,
                const char *descr, unsigned nmi_hz)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if(descr)
                Dprintk("setting %s to -0x%08Lx\n", descr, count);
        wrmsr(perfctr_msr, (u32)(-count), 0);
}

/* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface
   nicely stable so there is not much variety */

#define K7_EVNTSEL_ENABLE       (1 << 22)
#define K7_EVNTSEL_INT          (1 << 20)
#define K7_EVNTSEL_OS           (1 << 17)
#define K7_EVNTSEL_USR          (1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING    0x76
#define K7_NMI_EVENT            K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

static int setup_k7_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        wrmsrl(perfctr_msr, 0UL);

        evntsel = K7_EVNTSEL_INT
                | K7_EVNTSEL_OS
                | K7_EVNTSEL_USR
                | K7_NMI_EVENT;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= K7_EVNTSEL_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;  //unused
        return 1;
}

static void single_msr_stop_watchdog(void)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        wrmsr(wd->evntsel_msr, 0, 0);
}

static int single_msr_reserve(void)
{
        if (!reserve_perfctr_nmi(wd_ops->perfctr))
                return 0;

        if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
                release_perfctr_nmi(wd_ops->perfctr);
                return 0;
        }
        return 1;
}

static void single_msr_unreserve(void)
{
        release_evntsel_nmi(wd_ops->evntsel);
        release_perfctr_nmi(wd_ops->perfctr);
}

static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}

static const struct wd_ops k7_wd_ops = {
        .reserve = single_msr_reserve,
        .unreserve = single_msr_unreserve,
        .setup = setup_k7_watchdog,
        .rearm = single_msr_rearm,
        .stop = single_msr_stop_watchdog,
        .perfctr = MSR_K7_PERFCTR0,
        .evntsel = MSR_K7_EVNTSEL0,
        .checkbit = 1ULL<<47,
};

/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */

#define P6_EVNTSEL0_ENABLE      (1 << 22)
#define P6_EVNTSEL_INT          (1 << 20)
#define P6_EVNTSEL_OS           (1 << 17)
#define P6_EVNTSEL_USR          (1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED  0x79
#define P6_NMI_EVENT            P6_EVENT_CPU_CLOCKS_NOT_HALTED

static int setup_p6_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        /* KVM doesn't implement this MSR */
        if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
                return 0;

        evntsel = P6_EVNTSEL_INT
                | P6_EVNTSEL_OS
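The period programming in the listing comes down to simple arithmetic: write_watchdog_counter() loads the counter with -(cpu_khz * 1000 / nmi_hz), so it overflows (and raises the NMI) roughly nmi_hz times per second of unhalted cycles, and adjust_for_32bit_ctr() raises nmi_hz when that reload value would not fit in the 31 writable bits of a P6/ARCH_PERFMON counter. The standalone sketch below is not kernel code; the cpu_khz value is a made-up example and plain 64-bit division stands in for do_div(). It only illustrates the calculation.

/*
 * Userspace illustration of the watchdog period arithmetic used by
 * write_watchdog_counter() and adjust_for_32bit_ctr() above.
 * The cpu_khz value is an assumed example, not a measured one.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cpu_khz = 2400000;     /* assumed 2.4 GHz CPU, for illustration */
        unsigned int nmi_hz = 1;        /* desired watchdog NMIs per second */

        /* Unhalted clock cycles per watchdog period. */
        uint64_t count = cpu_khz * 1000ULL / nmi_hz;

        /*
         * P6/ARCH_PERFMON counters sign-extend from bit 31, so only a
         * 31-bit reload value can be programmed.  If the period is too
         * long for that, raise nmi_hz until it fits, mirroring
         * adjust_for_32bit_ctr().
         */
        if (count > 0x7fffffffULL) {
                nmi_hz = (unsigned int)(cpu_khz * 1000ULL / 0x7fffffffULL) + 1;
                count = cpu_khz * 1000ULL / nmi_hz;
        }

        /*
         * The kernel writes the negated count so the counter overflows,
         * and the overflow raises the NMI, after 'count' unhalted cycles,
         * i.e. about nmi_hz times per second while the CPU is busy.
         */
        printf("nmi_hz = %u, counter reload = -0x%llx\n",
               nmi_hz, (unsigned long long)count);
        return 0;
}

With the assumed 2.4 GHz clock, one NMI per second would need a reload of about 2.4e9 cycles, which exceeds the 31-bit limit, so the sketch bumps nmi_hz to 2, exactly the adjustment adjust_for_32bit_ctr() performs.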
