
📄 op_model_p4.c

📁 Xen virtual machine source code package
💻 C
📖 Page 1 of 2
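
The listing below is the Pentium 4 / Xeon performance-counter model used by Xen's xenoprof profiling subsystem. It covers the CCCR/CTR access macros, the per-thread "stagger" that splits the hardware between hyper-thread siblings, the MSR address table setup, counter programming, and the NMI overflow handler.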
#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))

#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0)
#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0)
#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))

/* this assigns a "stagger" to the current CPU, which is used throughout
   the code in this module as an extra array offset, to select the "even"
   or "odd" part of all the divided resources. */
static unsigned int get_stagger(void)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
	return (cpu != first_cpu(cpu_sibling_map[cpu]));
#endif
	return 0;
}

/* finally, mediate access to a real hardware counter
   by passing a "virtual" counter number to this macro,
   along with your stagger setting. */
#define VIRT_CTR(stagger, i) ((i) + ((num_counters) * (stagger)))

static unsigned long reset_value[NUM_COUNTERS_NON_HT];

static void p4_fill_in_addresses(struct op_msrs * const msrs)
{
	unsigned int i;
	unsigned int addr, stag;

	setup_num_counters();
	stag = get_stagger();

	/* the counter registers we pay attention to */
	for (i = 0; i < num_counters; ++i) {
		msrs->counters[i].addr =
			p4_counters[VIRT_CTR(stag, i)].counter_address;
	}

	/* FIXME: bad feeling, we don't save the 10 counters we don't use. */

	/* 18 CCCR registers */
	for (i = 0, addr = MSR_P4_BPU_CCCR0 + stag;
	     addr <= MSR_P4_IQ_CCCR5; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	/* 43 ESCR registers in three or four discontiguous groups */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	/* no IQ_ESCR0/1 on some models, so save BSU_ESCR0/1 a second time
	 * to avoid a special case in nmi_{save|restore}_registers() */
	if (boot_cpu_data.x86_model >= 0x3) {
		for (addr = MSR_P4_BSU_ESCR0 + stag;
		     addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) {
			msrs->controls[i].addr = addr;
		}
	} else {
		for (addr = MSR_P4_IQ_ESCR0 + stag;
		     addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) {
			msrs->controls[i].addr = addr;
		}
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	/* there are 2 remaining non-contiguously located ESCRs */
	if (num_counters == NUM_COUNTERS_NON_HT) {
		/* standard non-HT CPUs handle both remaining ESCRs */
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
	} else if (stag == 0) {
		/* HT CPUs give the first remainder to the even thread, as
		   the 32nd control register */
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
	} else {
		/* and two copies of the second to the odd thread,
		   for the 22nd and 23rd control registers */
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
	}
}
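
/*
 * Worked example of the stagger mapping above (an illustration; it assumes
 * the stock value NUM_COUNTERS_HT2 == 4 defined earlier in this file, which
 * is not shown on this page):
 *
 *   even sibling, stag == 0:  VIRT_CTR(0, 0..3) -> physical counters 0..3
 *   odd sibling,  stag == 1:  VIRT_CTR(1, 0..3) -> physical counters 4..7
 *
 * so each hyper-thread addresses a private, zero-based window onto the
 * shared hardware counters.
 */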
static void pmc_setup_one_p4_counter(unsigned int ctr)
{
	int i;
	int const maxbind = 2;
	unsigned int cccr = 0;
	unsigned int escr = 0;
	unsigned int high = 0;
	unsigned int counter_bit;
	struct p4_event_binding *ev = NULL;
	unsigned int stag;

	stag = get_stagger();

	/* convert from counter *number* to counter *bit* */
	counter_bit = 1 << VIRT_CTR(stag, ctr);

	/* find our event binding structure. */
	if (counter_config[ctr].event <= 0 ||
	    counter_config[ctr].event > NUM_EVENTS) {
		printk(KERN_ERR
		       "oprofile: P4 event code 0x%lx out of range\n",
		       counter_config[ctr].event);
		return;
	}

	ev = &(p4_events[counter_config[ctr].event - 1]);

	for (i = 0; i < maxbind; i++) {
		if (ev->bindings[i].virt_counter & counter_bit) {

			/* modify ESCR */
			ESCR_READ(escr, high, ev, i);
			ESCR_CLEAR(escr);
			if (stag == 0) {
				ESCR_SET_USR_0(escr, counter_config[ctr].user);
				ESCR_SET_OS_0(escr, counter_config[ctr].kernel);
			} else {
				ESCR_SET_USR_1(escr, counter_config[ctr].user);
				ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
			}
			ESCR_SET_EVENT_SELECT(escr, ev->event_select);
			ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
			ESCR_WRITE(escr, high, ev, i);

			/* modify CCCR */
			CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
			CCCR_CLEAR(cccr);
			CCCR_SET_REQUIRED_BITS(cccr);
			CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
			if (stag == 0) {
				CCCR_SET_PMI_OVF_0(cccr);
			} else {
				CCCR_SET_PMI_OVF_1(cccr);
			}
			CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
			return;
		}
	}

	printk(KERN_ERR
	       "oprofile: P4 event code 0x%lx has no binding, stag %d ctr %d\n",
	       counter_config[ctr].event, stag, ctr);
}

static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int addr;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	/* clear the cccrs we will use */
	for (i = 0; i < num_counters; i++) {
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	/* clear cccrs outside our concern */
	for (i = stag; i < NUM_UNUSED_CCCRS; i += addr_increment()) {
		rdmsr(p4_unused_cccr[i], low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_unused_cccr[i], low, high);
	}

	/* clear all escrs (including those outside our concern) */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	/* on older models also clear MSR_P4_IQ_ESCR0/1 */
	if (boot_cpu_data.x86_model < 0x3) {
		wrmsr(MSR_P4_IQ_ESCR0, 0, 0);
		wrmsr(MSR_P4_IQ_ESCR1, 0, 0);
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	if (num_counters == NUM_COUNTERS_NON_HT) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	} else if (stag == 0) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
	} else {
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	}

	/* setup all counters */
	for (i = 0; i < num_counters; ++i) {
		if (counter_config[i].enabled) {
			reset_value[i] = counter_config[i].count;
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
		} else {
			reset_value[i] = 0;
		}
	}
}
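
/*
 * Note on the preload convention above: CTR_WRITE stores -(u32)count with
 * a high word of -1, i.e. the two's complement of the requested count, so
 * the counter counts up and overflows after exactly `count` events. The
 * overflow raises the PMI that p4_check_ctrs() below services, and the
 * handler re-arms the counter by writing reset_value[i] back the same way.
 */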
extern void xenoprof_log_event(struct vcpu *v, struct cpu_user_regs *regs,
			       unsigned long eip, int mode, int event);
extern int xenoprofile_get_mode(struct vcpu *v,
				struct cpu_user_regs * const regs);

static int p4_check_ctrs(unsigned int const cpu,
			 struct op_msrs const * const msrs,
			 struct cpu_user_regs * const regs)
{
	unsigned long ctr, low, high, stag, real;
	int i;
	int ovf = 0;
	unsigned long eip = regs->eip;
	int mode = xenoprofile_get_mode(current, regs);

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {

		if (!reset_value[i])
			continue;

		/*
		 * there is some eccentricity in the hardware which
		 * requires that we perform 2 extra corrections:
		 *
		 * - check both the CCCR:OVF flag for overflow and the
		 *   counter high bit for un-flagged overflows.
		 *
		 * - write the counter back twice to ensure it gets
		 *   updated properly.
		 *
		 * the former seems to be related to extra NMIs happening
		 * during the current NMI; the latter is reported as errata
		 * N15 in intel doc 249199-029, pentium 4 specification
		 * update, though their suggested work-around does not
		 * appear to solve the problem.
		 */

		real = VIRT_CTR(stag, i);

		CCCR_READ(low, high, real);
		CTR_READ(ctr, high, real);
		if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
			xenoprof_log_event(current, regs, eip, mode, i);
			CTR_WRITE(reset_value[i], real);
			CCCR_CLEAR_OVF(low);
			CCCR_WRITE(low, high, real);
			CTR_WRITE(reset_value[i], real);
			ovf = 1;
		}
	}

	/* P4 quirk: you have to re-unmask the apic vector */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	return ovf;
}

static void p4_start(struct op_msrs const * const msrs)
{
	unsigned int low, high, stag;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		CCCR_READ(low, high, VIRT_CTR(stag, i));
		CCCR_SET_ENABLE(low);
		CCCR_WRITE(low, high, VIRT_CTR(stag, i));
	}
}

static void p4_stop(struct op_msrs const * const msrs)
{
	unsigned int low, high, stag;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		CCCR_READ(low, high, VIRT_CTR(stag, i));
		CCCR_SET_DISABLE(low);
		CCCR_WRITE(low, high, VIRT_CTR(stag, i));
	}
}

#ifdef CONFIG_SMP
struct op_x86_model_spec const op_p4_ht2_spec = {
	.num_counters = NUM_COUNTERS_HT2,
	.num_controls = NUM_CONTROLS_HT2,
	.fill_in_addresses = &p4_fill_in_addresses,
	.setup_ctrs = &p4_setup_ctrs,
	.check_ctrs = &p4_check_ctrs,
	.start = &p4_start,
	.stop = &p4_stop
};
#endif

struct op_x86_model_spec const op_p4_spec = {
	.num_counters = NUM_COUNTERS_NON_HT,
	.num_controls = NUM_CONTROLS_NON_HT,
	.fill_in_addresses = &p4_fill_in_addresses,
	.setup_ctrs = &p4_setup_ctrs,
	.check_ctrs = &p4_check_ctrs,
	.start = &p4_start,
	.stop = &p4_stop
};
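
For readers who want to check the two conventions above without a P4 at hand, here is a minimal user-space sketch reproducing the VIRT_CTR stagger arithmetic and the CTR_WRITE preload math. The 40-bit counter width and NUM_COUNTERS_HT2 == 4 are assumptions taken from the stock P4 driver, not from this page:

#include <stdio.h>
#include <stdint.h>

#define NUM_COUNTERS_HT2 4                      /* assumed stock value */
#define VIRT_CTR(stagger, i) ((i) + (NUM_COUNTERS_HT2 * (stagger)))

int main(void)
{
	unsigned int stag, i;

	/* stagger arithmetic: each hyper-thread gets a disjoint window */
	for (stag = 0; stag <= 1; ++stag)
		for (i = 0; i < NUM_COUNTERS_HT2; ++i)
			printf("stag=%u virt=%u -> phys=%u\n",
			       stag, i, VIRT_CTR(stag, i));

	/* CTR_WRITE preload: the (assumed 40-bit) counter is loaded with
	 * the two's complement of count, mirroring wrmsr(..., -(u32)l, -1),
	 * so it overflows after exactly `count` events */
	{
		uint64_t count = 100000;
		uint64_t preload = (uint64_t)(-(uint32_t)count)
				   | ((uint64_t)0xFF << 32);
		printf("preload=0x%010llx, events to overflow=%llu\n",
		       (unsigned long long)preload,
		       (unsigned long long)((1ULL << 40) - preload));
	}
	return 0;
}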
