
📄 mca.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 4
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
out:
	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

	return IRQ_HANDLED;
}
#endif /* CONFIG_ACPI */
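/*
 * Sketch of the storm-detection logic above: cpe_history[] is a ring
 * buffer of jiffies timestamps of recent corrected platform errors,
 * and each interrupt counts how many of them fell within the last
 * second:
 *
 *	count = 1;				// this interrupt
 *	for (i = 0; i < CPE_HISTORY_LENGTH; i++)
 *		if (now - cpe_history[i] <= HZ)
 *			count++;
 *
 * Only when every history slot is that recent does the handler treat
 * the rate as a storm: it masks the CPE vector and arms cpe_poll_timer,
 * trading per-error interrupts for periodic polling so a flood of
 * corrected errors cannot monopolize the CPU.
 */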
#ifdef CONFIG_ACPI

/*
 * ia64_mca_register_cpev
 *
 *  Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *      cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *      None
 */
void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_cmc_vector_setup
 *
 *  Set up the corrected machine check vector register in the processor.
 *  (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      None
 *
 * Outputs
 *	None
 */
void __cpuinit
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x registered.\n",
		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *  Mask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy (unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x disabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *  Unmask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy (unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x enabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}
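/*
 * The setup/disable/enable routines above all follow the same
 * read-modify-write pattern on the cr.cmcv control register: fetch the
 * current value with ia64_getreg(_IA64_REG_CR_CMCV), flip the cmcv_mask
 * bit, and write it back with ia64_setreg().  As a minimal sketch, a
 * query helper built on the same accessors (hypothetical, not part of
 * this file) would look like:
 *
 *	static int ia64_mca_cmc_vector_is_masked(void)
 *	{
 *		cmcv_reg_t cmcv;
 *
 *		cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
 *		return cmcv.cmcv_mask;	// 1 = masked, 0 = delivering
 *	}
 *
 * Because cr.cmcv is a per-processor register, each of these routines
 * must run on every CPU, which is what the keventd wrappers below
 * arrange via on_each_cpu().
 */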
/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}

/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu.
 *
 *  Inputs  :   cpuid
 *  Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wake up all the slave cpus which have rendez'ed previously.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Wake up each cpu that has checked in at the rendezvous */
	for_each_online_cpu(cpu) {
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}
}

/*
 * ia64_mca_rendez_interrupt_handler
 *
 *	This is the handler used to put slave processors into a spinloop
 *	while the monarch processor does the MCA handling, and later to
 *	wake each slave up once the monarch is done.  The state
 *	IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed
 *	in SAL.  The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
 *	the cpu has come out of OS rendezvous.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
{
	unsigned long flags;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = NULL, .monarch_cpu = &monarch_cpu };

	/* Mask all interrupts */
	local_irq_save(flags);
	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
		       (long)&nd, 0, 0) == NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
		       (long)&nd, 0, 0) == NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	/* Wait for the monarch cpu to exit. */
	while (monarch_cpu != -1)
		cpu_relax();	/* spin until monarch leaves */

	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
		       (long)&nd, 0, 0) == NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}
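/*
 * Rendezvous protocol, summarized from the handler above: when an MCA
 * is delivered, one CPU becomes the monarch and every other CPU (a
 * slave) takes this interrupt.  Each slave:
 *
 *	1. disables interrupts and marks itself CHECKIN_DONE,
 *	2. calls ia64_sal_mc_rendez() to park in SAL,
 *	3. spins on monarch_cpu until the monarch resets it to -1,
 *	4. marks itself CHECKIN_NOTDONE and re-enables interrupts.
 *
 * The monarch consults the imi_rendez_checkin[] flags (see
 * ia64_mca_wakeup_all() above) to decide which CPUs need the wakeup
 * IPI.
 */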
/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 *	arg		(Interrupt handler specific argument)
 *  Outputs :   None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg)
{
	return IRQ_HANDLED;
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,struct ia64_sal_os_state*)
	= NULL;

int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);

static inline void
copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
{
	u64 fslot, tslot, nat;
	*tr = *fr;
	fslot = ((unsigned long)fr >> 3) & 63;
	tslot = ((unsigned long)tr >> 3) & 63;
	*tnat &= ~(1UL << tslot);
	nat = (fnat >> fslot) & 1;
	*tnat |= (nat << tslot);
}
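/*
 * Why copy_reg() does the shift-and-mask dance: the ar.unat /
 * NaT-collection format keeps one NaT bit per 8-byte memory slot,
 * indexed by bits 8:3 of the slot's address, hence
 * "((unsigned long)addr >> 3) & 63".  Moving a register between save
 * areas therefore means relocating its NaT bit from the source slot to
 * the destination slot:
 *
 *	nat   = (fnat >> fslot) & 1;		// extract source NaT bit
 *	*tnat = (*tnat & ~(1UL << tslot))	// clear destination slot
 *	      | (nat << tslot);			// deposit it there
 *
 * which is exactly the sequence in the body above, folded into
 * compound assignments.
 */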
/* Change the comm field on the MCA/INIT task to include the pid that
 * was interrupted; it makes for easier debugging.  If that pid was 0
 * (swapper or nested MCA/INIT) then use the start of the previous comm
 * field suffixed with its cpu.
 */
static void
ia64_mca_modify_comm(const struct task_struct *previous_current)
{
	char *p, comm[sizeof(current->comm)];
	if (previous_current->pid)
		snprintf(comm, sizeof(comm), "%s %d",
			current->comm, previous_current->pid);
	else {
		int l;
		if ((p = strchr(previous_current->comm, ' ')))
			l = p - previous_current->comm;
		else
			l = strlen(previous_current->comm);
		snprintf(comm, sizeof(comm), "%s %*s %d",
			current->comm, l, previous_current->comm,
			task_thread_info(previous_current)->cpu);
	}
	memcpy(current->comm, comm, sizeof(current->comm));
}

/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state; it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */
static struct task_struct *
ia64_mca_modify_original_stack(struct pt_regs *regs,
		const struct switch_stack *sw,
		struct ia64_sal_os_state *sos,
		const char *type)
{
	char *p;
	ia64_va va;
	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
	const pal_min_state_area_t *ms = sos->pal_min_state;
	struct task_struct *previous_current;
	struct pt_regs *old_regs;
	struct switch_stack *old_sw;
	unsigned size = sizeof(struct pt_regs) +
			sizeof(struct switch_stack) + 16;
	u64 *old_bspstore, *old_bsp;
	u64 *new_bspstore, *new_bsp;
	u64 old_unat, old_rnat, new_rnat, nat;
	u64 slots, loadrs = regs->loadrs;
	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
	u64 ar_bspstore = regs->ar_bspstore;
	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
	const u64 *bank;
	const char *msg;
	int cpu = smp_processor_id();

	previous_current = curr_task(cpu);
	set_curr_task(cpu, current);
	if ((p = strchr(current->comm, ' ')))
		*p = '\0';

	/* Best effort attempt to cope with MCA/INIT delivered while in
	 * physical mode.
	 */
	regs->cr_ipsr = ms->pmsa_ipsr;
	if (ia64_psr(regs)->dt == 0) {
		va.l = r12;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r12 = va.l;
		}
		va.l = r13;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r13 = va.l;
		}
	}
	if (ia64_psr(regs)->rt == 0) {
		va.l = ar_bspstore;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bspstore = va.l;
		}
		va.l = ar_bsp;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bsp = va.l;
		}
	}

	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
	 * have been copied to the old stack, the old stack may fail the
	 * validation tests below.  So ia64_old_stack() must restore the dirty
	 * registers from the new stack.  The old and new bspstore probably
	 * have different alignments, so loadrs calculated on the old bsp
	 * cannot be used to restore from the new bsp.  Calculate a suitable
	 * loadrs for the new stack and save it in the new pt_regs, where
	 * ia64_old_stack() can get it.
	 */
	old_bspstore = (u64 *)ar_bspstore;
	old_bsp = (u64 *)ar_bsp;
	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
	new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;

	/* Verify the previous stack state before we change it */
	if (user_mode(regs)) {
		msg = "occurred in user space";
		/* previous_current is guaranteed to be valid when the task was
		 * in user space, so ...
		 */
		ia64_mca_modify_comm(previous_current);
		goto no_mod;
	}

	if (r13 != sos->prev_IA64_KR_CURRENT) {
		msg = "inconsistent previous current and r13";
		goto no_mod;
	}

	if (!mca_recover_range(ms->pmsa_iip)) {
		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent r12 and r13";
			goto no_mod;
		}
		if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bspstore and r13";
			goto no_mod;
		}
		va.p = old_bspstore;
		if (va.f.reg < 5) {
			msg = "old_bspstore is in the wrong region";
			goto no_mod;
		}
		if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bsp and r13";
			goto no_mod;
		}
		size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
		if (ar_bspstore + size > r12) {
			msg = "no room for blocked state";
			goto no_mod;
		}
	}

	ia64_mca_modify_comm(previous_current);

	/* Make the original task look blocked.  First stack a struct pt_regs,
	 * describing the state at the time of interrupt.  mca_asm.S built a
	 * partial pt_regs, copy it and fill in the blanks using minstate.
	 */
	p = (char *)r12 - sizeof(*regs);
	old_regs = (struct pt_regs *)p;
	memcpy(old_regs, regs, sizeof(*regs));
	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
	 * pmsa_{xip,xpsr,xfs}
	 */
	if (ia64_psr(regs)->ic) {
		old_regs->cr_iip = ms->pmsa_iip;
		old_regs->cr_ipsr = ms->pmsa_ipsr;
		old_regs->cr_ifs = ms->pmsa_ifs;
	} else {
		old_regs->cr_iip = ms->pmsa_xip;
		old_regs->cr_ipsr = ms->pmsa_xpsr;
		old_regs->cr_ifs = ms->pmsa_xfs;
	}
	old_regs->pr = ms->pmsa_pr;
	old_regs->b0 = ms->pmsa_br0;
	old_regs->loadrs = loadrs;
	old_regs->ar_rsc = ms->pmsa_rsc;
	old_unat = old_regs->ar_unat;
	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
	if (ia64_psr(old_regs)->bn)
		bank = ms->pmsa_bank1_gr;
	else
		bank = ms->pmsa_bank0_gr;
	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
