
📄 mca.c

📁 Kernel
💻 C
📖 Page 1 of 3
	ia64_do_show_stack(&info, NULL);

#ifdef CONFIG_SMP
	/* read_trylock() would be handy... */
	if (!tasklist_lock.write_lock)
		read_lock(&tasklist_lock);
#endif
	{
		struct task_struct *g, *t;
		do_each_thread (g, t) {
			if (t == current)
				continue;

			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL);
		} while_each_thread (g, t);
	}
#ifdef CONFIG_SMP
	if (!tasklist_lock.write_lock)
		read_unlock(&tasklist_lock);
#endif

	printk("\nINIT dump complete.  Please reboot now.\n");
	while (1);			/* hang city if no debugger */
}

#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *  Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *      cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *      None
 */
static void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */

#endif /* PLATFORM_MCA_HANDLERS */

/*
 * ia64_mca_cmc_vector_setup
 *
 *  Set up the corrected machine check vector register in the processor.
 *  (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      None
 *
 * Outputs
 *	None
 */
void
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x registered.\n",
		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *  Mask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x disabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *  Unmask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x enabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}
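/*
 * Editorial note on the three CMCV helpers above: setup writes a fresh
 * value (vector number plus mask bit), while disable/enable are
 * read-modify-write on cr.cmcv -- they read the current register with
 * ia64_getreg(), flip only the cmcv_mask bit, and write the result back
 * with ia64_setreg(), leaving the programmed vector number untouched.
 */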
/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}

/*
 * ia64_mca_wakeup_ipi_wait
 *
 *	Wait for the inter-cpu interrupt to be sent by the
 *	monarch processor once it is done with handling the
 *	MCA.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_ipi_wait(void)
{
	int	irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
	int	irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
	u64	irr = 0;

	do {
		switch (irr_num) {
		      case 0:
			irr = ia64_getreg(_IA64_REG_CR_IRR0);
			break;
		      case 1:
			irr = ia64_getreg(_IA64_REG_CR_IRR1);
			break;
		      case 2:
			irr = ia64_getreg(_IA64_REG_CR_IRR2);
			break;
		      case 3:
			irr = ia64_getreg(_IA64_REG_CR_IRR3);
			break;
		}
		cpu_relax();
	} while (!(irr & (1UL << irr_bit)));
}

/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu
 *	and mark that cpu as being out of rendez.
 *
 *  Inputs  :   cpuid
 *  Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wake up all the cpus which have rendez'ed previously.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu))
			continue;
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}
}

/*
 * ia64_mca_rendez_interrupt_handler
 *
 *	This is the handler used to put slave processors into a spin loop
 *	while the monarch processor does the MCA handling, and later to
 *	wake each slave up once the monarch is done.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	/* Mask all interrupts */
	local_irq_save(flags);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	/* Wait for the wakeup IPI from the monarch.
	 * This waiting is done by polling on the wakeup-interrupt
	 * vector bit in the processor's IRRs
	 */
	ia64_mca_wakeup_ipi_wait();

	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 *	arg		(Interrupt handler specific argument)
 *	ptregs		(Exception frame at the time of the interrupt)
 *  Outputs :   None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
{
	return IRQ_HANDLED;
}
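/*
 * Summary of the rendezvous protocol implemented by the handlers above:
 * each slave marks itself IA64_MCA_RENDEZ_CHECKIN_DONE, parks in SAL via
 * ia64_sal_mc_rendez(), and then busy-polls its IRR for
 * IA64_MCA_WAKEUP_VECTOR with interrupts masked.  The monarch later calls
 * ia64_mca_wakeup_all(), which sends the wakeup IPI to every checked-in
 * cpu and resets its flag to IA64_MCA_RENDEZ_CHECKIN_NOTDONE; the wakeup
 * interrupt handler itself therefore has nothing to do.
 */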
/*
 * ia64_return_to_sal_check
 *
 *	This is the function called before going back from the OS_MCA handler
 *	to the OS_MCA dispatch code, which finally takes control back
 *	to SAL.
 *	The main purpose of this routine is to set up the OS_MCA to SAL
 *	return state, which can be used by the OS_MCA dispatch code
 *	just before going back to SAL.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_return_to_sal_check(int recover)
{
	/* Copy over some relevant stuff from the sal_to_os_mca_handoff
	 * so that it can be used at the time of os_mca_to_sal_handoff
	 */
	ia64_os_to_sal_handoff_state.imots_sal_gp =
		ia64_sal_to_os_handoff_state.imsto_sal_gp;

	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
		ia64_sal_to_os_handoff_state.imsto_sal_check_ra;

	if (recover)
		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
	else
		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;

	/* Default = tell SAL to return to same context */
	ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;

	ia64_os_to_sal_handoff_state.imots_new_min_state =
		(u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
	= NULL;

int
ia64_reg_MCA_extension(void *fn)
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);

/*
 * ia64_mca_ucmc_handler
 *
 *	This is the uncorrectable machine check handler, called from the
 *	OS_MCA dispatch code which is in turn called from SAL_CHECK().
 *	This is the place where the core of OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format. This handler code is supposed to be run only on the
 *	monarch processor. Once the monarch is done with MCA handling,
 *	further MCA logging is enabled by clearing logs.
 *	The monarch also has the duty of sending wakeup-IPIs to pull the
 *	slave processors out of the rendezvous spin loop.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
void
ia64_mca_ucmc_handler(void)
{
	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
		&ia64_sal_to_os_handoff_state.proc_state_param;
	int recover;

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* Recover if a TLB error is the only error in this SAL error record */
	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
	/* other error recovery */
	   || (ia64_mca_ucmc_extension
		&& ia64_mca_ucmc_extension(
			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
			&ia64_sal_to_os_handoff_state,
			&ia64_os_to_sal_handoff_state));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
	}
	/*
	 *  Wake up all the processors which are spinning in the rendezvous
	 *  loop.
	 */
	ia64_mca_wakeup_all();

	/* Return to SAL */
	ia64_return_to_sal_check(recover);
}

static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
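/*
 * Usage sketch for the extension hooks above (hypothetical caller, not
 * part of this file): a recovery module supplies a function matching the
 * ia64_mca_ucmc_extension signature and registers it, e.g.
 *
 *	int my_mca_recover(void *record,
 *			   ia64_mca_sal_to_os_state_t *sal_to_os,
 *			   ia64_mca_os_to_sal_state_t *os_to_sal);
 *
 *	if (ia64_reg_MCA_extension(my_mca_recover))
 *		printk(KERN_ERR "MCA extension already registered\n");
 *
 * The extension returns nonzero when it has recovered the error, which
 * ia64_mca_ucmc_handler() above folds into its own "recover" result.
 * On unload the hook is dropped with ia64_unreg_MCA_extension().
 */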
/*
 * ia64_mca_cmc_int_handler
 *
 *  This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 *      saved registers ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
	return IRQ_HANDLED;
}

/*
 *  ia64_mca_cmc_int_caller
 *
 * 	Triggered by sw interrupt from CMC polling routine.  Calls
