mca.c
 *  Inputs  : None
 *  Outputs : None
 */
static void
ia64_mca_wakeup_ipi_wait(void)
{
	int	irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
	int	irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
	u64	irr = 0;

	do {
		switch (irr_num) {
		case 0:
			irr = ia64_getreg(_IA64_REG_CR_IRR0);
			break;
		case 1:
			irr = ia64_getreg(_IA64_REG_CR_IRR1);
			break;
		case 2:
			irr = ia64_getreg(_IA64_REG_CR_IRR2);
			break;
		case 3:
			irr = ia64_getreg(_IA64_REG_CR_IRR3);
			break;
		}
		cpu_relax();
	} while (!(irr & (1UL << irr_bit)));
}

/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu
 *	and mark that cpu to be out of rendez.
 *
 *  Inputs  : cpuid
 *  Outputs : None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wake up all the cpus which have rendez'ed previously.
 *
 *  Inputs  : None
 *  Outputs : None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu))
			continue;
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}
}

/*
 * ia64_mca_rendez_interrupt_handler
 *
 *	This is the handler used to put slave processors into a spinloop
 *	while the monarch processor does the mca handling, and later
 *	wake each slave up once the monarch is done.
 *
 *  Inputs  : None
 *  Outputs : None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	/* Mask all interrupts */
	local_irq_save(flags);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	/* Wait for the wakeup IPI from the monarch.
	 * This waiting is done by polling on the wakeup-interrupt
	 * vector bit in the processor's IRRs
	 */
	ia64_mca_wakeup_ipi_wait();

	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 *  Inputs  : wakeup_irq	(Wakeup-interrupt bit)
 *	arg			(Interrupt handler specific argument)
 *	ptregs			(Exception frame at the time of the interrupt)
 *  Outputs : None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
{
	return IRQ_HANDLED;
}
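/*
 * The wait loop in ia64_mca_wakeup_ipi_wait() above decodes the wakeup
 * vector into a register index (vector >> 6 selects one of the four
 * 64-bit IRRs) and a bit index within it (vector & 0x3f).  A minimal
 * stand-alone sketch of that decode, kept out of the build; the vector
 * value is an arbitrary example, not the real IA64_MCA_WAKEUP_VECTOR:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int vector = 0xf3;	/* example value only */
	int irr_num = vector >> 6;	/* which of cr.irr0..cr.irr3: 3 */
	int irr_bit = vector & 0x3f;	/* bit within that register: 51 */

	printf("vector %#x -> IRR%d bit %d\n", vector, irr_num, irr_bit);
	return 0;
}
#endif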
/*
 * ia64_return_to_sal_check
 *
 *	This is the function called before going back from the OS_MCA handler
 *	to the OS_MCA dispatch code, which finally takes the control back
 *	to the SAL.
 *	The main purpose of this routine is to set up the OS_MCA to SAL
 *	return state which can be used by the OS_MCA dispatch code
 *	just before going back to SAL.
 *
 *  Inputs  : None
 *  Outputs : None
 */
static void
ia64_return_to_sal_check(int recover)
{
#ifdef XEN
	int cpu = smp_processor_id();
#endif

	/* Copy over some relevant stuff from the sal_to_os_mca_handoff
	 * so that it can be used at the time of os_mca_to_sal_handoff
	 */
#ifdef XEN
	ia64_os_to_sal_handoff_state.imots_sal_gp =
		ia64_sal_to_os_handoff_state[cpu].imsto_sal_gp;

	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
		ia64_sal_to_os_handoff_state[cpu].imsto_sal_check_ra;
#else
	ia64_os_to_sal_handoff_state.imots_sal_gp =
		ia64_sal_to_os_handoff_state.imsto_sal_gp;

	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
		ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
#endif

	if (recover)
		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
	else
		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;

	/* Default = tell SAL to return to same context */
	ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;

#ifdef XEN
	ia64_os_to_sal_handoff_state.imots_new_min_state =
		(u64 *)ia64_sal_to_os_handoff_state[cpu].pal_min_state;
#else
	ia64_os_to_sal_handoff_state.imots_new_min_state =
		(u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
#endif
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void *, ia64_mca_sal_to_os_state_t *, ia64_mca_os_to_sal_state_t *)
	= NULL;

int
ia64_reg_MCA_extension(void *fn)
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);

/*
 * ia64_mca_ucmc_handler
 *
 *	This is the uncorrectable machine check handler called from the OS_MCA
 *	dispatch code, which is in turn called from SAL_CHECK().
 *	This is the place where the core of OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.  This handler code is supposed to be run only on the
 *	monarch processor.  Once the monarch is done with MCA handling,
 *	further MCA logging is enabled by clearing logs.
 *	The monarch also has the duty of sending wakeup-IPIs to pull the
 *	slave processors out of the rendezvous spinloop.
 *
 *  Inputs  : None
 *  Outputs : None
 */
void
ia64_mca_ucmc_handler(void)
{
#ifdef XEN
	int cpu = smp_processor_id();
	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
		&ia64_sal_to_os_handoff_state[cpu].proc_state_param;
#else
	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
		&ia64_sal_to_os_handoff_state.proc_state_param;
#endif
	int recover;

#ifndef XEN
	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
#else
	ia64_log_queue(SAL_INFO_TYPE_MCA, VIRQ_MCA_CMC);
	send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
#endif

	/* A TLB error is the only error present in this SAL error record */
	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
		/* other error recovery */
#ifndef XEN
		|| (ia64_mca_ucmc_extension
			&& ia64_mca_ucmc_extension(
				IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
				&ia64_sal_to_os_handoff_state,
				&ia64_os_to_sal_handoff_state));
#else
	;
#endif

#ifndef XEN
	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
	}
#endif
	/*
	 *  Wake up all the processors which are spinning in the rendezvous
	 *  loop.
	 */
	ia64_mca_wakeup_all();

	/* Return to SAL */
	ia64_return_to_sal_check(recover);
}
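/*
 * A minimal stand-alone sketch of the "TLB error only" recovery test in
 * ia64_mca_ucmc_handler() above, kept out of the build.  The struct is a
 * hypothetical stand-in for the pal_processor_state_info_t bits used
 * there, not the real PAL layout:
 */
#if 0
#include <stdio.h>

struct psp_bits {		/* stand-in; field meanings only */
	unsigned tc : 1;	/* TLB check */
	unsigned cc : 1;	/* cache check */
	unsigned bc : 1;	/* bus check */
	unsigned rc : 1;	/* register file check */
	unsigned uc : 1;	/* micro-architectural check */
};

int main(void)
{
	struct psp_bits psp = { .tc = 1 };	/* TLB error and nothing else */
	int recover = (psp.tc && !(psp.cc || psp.bc || psp.rc || psp.uc));

	printf("recover = %d\n", recover);	/* prints 1 */
	return 0;
}
#endif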
#ifndef XEN
static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
#endif

/*
 * ia64_mca_cmc_int_handler
 *
 *	This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

#ifndef XEN
	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
#else
	ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
	send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
#endif

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n",
			       count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
#ifndef XEN	/* XXX FIXME */
			schedule_work(&cmc_disable_work);
#else
			cpumask_raise_softirq(cpu_online_map,
					      CMC_DISABLE_SOFTIRQ);
#endif

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
	return IRQ_HANDLED;
}
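/*
 * A minimal stand-alone sketch of the storm-detection policy above, kept
 * out of the build: remember the last N timestamps in a ring, and switch
 * to polling once N corrected errors land within one "second".  The
 * constants, helper name, and timestamp values are stand-ins:
 */
#if 0
#include <stdio.h>

#define HISTORY_LEN	8	/* stand-in for CMC_HISTORY_LENGTH */

static unsigned long history[HISTORY_LEN];
static int idx;

/* Returns 1 when the rate threshold is hit and polling should start. */
static int cmc_storm(unsigned long now, unsigned long hz)
{
	int i, count = 1;	/* the interrupt being handled right now */

	for (i = 0; i < HISTORY_LEN; i++)
		if (now - history[i] <= hz)
			count++;

	if (count >= HISTORY_LEN)
		return 1;

	history[idx++] = now;
	if (idx == HISTORY_LEN)
		idx = 0;
	return 0;
}

int main(void)
{
	unsigned long t;

	/* Eight back-to-back CMCs: the eighth trips the threshold. */
	for (t = 1000; t < 1000 + HISTORY_LEN; t++)
		if (cmc_storm(t, 100))	/* pretend HZ == 100 */
			printf("storm at t=%lu: switch to polling\n", t);
	return 0;
}
#endif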
/*
 * ia64_mca_cmc_int_caller
 *
 *	Triggered by a sw interrupt from the CMC polling routine.  Calls
 *	the real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 *	handled
 */
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

#ifndef XEN
	ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
#else
	IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
		       __FUNCTION__, cmc_irq, smp_processor_id());
	ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
#endif

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
#ifndef XEN	/* XXX FIXME */
			schedule_work(&cmc_enable_work);
#else
			cpumask_raise_softirq(cpu_online_map,
					      CMC_ENABLE_SOFTIRQ);
#endif
			cmc_polling_enabled = 0;
		} else {
			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 * ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 *  Inputs  : dummy (unused)
 *  Outputs : None
 *
 */
static void
#ifndef XEN
ia64_mca_cmc_poll (unsigned long dummy)
#else
ia64_mca_cmc_poll (void *dummy)
#endif
{
	/* Trigger a CMC interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR,
			  IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_cpe_int_caller
 *
 *	Triggered by a sw interrupt from the CPE polling routine.  Calls
 *	the real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 *	handled
 */
#ifdef CONFIG_ACPI
static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
#ifdef XEN
	static unsigned long poll_time = MIN_CPE_POLL_INTERVAL;
#else
	static int poll_time = MIN_CPE_POLL_INTERVAL;
#endif
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

#ifndef XEN
	ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
#else
	IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
		       __FUNCTION__, cpe_irq, smp_processor_id());
	ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
#endif

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, back off or return to interrupt mode.
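/*
 * The comment above describes an adaptive interval: poll faster while CPE
 * records keep arriving, back off toward a maximum when they stop.  A
 * minimal stand-alone sketch of such a policy, kept out of the build; the
 * bounds and helper name are hypothetical, not this file's actual code:
 */
#if 0
#include <stdio.h>

#define MIN_POLL	1UL	/* stand-ins for MIN/MAX_CPE_POLL_INTERVAL */
#define MAX_POLL	64UL

static unsigned long poll_time = MIN_POLL;

/* Halve the interval while records arrive; double it while quiet. */
static void adjust_poll_interval(int got_new_record)
{
	if (got_new_record)
		poll_time = (poll_time / 2 > MIN_POLL) ? poll_time / 2 : MIN_POLL;
	else
		poll_time = (poll_time * 2 < MAX_POLL) ? poll_time * 2 : MAX_POLL;
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++) {	/* quiet system: interval backs off */
		adjust_poll_interval(0);
		printf("poll_time = %lu\n", poll_time);
	}
	return 0;
}
#endif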