📄 mca.c
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			return;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
}

/*
 * ia64_mca_cmc_int_caller
 *
 *	Triggered by sw interrupt from CMC polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

	ia64_mca_cmc_int_handler(cpe_irq, arg, ptregs);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
			schedule_task(&cmc_enable_tq);
			cmc_polling_enabled = 0;

		} else {

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}
}

/*
 * ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs   : dummy(unused)
 * Outputs  : None
 *
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
	/* Trigger a CMC interrupt cascade  */
	platform_send_ipi(__ffs(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_cpe_int_caller
 *
 *	Triggered by sw interrupt from CPE polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 *	None
 */
static void
ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	static int poll_time = MAX_CPE_POLL_INTERVAL;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

	ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, backoff.
		 */
		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
		} else {
			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
		}
		start_count = -1;
		mod_timer(&cpe_poll_timer, jiffies + poll_time);
	}
}

/*
 * ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs), trigger interrupt
 *	on first cpu, from there it will trickle through all the cpus.
 *
 * Inputs   : dummy(unused)
 * Outputs  : None
 *
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
	/* Trigger a CPE interrupt cascade  */
	platform_send_ipi(__ffs(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_monarch_init_handler
 *
 * Inputs: pointer to pt_regs where processor info was saved.
 *
 * Returns:
 *   0 if SAL must warm boot the System
 *   1 if SAL must return to interrupted context using PAL_MC_RESUME
 *
 */
void
ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
{
	pal_min_state_area_t *ms;

	oops_in_progress = 1;	/* avoid deadlock in printk, but it makes recovery dodgy */

	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
		ia64_sal_to_os_handoff_state.proc_state_param);

	/*
	 * Address of minstate area provided by PAL is physical,
	 * uncacheable (bit 63 set). Convert to Linux virtual
	 * address in region 6.
	 */
	ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));

	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
}

static int __init
ia64_mca_disable_cpe_polling(char *str)
{
	cpe_poll_enabled = 0;
	return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);

static struct irqaction cmci_irqaction = {
	.handler =	ia64_mca_cmc_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
	.handler =	ia64_mca_cmc_int_caller,
	.flags =	SA_INTERRUPT,
	.name =		"cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
	.handler =	ia64_mca_rendez_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
	.handler =	ia64_mca_wakeup_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
	.handler =	ia64_mca_cpe_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
	.handler =	ia64_mca_cpe_int_caller,
	.flags =	SA_INTERRUPT,
	.name =		"cpe_poll"
};
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_init
 *
 *  Do all the system level mca specific initialization.
 *
 *	1. Register spinloop and wakeup request interrupt vectors
 *
 *	2. Register OS_MCA handler entry point
 *
 *	3. Register OS_INIT handler entry point
 *
 *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 *  Note that this initialization is done very early before some kernel
 *  services are available.
 *
 *  Inputs  : None
 *
 *  Outputs : None
 */
void __init
ia64_mca_init(void)
{
	ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
	ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
	int i;
	s64 rc;
	struct ia64_sal_retval isrv;
	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */

	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);

	INIT_TQUEUE(&cmc_disable_tq, ia64_mca_cmc_vector_disable_keventd, NULL);
	INIT_TQUEUE(&cmc_enable_tq, ia64_mca_cmc_vector_enable_keventd, NULL);

	/* Clear the Rendez checkin flag for all cpus */
	for(i = 0 ; i < NR_CPUS; i++)
		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

	/*
	 * Register the rendezvous spinloop and wakeup mechanism with SAL
	 */

	/* Register the rendezvous interrupt vector with SAL */
	while (1) {
		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
					      SAL_MC_PARAM_MECHANISM_INT,
					      IA64_MCA_RENDEZ_VECTOR,
					      timeout,
					      SAL_MC_PARAM_RZ_ALWAYS);
		rc = isrv.status;
		if (rc == 0)
			break;
		if (rc == -2) {
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
		       "with SAL (status %ld)\n", rc);
		return;
	}

	/* Register the wakeup interrupt vector with SAL */
	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
				      SAL_MC_PARAM_MECHANISM_INT,
				      IA64_MCA_WAKEUP_VECTOR,
				      0, 0);
	rc = isrv.status;
	if (rc) {
		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);

	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 *	ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
	 */
	ia64_mc_info.imi_mca_handler_size = 0;

	/* Register the os mca handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				       ia64_mc_info.imi_mca_handler,
				       ia64_tpa(mca_hldlr_ptr->gp),
				       ia64_mc_info.imi_mca_handler_size,
				       0, 0, 0)))
	{
		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n",
		       __FUNCTION__, ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

	/*
	 * XXX - disable SAL checksum by setting size to 0, should be
	 * size of the actual init handler in mca_asm.S.
	 */
	ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp);
	ia64_mc_info.imi_monarch_init_handler_size = 0;
	ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp);
	ia64_mc_info.imi_slave_init_handler_size = 0;

	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
		       ia64_mc_info.imi_monarch_init_handler);

	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
				       ia64_tpa(ia64_get_gp()),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
				       ia64_tpa(ia64_get_gp()),
				       ia64_mc_info.imi_slave_init_handler_size)))
	{
		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);

	/*
	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
	 */
	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP & enable */

	/* Setup the MCA rendezvous interrupt vector */
	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);

	/* Setup the MCA wakeup interrupt vector */
	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
	/* Setup the CPE interrupt vector */
	{
		irq_desc_t *desc;
		unsigned int irq;
		int cpev = acpi_request_vector(ACPI_INTERRUPT_CPEI);

		if (cpev >= 0) {
			for (irq = 0; irq < NR_IRQS; ++irq)
				if (irq_to_vector(irq) == cpev) {
					desc = irq_desc(irq);
					desc->status |= IRQ_PER_CPU;
					desc->handler = &irq_type_iosapic_level;
					setup_irq(irq, &mca_cpe_irqaction);
				}
			ia64_mca_register_cpev(cpev);
		}
	}
#endif

	/* Initialize the areas set aside by the OS to buffer the
	 * platform/processor error states for MCA/INIT/CMC
	 * handling.
	 */
	ia64_log_init(SAL_INFO_TYPE_MCA);
	ia64_log_init(SAL_INFO_TYPE_INIT);
	ia64_log_init(SAL_INFO_TYPE_CMC);
	ia64_log_init(SAL_INFO_TYPE_CPE);

	printk(KERN_INFO "MCA related initialization done\n");
}

/*
 * ia64_mca_late_init
 *
 *	Opportunity to setup things that require initialization later
 *	than ia64_mca_init.  Setup a timer to poll for CPEs if the
 *	platform doesn't support an interrupt driven mechanism.
 *
 *  Inputs  : None
 *  Outputs : Status
 */
static int __init
ia64_mca_late_init(void)
{
	init_timer(&cmc_poll_timer);
	cmc_poll_timer.function = ia64_mca_cmc_poll;

	/* Reset to the correct state */
	cmc_polling_enabled = 0;

	init_timer(&cpe_poll_timer);
	cpe_poll_timer.function = ia64_mca_cpe_poll;

#ifdef CONFIG_ACPI
	/* If platform doesn't support CPEI, get the timer going. */
	if (acpi_request_vector(ACPI_INTERRUPT_CPEI) < 0 && cpe_poll_enabled) {
		register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
		ia64_mca_cpe_poll(0UL);
	}
#endif

	return 0;
}

module_init(ia64_mca_late_init);