📄 mca.c
字号:
*/
	/*
	 * Tail of the CPE interrupt-caller sweep (the head of this routine
	 * lies outside this chunk).  Adjust the poll interval based on
	 * whether new CPE records were logged during this sweep.
	 */
	if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
		/* New errors were seen: poll more aggressively (halve interval). */
		poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
	} else if (cpe_vector < 0) {
		/* No new errors and no CPE interrupt available: back off. */
		poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
	} else {
		/*
		 * No new errors and the platform does have a CPE vector:
		 * stop polling and return to interrupt-driven handling.
		 */
		poll_time = MIN_CPE_POLL_INTERVAL;
		printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
		enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
		cpe_poll_enabled = 0;
	}

	if (cpe_poll_enabled)
		mod_timer(&cpe_poll_timer, jiffies + poll_time);
	start_count = -1;	/* -1 marks "no sweep in progress" */
	}

	return IRQ_HANDLED;
}

/*
 * ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs), trigger interrupt
 *	on first cpu, from there it will trickle through all the cpus.
 *
 * Inputs  : dummy (unused)
 * Outputs : None
 */
static void
#ifndef XEN
ia64_mca_cpe_poll (unsigned long dummy)
#else
ia64_mca_cpe_poll (void *dummy)
#endif
{
	/* Trigger a CPE interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

#endif /* CONFIG_ACPI */

/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_monarch_init_handler
 *
 * Inputs: pointer to pt_regs where processor info was saved.
 *
 * Returns:
 *   0 if SAL must warm boot the System
 *   1 if SAL must return to interrupted context using PAL_MC_RESUME
 *
 * NOTE(review): the "Returns" description above is stale -- this
 * function is declared void; the resume/warm-boot decision is made
 * inside init_handler_platform().  TODO confirm against platform code.
 */
void
ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
{
	pal_min_state_area_t *ms;

#ifndef XEN
	oops_in_progress = 1;	/* avoid deadlock in printk, but it makes recovery dodgy */
	console_loglevel = 15;	/* make sure printks make it to console */

	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
		ia64_sal_to_os_handoff_state.proc_state_param);

	/*
	 * Address of minstate area provided by PAL is physical,
	 * uncacheable (bit 63 set). Convert to Linux virtual
	 * address in region 6.
	 */
	ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
#else
	int cpu = smp_processor_id();

	console_start_sync();
	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
		ia64_sal_to_os_handoff_state[cpu].proc_state_param);

	/* Xen virtual address in region 7. */
	ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state[cpu].pal_min_state));
#endif
	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
}

/*
 * Boot-parameter hook: "disable_cpe_poll" turns off the CPE poll timer.
 */
static int __init
ia64_mca_disable_cpe_polling(char *str)
{
	cpe_poll_enabled = 0;
	return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);

/*
 * Interrupt actions registered in ia64_mca_init()/ia64_mca_late_init().
 * The SA_INTERRUPT flag is Linux-only; Xen's irqaction has no flags field.
 */
static struct irqaction cmci_irqaction = {
	.handler = ia64_mca_cmc_int_handler,
#ifndef XEN
	.flags   = SA_INTERRUPT,
#endif
	.name    = "cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
	.handler = ia64_mca_cmc_int_caller,
#ifndef XEN
	.flags   = SA_INTERRUPT,
#endif
	.name    = "cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
	.handler = ia64_mca_rendez_int_handler,
#ifndef XEN
	.flags   = SA_INTERRUPT,
#endif
	.name    = "mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
	.handler = ia64_mca_wakeup_int_handler,
#ifndef XEN
	.flags   = SA_INTERRUPT,
#endif
	.name    = "mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
	.handler = ia64_mca_cpe_int_handler,
#ifndef XEN
	.flags   = SA_INTERRUPT,
#endif
	.name    = "cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
	.handler = ia64_mca_cpe_int_caller,
#ifndef XEN
	.flags   = SA_INTERRUPT,
#endif
	.name    = "cpe_poll"
};
#endif /* CONFIG_ACPI */

/* Do per-CPU MCA-related initialization. */
void __devinit
ia64_mca_cpu_init(void *cpu_data)
{
	void *pal_vaddr;

	/* CPU 0 allocates the MCA save areas for every possible CPU. */
	if (smp_processor_id() == 0) {
		void *mca_data;
		int cpu;

#ifdef XEN
		unsigned int pageorder;
		pageorder = get_order_from_bytes(sizeof(struct ia64_mca_cpu));
#else
		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) * NR_CPUS);
#endif
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef XEN
			struct page_info *page;
			page = alloc_domheap_pages(NULL, pageorder, 0);
			/*
			 * NOTE(review): allocation failure is not handled --
			 * mca_data becomes NULL and __pa(NULL) is stored.
			 * TODO confirm consumers tolerate a zero address.
			 */
			mca_data = page? page_to_virt(page): NULL;
			__per_cpu_mca[cpu] = __pa(mca_data);
			IA64_MCA_DEBUG("%s: __per_cpu_mca[%d]=%lx"
				       "(mca_data[%d]=%lx)\n", __FUNCTION__,
				       cpu, __per_cpu_mca[cpu],
				       cpu, (u64)mca_data);
#else
			__per_cpu_mca[cpu] = __pa(mca_data);
			mca_data += sizeof(struct ia64_mca_cpu);
#endif
		}
	}
#ifdef XEN
	else if (sal_queue) {
		/* Secondary CPU and MCA already initialized: drain queued logs. */
		int i;
		for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
			ia64_log_queue(i, 0);
	}
#endif

	/*
	 * The MCA info structure was allocated earlier and its
	 * physical address saved in __per_cpu_mca[cpu].  Copy that
	 * address to ia64_mca_data so we can access it as a per-CPU
	 * variable.
	 */
	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
#ifdef XEN
	IA64_MCA_DEBUG("%s: CPU#%d, ia64_mca_data=%lx\n", __FUNCTION__,
		       smp_processor_id(), __get_cpu_var(ia64_mca_data));

	/* sal_to_os_handoff for smp support */
	__get_cpu_var(ia64_sal_to_os_handoff_state_addr) =
		__pa(&ia64_sal_to_os_handoff_state[smp_processor_id()]);
	IA64_MCA_DEBUG("%s: CPU#%d, ia64_sal_to_os=%lx\n", __FUNCTION__,
		       smp_processor_id(),
		       __get_cpu_var(ia64_sal_to_os_handoff_state_addr));
#endif

	/*
	 * Stash away a copy of the PTE needed to map the per-CPU page.
	 * We may need it during MCA recovery.
	 */
	__get_cpu_var(ia64_mca_per_cpu_pte) =
		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));

	/*
	 * Also, stash away a copy of the PAL address and the PTE
	 * needed to map it.
	 */
	pal_vaddr = efi_get_pal_addr();
	if (!pal_vaddr)
		return;
	__get_cpu_var(ia64_mca_pal_base) =
		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
	__get_cpu_var(ia64_mca_pal_pte) =
		pte_val(mk_pte_phys(__pa(pal_vaddr), PAGE_KERNEL));
}

/*
 * ia64_mca_init
 *
 *	Do all the system level mca specific initialization.
 *
 *	1. Register spinloop and wakeup request interrupt vectors
 *
 *	2. Register OS_MCA handler entry point
 *
 *	3. Register OS_INIT handler entry point
 *
 *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 *	Note that this initialization is done very early before some kernel
 *	services are available.
 *
 * Inputs  : None
 *
 * Outputs : None
 */
void __init
ia64_mca_init(void)
{
	ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
	ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
	int i;
	s64 rc;
	struct ia64_sal_retval isrv;
	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */

#ifdef XEN
	/* Under Xen the slave INIT vector reuses the monarch handler. */
	slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
#endif

	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);

	/* Clear the Rendez checkin flag for all cpus */
	for(i = 0 ; i < NR_CPUS; i++)
		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

	/*
	 * Register the rendezvous spinloop and wakeup mechanism with SAL
	 */

	/* Register the rendezvous interrupt vector with SAL */
	while (1) {
		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
					      SAL_MC_PARAM_MECHANISM_INT,
					      IA64_MCA_RENDEZ_VECTOR,
					      timeout,
					      SAL_MC_PARAM_RZ_ALWAYS);
		rc = isrv.status;
		if (rc == 0)
			break;
		if (rc == -2) {
			/* SAL wants a longer timeout: retry with its suggested value. */
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
		       "with SAL (status %ld)\n", rc);
		return;
	}

	/* Register the wakeup interrupt vector with SAL */
	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
				      SAL_MC_PARAM_MECHANISM_INT,
				      IA64_MCA_WAKEUP_VECTOR,
				      0, 0);
	rc = isrv.status;
	if (rc) {
		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);

	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
	 */
	ia64_mc_info.imi_mca_handler_size = 0;

	/* Register the os mca handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				       ia64_mc_info.imi_mca_handler,
				       ia64_tpa(mca_hldlr_ptr->gp),
				       ia64_mc_info.imi_mca_handler_size,
				       0, 0, 0)))
	{
		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

	/*
	 * XXX - disable SAL checksum by setting size to 0, should be
	 * size of the actual init handler in mca_asm.S.
	 */
	ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp);
	ia64_mc_info.imi_monarch_init_handler_size = 0;
	ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp);
	ia64_mc_info.imi_slave_init_handler_size = 0;

	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
		       ia64_mc_info.imi_monarch_init_handler);

	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_slave_init_handler_size)))
	{
		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);

	/*
	 * Configure the CMCI/P vector and handler.  Interrupts for CMC are
	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
	 */
	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */

	/* Setup the MCA rendezvous interrupt vector */
	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);

	/* Setup the MCA wakeup interrupt vector */
	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P handler */
	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif

	/*
	 * Initialize the areas set aside by the OS to buffer the
	 * platform/processor error states for MCA/INIT/CMC
	 * handling.
	 */
	ia64_log_init(SAL_INFO_TYPE_MCA);
	ia64_log_init(SAL_INFO_TYPE_INIT);
	ia64_log_init(SAL_INFO_TYPE_CMC);
	ia64_log_init(SAL_INFO_TYPE_CPE);

#ifdef XEN
	INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_MCA]);
	INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_INIT]);
	INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_CMC]);
	INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_CPE]);

	/* NULL sal_queue used elsewhere to determine MCA init state */
	sal_queue = sal_log_queues;

	/* CMC vector enable/disable runs in softirq context under Xen. */
	open_softirq(CMC_DISABLE_SOFTIRQ,
		     (softirq_handler)ia64_mca_cmc_vector_disable);
	open_softirq(CMC_ENABLE_SOFTIRQ,
		     (softirq_handler)ia64_mca_cmc_vector_enable);

	for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
		ia64_log_queue(i, 0);
#endif

	mca_init = 1;
	printk(KERN_INFO "MCA related initialization done\n");
}

/*
 * ia64_mca_late_init
 *
 *	Opportunity to setup things that require initialization later
 *	than ia64_mca_init.  Setup a timer to poll for CPEs if the
 *	platform doesn't support an interrupt driven mechanism.
 *
 * Inputs  : None
 * Outputs : Status
 */
static int __init
ia64_mca_late_init(void)
{
	/* Nothing to do if early MCA initialization did not complete. */
	if (!mca_init)
		return 0;

	/* Setup the CMCI/P vector and handler */
#ifndef XEN
	init_timer(&cmc_poll_timer);
	cmc_poll_timer.function = ia64_mca_cmc_poll;
#else
	init_timer(&cmc_poll_timer, ia64_mca_cmc_poll,
		   NULL, smp_processor_id());
#endif

	/* Unmask/enable the vector */
	cmc_polling_enabled = 0;
#ifndef XEN
	/* XXX FIXME */
	schedule_work(&cmc_enable_work);
#else
	cpumask_raise_softirq(cpu_online_map, CMC_ENABLE_SOFTIRQ);
#endif

	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P vector and handler */
	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
#ifndef XEN
	init_timer(&cpe_poll_timer);
	cpe_poll_timer.function = ia64_mca_cpe_poll;
#else
	init_timer(&cpe_poll_timer, ia64_mca_cpe_poll,
		   NULL,smp_processor_id());
#endif

	{
		irq_desc_t *desc;
		unsigned int irq;

		if (cpe_vector >= 0) {
			/* If platform supports CPEI, enable the irq. */
			cpe_poll_enabled = 0;
			for (irq = 0; irq < NR_IRQS; ++irq)
				if (irq_to_vector(irq) == cpe_vector) {
					desc = irq_descp(irq);
					desc->status |= IRQ_PER_CPU;
					setup_irq(irq, &mca_cpe_irqaction);
				}
			ia64_mca_register_cpev(cpe_vector);
			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
		} else {
			/* If platform doesn't support CPEI, get the timer going. */
			if (cpe_poll_enabled) {
				/* Kick off the first poll; the IPI cascade keeps it alive. */
				ia64_mca_cpe_poll(0UL);
				IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
			}
		}
	}
#endif

	return 0;
}

#ifndef XEN
device_initcall(ia64_mca_late_init);
#else
__initcall(ia64_mca_late_init);
#endif
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -