📄 mca.c
/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_os_init_dispatch
 *
 * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state
 * for this event.  This code is used for both monarch and slave INIT events,
 * see sos->monarch.
 *
 * All INIT events switch to the INIT stack and change the previous process to
 * blocked status.  If one of the INIT events is the monarch then we are
 * probably processing the nmi button/command.  Use the monarch cpu to dump
 * all the processes.  The slave INIT events all spin until the monarch cpu
 * returns.  We can also get INIT slave events for MCA, in which case the MCA
 * process is the monarch.
 */

void
ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
		  struct ia64_sal_os_state *sos)
{
	static atomic_t slaves;
	static atomic_t monarchs;
	struct task_struct *previous_current;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu };

	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);

	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
		sos->proc_state_param, cpu, sos->monarch);
	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
	sos->os_status = IA64_INIT_RESUME;

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * slaves.  The last slave that enters is promoted to be a monarch.
	 * Remove this code in September 2006, that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
			__FUNCTION__, cpu);
		atomic_dec(&slaves);
		sos->monarch = 1;
	}

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * monarchs.  Second and subsequent monarchs are demoted to slaves.
	 * Remove this code in September 2006, that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
			__FUNCTION__, cpu);
		atomic_dec(&monarchs);
		sos->monarch = 0;
	}

	if (!sos->monarch) {
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
		while (monarch_cpu == -1)
			cpu_relax();	/* spin until monarch enters */
		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		while (monarch_cpu != -1)
			cpu_relax();	/* spin until monarch leaves */
		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
		set_curr_task(cpu, previous_current);
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
		atomic_dec(&slaves);
		return;
	}

	monarch_cpu = cpu;
	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	/*
	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000),
	 * INIT can be generated via the BMC's command-line interface, but
	 * since the console is on the same serial line, the user will need
	 * some time to switch out of the BMC before the dump begins.
	 */
	mprintk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
	ia64_wait_for_slaves(cpu, "INIT");
	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
	 * to default_monarch_init_process() above and just print all the
	 * tasks.
	 */
	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
	atomic_dec(&monarchs);
	set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
	return;
}
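/*
 * A minimal sketch (all names below are hypothetical, not part of this file)
 * of how code elsewhere can hook the monarch's INIT processing.  The die
 * notifier chain delivers a struct die_args whose ->err member carries the
 * (long)&nd pointer set up in ia64_init_handler() above.
 */
#if 0	/* illustrative example only, not built */
static int example_init_hook(struct notifier_block *nb, unsigned long val,
			     void *data)
{
	struct die_args *args = data;
	struct ia64_mca_notify_die *nd;

	if (val != DIE_INIT_MONARCH_PROCESS)
		return NOTIFY_DONE;
	nd = (struct ia64_mca_notify_die *)args->err;
	printk(KERN_INFO "INIT monarch is cpu %d\n", *nd->monarch_cpu);
	/*
	 * Returning NOTIFY_STOP here would end the chain before the
	 * priority-0 default_monarch_init_process() runs, and would park
	 * this cpu in ia64_mca_spin(); that is how a crash-dump hook takes
	 * over.  NOTIFY_DONE falls through to the default task dump.
	 */
	return NOTIFY_DONE;
}

static struct notifier_block example_init_nb = {
	.notifier_call	= example_init_hook,
	.priority	= 1,	/* run ahead of the default monarch handler */
};
/* registered with register_die_notifier(&example_init_nb) */
#endif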
static int __init
ia64_mca_disable_cpe_polling(char *str)
{
	cpe_poll_enabled = 0;
	return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);

static struct irqaction cmci_irqaction = {
	.handler =	ia64_mca_cmc_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
	.handler =	ia64_mca_cmc_int_caller,
	.flags =	IRQF_DISABLED,
	.name =		"cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
	.handler =	ia64_mca_rendez_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
	.handler =	ia64_mca_wakeup_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
	.handler =	ia64_mca_cpe_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
	.handler =	ia64_mca_cpe_int_caller,
	.flags =	IRQF_DISABLED,
	.name =		"cpe_poll"
};
#endif /* CONFIG_ACPI */

/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
 * these stacks can never sleep, they cannot return from the kernel to user
 * space, they do not appear in a normal ps listing.  So there is no need to
 * format most of the fields.
 */

static void __cpuinit
format_mca_init_stack(void *mca_data, unsigned long offset,
		const char *type, int cpu)
{
	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
	struct thread_info *ti;

	memset(p, 0, KERNEL_STACK_SIZE);
	ti = task_thread_info(p);
	ti->flags = _TIF_MCA_INIT;
	ti->preempt_count = 1;
	ti->task = p;
	ti->cpu = cpu;
	p->stack = ti;
	p->state = TASK_UNINTERRUPTIBLE;
	cpu_set(cpu, p->cpus_allowed);
	INIT_LIST_HEAD(&p->tasks);
	p->parent = p->real_parent = p->group_leader = p;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	strncpy(p->comm, type, sizeof(p->comm)-1);
}

/* Caller prevents this from being called after init */
static void * __init_refok mca_bootmem(void)
{
	void *p;

	p = alloc_bootmem(sizeof(struct ia64_mca_cpu) * NR_CPUS +
			  KERNEL_STACK_SIZE);
	return (void *)ALIGN((unsigned long)p, KERNEL_STACK_SIZE);
}
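/*
 * Why mca_bootmem() over-allocates by one stack: ALIGN() can advance the
 * pointer returned by alloc_bootmem() by up to KERNEL_STACK_SIZE - 1 bytes,
 * so that much slack keeps all NR_CPUS structures inside the allocation.
 * With a hypothetical KERNEL_STACK_SIZE of 0x8000:
 *
 *	p from alloc_bootmem()        = ...0x12340
 *	ALIGN(p, 0x8000)              = ...0x18000  (cpu 0's ia64_mca_cpu)
 *	+ sizeof(struct ia64_mca_cpu)               (cpu 1's ia64_mca_cpu)
 *
 * The stack-size alignment lets each per-cpu area be laid out like a normal
 * kernel stack region, with the pseudo task_struct and thread_info that
 * format_mca_init_stack() writes sitting at the bottom of the area.
 */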
/* Do per-CPU MCA-related initialization.  */
void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
{
	void *pal_vaddr;
	static int first_time = 1;

	if (first_time) {
		void *mca_data;
		int cpu;

		first_time = 0;
		mca_data = mca_bootmem();
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			format_mca_init_stack(mca_data,
					offsetof(struct ia64_mca_cpu, mca_stack),
					"MCA", cpu);
			format_mca_init_stack(mca_data,
					offsetof(struct ia64_mca_cpu, init_stack),
					"INIT", cpu);
			__per_cpu_mca[cpu] = __pa(mca_data);
			mca_data += sizeof(struct ia64_mca_cpu);
		}
	}

	/*
	 * The MCA info structure was allocated earlier and its
	 * physical address saved in __per_cpu_mca[cpu].  Copy that
	 * address to ia64_mca_data so we can access it as a per-CPU
	 * variable.
	 */
	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];

	/*
	 * Stash away a copy of the PTE needed to map the per-CPU page.
	 * We may need it during MCA recovery.
	 */
	__get_cpu_var(ia64_mca_per_cpu_pte) =
		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));

	/*
	 * Also, stash away a copy of the PAL address and the PTE
	 * needed to map it.
	 */
	pal_vaddr = efi_get_pal_addr();
	if (!pal_vaddr)
		return;
	__get_cpu_var(ia64_mca_pal_base) =
		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
							      PAGE_KERNEL));
}

static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!cmc_polling_enabled)
		ia64_mca_cmc_vector_enable(NULL);
	local_irq_restore(flags);
}

static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
				      unsigned long action,
				      void *hcpu)
{
	int hotcpu = (unsigned long) hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
					 NULL, 1, 0);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mca_cpu_notifier __cpuinitdata = {
	.notifier_call = mca_cpu_callback
};
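/*
 * mca_cpu_notifier is registered from ia64_mca_late_init() below; when a cpu
 * comes online, mca_cpu_callback() runs ia64_mca_cmc_vector_adjust() on that
 * cpu so its CMC vector is enabled, unless CMC handling has already fallen
 * back to polling (cmc_polling_enabled).
 */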
/*
 * ia64_mca_init
 *
 * Do all the system-level MCA-specific initialization:
 *
 *	1. Register spinloop and wakeup request interrupt vectors
 *	2. Register OS_MCA handler entry point
 *	3. Register OS_INIT handler entry point
 *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS
 *
 * Note that this initialization is done very early, before some kernel
 * services are available.
 *
 * Inputs  :   None
 * Outputs :   None
 */
void __init
ia64_mca_init(void)
{
	ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
	int i;
	s64 rc;
	struct ia64_sal_retval isrv;
	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
	static struct notifier_block default_init_monarch_nb = {
		.notifier_call = default_monarch_init_process,
		.priority = 0	/* we need to be notified last */
	};

	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);

	/* Clear the Rendez checkin flag for all cpus */
	for (i = 0; i < NR_CPUS; i++)
		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

	/*
	 * Register the rendezvous spinloop and wakeup mechanism with SAL
	 */

	/* Register the rendezvous interrupt vector with SAL */
	while (1) {
		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
					      SAL_MC_PARAM_MECHANISM_INT,
					      IA64_MCA_RENDEZ_VECTOR,
					      timeout,
					      SAL_MC_PARAM_RZ_ALWAYS);
		rc = isrv.status;
		if (rc == 0)
			break;
		if (rc == -2) {
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
		       "with SAL (status %ld)\n", rc);
		return;
	}

	/* Register the wakeup interrupt vector with SAL */
	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
				      SAL_MC_PARAM_MECHANISM_INT,
				      IA64_MCA_WAKEUP_VECTOR,
				      0, 0);
	rc = isrv.status;
	if (rc) {
		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n",
		       __FUNCTION__);

	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
	 */
	ia64_mc_info.imi_mca_handler_size = 0;

	/* Register the os mca handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				       ia64_mc_info.imi_mca_handler,
				       ia64_tpa(mca_hldlr_ptr->gp),
				       ia64_mc_info.imi_mca_handler_size,
				       0, 0, 0))) {
		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n",
		       __FUNCTION__, ia64_mc_info.imi_mca_handler,
		       ia64_tpa(mca_hldlr_ptr->gp));

	/*
	 * XXX - disable SAL checksum by setting size to 0, should be
	 * size of the actual init handler in mca_asm.S.
	 */
	ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp);
	ia64_mc_info.imi_monarch_init_handler_size = 0;
	ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
	ia64_mc_info.imi_slave_init_handler_size = 0;

	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
		       ia64_mc_info.imi_monarch_init_handler);

	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_slave_init_handler_size))) {
		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	if (register_die_notifier(&default_init_monarch_nb)) {
		printk(KERN_ERR "Failed to register default monarch INIT process\n");
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);

	/*
	 * Configure the CMCI/P vector and handler.  Interrupts for CMC are
	 * per-processor, so AP CMC interrupts are setup in smp_callin()
	 * (smpboot.c).
	 */
	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */

	/* Setup the MCA rendezvous interrupt vector */
	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);

	/* Setup the MCA wakeup interrupt vector */
	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P handler */
	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif

	/* Initialize the areas set aside by the OS to buffer the
	 * platform/processor error states for MCA/INIT/CMC
	 * handling.
	 */
	ia64_log_init(SAL_INFO_TYPE_MCA);
	ia64_log_init(SAL_INFO_TYPE_INIT);
	ia64_log_init(SAL_INFO_TYPE_CMC);
	ia64_log_init(SAL_INFO_TYPE_CPE);

	mca_init = 1;
	printk(KERN_INFO "MCA related initialization done\n");
}
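/*
 * Worked example of the rendezvous registration loop above, with a
 * hypothetical firmware response: if SAL rejects the requested timeout with
 * status -2 and suggests 20000 in isrv.v0, the loop logs "Increasing MCA
 * rendezvous timeout from ... to 20000 milliseconds", adopts the new value,
 * raises DIE_MCA_NEW_TIMEOUT so listeners can adjust, and retries until SAL
 * returns 0.  Any other status is a hard failure and MCA setup is abandoned.
 */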
/*
 * ia64_mca_late_init
 *
 *	Opportunity to setup things that require initialization later
 *	than ia64_mca_init.  Setup a timer to poll for CPEs if the
 *	platform doesn't support an interrupt driven mechanism.
 *
 * Inputs  :   None
 * Outputs :   Status
 */
static int __init
ia64_mca_late_init(void)
{
	if (!mca_init)
		return 0;

	register_hotcpu_notifier(&mca_cpu_notifier);

	/* Setup the CMCI/P vector and handler */
	init_timer(&cmc_poll_timer);
	cmc_poll_timer.function = ia64_mca_cmc_poll;

	/* Unmask/enable the vector */
	cmc_polling_enabled = 0;
	schedule_work(&cmc_enable_work);

	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P vector and handler */
	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
	init_timer(&cpe_poll_timer);
	cpe_poll_timer.function = ia64_mca_cpe_poll;

	{
		irq_desc_t *desc;
		unsigned int irq;

		if (cpe_vector >= 0) {
			/* If platform supports CPEI, enable the irq. */
			irq = local_vector_to_irq(cpe_vector);
			if (irq > 0) {
				cpe_poll_enabled = 0;
				desc = irq_desc + irq;
				desc->status |= IRQ_PER_CPU;
				setup_irq(irq, &mca_cpe_irqaction);
				ia64_cpe_irq = irq;
				ia64_mca_register_cpev(cpe_vector);
				IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
					__FUNCTION__);
				return 0;
			}
			printk(KERN_ERR "%s: Failed to find irq for CPE "
					"interrupt handler, vector %d\n",
					__FUNCTION__, cpe_vector);
		}
		/* If platform doesn't support CPEI, get the timer going. */
		if (cpe_poll_enabled) {
			ia64_mca_cpe_poll(0UL);
			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n",
				__FUNCTION__);
		}
	}
#endif

	return 0;
}

device_initcall(ia64_mca_late_init);
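/*
 * Note: booting with "disable_cpe_poll" on the kernel command line (the
 * __setup() hook earlier in this file) clears cpe_poll_enabled, so on
 * platforms without a usable CPEI vector ia64_mca_late_init() will not
 * start the CPE polling timer either.
 */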