📄 mca.c
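/*
 * [Editor's note] The listing below is an excerpt of the IA-64 MCA/INIT
 * handling code (mca.c) as modified for Xen.  It appears to begin in the
 * tail of the corrected-platform-error (CPE) interrupt handler, which
 * throttles CPE interrupt storms: once CPE_HISTORY_LENGTH errors arrive
 * within one second (HZ jiffies), it disables the CPE vector and falls
 * back to a polling-mode handler driven by cpe_poll_timer.
 */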
	local_irq_enable();
#ifndef XEN
	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
#else
	ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
	/* CPE errors are not yet forwarded to dom0; the call below is
	   reserved for a future implementation. */
/*	send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CPE); */
#endif

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n",
			       count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
	return IRQ_HANDLED;
}
#endif /* CONFIG_ACPI */

static void
show_min_state (pal_min_state_area_t *minstate)
{
	u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
	u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;

	printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
	printk("pr\t\t%016lx\n", minstate->pmsa_pr);
	printk("b0\t\t%016lx ", minstate->pmsa_br0);
	print_symbol("%s\n", minstate->pmsa_br0);
	printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
	printk("cr.iip\t\t%016lx ", iip);
	print_symbol("%s\n", iip);
	printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
	printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
	printk("xip\t\t%016lx ", xip);
	print_symbol("%s\n", xip);
	printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
	printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
	printk("b1\t\t%016lx ", minstate->pmsa_br1);
	print_symbol("%s\n", minstate->pmsa_br1);

	printk("\nstatic registers r0-r15:\n");
	printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
	       0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
	printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_gr[3], minstate->pmsa_gr[4],
	       minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
	printk(" r8-11 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_gr[7], minstate->pmsa_gr[8],
	       minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
	printk("r12-15 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_gr[11], minstate->pmsa_gr[12],
	       minstate->pmsa_gr[13], minstate->pmsa_gr[14]);

	printk("\nbank 0:\n");
	printk("r16-19 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
	       minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
	printk("r20-23 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
	       minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
	printk("r24-27 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
	       minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
	printk("r28-31 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
	       minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);

	printk("\nbank 1:\n");
	printk("r16-19 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
	       minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
	printk("r20-23 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
	       minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
	printk("r24-27 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
	       minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
	printk("r28-31 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
	       minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
}
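/*
 * [Editor's note] A sketch of the UNAT indexing used by PUT_NAT_BIT below,
 * assuming the usual IA-64 spill convention: when a register is spilled to
 * an 8-byte-aligned address, its NaT bit lands in the UNAT register at bit
 * position (addr >> 3) & 0x3f, i.e. bits 8:3 of the spill address.  For
 * example, a spill to address 0x...e020 uses bit (0xe020 >> 3) & 0x3f = 4.
 * fetch_min_state() therefore first rotates pmsa_nat_bits so that bit 0
 * lines up with pmsa_gr[0], then shifts one bit out per register as it
 * scatters them into sw->caller_unat / sw->ar_unat at the positions the
 * unwinder expects for each pt_regs/switch_stack field.
 */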
static void
fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
{
	u64 *dst_banked, *src_banked, bit, shift, nat_bits;
	int i;

	/*
	 * First, update the pt-regs and switch-stack structures with the contents stored
	 * in the min-state area:
	 */
	if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
		pt->cr_ipsr = ms->pmsa_xpsr;
		pt->cr_iip = ms->pmsa_xip;
		pt->cr_ifs = ms->pmsa_xfs;
	} else {
		pt->cr_ipsr = ms->pmsa_ipsr;
		pt->cr_iip = ms->pmsa_iip;
		pt->cr_ifs = ms->pmsa_ifs;
	}
	pt->ar_rsc = ms->pmsa_rsc;
	pt->pr = ms->pmsa_pr;
	pt->r1 = ms->pmsa_gr[0];
	pt->r2 = ms->pmsa_gr[1];
	pt->r3 = ms->pmsa_gr[2];
	sw->r4 = ms->pmsa_gr[3];
	sw->r5 = ms->pmsa_gr[4];
	sw->r6 = ms->pmsa_gr[5];
	sw->r7 = ms->pmsa_gr[6];
	pt->r8 = ms->pmsa_gr[7];
	pt->r9 = ms->pmsa_gr[8];
	pt->r10 = ms->pmsa_gr[9];
	pt->r11 = ms->pmsa_gr[10];
	pt->r12 = ms->pmsa_gr[11];
	pt->r13 = ms->pmsa_gr[12];
	pt->r14 = ms->pmsa_gr[13];
	pt->r15 = ms->pmsa_gr[14];
	dst_banked = &pt->r16;		/* r16-r31 are contiguous in struct pt_regs */
	src_banked = ms->pmsa_bank1_gr;
	for (i = 0; i < 16; ++i)
		dst_banked[i] = src_banked[i];
	pt->b0 = ms->pmsa_br0;
	sw->b1 = ms->pmsa_br1;

	/* construct the NaT bits for the pt-regs structure: */
#	define PUT_NAT_BIT(dst, addr)					\
	do {								\
		bit = nat_bits & 1; nat_bits >>= 1;			\
		shift = ((unsigned long) addr >> 3) & 0x3f;		\
		dst = ((dst) & ~(1UL << shift)) | (bit << shift);	\
	} while (0)

	/* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
	shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
	nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));

	PUT_NAT_BIT(sw->caller_unat, &pt->r1);
	PUT_NAT_BIT(sw->caller_unat, &pt->r2);
	PUT_NAT_BIT(sw->caller_unat, &pt->r3);
	PUT_NAT_BIT(sw->ar_unat, &sw->r4);
	PUT_NAT_BIT(sw->ar_unat, &sw->r5);
	PUT_NAT_BIT(sw->ar_unat, &sw->r6);
	PUT_NAT_BIT(sw->ar_unat, &sw->r7);
	PUT_NAT_BIT(sw->caller_unat, &pt->r8);
	PUT_NAT_BIT(sw->caller_unat, &pt->r9);
	PUT_NAT_BIT(sw->caller_unat, &pt->r10);
	PUT_NAT_BIT(sw->caller_unat, &pt->r11);
	PUT_NAT_BIT(sw->caller_unat, &pt->r12);
	PUT_NAT_BIT(sw->caller_unat, &pt->r13);
	PUT_NAT_BIT(sw->caller_unat, &pt->r14);
	PUT_NAT_BIT(sw->caller_unat, &pt->r15);
	nat_bits >>= 16;	/* skip over bank0 NaT bits */
	PUT_NAT_BIT(sw->caller_unat, &pt->r16);
	PUT_NAT_BIT(sw->caller_unat, &pt->r17);
	PUT_NAT_BIT(sw->caller_unat, &pt->r18);
	PUT_NAT_BIT(sw->caller_unat, &pt->r19);
	PUT_NAT_BIT(sw->caller_unat, &pt->r20);
	PUT_NAT_BIT(sw->caller_unat, &pt->r21);
	PUT_NAT_BIT(sw->caller_unat, &pt->r22);
	PUT_NAT_BIT(sw->caller_unat, &pt->r23);
	PUT_NAT_BIT(sw->caller_unat, &pt->r24);
	PUT_NAT_BIT(sw->caller_unat, &pt->r25);
	PUT_NAT_BIT(sw->caller_unat, &pt->r26);
	PUT_NAT_BIT(sw->caller_unat, &pt->r27);
	PUT_NAT_BIT(sw->caller_unat, &pt->r28);
	PUT_NAT_BIT(sw->caller_unat, &pt->r29);
	PUT_NAT_BIT(sw->caller_unat, &pt->r30);
	PUT_NAT_BIT(sw->caller_unat, &pt->r31);
}
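/*
 * [Editor's note] The Xen-only helpers below coordinate an INIT-triggered
 * dump, as far as this excerpt shows: each non-monarch CPU records its
 * kernel stack pointer (save_ksp, made visible to a dump tool via wmb()
 * plus a PAL cache flush), increments num_stopped_cpus, and then spins
 * with interrupts disabled; the CPU that wins init_dump_lock in
 * init_handler_platform() waits for the others before walking the stacks.
 */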
#ifdef XEN
static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
static atomic_t num_stopped_cpus = ATOMIC_INIT(0);
extern void show_stack (struct task_struct *, unsigned long *);

#define CPU_FLUSH_RETRY_MAX 5
static void
init_cache_flush (void)
{
	unsigned long flags;
	int i;
	s64 rval = 0;
	u64 vector, progress = 0;

	for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
		local_irq_save(flags);
		rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
		                            0, &progress, &vector);
		local_irq_restore(flags);
		if (rval == 0) {
			printk("\nPAL cache flush success\n");
			return;
		}
	}
	printk("\nPAL cache flush failed. status=%ld\n", rval);
}

static void inline
save_ksp (struct unw_frame_info *info)
{
	current->arch._thread.ksp = (__u64)(info->sw) - 16;
	wmb();
	init_cache_flush();
}

static void
freeze_cpu_osinit (struct unw_frame_info *info, void *arg)
{
	save_ksp(info);
	atomic_inc(&num_stopped_cpus);
	printk("%s: CPU%d init handler done\n",
	       __FUNCTION__, smp_processor_id());
	for (;;)
		local_irq_disable();
}

/* FIXME */
static void
try_crashdump(struct unw_frame_info *info, void *arg)
{
	save_ksp(info);
	printk("\nINIT dump complete.  Please reboot now.\n");
	for (;;)
		local_irq_disable();
}
#endif /* XEN */

static void
init_handler_platform (pal_min_state_area_t *ms,
		       struct pt_regs *pt, struct switch_stack *sw)
{
	struct unw_frame_info info;

	/* if a kernel debugger is available call it here else just dump the registers */

	/*
	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
	 * generated via the BMC's command-line interface, but since the console is on the
	 * same serial line, the user will need some time to switch out of the BMC before
	 * the dump begins.
	 */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
#ifdef XEN
	fetch_min_state(ms, pt, sw);
	spin_lock(&show_stack_lock);
#endif
	show_min_state(ms);

#ifdef XEN
	printk("Backtrace of current vcpu (vcpu_id %d of domid %d)\n",
	       current->vcpu_id, current->domain->domain_id);
#else
	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
	fetch_min_state(ms, pt, sw);
#endif
	unw_init_from_interruption(&info, current, pt, sw);
	ia64_do_show_stack(&info, NULL);
#ifdef XEN
	spin_unlock(&show_stack_lock);

	if (spin_trylock(&init_dump_lock)) {
		struct domain *d;
		struct vcpu *v;
#ifdef CONFIG_SMP
		int other_cpus = num_online_cpus() - 1;
		int wait = 1000 * other_cpus;

		while ((atomic_read(&num_stopped_cpus) != other_cpus) && wait--)
			udelay(1000);
		if (other_cpus && wait < 0)
			printk("timeout %d\n", atomic_read(&num_stopped_cpus));
#endif
		if (opt_noreboot) {
			/* this route is for dump routine */
			unw_init_running(try_crashdump, pt);
		} else {
			rcu_read_lock(&domlist_read_lock);
			for_each_domain(d) {
				for_each_vcpu(d, v) {
					printk("Backtrace of current vcpu "
					       "(vcpu_id %d of domid %d)\n",
					       v->vcpu_id, d->domain_id);
					show_stack(v, NULL);
				}
			}
			rcu_read_unlock(&domlist_read_lock);
		}
	}
	unw_init_running(freeze_cpu_osinit, NULL);
#else /* XEN */
#ifdef CONFIG_SMP
	/* read_trylock() would be handy... */
	if (!tasklist_lock.write_lock)
		read_lock(&tasklist_lock);
#endif
	{
		struct task_struct *g, *t;
		do_each_thread (g, t) {
			if (t == current)
				continue;

			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL);
		} while_each_thread (g, t);
	}
#ifdef CONFIG_SMP
	if (!tasklist_lock.write_lock)
		read_unlock(&tasklist_lock);
#endif

	printk("\nINIT dump complete.  Please reboot now.\n");
#endif /* XEN */
	while (1);			/* hang city if no debugger */
}
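/*
 * [Editor's note] A sketch of how the pieces appear to connect, based only
 * on this file: ia64_mca_register_cpev() below asks SAL (via
 * ia64_sal_mc_set_params with SAL_MC_PARAM_CPE_INT and
 * SAL_MC_PARAM_MECHANISM_INT) to deliver corrected platform errors as the
 * given interrupt vector; the handler for that vector, whose tail opens
 * this excerpt, switches to the cpe_poll_timer path if the vector floods.
 */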
#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *  Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *      cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *      None
 */
static void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */

#endif /* PLATFORM_MCA_HANDLERS */

/*
 * ia64_mca_cmc_vector_setup
 *
 *  Setup the corrected machine check vector register in the processor.
 *  (The interrupt is masked on boot; ia64_mca_late_init() unmasks it.)
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      None
 *
 * Outputs
 *	None
 */
void
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;	/* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x registered.\n",
		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *  Mask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x disabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *  Unmask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x enabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

#ifndef XEN
/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}
#endif /* !XEN */

/*
 * ia64_mca_wakeup_ipi_wait
 *
 *	Wait for the inter-cpu interrupt to be sent by the
 *	monarch processor once it is done with handling the
 *	MCA.
 *