/* mca.c */
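/*
 * This excerpt of the IA64 machine check (MCA) code covers: the corrected
 * machine check (CMC) vector mask/unmask helpers, the MCA rendezvous and
 * wakeup interrupt handlers, the pluggable MCA recovery extension, and
 * ia64_mca_modify_original_stack(), which rewrites the interrupted task's
 * stack so it can be backtraced as a blocked task.
 */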
        cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

        IA64_MCA_DEBUG("%s: CPU %d corrected "
                       "machine check vector %#x disabled.\n",
                       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *      Unmask the corrected machine check vector register in the processor.
 *      This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy (unused)
 *
 * Outputs
 *      None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
        cmcv_reg_t      cmcv;

        cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
        cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

        IA64_MCA_DEBUG("%s: CPU %d corrected "
                       "machine check vector %#x enabled.\n",
                       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(void *unused)
{
        on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(void *unused)
{
        on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}

/*
 * ia64_mca_wakeup
 *
 *      Send an inter-cpu interrupt to wake up a particular cpu
 *      and mark that cpu as out of rendez.
 *
 *  Inputs  :   cpuid
 *  Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
        platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
}

/*
 * ia64_mca_wakeup_all
 *
 *      Wake up all the cpus which have rendez'ed previously.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
        int cpu;

        /* Clear the Rendez checkin flag for all cpus */
        for_each_online_cpu(cpu) {
                if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
                        ia64_mca_wakeup(cpu);
        }
}

/*
 * ia64_mca_rendez_int_handler
 *
 *      This is the handler used to put slave processors into a spinloop
 *      while the monarch processor does the mca handling, and later
 *      wake each slave up once the monarch is done.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
{
        unsigned long flags;
        int cpu = smp_processor_id();

        /* Mask all interrupts */
        local_irq_save(flags);
        if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0)
                        == NOTIFY_STOP)
                ia64_mca_spin(__FUNCTION__);
        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
        /* Register with the SAL monarch that the slave has
         * reached SAL
         */
        ia64_sal_mc_rendez();

        if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0)
                        == NOTIFY_STOP)
                ia64_mca_spin(__FUNCTION__);

        /* Wait for the monarch cpu to exit. */
        while (monarch_cpu != -1)
                cpu_relax();    /* spin until monarch leaves */

        if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0)
                        == NOTIFY_STOP)
                ia64_mca_spin(__FUNCTION__);

        /* Enable all interrupts */
        local_irq_restore(flags);
        return IRQ_HANDLED;
}
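/* Shape of the rendezvous above, as far as this fragment shows: a slave
 * records CHECKIN_DONE and parks in SAL via ia64_sal_mc_rendez() while the
 * monarch handles the MCA; once released it spins until the monarch clears
 * monarch_cpu back to -1.  ia64_mca_wakeup()/ia64_mca_wakeup_all() provide
 * the monarch-side wakeup IPIs (IA64_MCA_WAKEUP_VECTOR).
 */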
/*
 * ia64_mca_wakeup_int_handler
 *
 *      The interrupt handler for processing the inter-cpu interrupt to the
 *      slave cpu which was spinning in the rendez loop.
 *      Since this spinning is done by turning off the interrupts and
 *      polling on the wakeup-interrupt bit in the IRR, there is
 *      nothing useful to be done in the handler.
 *
 *  Inputs  :   wakeup_irq      (Wakeup-interrupt bit)
 *              arg             (Interrupt handler specific argument)
 *              ptregs          (Exception frame at the time of the interrupt)
 *  Outputs :   None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
{
        return IRQ_HANDLED;
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)(void *, struct ia64_sal_os_state *) = NULL;

int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
        if (ia64_mca_ucmc_extension)
                return 1;

        ia64_mca_ucmc_extension = fn;
        return 0;
}

void
ia64_unreg_MCA_extension(void)
{
        if (ia64_mca_ucmc_extension)
                ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);
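/* Usage sketch for the extension hook above (illustrative, not from this
 * file; the handler name is hypothetical).  A recovery module installs a
 * single handler taking an opaque record pointer and the SAL/OS state;
 * as the code above shows, registration returns 1 if another handler is
 * already installed.  The nonzero-on-recovery return convention is an
 * assumption here, not visible in this excerpt:
 *
 *      static int my_recovery_handler(void *record,
 *                      struct ia64_sal_os_state *sos)
 *      {
 *              // examine the record; return nonzero if recovered (assumed)
 *              return 0;
 *      }
 *
 *      if (ia64_reg_MCA_extension(my_recovery_handler))
 *              printk(KERN_ERR "MCA extension already registered\n");
 *      ...
 *      ia64_unreg_MCA_extension();     // e.g. on module unload
 */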
static inline void
copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
{
        u64 fslot, tslot, nat;

        *tr = *fr;
        fslot = ((unsigned long)fr >> 3) & 63;
        tslot = ((unsigned long)tr >> 3) & 63;
        *tnat &= ~(1UL << tslot);
        nat = (fnat >> fslot) & 1;
        *tnat |= (nat << tslot);
}
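/* Note on copy_reg() above: a NAT collection holds one NAT bit per 8-byte
 * register save slot, 64 bits per collection, so the slot index is
 * (address >> 3) & 63.  copy_reg() clears the target's slot bit in *tnat,
 * then transfers the source register's NAT bit from fnat into that slot,
 * keeping NAT state consistent when a register value moves to a
 * differently-aligned save area.
 */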
/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state, it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */
static task_t *
ia64_mca_modify_original_stack(struct pt_regs *regs,
                const struct switch_stack *sw,
                struct ia64_sal_os_state *sos,
                const char *type)
{
        char *p, comm[sizeof(current->comm)];
        ia64_va va;
        extern char ia64_leave_kernel[];        /* Need asm address, not function descriptor */
        const pal_min_state_area_t *ms = sos->pal_min_state;
        task_t *previous_current;
        struct pt_regs *old_regs;
        struct switch_stack *old_sw;
        unsigned size = sizeof(struct pt_regs) +
                sizeof(struct switch_stack) + 16;
        u64 *old_bspstore, *old_bsp;
        u64 *new_bspstore, *new_bsp;
        u64 old_unat, old_rnat, new_rnat, nat;
        u64 slots, loadrs = regs->loadrs;
        u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
        u64 ar_bspstore = regs->ar_bspstore;
        u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
        const u64 *bank;
        const char *msg;
        int cpu = smp_processor_id();

        previous_current = curr_task(cpu);
        set_curr_task(cpu, current);
        if ((p = strchr(current->comm, ' ')))
                *p = '\0';

        /* Best effort attempt to cope with MCA/INIT delivered while in
         * physical mode.
         */
        regs->cr_ipsr = ms->pmsa_ipsr;
        if (ia64_psr(regs)->dt == 0) {
                va.l = r12;
                if (va.f.reg == 0) {
                        va.f.reg = 7;
                        r12 = va.l;
                }
                va.l = r13;
                if (va.f.reg == 0) {
                        va.f.reg = 7;
                        r13 = va.l;
                }
        }
        if (ia64_psr(regs)->rt == 0) {
                va.l = ar_bspstore;
                if (va.f.reg == 0) {
                        va.f.reg = 7;
                        ar_bspstore = va.l;
                }
                va.l = ar_bsp;
                if (va.f.reg == 0) {
                        va.f.reg = 7;
                        ar_bsp = va.l;
                }
        }

        /* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
         * have been copied to the old stack, the old stack may fail the
         * validation tests below.  So ia64_old_stack() must restore the dirty
         * registers from the new stack.  The old and new bspstore probably
         * have different alignments, so loadrs calculated on the old bsp
         * cannot be used to restore from the new bsp.  Calculate a suitable
         * loadrs for the new stack and save it in the new pt_regs, where
         * ia64_old_stack() can get it.
         */
        old_bspstore = (u64 *)ar_bspstore;
        old_bsp = (u64 *)ar_bsp;
        slots = ia64_rse_num_regs(old_bspstore, old_bsp);
        new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
        new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
        regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;

        /* Verify the previous stack state before we change it */
        if (user_mode(regs)) {
                msg = "occurred in user space";
                goto no_mod;
        }
        if (r13 != sos->prev_IA64_KR_CURRENT) {
                msg = "inconsistent previous current and r13";
                goto no_mod;
        }
        if ((r12 - r13) >= KERNEL_STACK_SIZE) {
                msg = "inconsistent r12 and r13";
                goto no_mod;
        }
        if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
                msg = "inconsistent ar.bspstore and r13";
                goto no_mod;
        }
        va.p = old_bspstore;
        if (va.f.reg < 5) {
                msg = "old_bspstore is in the wrong region";
                goto no_mod;
        }
        if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
                msg = "inconsistent ar.bsp and r13";
                goto no_mod;
        }
        size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
        if (ar_bspstore + size > r12) {
                msg = "no room for blocked state";
                goto no_mod;
        }

        /* Change the comm field on the MCA/INIT task to include the pid that
         * was interrupted, it makes for easier debugging.  If that pid was 0
         * (swapper or nested MCA/INIT) then use the start of the previous comm
         * field suffixed with its cpu.
         */
        if (previous_current->pid)
                snprintf(comm, sizeof(comm), "%s %d",
                        current->comm, previous_current->pid);
        else {
                int l;
                if ((p = strchr(previous_current->comm, ' ')))
                        l = p - previous_current->comm;
                else
                        l = strlen(previous_current->comm);
                snprintf(comm, sizeof(comm), "%s %*s %d",
                        current->comm, l, previous_current->comm,
                        task_thread_info(previous_current)->cpu);
        }
        memcpy(current->comm, comm, sizeof(current->comm));

        /* Make the original task look blocked.  First stack a struct pt_regs,
         * describing the state at the time of interrupt.  mca_asm.S built a
         * partial pt_regs, copy it and fill in the blanks using minstate.
         */
        p = (char *)r12 - sizeof(*regs);
        old_regs = (struct pt_regs *)p;
        memcpy(old_regs, regs, sizeof(*regs));
        /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
         * pmsa_{xip,xpsr,xfs}
         */
        if (ia64_psr(regs)->ic) {
                old_regs->cr_iip = ms->pmsa_iip;
                old_regs->cr_ipsr = ms->pmsa_ipsr;
                old_regs->cr_ifs = ms->pmsa_ifs;
        } else {
                old_regs->cr_iip = ms->pmsa_xip;
                old_regs->cr_ipsr = ms->pmsa_xpsr;
                old_regs->cr_ifs = ms->pmsa_xfs;
        }
        old_regs->pr = ms->pmsa_pr;
        old_regs->b0 = ms->pmsa_br0;
        old_regs->loadrs = loadrs;
        old_regs->ar_rsc = ms->pmsa_rsc;
        old_unat = old_regs->ar_unat;
        copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
        copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
        copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
        copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
        copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
        copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
        copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
        copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
        copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
        copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
        copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
        if (ia64_psr(old_regs)->bn)
                bank = ms->pmsa_bank1_gr;
        else
                bank = ms->pmsa_bank0_gr;
        copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
        copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
        copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
        copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
        copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
        copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
        copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
        copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
        copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
        copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
        copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
        copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
        copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
        copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
        copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
        copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);
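        /* Note on the bank copies above: r16-r31 are banked registers on
         * IA64.  PAL minstate saves both banks, and psr.bn in the saved ipsr
         * records which bank was live when the event hit, so that is the set
         * copied into the synthesized pt_regs.
         */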
        /* Next stack a struct switch_stack.  mca_asm.S built a partial
         * switch_stack, copy it and fill in the blanks using pt_regs and
         * minstate.
         *
         * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
         * ar.pfs is set to 0.
         *
         * unwind.c::unw_unwind() does special processing for interrupt frames.
         * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
         * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
         * that this is documented, of course.  Set PRED_NON_SYSCALL in the
         * switch_stack on the original stack so it will unwind correctly when
         * unwind.c reads pt_regs.
         *
         * thread.ksp is updated to point to the synthesized switch_stack.
         */
        p -= sizeof(struct switch_stack);
        old_sw = (struct switch_stack *)p;
        memcpy(old_sw, sw, sizeof(*sw));
        old_sw->caller_unat = old_unat;
        old_sw->ar_fpsr = old_regs->ar_fpsr;
        copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
        copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
        copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
        copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
        old_sw->b0 = (u64)ia64_leave_kernel;
        old_sw->b1 = ms->pmsa_br1;
        old_sw->ar_pfs = 0;
        old_sw->ar_unat = old_unat;
        old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
        previous_current->thread.ksp = (u64)p - 16;

        /* Finally copy the original stack's registers back to its RBS.
         * Registers from ar.bspstore through ar.bsp at the time of the event