📄 traps.c
字号:
	/* NOTE(review): this is the tail of cheetah_log_errors() — its header
	 * and earlier body lie before this window.  Locals `info`, `afsr`,
	 * `hipri` and `recoverable` are presumably its parameters/locals;
	 * confirm against the full file.  This tail dumps the D-cache,
	 * I-cache and E-cache snapshot fields, then walks the remaining
	 * AFSR error bits in priority order.
	 */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT),
	       smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Strip the already-reported highest-priority bit, then report any
	 * remaining error bits one at a time, highest priority first.
	 */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

/* Read the current AFSR; if any error bits we care about are set,
 * optionally record AFSR/AFAR into *logp.  The AFSR is written back
 * to itself, which on this hardware clears the bits that were set
 * (write-one-to-clear semantics of the asynchronous fault status
 * register).  Returns non-zero when new errors were found.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	/* Read AFSR via its ASI. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			/* Capture the matching fault address as well. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Write the value back to acknowledge/clear the logged bits. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}

/* Fast-ECC error trap handler: snapshot the logged error, restore
 * the caches and error-reporting enables that the low-level trap
 * handler disabled, decide recoverability, log, and panic if the
 * condition is fatal.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No per-cpu log slot: report via the PROM and stop. */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU | CHAFSR_WDU |
				CHAFSR_CPU | CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 when no new error reappeared (intermittent), 1 when a
 * single retry cleared it, 2 when the error persisted after retry.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled.
	 * Saves the original enable state in orig_estate for restore below.
	 */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);	/* align down to an 8-byte boundary */
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}

/* Return non-zero if PADDR is a valid physical memory address.
*/
static int cheetah_check_main_memory(unsigned long paddr)
{
	int i;

	/* Scan the sp_banks[] table; a bank with num_bytes == 0
	 * terminates the list.
	 */
	for (i = 0; ; i++) {
		if (sp_banks[i].num_bytes == 0)
			break;
		if (paddr >= sp_banks[i].base_addr &&
		    paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
			return 1;
	}
	return 0;
}

/* Correctable-ECC error trap handler: snapshot the logged error,
 * optionally scrub the offending line (cheetah_fix_ce) when the
 * fault address is main memory, flush/re-enable the caches the
 * trap handler disabled, then log and panic if irrecoverable.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No per-cpu log slot: report via the PROM and stop. */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* Flush just the faulting line when the EDC/CPC bit is
		 * the only logged error; otherwise flush the whole
		 * E-cache to be safe.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it.
		 */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}

/* Deferred (asynchronous) error trap handler.
 *
 * NOTE(review): this function continues past the visible end of this
 * chunk; the logic after cheetah_log_errors() (page yanking /
 * exception-table handling described in the trailing comment) is not
 * shown here.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence.
	 * A deliberate probe by the PCI poke code is expected to fault;
	 * just note it, skip the faulting instruction, and return.
	 */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;		/* skip the faulting instruction */
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No per-cpu log slot: report via the PROM and stop. */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	/* is_memory is used by the recoverability logic past this window. */
	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* Flush just the faulting line when the EDU/BERR bit is
		 * the only logged error; otherwise flush everything.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU | CHAFSR_WDU |
				CHAFSR_CPU | CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -