excArchLib.c
        pExcInfo->instrReg = ((ESF68K_BA *)pEsf)->ir;
        size = sizeof (ESF68K_BA);
        }
    else
        {
        pExcInfo->valid |= EXC_PC | EXC_STATUS_REG;
        pExcInfo->pc        = ((ESF68K *)pEsf)->pc;
        pExcInfo->statusReg = ((ESF68K *)pEsf)->sr;
        size = sizeof (ESF68K);
        }
#else   /* (CPU==MC680[12346]0 || CPU==CPU32) */

    /* switch on ESF type */

    switch ((pEsf->vectorOffset & 0xf000) >> 12)
        {
        case 0:
        case 1:
            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG;
            pExcInfo->pc        = pEsf->pc;
            pExcInfo->statusReg = pEsf->sr;
            size = (sizeof (ESF0));
            break;

        case 2:
            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG | EXC_ACCESS_ADDR;
            pExcInfo->pc         = ((ESF2 *)pEsf)->pc;
            pExcInfo->statusReg  = ((ESF2 *)pEsf)->sr;
            pExcInfo->accessAddr = ((ESF2 *)pEsf)->aa;
            size = (sizeof (ESF2));
            break;

#if ((CPU == MC68040) || (CPU == MC68LC040) || (CPU == MC68060))
        case 3:
            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG | EXC_ACCESS_ADDR;
            pExcInfo->pc         = ((ESF3 *)pEsf)->pc;
            pExcInfo->statusReg  = ((ESF3 *)pEsf)->sr;
            pExcInfo->accessAddr = ((ESF3 *)pEsf)->effectiveAddr;
            size = sizeof (ESF3);
            break;
#endif  /* ((CPU == MC68040) || (CPU == MC68LC040) || (CPU == MC68060)) */

#if (CPU == MC68040 || CPU == MC68LC040)
        case 4:
            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG | EXC_ACCESS_ADDR;
            pExcInfo->pc         = ((ESF3 *)pEsf)->pc;
            pExcInfo->statusReg  = ((ESF3 *)pEsf)->sr;
            pExcInfo->accessAddr = ((ESF3 *)pEsf)->effectiveAddr;
            size = sizeof (ESF3) + 2;
            break;
#endif  /* (CPU == MC68040 || CPU == MC68LC040) */

#if (CPU == MC68060)
        case 4:
            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG | EXC_ACCESS_ADDR |
                               EXC_FSLW;
            pExcInfo->pc         = ((ESF4 *)pEsf)->pc;
            pExcInfo->statusReg  = ((ESF4 *)pEsf)->sr;
            pExcInfo->accessAddr = ((ESF4 *)pEsf)->effectiveAddr;
            pExcInfo->funcCode   = ((ESF4 *)pEsf)->fslw;
            size = sizeof (ESF4);
            break;
#endif  /* (CPU == MC68060) */

#if (CPU == MC68040 || CPU == MC68LC040)
        case 7:
            {
            FAST ESF7 *pEsf7 = (ESF7 *)pEsf;
            static int sizeTbl[4] = {4, 1, 2, 0};   /* bytes per ssw:siz field */

            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG | EXC_ACCESS_ADDR |
                               EXC_FUNC_CODE;
            pExcInfo->pc        = pEsf7->pc;
            pExcInfo->statusReg = pEsf7->sr;
            pExcInfo->funcCode  = pEsf7->fn;

            if (pEsf7->fn & 0xf000)             /* if any continuation flags */
                pExcInfo->accessAddr = pEsf7->effectiveAddr;
            else                                /* else, get original fault */
                pExcInfo->accessAddr = pEsf7->aa;

            /* In traditional operating systems, the code here would correct
             * the fault by paging in the appropriate page and completing the
             * cycle manually.  VxWorks currently runs in a linear address
             * space fully consistent with physical memory.  Legitimate bus
             * errors take place when accesses to empty regions of the address
             * space occur.  Even when an MMU is employed to protect memory
             * regions, unwarranted accesses should not be corrected and
             * completed but rather undergo the standard VxWorks exception
             * processing.
             *
             * So we make no attempt to complete or repair the offending cycle.
             * With regard to an instruction ATC fault, performing an rte will
             * retry the instruction.  A data access fault, however, must be
             * completed here manually if the access is desired, because an rte
             * will not retry the cycle.  Future revisions of VxWorks may
             * require the completion of faulted cycles, but for now these data
             * accesses are lost.  Note that completion of the data access must
             * occur before the writebacks are handled.
             *
             * With copyback caching mode, the processor may have pending
             * writebacks to complete.  These writebacks could have nothing to
             * do with the offending cycle, but must be completed to maintain
             * consistency of memory.  An instruction access error will always
             * allow pending accesses to complete before reporting the
             * instruction fault; therefore, no pending writebacks will be
             * contained in the stack frame.  Data accesses, on the other hand,
             * require completion of the writebacks.
             *
             * Many possible types of data accesses exist: a normal user or
             * supervisor access, a move16 operation, a cache push operation,
             * or an MMU tablewalk access.  Each of these faults sets the
             * SSW TT and TM bits for identification.  Completion of the
             * cycles differs slightly for each type, but as stated we do not
             * wish to complete these cycles.  The writebacks are completed
             * for writeback2 and writeback3.  Writeback1 is either associated
             * with the faulted cycle or invalid, as is the case for move16 and
             * cache push faults.
             *
             * With regard to a cache push fault, be aware that the push buffer
             * is invalidated after the fault and memory coherency is lost,
             * because the only valid copy of the cache line now resides in the
             * exception frame, which cannot be snooped.  Pages should be
             * pushed to memory before invalidating.
             */

            /* check if access is an instruction fault and writebacks are necessary */

            if (!(((pEsf7->fn & 0x0007) == 6) || ((pEsf7->fn & 0x0007) == 2)))
                {
                /* perform writeback 2 if necessary */

                if (pEsf7->wb2stat & 0x80)
                    vxMemProbe ((char *) pEsf7->wb2addr, VX_WRITE,
                                sizeTbl[(pEsf7->wb2stat & 0x0060) >> 5],
                                (char *) &pEsf7->wb2data);

                /* perform writeback 3 if necessary */

                if (pEsf7->wb3stat & 0x80)
                    vxMemProbe ((char *) pEsf7->wb3addr, VX_WRITE,
                                sizeTbl[(pEsf7->wb3stat & 0x0060) >> 5],
                                (char *) &pEsf7->wb3data);
                }

            size = sizeof (ESF7);
            break;
            }
#endif  /* (CPU == MC68040 || CPU == MC68LC040) */

#if (CPU != MC68060)
        case 8:
            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG | EXC_ACCESS_ADDR |
                               EXC_FUNC_CODE | EXC_INSTR_REG;
            pExcInfo->pc         = ((ESF8 *)pEsf)->pc;
            pExcInfo->statusReg  = ((ESF8 *)pEsf)->sr;
            pExcInfo->accessAddr = ((ESF8 *)pEsf)->aa;
            pExcInfo->funcCode   = ((ESF8 *)pEsf)->fn;
            pExcInfo->instrReg   = ((ESF8 *)pEsf)->ir;
            size = sizeof (ESF8);
            break;

        case 9:
            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG | EXC_ACCESS_ADDR;
            pExcInfo->pc         = ((ESF9 *)pEsf)->pc;
            pExcInfo->statusReg  = ((ESF9 *)pEsf)->sr;
            pExcInfo->accessAddr = ((ESF9 *)pEsf)->aa;
            size = sizeof (ESF9);
            break;

        case 10:
            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG | EXC_ACCESS_ADDR |
                               EXC_FUNC_CODE | EXC_INSTR_REG;
            pExcInfo->pc         = ((ESFA *)pEsf)->pc;
            pExcInfo->statusReg  = ((ESFA *)pEsf)->sr;
            pExcInfo->accessAddr = ((ESFA *)pEsf)->aa;
            pExcInfo->funcCode   = ((ESFA *)pEsf)->fn;
            pExcInfo->instrReg   = ((ESFA *)pEsf)->instPipeC;
            size = sizeof (ESFA);
            break;

        case 11:
            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG | EXC_ACCESS_ADDR |
                               EXC_FUNC_CODE | EXC_INSTR_REG;
            pExcInfo->pc         = ((ESFA *)pEsf)->pc;
            pExcInfo->statusReg  = ((ESFA *)pEsf)->sr;
            pExcInfo->accessAddr = ((ESFA *)pEsf)->aa;
            pExcInfo->funcCode   = ((ESFA *)pEsf)->fn;
            pExcInfo->instrReg   = ((ESFA *)pEsf)->instPipeC;
            size = sizeof (ESFB);
            break;
#endif  /* (CPU != MC68060) */

#if CPU == CPU32
        case 12:
            pExcInfo->valid |= EXC_PC | EXC_STATUS_REG | EXC_ACCESS_ADDR |
                               EXC_FUNC_CODE;
            pExcInfo->pc         = ((ESFC *)pEsf)->pc;
            pExcInfo->statusReg  = ((ESFC *)pEsf)->sr;
            pExcInfo->accessAddr = ((ESFC *)pEsf)->aa;
            pExcInfo->funcCode   = ((ESFC *)pEsf)->fn;
            size = sizeof (ESFC);
            break;
#endif  /* CPU == CPU32 */

        default:
            pExcInfo->valid   |= EXC_INVALID_TYPE;
            pExcInfo->funcCode = ((pEsf->vectorOffset & 0xf000) >> 12);
            size = 0;
            break;
        }
#endif  /* (CPU==MC680[12346]0 || CPU==CPU32) */

    pRegs->spReg = (ULONG)((char *) pEsf + size);       /* bump up stack ptr */
    }

/*******************************************************************************
* excTasRetry - retry a TAS instruction
*
* If this was a bus error involving a RMW cycle (TAS instruction), we return
* to the handler to retry it.  Such is the case in a VMEbus deadlock cycle,
* where the local CPU initiates a TAS instruction (or RMW cycle) at the same
* time its dual-port arbiter grants the local bus to an external access.  The
* CPU backs off by signaling a bus error and setting the RM bit in the special
* status word of the bus error exception frame.  The solution is simply to
* retry the instruction, hoping that the external access has been resolved.
* Even if a card such as a disk controller has grabbed the bus for DMA
* accesses for a long time, the worst that will happen is that we end up back
* here again, and we can keep trying until we get through.
*
* RETURNS: TRUE if retry desired, FALSE if not a TAS cycle.
* NOMANUAL
*/

LOCAL BOOL excTasRetry
    (
    int       vecNum,           /* exception vector number */
    ESF0 *    pEsf,             /* pointer to exception stack frame */
    REG_SET * pRegs             /* pointer to register info on stack */
    )
    {
#if (CPU==MC68000)
    if (FALSE)          /* no way to tell if this was a RMW - just return FALSE */
#endif  /* (CPU==MC68000) */

#if (CPU==MC68010)
    if ((((pEsf->vectorOffset & 0xf000) >> 12) == 8) &&         /* BERR! */
        (((ESF8 *)pEsf)->fn & 0x800))                           /* RMW cycle */
#endif  /* (CPU==MC68010) */

#if (CPU==MC68020)
    if (((((pEsf->vectorOffset & 0xf000) >> 12) == 10) ||
         (((pEsf->vectorOffset & 0xf000) >> 12) == 11)) &&      /* BERR! */
        (((ESFA *)pEsf)->fn & 0x80))                            /* RMW cycle */
#endif  /* (CPU==MC68020) */

#if (CPU==CPU32)
    if (((((pEsf->vectorOffset & 0xf000) >> 12) == 10) ||
         (((pEsf->vectorOffset & 0xf000) >> 12) == 11)) &&      /* BERR! */
        (((ESFA *)pEsf)->fn & 0x100))                           /* RMW cycle */
#endif  /* (CPU==CPU32) */

#if ((CPU==MC68040) || (CPU==MC68LC040) || (CPU==MC68060))
#if (CPU==MC68040) || (CPU==MC68LC040)
    if ((((pEsf->vectorOffset & 0xf000) >> 12) == 7) &&         /* BERR! */
        (((ESF7 *)pEsf)->fn & 0x200))                           /* ATC Fault */
#else
    if ((((pEsf->vectorOffset & 0xf000) >> 12) == 4) &&         /* BERR! */
        ((((ESF4 *)pEsf)->fslw & 0x03800020) == 0x03800020))    /* LCK+RMW+RE */
#endif

    /* The 68040/68060 has NO data retry capability.  Just returning from here
     * would cause the access to "seem" OK, but would throw away any writeback
     * data that was contained in the stack frame, corrupting memory.  On the
     * other hand, the address that was to be written to could ALSO be invalid.
     *
     * It is a problem.
     *
     * The vxTas call prevents this by flushing the write pipeline with a "nop"
     * before doing the TAS.  In fact, we can simplify everything for the vxTas
     * case by placing the retry logic at the application (vxTas) level.
     *
     * We tell vxTas that an error occurred by placing a -1 in "d0".
     */
#endif  /* ((CPU==MC68040) || (CPU==MC68LC040) || (CPU==MC68060)) */
        {
        ++excTasErrors;                         /* keep count of TAS errors */

#if (CPU==MC68040 || CPU==MC68LC040 || CPU==MC68060)
        pRegs->dataReg[0] = -1;                 /* and place a -1 in "d0" */
#endif  /* (CPU==MC68040 || CPU==MC68LC040 || CPU==MC68060) */

        return (TRUE);                          /* retry the instruction */
        }

    return (FALSE);
    }
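Both routines above identify the stack-frame format the same way: in every 68010-and-later exception frame, the format/vector-offset word carries the frame format in its top four bits and the vector offset (the vector number times four) in its low twelve bits. The following is a minimal standalone sketch of that decoding; the helper names esfFormat and esfVectorNum are hypothetical and not part of the VxWorks source, only the bit layout comes from the masks used above.

/* Sketch of the 680x0 format/vector-offset word decoding used by
 * excGetInfoFromESF() and excTasRetry().  Helper names are made up for
 * this illustration.
 */
#include <stdio.h>

typedef unsigned short UINT16;

static int esfFormat (UINT16 vectorOffset)      /* stack-frame format, 0-15 */
    {
    return ((vectorOffset & 0xf000) >> 12);
    }

static int esfVectorNum (UINT16 vectorOffset)   /* exception vector number */
    {
    return ((vectorOffset & 0x0fff) >> 2);      /* offset is vectorNum * 4 */
    }

int main (void)
    {
    UINT16 fmtVecWord = 0x7008;                 /* e.g. format 7, vector 2 (bus error) */

    printf ("format = %d, vector = %d\n",
            esfFormat (fmtVecWord), esfVectorNum (fmtVecWord));
    return (0);
    }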
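For the 68040 access-error frame handled in case 7, the writeback logic keys off two fields of each WBnS status byte: bit 7 marks the writeback as pending, and bits 6-5 select the transfer size, which sizeTbl converts to a byte count for vxMemProbe(). Below is a small sketch of that decoding built only from the masks and the {4, 1, 2, 0} table already present in the handler; wbSize and the macro names are invented for illustration.

/* Sketch of the writeback-status decoding from case 7 above.  A result of 0
 * means either "no writeback pending" or a line-sized transfer, which the
 * handler does not complete through vxMemProbe().  WB_VALID, WB_SIZE_MASK,
 * WB_SIZE_SHIFT and wbSize() are hypothetical names.
 */
#include <stdio.h>

#define WB_VALID        0x80    /* writeback pending */
#define WB_SIZE_MASK    0x0060  /* transfer size field */
#define WB_SIZE_SHIFT   5

static const int sizeTbl[4] = {4, 1, 2, 0};     /* bytes per size encoding */

static int wbSize (unsigned int wbStat)
    {
    if ((wbStat & WB_VALID) == 0)
        return (0);                             /* nothing to write back */

    return (sizeTbl[(wbStat & WB_SIZE_MASK) >> WB_SIZE_SHIFT]);
    }

int main (void)
    {
    printf ("0x80 -> %d bytes\n", wbSize (0x80));   /* pending, sizeTbl[0] -> 4 */
    printf ("0xA0 -> %d bytes\n", wbSize (0xA0));   /* pending, sizeTbl[1] -> 1 */
    printf ("0xC0 -> %d bytes\n", wbSize (0xC0));   /* pending, sizeTbl[2] -> 2 */
    printf ("0x20 -> %d bytes\n", wbSize (0x20));   /* not pending         -> 0 */
    return (0);
    }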