intArchLib.c
*
* RETURNS:
* The interrupt level currently stored in the interrupt lock-out mask.
*
* SEE ALSO: intLockLevelSet()
*/

int intLockLevelGet (void)
    {
    return intLockMask >> 4;
    }

/*******************************************************************************
*
* intVecBaseSet - set the vector base address
*
* This routine sets the vector base address.  The CPU's vector base register
* is set to the specified value, and subsequent calls to intVecGet() or
* intVecSet() will use this base address.  The vector base address is
* initially 0, until changed by calls to this routine.
*
* RETURNS: N/A
*
* SEE ALSO: intVecBaseGet(), intVecGet(), intVecSet()
*
* INTERNAL:
* This routine is called from usrInit(), with VEC_BASE_ADRS (configAll.h)
* as baseAddr.  At the time of this call, cacheLibInit() has been done but
* cacheEnable() has not; that is, the whole cache is invalidated and not yet
* enabled.  Therefore no cache invalidation is necessary before copying
* intStub, even if the specified baseAddr is in a non-cached region.
*/

void intVecBaseSet
    (
    FUNCPTR *baseAddr           /* new vector base address */
    )
    {
#if (CPU==SH7750 || CPU==SH7700)
    UINT virtAddr = (UINT)intVecBaseSet;        /* where am I? */
    UINT virtBase = ((UINT)baseAddr & 0x1fffffff) | (virtAddr & 0xe0000000);
    UINT stubAddr;

    bzero ((char *)virtBase, 0x1000);   /* clean VBR relative stub space */

    /* load INTEVT register address */

    stubAddr = virtBase + SH7700_INT_EVT_ADRS_OFFSET;
    *(UINT32 *)stubAddr = intEvtAdrs;
    CACHE_TEXT_UPDATE ((void *)stubAddr, sizeof (UINT32));

    /* load interrupt handling stub */

    stubAddr = virtBase + SH7700_INT_STUB_OFFSET;
    bcopy ((char *)intStub, (char *)stubAddr, intStubSize);
    CACHE_TEXT_UPDATE ((void *)stubAddr, intStubSize);

    /* load intExit stub */

    stubAddr = virtBase + SH7700_INT_EXIT_STUB_OFFSET;
    bcopy ((char *)intExitStub, (char *)stubAddr, intExitStubSize);
    CACHE_TEXT_UPDATE ((void *)stubAddr, intExitStubSize);

    /* load task dispatch stub */

    stubAddr = virtBase + SH7700_DISPATCH_STUB_OFFSET;
    bcopy ((char *)dispatchStub, (char *)stubAddr, dispatchStubSize);
    CACHE_TEXT_UPDATE ((void *)stubAddr, dispatchStubSize);

    /* load intPrioTable[] */

    stubAddr = virtBase + SH7700_INT_PRIO_TABLE_OFFSET;
    bcopy ((char *)intPrioTable, (char *)stubAddr, intPrioTableSize);
    CACHE_TEXT_UPDATE ((void *)stubAddr, intPrioTableSize);

    intVecBase = baseAddr;      /* keep the base address in a static variable */
    intVBRSet (baseAddr);       /* set the actual vector base register */
#else
    intVecBase = baseAddr;      /* keep the base address in a static variable */
    intVBRSet (baseAddr);       /* set the actual vector base register */

    CACHE_TEXT_UPDATE ((void *)baseAddr, 256 * sizeof (FUNCPTR));
#endif
    }

/*******************************************************************************
*
* intVecBaseGet - get the vector base address
*
* This routine returns the current vector base address that has been set
* with the intVecBaseSet() routine.
*
* RETURNS: The current vector base address.
*
* SEE ALSO: intVecBaseSet()
*/

FUNCPTR *intVecBaseGet (void)
    {
    return intVecBase;
    }

/*******************************************************************************
*
* windIntStackSet - set the interrupt stack pointer
*
* This routine sets the interrupt stack pointer to the specified address.
* It is only valid on architectures with an interrupt stack pointer.
*
* NOMANUAL
*/

void windIntStackSet
    (
    char *pBotStack             /* pointer to bottom of interrupt stack */
    )
    {
#if (CPU==SH7750 || CPU==SH7700)
    UINT virtAddr = (UINT)windIntStackSet;      /* where am I? */
    UINT virtBase = ((UINT)intVecBase & 0x1fffffff) | (virtAddr & 0xe0000000);
    UINT stubAddr;

    stubAddr = virtBase + SH7700_INT_STACK_BASE_OFFSET;
    *(char **)stubAddr = pBotStack;
    CACHE_TEXT_UPDATE ((void *)stubAddr, sizeof (char *));

    stubAddr = virtBase + SH7700_ARE_WE_NESTED_OFFSET;
    *(UINT32 *)stubAddr = 0x80000000;
    CACHE_TEXT_UPDATE ((void *)stubAddr, sizeof (UINT32));

    stubAddr = virtBase + SH7700_NULL_EVT_CNT_OFFSET;
    *(UINT32 *)stubAddr = 0xabcd0000;
    CACHE_TEXT_UPDATE ((void *)stubAddr, sizeof (UINT32));
#endif /* CPU==SH7750 || CPU==SH7700 */
    }

/*******************************************************************************
*
* intVecSet - set a CPU vector
*
* This routine sets an exception/interrupt vector to a specified address.
* The vector is specified as an offset into the CPU's vector table.
* On SH CPUs, the vector table may be set to start at any address with the
* intVecBaseSet() routine.  The vector table is set up in usrInit() and
* starts at the lowest available memory address.
*
* RETURNS: N/A
*
* SEE ALSO: intVecBaseSet(), intVecGet()
*/

void intVecSet
    (
    FUNCPTR *vector,            /* vector offset */
    FUNCPTR function            /* address to place in vector */
    )
    {
    FUNCPTR *newVector;
#if (CPU==SH7750 || CPU==SH7700)
    BOOL writeProtected = FALSE;
    int pageSize = 0;
    char *pageAddr = 0;
    UINT virtAddr = (UINT)intVecSet;    /* where am I? */
#endif

    if (VXM_IF_VEC_SET (vector, function) == OK)        /* can monitor do it? */
        return;

    /* vector is offset by the vector base address */

    newVector = (FUNCPTR *)((UINT)vector + (UINT)intVecBaseGet ());

#if (CPU==SH7750 || CPU==SH7700)
    /* set the vector from its virtual address */

    newVector = (FUNCPTR *)(((UINT)newVector & 0x1fffffff)
                          | (virtAddr & 0xe0000000));

    /* see if we need to write enable the memory */

    if (vmLibInfo.vmLibInstalled)
        {
        UINT state;

        pageSize = VM_PAGE_SIZE_GET ();
        pageAddr = (char *)((UINT)newVector / pageSize * pageSize);

        if (VM_STATE_GET (NULL, (void *)pageAddr, &state) != ERROR)
            {
            if ((state & VM_STATE_MASK_WRITABLE) == VM_STATE_WRITABLE_NOT)
                {
                writeProtected = TRUE;
                VM_STATE_SET (NULL, pageAddr, pageSize,
                              VM_STATE_MASK_WRITABLE, VM_STATE_WRITABLE);
                }
            }
        }
#endif

    *newVector = function;

#if (CPU==SH7750 || CPU==SH7700)
    /* push out the new vector on the data cache to physical memory */
    /* XXX The SH7729 gets an exception from fppProbe() unless flushing
     * XXX the cached vector before protecting the virtual page, and it
     * XXX happens regardless of the caching mode (copyback/writethru).
     * XXX The exception handler gets a vector from P1-cacheable region
     * XXX (by default), so that the new vector seems invisible on P1.
     * XXX It is strange because the SH7729 cache holds physical addresses
     * XXX in its address section, so we should not experience a cache
     * XXX incoherency problem between P0 and P1.  However the flush code
     * XXX below is absolutely necessary if VBR is set to P2-noncacheable
     * XXX region, so please do not remove this. (02l,28oct00,hk)
     */
    if (cacheLib.flushRtn != NULL)
        cacheLib.flushRtn (DATA_CACHE, (void *)newVector, sizeof (FUNCPTR));

    if (writeProtected)
        {
        VM_STATE_SET (NULL, pageAddr, pageSize,
                      VM_STATE_MASK_WRITABLE, VM_STATE_WRITABLE_NOT);
        }
#endif

    /* synchronize the instruction and data caches if they are separated */

    CACHE_TEXT_UPDATE ((void *)newVector, sizeof (FUNCPTR));
    }

/*******************************************************************************
*
* intVecGet - get a vector
*
* This routine returns the current value of a specified exception/interrupt
* vector.
* The vector is specified as an offset into the CPU's vector table.
* On SH CPUs, the vector table may be set to start at any address with the
* intVecBaseSet() routine.
*
* RETURNS: The current value of <vector>.
*
* SEE ALSO: intVecSet(), intVecBaseSet()
*/

FUNCPTR intVecGet
    (
    FUNCPTR *vector             /* vector offset */
    )
    {
    FUNCPTR vec;

    if ((vec = VXM_IF_VEC_GET (vector)) != NULL)        /* can monitor do it? */
        return vec;

    /* vector is offset by vector base address */

    return *(FUNCPTR *)((int)vector + (int)intVecBaseGet ());
    }

#if (CPU==SH7750 || CPU==SH7700)
/*******************************************************************************
*
* intVecTableWriteProtect - write protect exception vector table
*
* If the unbundled MMU support package (vmLib) is present, write protect
* the exception vector table to protect it from being accidentally corrupted.
* Note that other data structures contained in the page will also be
* write protected.  In the default VxWorks configuration, the exception
* vector table is located at address 0 in memory.  Write protecting this
* page affects the backplane anchor, the boot configuration information, and
* potentially the text segment (assuming the default text location of 0x1000).
* All code that manipulates these structures has been modified to write
* enable the memory for the duration of the operation.  If a different
* address is selected for the exception vector table, it should reside in a
* page separate from other writable data structures.
*
* RETURNS: OK, or ERROR if unable to write protect memory.
*/

STATUS intVecTableWriteProtect (void)
    {
    STATUS status = ERROR;

    if (vmLibInfo.vmLibInstalled)
        {
        int pageSize = VM_PAGE_SIZE_GET ();

        if (pageSize != 0)
            {
            UINT virtAddr = (UINT)intVecTableWriteProtect;  /* where am I? */
            UINT vecBase  = (UINT)intVecBaseGet ();
            UINT vecPage  = ((vecBase & 0x1fffffff) | (virtAddr & 0xe0000000))
                            / pageSize * pageSize;

            status = VM_STATE_SET (NULL, (void *)vecPage, pageSize,
                                   VM_STATE_MASK_WRITABLE,
                                   VM_STATE_WRITABLE_NOT);
            }
        }
    else
        errno = S_intLib_VEC_TABLE_WP_UNAVAILABLE;

    return status;
    }

/*******************************************************************************
*
* intGlobalSRSet - set special bit(s) in status register values used by kernel
*
* This routine sets the specified bit(s) in every SR value used by the kernel.
*
* RETURNS: OK, or ERROR.
*/

STATUS intGlobalSRSet (UINT32 bits, UINT32 mask, int maxTasks)
    {
    UINT virtAddr = (UINT)intGlobalSRSet;       /* where am I? */
    UINT virtBase = ((UINT)intVecBase & 0x1fffffff) | (virtAddr & 0xe0000000);
    UINT prioTbl  = virtBase + SH7700_INT_PRIO_TABLE_OFFSET;
    UINT32 *p;
    BOOL writeProtected = FALSE;
    int pageSize = 0;
    char *pageAddr = 0;
    STATUS status = OK;

    /* modify intPrioTable[] entries */

    if (vmLibInfo.vmLibInstalled)
        {
        UINT state;

        pageSize = VM_PAGE_SIZE_GET ();
        pageAddr = (char *)(prioTbl / pageSize * pageSize);

        if (VM_STATE_GET (NULL, (void *)pageAddr, &state) != ERROR)
            {
            if ((state & VM_STATE_MASK_WRITABLE) == VM_STATE_WRITABLE_NOT)
                {
                writeProtected = TRUE;
                VM_STATE_SET (NULL, pageAddr, pageSize,
                              VM_STATE_MASK_WRITABLE, VM_STATE_WRITABLE);
                }
            }
        }

    for (p = (UINT32 *)prioTbl; p < (UINT32 *)(prioTbl + intPrioTableSize); p++)
        if (*p != 0)
            *p = (*p & mask) | (bits & ~mask);

    if (writeProtected)
        {
        VM_STATE_SET (NULL, pageAddr, pageSize,
                      VM_STATE_MASK_WRITABLE, VM_STATE_WRITABLE_NOT);
        }

    CACHE_TEXT_UPDATE ((void *)prioTbl, intPrioTableSize);

    /* modify global SR values used by kernel */

    intLockTaskSR = (intLockTaskSR & mask) | (bits & ~mask);
    intUnlockSR   = (intUnlockSR   & mask) | (bits & ~mask);
    intBlockSR    = (intBlockSR    & mask) | (bits & ~mask);

    /* if kernel is running, we also need to take care of the existing tasks */

    if (Q_FIRST (&activeQHead) != NULL)         /* kernel is running */
        {
        int i, numTasks;
        int *idList;

        if ((idList = malloc ((maxTasks + 1) * sizeof (int))) == NULL)
            return ERROR;

        taskLock ();                            /* LOCK PREEMPTION */

        numTasks = taskIdListGet (idList, maxTasks + 1);

        if (numTasks <= maxTasks)
            {
            /* modify current SR value */

            int key = intLock ();               /* LOCK INTERRUPTS */

            intSRSet ((intSRGet () & mask) | (bits & ~mask));

            intUnlock (key);                    /* UNLOCK INTERRUPTS */

            /* modify every task's SR value in TCB */

            for (i = 0; i < numTasks; i++)
                {
                WIND_TCB *pTcb = (WIND_TCB *)idList[i];

                pTcb->regs.sr = (pTcb->regs.sr & mask) | (bits & ~mask);
                }
            }
        else
            status = ERROR;

        taskUnlock ();                          /* UNLOCK PREEMPTION */

        free (idList);
        }

    return status;
    }
#endif /* CPU==SH7750 || CPU==SH7700 */

/*******************************************************************************
*
* intRegsLock - modify a REG_SET to have interrupts locked
*/

int intRegsLock
    (
    REG_SET *pRegs              /* register set to modify */
    )
    {
    ULONG oldSr = pRegs->sr;

    pRegs->sr = (oldSr & 0xffffff0f) | intLockMask;

    return oldSr;
    }

/*******************************************************************************
*
* intRegsUnlock - restore a REG_SET's interrupt lockout level
*
* NOTE: M/Q/S/T bits have to be preserved.
*/

void intRegsUnlock
    (
    REG_SET *pRegs,             /* register set to modify */
    int      oldSr              /* sr with int lock level to restore */
    )
    {
    pRegs->sr = (pRegs->sr & 0xffffff0f) | ((ULONG)oldSr & 0x000000f0);
    }
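/*******************************************************************************
*
* EXAMPLE - using the vector table routines (not part of this library)
*
* The sketch below shows how the routines above are typically combined:
* intVecGet() reads the current entry at a vector offset, and intVecSet()
* installs a new one relative to the base set by intVecBaseSet().  It is a
* minimal sketch assuming a VxWorks SH target; INUM_TO_IVEC() is the iv.h
* macro that converts a vector number to a table offset, while myHandlerStub
* and MY_VEC_NUM are hypothetical placeholders, not symbols from this file.
*/

#include "vxWorks.h"
#include "iv.h"
#include "intLib.h"

#define MY_VEC_NUM      64                      /* hypothetical vector number */

extern void myHandlerStub (void);               /* hypothetical handler entry */

void myVecInstall (void)
    {
    FUNCPTR *vecOffset = INUM_TO_IVEC (MY_VEC_NUM); /* number -> table offset */
    FUNCPTR  oldVec    = intVecGet (vecOffset);     /* remember old handler   */

    intVecSet (vecOffset, (FUNCPTR)myHandlerStub);  /* install new handler    */

    (void)oldVec;       /* could be kept to restore the original vector later */
    }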
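/*******************************************************************************
*
* EXAMPLE - the (addr & 0x1fffffff) | (here & 0xe0000000) idiom
*
* Several routines above rebase an address with this expression.  On SH7700/
* SH7750 parts the upper three address bits select the P0-P4 region while the
* lower 29 bits are the offset within it, so the expression maps an address
* into the same region the running code executes from.  The standalone
* snippet below only illustrates the bit arithmetic; the sample addresses
* are made up.
*/

#include <stdio.h>
#include <stdint.h>

/* keep the low 29 offset bits of addr, take the region bits from here */

static uint32_t sameRegion (uint32_t addr, uint32_t here)
    {
    return (addr & 0x1fffffffu) | (here & 0xe0000000u);
    }

int main (void)
    {
    uint32_t vecBase = 0x0c000000u;     /* made-up vector base in P0       */
    uint32_t here    = 0x8c001234u;     /* made-up text address in P1      */

    /* prints "0x0c000000 -> 0x8c000000": the P0 address as seen from P1 */

    printf ("0x%08x -> 0x%08x\n", (unsigned)vecBase,
            (unsigned)sameRegion (vecBase, here));
    return 0;
    }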
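/*******************************************************************************
*
* EXAMPLE - SR mask arithmetic used by intRegsLock()/intRegsUnlock()
*
* Bits 4-7 of the SH status register hold the interrupt mask level, which is
* why intLockLevelGet() shifts intLockMask right by four and why the routines
* above mask with 0xffffff0f / 0x000000f0.  The standalone check below uses
* made-up SR and intLockMask values to show the lock/unlock round trip.
*/

#include <stdio.h>
#include <stdint.h>

int main (void)
    {
    uint32_t intLockMask = 0x000000f0;  /* lock-out level 15 in bits 4-7 */
    uint32_t sr          = 0x40000030;  /* made-up SR, current level 3   */

    uint32_t oldSr  = sr;
    uint32_t locked = (sr & 0xffffff0f) | intLockMask;         /* intRegsLock   */
    uint32_t redone = (locked & 0xffffff0f) | (oldSr & 0xf0);  /* intRegsUnlock */

    printf ("lock level : %u\n",     (unsigned)(intLockMask >> 4)); /* 15         */
    printf ("locked SR  : 0x%08x\n", (unsigned)locked);             /* 0x400000f0 */
    printf ("restored SR: 0x%08x\n", (unsigned)redone);             /* 0x40000030 */
    return 0;
    }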
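/*******************************************************************************
*
* EXAMPLE - the (value & mask) | (bits & ~mask) merge used by intGlobalSRSet()
*
* intGlobalSRSet() applies the same read-modify-write to intPrioTable[], to
* the kernel's global SR values, and to every task's saved SR: bit positions
* set in mask are preserved, all other positions are taken from bits.  The
* standalone snippet below uses made-up bit values purely for illustration.
*/

#include <stdio.h>
#include <stdint.h>

/* keep the bits selected by mask, take everything else from bits */

static uint32_t srMerge (uint32_t sr, uint32_t bits, uint32_t mask)
    {
    return (sr & mask) | (bits & ~mask);
    }

int main (void)
    {
    uint32_t mask = 0xffffefff;         /* made-up: preserve all but bit 12 */
    uint32_t bits = 0x00001000;         /* made-up: force bit 12 on         */

    /* prints "0x40000030 -> 0x40001030" */

    printf ("0x%08x -> 0x%08x\n", 0x40000030u,
            (unsigned)srMerge (0x40000030u, bits, mask));
    return 0;
    }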