/* windALib.s - wind kernel exit support routines (SuperH assembly) */
mov.l @(r0,r11),r12; /* r12: priority */ mov.l r3,@-sp mov.l r4,@-sp /* check if we need to log this event */ mov.l DII_WvEvtClass,r1; mov.l r5,@-sp mov.l DII_WV_CLASS_1_ON,r3; mov.l r6,@-sp mov.l @r1,r0; mov.l r7,@-sp and r3,r0; sts.l pr,@-sp cmp/eq r3,r0 bf dispatchCheckTrg /* Here we try to determine if the task is running at an * inherited priority, if so a different event is generated. */ mov #WIND_TCB_PRI_NORMAL,r0 mov.l @(r0,r11),r1; mov #EVENT_WIND_EXIT_DISPATCH,r4 cmp/ge r1,r12 /* normalPriority <= priority */ bt dispatchEvtLog /* no inheritance */ mov #EVENT_WIND_EXIT_DISPATCH_PI,r4dispatchEvtLog: /* r4: event ID */ mov.l DII_EvtLogTSched,r1; mov.l @r1,r0; mov r12,r6 /* r4 r5 r6 */ jsr @r0; mov r11,r5 /* evtLogTSched (eventID,taskID,pri) */dispatchCheckTrg: /* check if we need to evaluate trigger for this event */ mov.l DII_TrgEvtClass,r1; mov.l DII_TRG_CLASS_1_ON,r3; mov.l @r1,r0; mov #0,r2 /* r2: NULL */ and r3,r0 cmp/eq r3,r0 bf dispatchInstDone mov #WIND_TCB_PRI_NORMAL,r0 mov.l @(r0,r11),r1; mov #EVENT_WIND_EXIT_DISPATCH,r4 cmp/ge r1,r12 /* normalPriority <= priority */ bt dispatchEvalTrg /* no inheritance */ mov #EVENT_WIND_EXIT_DISPATCH_PI,r4dispatchEvalTrg: /* r4: event ID */ /* r4 r5 r6 r7 +0 +4 +8 +12 */ /* trgCheck (eventID,index,taskID,priority, 0, 0, 0, 0) */ mov.l r2,@-sp; mov.l r2,@-sp mov.l r2,@-sp; mov.l r2,@-sp mov.l DII_TrgCheck,r1; mov r12,r7 mov.l @r1,r0; mov r11,r6 jsr @r0; mov #TRG_CLASS1_INDEX,r5 add #16,sp /* pop params */dispatchInstDone: lds.l @sp+,pr mov.l @sp+,r7 mov.l @sp+,r6 mov.l @sp+,r5 mov.l @sp+,r4 mov.l @sp+,r3 mov.l @sp+,r2 mov.l @sp+,r1 bra dispatchNoInstr; mov.l @sp+,r0 .align 2DII_TaskIdCurrent: .long _taskIdCurrentDII_WvEvtClass: .long _wvEvtClassDII_WV_CLASS_1_ON: .long WV_CLASS_1_ONDII_EvtLogTSched: .long __func_evtLogTSchedDII_TrgEvtClass: .long _trgEvtClassDII_TRG_CLASS_1_ON: .long TRG_CLASS_1_ONDII_TrgCheck: .long __func_trgCheck/******************************************************************************** 
idleInstr - windview instrumentation: enter idle state**/ .align _ALIGN_TEXT .type idleInstr,@functionidleInstr: mov.l ILI_IntLockSR,r1; sts.l pr,@-sp mov.l @r1,r0; ldc r0,sr; /* LOCK INTERRUPTS */ /* check if we need to log this event */ mov.l ILI_WvEvtClass,r1; mov.l ILI_WV_CLASS_1_ON,r3; mov.l @r1,r0; and r3,r0 cmp/eq r3,r0 bf idleCheckTrg mov.l ILI_EvtLogT0,r1; mov.l @r1,r0; jsr @r0; /* evtLogT0 (eventID) */ mov #EVENT_WIND_EXIT_IDLE,r4idleCheckTrg: /* check if we need to evaluate trigger for this event */ mov.l ILI_TrgEvtClass,r1; mov.l ILI_TRG_CLASS_1_ON,r3; mov.l @r1,r0; mov #0,r7 /* r7: NULL */ and r3,r0 cmp/eq r3,r0 bf idleInstDone /* r4 r5 r6 r7 +0 +4 +8 +12 */ /* trgCheck (eventID,index, 0, 0, 0, 0, 0, 0) */ mov.l r7,@-sp; mov.l r7,@-sp mov.l r7,@-sp; mov.l r7,@-sp mov.l ILI_TrgCheck,r1; mov r7,r6 mov.l @r1,r0; mov #TRG_CLASS1_INDEX,r5 jsr @r0; mov #EVENT_WIND_EXIT_IDLE,r4 add #16,sp /* pop params */idleInstDone: bra idleNoInstr; lds.l @sp+,pr .align 2ILI_IntLockSR: .long _intLockTaskSRILI_WvEvtClass: .long _wvEvtClassILI_WV_CLASS_1_ON: .long WV_CLASS_1_ON /* 0x10000001 */ILI_EvtLogT0: .long __func_evtLogT0ILI_TrgEvtClass: .long _trgEvtClassILI_TRG_CLASS_1_ON: .long TRG_CLASS_1_ON /* 0x10000001 */ILI_TrgCheck: .long __func_trgCheck#endif /* !PORTABLE */#endif /* WV_INSTRUMENTATION *//******************************************************************************** windExitInt - exit kernel routine from interrupt level** windExit branches here if exiting kernel routine from int level.* No rescheduling is necessary because the ISR will exit via intExit, and* intExit does the necessary rescheduling. 
Before leaving kernel state* the work queue is emptied.*/ .align _ALIGN_TEXT .type windExitIntWork,@function .type windExitInt,@functionwindExitIntWork: /* windExitIntWork (r2) */ mov.l WXI_WorkQDoWork,r1; sts.l pr,@-sp#if (CPU==SH7600 || CPU==SH7000) jsr @r1; ldc r2,sr#else ldc r2,sr; /* UNLOCK INTERRUPTS */ jsr @r1; /* empty the work queue */ nop#endif lds.l @sp+,prwindExitInt: /* windExitInt (void) */ mov.l WXI_IntLockSR,r4; mov.l WXI_WorkQIsEmpty,r5; mov.l @r4,r1; stc sr,r2 /* r2: old sr */ ldc r1,sr /* LOCK INTERRUPTS */ mov.l @r5,r1; /* r1: _workQIsEmpty */ tst r1,r1 /* test for work to do */ bt windExitIntWork /* workQueue is not empty */#ifdef WV_INSTRUMENTATION mov.l WXI_EvtAction,r1; mov.l @r1,r0; tst r0,r0 bt windExitIntNoInstr; sts.l pr,@-sp bsr noDispatchInstr; mov.l r2,@-sp mov.l @sp+,r2 lds.l @sp+,prwindExitIntNoInstr:#endif mov.l WXI_KernelState,r6; xor r0,r0 /* r0: NULL */ mov.l r0,@r6 /* release exclusion */#if (CPU==SH7600 || CPU==SH7000) rts; ldc r2,sr#else ldc r2,sr /* UNLOCK INTERRUPTS */ rts; /* back to calling task */ nop /* return 0 (r0) */#endif .align 2WXI_WorkQDoWork: .long _workQDoWorkWXI_IntLockSR: .long _intLockTaskSRWXI_WorkQIsEmpty: .long _workQIsEmptyWXI_KernelState: .long _kernelState#ifdef WV_INSTRUMENTATIONWXI_EvtAction: .long _evtAction#endif/******************************************************************************** checkTaskReady - check that taskIdCurrent is ready to run** This code branched to by windExit when it finds preemption is disabled.* It is possible that even though preemption is disabled, a context switch* must occur. This situation arrises when a task block during a preemption* lock. So this routine checks if taskIdCurrent is ready to run, if not it* branches to save the context of taskIdCurrent, otherwise it falls thru to* check the work queue for any pending work. 
The h/private/taskLibP.h defines* WIND_TCB_STATUS as 0x3c, and WIND_READY as 0x00.*/ .align _ALIGN_TEXT .type checkTaskReady,@function /* r4: ReadyQHead */ /* r5: TaskIdCurrent */ /* r6: _readyQHead */checkTaskReady: /* r7: _taskIdCurrent */ mov #WIND_TCB_STATUS,r0 mov.l @(r0,r7),r1; tst r1,r1 /* is task ready to run? */ bf saveTaskContext /* if no, we blocked with preempt off */ /* FALL THRU TO CHECK WORK QUEUE *//******************************************************************************** checkWorkQ - check the work queue for any work to do** This code is branched to by windExit. Currently taskIdCurrent is highest* priority ready task, but before we can return to it we must check the work* queue. If there is work we empty it via doWorkPreSave, otherwise we unlock* interrupts, clear r0, and return to taskIdCurrent.*/ .type checkWorkQ,@function /* r4: ReadyQHead */ /* r5: TaskIdCurrent */ /* r6: _readyQHead */checkWorkQ: /* r7: _taskIdCurrent */ mov.l CQ_IntLockSR,r2; /* r2: IntLockSR */ mov.l CQ_WorkQIsEmpty,r3; /* r3: WorkQIsEmpty */ mov.l @r2,r1; ldc r1,sr /* LOCK INTERRUPTS */ mov.l @r3,r1; tst r1,r1 /* test for work to do */ bt doWorkPreSave /* workQueue is not empty */#ifdef WV_INSTRUMENTATION mov.l CQ_EvtAction,r1; mov.l @r1,r0; tst r0,r0 bt checkWorkQNoInstr sts.l pr,@-sp bsr noDispatchInstr; nop lds.l @sp+,prcheckWorkQNoInstr:#endif#if (CPU==SH7600 || CPU==SH7000) mov.l CQ_KernelState,r1; xor r0,r0 /* r0: zero */ mov.l r0,@r1 /* release exclusion */ rts; /* back to calling task (return 0) */ ldc r0,sr /* UNLOCK INTERRUPTS */#else /* CPU==SH7750 || CPU==SH7700 */ mov.l CQ_KernelState,r1; mov.l CQ_intUnlockSR,r3 xor r0,r0; mov.l @r3,r2 mov.l r0,@r1; ldc r2,sr rts; /* return 0 */ nop .align 2CQ_intUnlockSR: .long _intUnlockSR#endif /* CPU==SH7750 || CPU==SH7700 */ .align 2CQ_IntLockSR: .long _intLockTaskSRCQ_WorkQIsEmpty: .long _workQIsEmptyCQ_KernelState: .long _kernelState#ifdef WV_INSTRUMENTATIONCQ_EvtAction: .long 
_evtAction#endif/******************************************************************************** doWorkPreSave - empty the work queue with current context not saved** We try to empty the work queue here, rather than let reschedule* perform the work because there is a strong chance that the* work we do will not preempt the calling task. If this is the case, then* saving the entire context just to restore it in reschedule is a waste of* time. Once the work has been emptied, the ready queue must be checked to* see if reschedule must be called, the check of the ready queue is done by* branching back up to checkTaskCode.*/ .align _ALIGN_TEXT .type doWorkPreSave,@function /* r7: _taskIdCurrent */ /* r6: _readyQHead */ /* r5: TaskIdCurrent */ /* r4: ReadyQHead */ /* r3: WorkQIsEmpty */ /* r2: IntLockSR */doWorkPreSave: /* r1: NULL (to unlock interrupts) */#if (CPU==SH7600 || CPU==SH7000) mov.l PS_WorkQDoWork,r0; sts.l pr,@-sp jsr @r0; /* empty the work queue */ ldc r1,sr /* UNLOCK INTERRUPTS */#else mov.l PS_intUnlockSR,r2 mov.l PS_WorkQDoWork,r0; mov.l @r2,r1 sts.l pr,@-sp; ldc r1,sr jsr @r0; nop#endif bra checkTaskSwitch; /* back up to test if tasks switched */ lds.l @sp+,pr /* r0-r7: (invalid) */ .align 2PS_WorkQDoWork: .long _workQDoWork /* void workQDoWork (void) */#if (CPU!=SH7600 && CPU!=SH7000)PS_intUnlockSR: .long _intUnlockSR#endif/******************************************************************************** windExit - task level exit from kernel** Release kernel mutual exclusion (kernelState) and dispatch any new task if* necessary. If a higher priority task than the current task has been made* ready, then we invoke the rescheduler. Before releasing mutual exclusion,* the work queue is checked and emptied if necessary.** If rescheduling is necessary, the context of the calling task is saved in its* associated TCB with the PC pointing at the next instruction after the jsr to* this routine. 
This is simply done by saving PR as PC, thus the context saved* is as if this routine was never called. The PR itself will be poped from the* stack, as shown in the diagram below. Only the volatile registers r0..r7 are* safe to use until the context is saved in saveTaskContext.** RETURNS: OK or ERROR if semaphore timeout occurs.** NOMANUAL* STATUS windExit ()* INTERNAL* |* ex. semDelete () | r0 - r7: free to use* { | mac[hl]: free to use* push pr |* jsr semDestroy | r8 - r14: must be saved before use.* { |* push pr | sp: points at pr value on the stack.* call windExit | this pr value is the semDestroy* $1: pop pr | return address, namely $2.* rts |* } | pr: holds the windExit return address,* $2: pop pr | namely $1.* rts |* } | vbr,gbr: doesn't matter.* +-------+ |* sp-> | $2 | |* |* pr [ $1 ] |*/ .align _ALIGN_TEXT .type _windExit,@function_windExit: /* STATUS windExit (void); */ mov.l WX_IntCnt,r1; mov.l @r1,r0; tst r0,r0 /* if intCnt == 0 we're from task */ bf windExitInt /* else we're exiting interrupt code */ /* FALL THRU TO CHECK THAT CURRENT TASK IS STILL HIGHEST *//******************************************************************************** checkTaskSwitch - check to see if taskIdCurrent is still highest task** We arrive at this code either as the result of falling thru from windExit,* or if we have finished emptying the work queue. We compare taskIdCurrent* with the highest ready task on the ready queue. If they are same we* go to a routine to check the work queue. If they are different and preemption* is allowed we branch to a routine to make sure that taskIdCurrent is really* ready (it may have blocked with preemption disabled). 
If they are different* we save the context of taskIdCurrent and fall thru to reschedule.*/ .type checkTaskSwitch,@functioncheckTaskSwitch: /* r0-r7: (invalid) */ mov.l WX_TaskIdCurrent,r5; /* r5: TaskIdCurrent */ mov.l WX_ReadyQHead,r4; /* r4: ReadyQHead */ mov.l @r5,r7; /* r7: _taskIdCurrent */ mov.l @r4,r6; /* r6: _readyQHead */ cmp/eq r7,r6 /* compare highest ready task */ bt checkWorkQ /* if same then time to leave (^) */ mov #WIND_TCB_LOCK_CNT,r0 mov.l @(r0,r7),r1; tst r1,r1 /* is task preemption allowed */ bf checkTaskReady /* if no, check task is ready (^) */ /* FALL THRU TO SAVE THE CURRENT TASK CONTEXT IN TCB *//******************************************************************************** saveTaskContext - save the current task context** We arrive at this code either as the result of falling thru from checkTask-* Switch, or if taskIdCurrent is not ready at checkTaskReady. Save all the
/* NOTE(review): web-viewer keyboard-shortcut help text (non-source scrape
 * artifact) removed here; it was not part of the original windALib.s. */