intalib.s
VxWorks BSP framework source, including header files and drivers (assembly source, page 1 of 4)

	lwz	p1, _PPC_ESF_CR(sp)		/* load saved CR */
	stw	p1, WIND_TCB_CR(p0)		/* store CR to tcb */
	lwz	sp, 0(sp)			/* recover stack */
	stw	sp, WIND_TCB_SP(p0)		/* store SP to tcb */
	mfspr	p1, CTR				/* load CTR to p1 */
	stw	p1, WIND_TCB_CTR(p0)		/* store CTR to tcb */
	mfspr	p1, XER				/* load XER to p1 */
	stw	p1, WIND_TCB_XER(p0)		/* store XER to tcb */

#if	(CPU==PPC601)
	mfspr	p1, MQ				/* load MQ to p1 */
	stw	p1, WIND_TCB_MQ(p0)		/* store MQ to tcb */
#endif	/* (CPU==PPC601) */

#if	(CPU==PPC85XX)
	mfspr	p1, SPEFSCR			/* load SPEFSCR to p1 */
	stw	p1, WIND_TCB_SPEFSCR(p0)	/* store SPEFSCR to tcb */
#endif	/* (CPU==PPC85XX) */

#if	TRUE
	stw	p4, WIND_TCB_P4(p0) 		/* store P4 to tcb */
	stw	p5, WIND_TCB_P5(p0) 		/* store P5 to tcb */
	stw	p6, WIND_TCB_P6(p0) 		/* store P6 to tcb */
	stw	p7, WIND_TCB_P7(p0) 		/* store P7 to tcb */
	stw	r11, WIND_TCB_R11(p0) 		/* store r11 to tcb */
	stw	r12, WIND_TCB_R12(p0) 		/* store r12 to tcb */
	stw	r13, WIND_TCB_R13(p0) 		/* store r13 to tcb */
	stw	t0, WIND_TCB_T0(p0) 		/* store t0 to tcb */
	stw	t1, WIND_TCB_T1(p0) 		/* store t1 to tcb */
	stw	t2, WIND_TCB_T2(p0) 		/* store t2 to tcb */
	stw	t3, WIND_TCB_T3(p0) 		/* store t3 to tcb */
	stw	t4, WIND_TCB_T4(p0) 		/* store t4 to tcb */
	stw	t5, WIND_TCB_T5(p0) 		/* store t5 to tcb */
	stw	t6, WIND_TCB_T6(p0) 		/* store t6 to tcb */
	stw	t7, WIND_TCB_T7(p0) 		/* store t7 to tcb */
	stw	t8, WIND_TCB_T8(p0) 		/* store t8 to tcb */
	stw	t9, WIND_TCB_T9(p0) 		/* store t9 to tcb */
	stw	t10, WIND_TCB_T10(p0) 		/* store t10 to tcb */
	stw	t11, WIND_TCB_T11(p0) 		/* store t11 to tcb */
	stw	t12, WIND_TCB_T12(p0) 		/* store t12 to tcb */
	stw	t13, WIND_TCB_T13(p0) 		/* store t13 to tcb */
	stw	t14, WIND_TCB_T14(p0) 		/* store t14 to tcb */
	stw	t15, WIND_TCB_T15(p0) 		/* store t15 to tcb */
	stw	t16, WIND_TCB_T16(p0) 		/* store t16 to tcb */
	stw	t17, WIND_TCB_T17(p0) 		/* store t17 to tcb */
#else
	stmw    p4, _PPC_ESF_P2(p0)             /* save general registers 7 */
						/* through 31 */
#endif

	/* unlock interrupts and set MSR's FP, RI and CE bits if they exist */

	mfmsr	p2				/* load msr */
#ifdef  _PPC_MSR_RI
# ifdef _PPC_MSR_FP
	ori	p2, p2, _PPC_MSR_RI | _PPC_MSR_EE | _PPC_MSR_FP
# else
	ori	p2, p2, _PPC_MSR_RI | _PPC_MSR_EE
# endif /* _PPC_MSR_FP */
#else   /* _PPC_MSR_RI */
# ifdef _PPC_MSR_FP
	ori	p2, p2, _PPC_MSR_EE | _PPC_MSR_FP
# else
	ori	p2, p2, _PPC_MSR_EE
# endif /* _PPC_MSR_FP */
#endif  /* _PPC_MSR_RI */
#ifdef	_PPC_MSR_CE_U
	oris	p2, p2, _PPC_MSR_CE_U		/* set CE bit (critical intr) */
#endif 	/* _PPC_MSR_CE_U */
	mtmsr	p2				/* UNLOCK INTERRUPT */
	isync
	stwu    sp, -FRAMEBASESZ(sp)		/* carve stack frame */
	b	FUNC(reschedule)		/* goto rescheduler */
FUNC_END(intExit)
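/*
* The conditional ori/oris sequence above simply ORs the optional MSR enable
* bits into the saved MSR image before mtmsr re-enables interrupts.  The C
* sketch below is a minimal model of that selection logic, assuming the
* _PPC_MSR_* macros hold the usual mask values; the helper name
* msrUnlockBitsGet() is hypothetical.
*
* .CS
*     #include "vxWorks.h"
*
*     UINT32 msrUnlockBitsGet
*         (
*         UINT32 msr				/@ saved MSR image @/
*         )
*         {
*         msr |= _PPC_MSR_EE;			/@ external interrupt enable @/
*     #ifdef _PPC_MSR_RI
*         msr |= _PPC_MSR_RI;			/@ recoverable interrupt, if present @/
*     #endif
*     #ifdef _PPC_MSR_FP
*         msr |= _PPC_MSR_FP;			/@ floating point, if present @/
*     #endif
*     #ifdef _PPC_MSR_CE_U
*         msr |= (_PPC_MSR_CE_U << 16);	/@ critical intr enable (upper half, as oris does) @/
*     #endif
*         return (msr);
*         }
* .CE
*/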
/*******************************************************************************
*
* intLock - lock out interrupts
*
* This routine disables interrupts.
*
* IMPORTANT CAVEAT
* The routine intLock() can be called from either interrupt or task level.
* When called from a task context, the interrupt lock level is part of the
* task context.  Locking out interrupts does not prevent rescheduling.
* Thus, if a task locks out interrupts and invokes kernel services that
* cause the task to block (e.g., taskSuspend() or taskDelay()) or that cause
* a higher priority task to be ready (e.g., semGive() or taskResume()), then
* rescheduling will occur and interrupts will be unlocked while other tasks
* run.  Rescheduling may be explicitly disabled with taskLock().
*
* EXAMPLE
* .CS
*     lockKey = intLock ();
*
*      ...
*
*     intUnlock (lockKey);
* .CE
*
* RETURNS
* An architecture-dependent lock-out key for the interrupt state
* prior to the call.
*
* SEE ALSO: intUnlock(), taskLock()
*
* int intLock ()
*/

FUNC_BEGIN(intLock)
	mfmsr	 p0			/* load msr to parm0 */
	INT_MASK(p0, p1)		/* mask EE bit */
	mtmsr	 p1			/* LOCK INTERRUPT */
	isync				/* SYNC XXX TPR not requested */
	blr				/* return to the caller */
FUNC_END(intLock)
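/*
* A minimal usage sketch of the caveat described above: at task level, only
* short, non-blocking work should sit between intLock() and intUnlock(),
* since a blocking kernel call would let rescheduling re-enable interrupts.
* The shared counter and routine name below are hypothetical.
*
* .CS
*     #include "vxWorks.h"
*     #include "intLib.h"
*
*     LOCAL volatile UINT32 sharedCount;	/@ hypothetical shared data @/
*
*     void sharedCountBump (void)
*         {
*         int lockKey;
*
*         lockKey = intLock ();		/@ lock out interrupts @/
*         sharedCount++;			/@ short, non-blocking update @/
*         intUnlock (lockKey);		/@ restore previous state @/
*         }
* .CE
*/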
/*******************************************************************************
*
* intUnlock - cancel interrupt locks
*
* This routine re-enables interrupts that have been disabled by the routine
* intLock().  Use the architecture-dependent lock-out key obtained from the
* preceding intLock() call.
*
* RETURNS: N/A
*
* SEE ALSO: intLock()
*
* void intUnlock
*	(
*	int lockKey
*	)
*/

FUNC_BEGIN(intUnlock)
#ifdef	_PPC_MSR_CE_U
	rlwinm	p0,p0,0,14,16		/* select EE and CE bit in lockKey */
	rlwinm	p0,p0,0,16,14
#else	/* _PPC_MSR_CE_U */
	rlwinm	p0,p0,0,16,16		/* select EE bit in lockKey */
#endif 	/* _PPC_MSR_CE_U */
	mfmsr	p1			/* move MSR to parm1 */
	or	p0,p1,p0		/* restore EE bit (and CE for 403) */
	mtmsr	p0			/* UNLOCK INTERRUPTS */
	isync				/* Instruction SYNChronization XXX */
	blr				/* return to the caller */
FUNC_END(intUnlock)
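/*
* Because intUnlock() only restores the EE (and CE) state captured in the
* lock-out key, lock/unlock pairs nest safely.  The sketch below is a
* minimal, hypothetical example; doWork() is a placeholder for short,
* non-blocking work.
*
* .CS
*     #include "vxWorks.h"
*     #include "intLib.h"
*
*     void doWork (void);			/@ hypothetical helper @/
*
*     void inner (void)
*         {
*         int innerKey = intLock ();	/@ key records "already locked" @/
*
*         doWork ();
*         intUnlock (innerKey);		/@ EE stays cleared: key had EE = 0 @/
*         }
*
*     void outer (void)
*         {
*         int outerKey = intLock ();	/@ interrupts now locked @/
*
*         inner ();			/@ nested lock/unlock pair @/
*         doWork ();			/@ still locked here @/
*         intUnlock (outerKey);		/@ re-enabled if enabled on entry @/
*         }
* .CE
*/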
#ifdef	_PPC_MSR_CE_U

/*******************************************************************************
*
* intCrtEnt - catch and dispatch external critical interrupt
*
* This is the interrupt dispatcher pointed to by the PowerPC exception vector.
* It saves the context when an interrupt happens and returns to the stub.
*
* NOMANUAL
*
* void intCrtEnt()
*/

FUNC_BEGIN(intCrtEnt)
	/* At the entry of this function, the following is done */
	/* mtspr	SPRG2, p0	/@ save P0 to SPRG2 */
	/* mfspr	p0, LR		/@ load LR to P0 */
	/* bla		intCrtEnt	/@ save vector in LR, jump to handler */

	mtspr	SPRG0, p1		/* save P1 to SPRG0: free p1 up */

	/*
	 * We may have a Critical Exception occurring at the very beginning
	 * of the processing of a normal external interrupt, right before
	 * the MSR[CE] bit is cleared in the stub excConnectCode. Therefore
	 * it is necessary to save the value of SPRG3 in this Critical
	 * exception handler before using it, and to restore it afterwards.
	 */

	mfspr	p1, SPRG3		/* push SPRG3 on the current stack */
	stw	p1, -4(sp)		/* either the interrupt one or the */
					/* interrupted task one */
	mfspr	p1, SPRG2		/* move initial p0 from SPRG2 */
	mtspr	SPRG3, p1		/* to SPRG3 */
	mtspr	SPRG2, p0		/* save LR to SPRG2 */

	mfcr	p1 			/* save CR to P1 before it is changed */

	/*
	 * On the PowerPC 60X, when an external interrupt is taken, the
	 * processor turns the MMU off. At that moment the cache is no
	 * longer controlled by the WIMG bits of the MMU. If the data cache
	 * is on, the values of the SP, CR, PC and MSR registers are written
	 * to the cache and not to memory.
	 * The interrupt handler should execute with the MMU in the same
	 * state as the non-interrupt code. This stub should therefore
	 * re-enable the data and/or instruction MMU if they were enabled
	 * before the interrupt.
	 * By re-enabling the MMU, the cache is again controlled via the
	 * WIMG bits. If the memory pages which hold the interrupt stack are
	 * marked non-cacheable, the cache is turned off for all memory
	 * accesses in this area.
	 * In intCrtExit() the SP, CR, PC, and MSR register values should be
	 * restored. Because the MMU is on, the processor reads memory to get
	 * the register values, but the true values were saved in the cache.
	 * In this case the restored values are corrupted and can crash
	 * VxWorks. To avoid this memory coherency problem, the data cache
	 * should be locked.
	 */

	/*
	 * The special purpose register SPRG1 contains the real interrupt
	 * nesting count.  It is initialized in windIntStackSet() in windALib.s.
	 * Because intCnt is faked in windTickAnnounce(), intCnt cannot be
	 * used to check for nested interrupts when switching stacks.
	 */

	mfspr	p0, SPRG1		/* get nested count */
	addi	p0, p0, 1		/* increment nested count */
	mtspr	SPRG1, p0		/* update nested count */

	/*
	 * The PowerPC family doesn't support the notion of an interrupt
	 * stack. To avoid adding the size of the interrupt stack to each
	 * task stack, this stub switches from the task stack to the
	 * interrupt stack by changing the value of the SP (R1).
	 * This switch should be performed only if the interrupt is not
	 * a nested interrupt.
	 */

	cmpwi	p0, 1			/* test for nested interrupt */
	beq	intCrtStackSwitch	/* no, switch to interrupt stack */
	stwu	sp, -_PPC_ESF_STK_SIZE(sp)	/* we already use int. stack */
	b	intCrtSaveCritical		/* save old stack pointer */

intCrtStackSwitch:
	addi	p0, sp, 0			/* save current SP to P0 */
	lis	sp, HIADJ(vxIntStackBase)	/* load int. stack base addr. */
	lwz	sp, LO(vxIntStackBase)(sp)  	/* in SP */
	stwu	p0,  -_PPC_ESF_STK_SIZE(sp)	/* carve stack */

intCrtSaveCritical:
	stw	p1, _PPC_ESF_CR(sp)		/* save CR */

	mfspr	p0, CRIT_SAVE_PC		/* load P0 with CRIT_SAVE_PC */
	stw	p0, _PPC_ESF_PC(sp)		/* save in ESF */
	mfspr	p1, CRIT_SAVE_MSR		/* load P1 with CRIT_SAVE_MSR */
	stw	p1, _PPC_ESF_MSR(sp)		/* save in ESF */

#if	(CPU == PPC405F)
        /*
         * we need the following fix for certain versions of PPC405F
         */
# ifdef PPC405F_ERRATA_18
        mfspr   p0, LR                          /* save current LR */
	stw	p0, _PPC_ESF_LR(sp)
        bl      fpCrtfix	                /* handle fix */
        lwz	p0, _PPC_ESF_LR(sp)
        mtspr   LR, p0				/* restore current LR */
# endif	/* PPC405F_ERRATA_18 */
#endif	/* (CPU == PPC405F) */

	mfspr	p0, SPRG2			/* load saved LR to P0 */
	stw	p0, _PPC_ESF_LR(sp)		/* save LR */

	mfspr	p0, SPRG3			/* reload saved P0 */
	stw	p0, _PPC_ESF_P0(sp) 		/* save P0 to the stack */

	lwz	p1, 0(sp)			/* load stack where was saved */
	lwz	p1, -4(p1)			/* SPRG3 and restore SPRG3 */
	mtspr	SPRG3, p1			/* prior to the CE exception */

	mfspr	p1, SPRG0			/* reload saved P1 */
	stw	p1, _PPC_ESF_P1(sp) 		/* save P1 to the stack */

        lis     p0, HIADJ(errno)
        lwz     p0, LO(errno)(p0)		/* load ERRNO to P0 and */
	stw	p0, _PPC_ESF_ERRNO(sp)		/* save ERRNO to the stack */

        lis     p1, HIADJ(intCnt)
        lwz     p0, LO(intCnt)(p1)		/* load intCnt value */
        addi    p0, p0, 1			/* increment intCnt value */
        stw     p0, LO(intCnt)(p1)		/* save new intCnt value */

	/*
	 * At this point we should restore the preexisting MMU state, so that
	 * the exception handler can run in the correct memory protection and
	 * caching context. This requires fixing SPR 78819 & 78780.
	 *
	 * SPR 78738 also needs to be taken into account, to flush the items
	 * just written to cache, to maintain coherency between MMU 'on' and
	 * 'off' data.
	 */

	/*
	 * The critical status is saved at this stage. The interrupt should
	 * be re-enabled as soon as possible to reduce interrupt latency.
	 * However, there is only one mask bit on PowerPC; masking of each
	 * individual interrupt is done at the interrupt controller level.
	 * Thus, we save the task's registers first, then call the interrupt
	 * controller routine to decide whether the interrupt should be
	 * re-enabled or not.
	 */

	mfspr	p0, XER				/* load XER to P0 */
	stw	p0, _PPC_ESF_XER(sp)		/* save XER to the stack */
	mfspr	p0, CTR				/* load CTR to P0 */
	stw	p0, _PPC_ESF_CTR(sp)		/* save CTR to the stack */

#if	FALSE
	lwz	p0, 0(sp)
	stw	p0, _PPC_ESF_SP(sp)
#endif	/* FALSE */

	stw	r0, _PPC_ESF_R0(sp)		/* save general register 0 */
	stw	r2, _PPC_ESF_R2(sp)		/* save general register 2 */
	stw	p2, _PPC_ESF_P2(sp) 		/* save general register 5 */
	stw	p3, _PPC_ESF_P3(sp) 		/* save general register 6 */
	stw	p4, _PPC_ESF_P4(sp) 		/* save general register 7 */
	stw	p5, _PPC_ESF_P5(sp) 		/* save general register 8 */
	stw	p6, _PPC_ESF_P6(sp) 		/* save general register 9 */
	stw	p7, _PPC_ESF_P7(sp) 		/* save general register 10 */
	stw	r11, _PPC_ESF_R11(sp)		/* save general register 11 */
	stw	r12, _PPC_ESF_R12(sp)		/* save general register 12 */
	stw	r13, _PPC_ESF_R13(sp)		/* save general register 13 */

	/*
	 * The callee should save the non-volatile registers, thus they are
	 * not saved here. An assembly routine should not use these registers,
	 * or should follow the calling convention by saving them before use.
	 */

        blr					/* return */
FUNC_END(intCrtEnt)
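/*
* The nesting-count and stack-switch logic of intCrtEnt() can be summarised
* by the C sketch below.  This is a simplified model, not the actual
* implementation: crtStackCarve(), its parameters and the pointer types are
* hypothetical stand-ins for SPRG1, vxIntStackBase and _PPC_ESF_STK_SIZE.
*
* .CS
*     char * crtStackCarve
*         (
*         char *	sp,		/@ current stack pointer (R1) @/
*         char *	intStackBase,	/@ base of the interrupt stack @/
*         unsigned *	pNestCount,	/@ models the SPRG1 nesting count @/
*         unsigned	esfSize		/@ models _PPC_ESF_STK_SIZE @/
*         )
*         {
*         char * oldSp = sp;
*
*         if (++(*pNestCount) == 1)	/@ first level of nesting? @/
*             sp = intStackBase;	/@ yes: leave the task stack @/
*
*         sp -= esfSize;		/@ carve an exception stack frame @/
*         *(char **) sp = oldSp;	/@ save previous SP at frame base (stwu) @/
*         return (sp);
*         }
* .CE
*/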
/*******************************************************************************
*
* intCrtExit - exit a critical interrupt service routine
*
* Check the kernel ready queue to determine if rescheduling is necessary.  If
* no higher priority task has been readied, and no kernel work has been queued,
* then we return to the interrupted task.
*
* If rescheduling is necessary, the context of the interrupted task is saved in
* its associated TCB with the PC, MSR in CRIT_SAVE_PC, CRIT_SAVE_MSR.
*
* This routine must be branched to when exiting an interrupt service routine.
* This normally happens automatically, from the stub built by
* excIntCrtConnect (2).
*
* This routine can NEVER be called from C.
*
* INTERNAL
* This routine must preserve all registers up until the context is saved,
* so any registers that are used to check the queues must first be saved on
* the stack.
*
* At the call to reschedule the value of taskIdCurrent must be in p0.
*
* SEE ALSO: excIntCrtConnect(2)
*
* void intCrtExit ()
*/

FUNC_BEGIN(intCrtExit)
	mfmsr	p0				/* read msr to p0 */
#ifdef	_PPC_MSR_RI
	RI_MASK (p0, p0)			/* mask RI bit */
#endif	/* _PPC_MSR_RI */
	INT_MASK(p0, p0)			/* clear EE and RI bit */
	mtmsr	p0				/* DISABLE INTERRUPT */
	isync					/* synchronize */

#ifdef  WV_INSTRUMENTATION
        /* windview instrumentation - BEGIN
         * log event if work has been done in the interrupt handler.
         */
        lis     p6, HIADJ(evtAction)            /* is WindView on? */
        lwz     p0, LO(evtAction)(p6)
        cmpwi   p0, 0
        beq     noActionIntCrtExit              /* if not, exit */

        lis     p6, HIADJ(wvEvtClass)           /* is windview on? */
        lwz     p4, LO(wvEvtClass)(p6)
        lis     p1, HI(WV_CLASS_1_ON)
        ori     p1, p1, LO(WV_CLASS_1_ON)
        and     p6, p4, p1
        cmpw    p1, p6
        bne     trgCheckIntCrtExit              /* if not, go to trigger */

        lis     p3, HIADJ(workQIsEmpty)         /* is work queue empty? */
        lwz     p2, LO(workQIsEmpty)(p3)
        cmpwi   p2, 0
        beq     intCrtExitNoK
        li      p0, EVENT_INT_EXIT_K
        b       intCrtExitCont

intCrtExitNoK:
        li      p0, EVENT_INT_EXIT              /* get event id */

intCrtExitCont:
        stwu    sp, -(FRAMEBASESZ+_STACK_ALIGN_SIZE)(sp) /* stack frame */
        mfspr   p6, LR                          /* read lr to p6 */
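/*
* The exit decision described in the intCrtExit() header (the listing above
* is truncated at this page boundary) can be summarised by the C sketch
* below: if kernel work was queued or a higher priority task was readied
* during the ISR, the interrupted task's context is saved and the
* rescheduler is entered; otherwise control returns directly to the
* interrupted task.  The routine and flag names are hypothetical stand-ins
* for the kernel internals, with reschedule modelled here as a C call.
*
* .CS
*     #include "vxWorks.h"
*
*     void saveInterruptedTaskContext (void);	/@ hypothetical @/
*     void reschedule (void);			/@ models b FUNC(reschedule) @/
*
*     void crtIntExitModel
*         (
*         BOOL	workQueued,		/@ kernel work queued during ISR? @/
*         BOOL	higherPriorityReady	/@ higher priority task readied? @/
*         )
*         {
*         if (workQueued || higherPriorityReady)
*             {
*             saveInterruptedTaskContext ();	/@ PC/MSR from CRIT_SAVE_PC/MSR @/
*             reschedule ();
*             }
*         /@ else: simply return to the interrupted task @/
*         }
* .CE
*/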
