
cachealib.s

VxWorks BSP framework source code, including header files and drivers
Page 1 of 5
	BX	lr
#else
	MOV	pc, lr
#endif
#endif /* ARMCACHE == SA110,1100,1500,920T,926E,940T,946E,XSCALE,1020E,1022E */

/*******************************************************************************
*
* cacheArchPipeFlush - drain Write Buffer (ARM)
*
* This routine drains the write-buffer.
*
* NOMANUAL
*
* RETURNS: N/A
*
* SEE ALSO:
* .I "ARM Architecture Reference"
*
* void cacheArchPipeFlush (void)
*/

_ARM_FUNCTION_CALLED_FROM_C(cacheArchPipeFlush)

#if ((ARMCACHE == ARMCACHE_SA110)  || (ARMCACHE == ARMCACHE_SA1100) || \
     (ARMCACHE == ARMCACHE_SA1500) || (ARMCACHE == ARMCACHE_920T)   || \
     (ARMCACHE == ARMCACHE_926E)   || (ARMCACHE == ARMCACHE_946E)   || \
     (ARMCACHE == ARMCACHE_XSCALE) || (ARMCACHE == ARMCACHE_1020E)  || \
     (ARMCACHE == ARMCACHE_1022E))

        /* On later cache/MMUs there is an explicit MMU op to do this */

#if ((ARMCACHE == ARMCACHE_920T)   || (ARMCACHE == ARMCACHE_926E)   || \
     (ARMCACHE == ARMCACHE_946E)   || (ARMCACHE == ARMCACHE_1020E)  || \
     (ARMCACHE == ARMCACHE_1022E))
        MOV     r0, #0                          /* Data SBZ */
#endif
        MCR     CP_MMU, 0, r0, c7, c10, 4       /* Drain write-buffer */
#if ((ARMCACHE == ARMCACHE_1020E) && ARMCACHE_1020E_REV0_MCR_CP15)
        NOP
        NOP
#endif /* ((ARMCACHE == ARMCACHE_1020E) && ARMCACHE_1020E_REV0_MCR_CP15) */
#endif /* (ARMCACHE == ARMCACHE_SA*,920T,926E,946E,XSCALE,1020E,1022E) */

#if ((ARMCACHE == ARMCACHE_710A) || (ARMCACHE == ARMCACHE_720T) || \
     (ARMCACHE == ARMCACHE_740T) || (ARMCACHE == ARMCACHE_810))
	/*
	 * On 710A/810, there is no explicit MMU op to do this. However
	 * a SWPB (read-lock-write instruction) always acts as an
	 * unbuffered write. Strict ordering of read and write operations
	 * is preserved on 710A/810/740T/720T/710T, so any external read e.g.
	 * uncacheable read or unbuffered write will stall until the
	 * write buffer has drained.
	 */

	LDR	r0, L$_cacheSwapVar		/* R0 -> FUNC(_cacheSwapVar) */
	SWPB	r1, r1, [r0]
#endif

#if (ARMCACHE == ARMCACHE_926E)
        /*
         * On ARM 926E, we have already drained the write-buffer itself above
         * via the MMU co-proc to do this. However, in addition to this, at
         * the sort of times that we drain the write-buffer, we also may need
         * to synchronise the data and instruction streams in Level 2 AHB
         * subsystems after draining the write-buffer.
         */

        LDR     r1, L$_sysCacheUncachedAdrs
        LDR     r1, [r1]                /* R1 -> uncached area */
        LDR     r1, [r1]                /* synchronise I and D streams */
#endif /* (ARMCACHE == ARMCACHE_926E) */

#if (ARMCACHE == ARMCACHE_940T)
	/*
	 * On 940T, there is no explicit MMU op to do this and the
	 * special pleading in the 710A/810 etc. is also absent. However,
	 * any read from an uncached area will stall until the
	 * write-buffer has been drained.
	 */

	LDR	r0, L$_sysCacheUncachedAdrs
	LDR	r0, [r0]		/* R0 -> uncached area */
	LDR	r0, [r0]		/* drain write-buffer */
#endif /* (ARMCACHE == ARMCACHE_940T) */

#if ((ARMCACHE != ARMCACHE_920T)   && (ARMCACHE != ARMCACHE_926E)   && \
     (ARMCACHE != ARMCACHE_946E)   && (ARMCACHE != ARMCACHE_1020E)  && \
     (ARMCACHE != ARMCACHE_1022E))
        /* Already done it above on these caches, before MMU op */
        MOV     r0, #OK                 /* should return STATUS, SPR #22258 */
#endif /* (ARMCACHE != ARMCACHE_920T,926E,946E,1020E,1022E) */

#if (ARM_THUMB)
	BX	lr
#else
	MOV	pc, lr
#endif

#if ARMCACHE_NEEDS_IMB
/*******************************************************************************
*
* cacheIMB - issue Instruction Memory Barrier (IMB) instruction (ARM)
*
* This routine executes an Instruction Memory Barrier instruction to flush the
* Prefetch Unit on the ARM810.
*
* NOMANUAL
*
* RETURNS: N/A
*
* void cacheIMB (void)
*/

_ARM_FUNCTION_CALLED_FROM_C(cacheIMB)

	STMFD	sp!, {lr}	/* The SWI will overwrite LR */
	SWI	0xF00000
#if (ARM_THUMB)
	LDMFD	sp!, {lr}
	BX	lr
#else
	LDMFD	sp!, {pc}
#endif

/*******************************************************************************
*
* cacheIMBRange - issue IMBRange instruction (ARM)
*
* This routine executes an Instruction Memory Barrier Range instruction
* to flush some of the Prefetch Unit on the ARM810.
*
* NOMANUAL
*
* RETURNS: N/A
*
* void cacheIMBRange (INSTR * startAddr, INSTR * endAddr)
*/

_ARM_FUNCTION_CALLED_FROM_C(cacheIMBRange)

	STMFD	sp!, {lr}
	SWI	0xF00001
#if (ARM_THUMB)
	LDMFD	sp!, {lr}
	BX	lr
#else
	LDMFD	sp!, {pc}
#endif
#endif /* ARMCACHE_NEEDS_IMB */

#if ((ARMCACHE == ARMCACHE_926E)   || (ARMCACHE == ARMCACHE_946E)   || \
     (ARMCACHE == ARMCACHE_1020E)  || (ARMCACHE == ARMCACHE_1022E))
/*******************************************************************************
*
* cacheIdentify - identify type and size of cache(s) fitted (ARM)
*
* This routine reads the MMU register to determine the type(s) and
* size(s) of cache(s) fitted.
*
* NOMANUAL
*
* RETURNS: coded value indicating information about the cache(s).
*
* UINT32 cacheIdentify (void)
*/

_ARM_FUNCTION_CALLED_FROM_C(cacheIdentify)

	MRC	CP_MMU, 0, r0, c0, c0, 1

	/* Return, with value read in R0 */

#if (ARM_THUMB)
	BX	lr
#else
	MOV	pc, lr
#endif
#endif /* (ARMCACHE == 926E,946E,1020E,1022E) */

#if (ARMCACHE == ARMCACHE_XSCALE)
/*******************************************************************************
*
* cacheIFetchNLock - fetch and lock instruction cache line
*
* This routine will fetch and lock instruction cache line.
*
* NOMANUAL
*
* RETURNS: N/A
*
* void cacheIFetchNLock
*     (
*     void *    addr    /@ virtual address to be locked @/
*     )
*/

_ARM_FUNCTION_CALLED_FROM_C(cacheIFetchNLock)

#if (ARMCACHE == ARMCACHE_XSCALE)
        stmfd   sp!,{r0-r3}             /* save r0-r3 to stack */

        LDR     r2, L$_cacheArchIntMask /* Get pointer to cacheArchIntMask */
        LDR     r2, [r2]                /* get cacheArchIntMask */
        MRS     r3, cpsr                /* Get CPSR */
        ORR     r2, r3, r2              /* disable interrupts */
        MSR     cpsr, r2

        /*
	 * The I-cache must be invalidated prior to locking down lines:
         * invalidate the instruction cache and branch target buffer
	 */

	MCR     CP_MMU, 0, r0, c7, c5, 0

        /* assure that CP15 update takes effect */

        MRC     CP_MMU, 0, r1, c2, c0, 0 /* arbitrary read of CP15 */
        MOV     r1, r1                   /* wait for it */
        SUB     pc, pc, #4               /* branch to next instruction */

        /*
	 * The MMU is guaranteed to be updated at this point; the next
	 * instruction will see the locked instruction TLB entry
         */

        /* the entry to lock is specified by the virtual address in R0 */

	MCR     CP_MMU, 0, r0, c9, c1, 0 /* fetch and lock i-cache line */

        /* assure that CP15 update takes effect */

        MRC     CP_MMU, 0, r1, c2, c0, 0 /* arbitrary read of CP15 */
        MOV     r1, r1                   /* wait for it */
        SUB     pc, pc, #4               /* branch to next instruction */

        /*
	 * The MMU is guaranteed to be updated at this point; the next
	 * instruction will see the locked instruction TLB entry
         */

        MSR     cpsr, r3                /* Restore interrupt state */

        /* restore registers and return */

        ldmfd   sp!,{r0-r3}
#endif /* (ARMCACHE == ARMCACHE_XSCALE) */

#if (ARM_THUMB)
	BX      lr
#else
	MOV     pc, lr
#endif

/*******************************************************************************
*
* cacheIUnLock - unlock instruction cache
*
* This routine will unlock instruction cache.
*
* NOMANUAL
*
* RETURNS: N/A
*
* void cacheIUnLock
*     (
*     void
*     )
*/

_ARM_FUNCTION_CALLED_FROM_C(cacheIUnLock)

#if (ARMCACHE == ARMCACHE_XSCALE)
        stmfd   sp!,{r0-r3}             /* save r0-r3 to stack */

        LDR     r2, L$_cacheArchIntMask /* Get pointer to cacheArchIntMask */
        LDR     r2, [r2]                /* get cacheArchIntMask */
        MRS     r3, cpsr                /* Get CPSR */
        ORR     r2, r3, r2              /* disable interrupts */
        MSR     cpsr, r2

	MCR     CP_MMU, 0, r0, c9, c1, 1 /* unlock i-cache */

        /* assure that CP15 update takes effect */

        MRC     CP_MMU, 0, r1, c2, c0, 0 /* arbitrary read of CP15 */
        MOV     r1, r1                   /* wait for it */
        SUB     pc, pc, #4               /* branch to next instruction */

        /*
	 * The MMU is guaranteed to be updated at this point; the next
	 * instruction will see the locked instruction TLB entry
         */

        MSR     cpsr, r3                /* Restore interrupt state */

        /* restore registers and return */

        ldmfd   sp!,{r0-r3}
#endif /* (ARMCACHE == ARMCACHE_XSCALE) */

#if (ARM_THUMB)
	BX      lr
#else
	MOV     pc, lr
#endif

/*******************************************************************************
*
* cacheDSetLockMode - set data cache lock register mode.
*
* This routine will set data cache lock mode.
*
* NOMANUAL
*
* RETURNS: N/A
*
* void cacheDSetLockMode
*     (
*     UINT32 lock_mode
*     )
*/

_ARM_FUNCTION_CALLED_FROM_C(cacheDSetLockMode)

#if (ARMCACHE == ARMCACHE_XSCALE)
        stmfd   sp!,{r0-r3}             /* save r0-r3 to stack */

        LDR     r2, L$_cacheArchIntMask /* Get pointer to cacheArchIntMask */
        LDR     r2, [r2]                /* get cacheArchIntMask */
        MRS     r3, cpsr                /* Get CPSR */
        ORR     r2, r3, r2              /* disable interrupts */
        MSR     cpsr, r2

        /* drain pending loads and stores */

	MCR     CP_MMU, 0, r0, c7, c10, 4 /* drain */

        /*
         * 0 = no locking occurs
	 * 1 = any fill into the data cache while this bit is set gets locked in
         */

	MCR     CP_MMU, 0, r0, c9, c2, 0 /* lock d-cache */

        /* assure that CP15 update takes effect */

        MRC     CP_MMU, 0, r1, c2, c0, 0 /* arbitrary read of CP15 */
        MOV     r1, r1                   /* wait for it */
        SUB     pc, pc, #4               /* branch to next instruction */

        /*
	 * The MMU is guaranteed to be updated at this point; the next
	 * instruction will see the locked instruction TLB entry
         */

        MSR     cpsr, r3                /* Restore interrupt state */

        /* restore registers and return */

        ldmfd   sp!,{r0-r3}
#endif /* (ARMCACHE == ARMCACHE_XSCALE) */

#if (ARM_THUMB)
	BX      lr
#else
	MOV     pc, lr
#endif

/*******************************************************************************
*
* cacheDLockRead - read data cache lock register mode
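
The listing continues on the following pages. For orientation, the sketch below shows how C code might reach these assembly entry points on an XScale target: the extern prototypes are copied from the header comments above, while the calling function, the intHandler symbol, and the DMA/lock-down scenario are assumptions added purely for illustration and are not part of cachealib.s.

/*
 * Illustrative C-side usage sketch (assumed scenario, not from this file).
 * Prototypes follow the header comments above; cacheUsageSketch, intHandler
 * and the surrounding logic are hypothetical.
 */

#include <vxWorks.h>

extern void   cacheArchPipeFlush (void);            /* drain the write buffer      */
extern UINT32 cacheIdentify      (void);            /* read CP15 cache type value  */
extern void   cacheIFetchNLock   (void * addr);     /* fetch and lock I-cache line */
extern void   cacheIUnLock       (void);            /* unlock the instruction cache */
extern void   cacheDSetLockMode  (UINT32 lockMode); /* 0 = no locking, 1 = lock fills */

extern void   intHandler (void);    /* hypothetical time-critical handler */

void cacheUsageSketch (void)
    {
    UINT32 cacheType;

    /* read the raw cache-type value; decoding the fields is CPU-specific */

    cacheType = cacheIdentify ();

    /* after filling DMA descriptors in memory, make sure the stores have
     * left the write buffer before starting the device (assumed scenario)
     */

    cacheArchPipeFlush ();

    /* pin the first line of a hot interrupt handler in the I-cache so its
     * first fetch never misses (hypothetical XScale-only use)
     */

    cacheIFetchNLock ((void *) intHandler);

    /* ... run with the line locked ... */

    cacheIUnLock ();                    /* release the locked I-cache lines */

    /* lock subsequent D-cache fills while touching a critical data
     * structure, then turn locking back off
     */

    cacheDSetLockMode (1);
    /* ... touch the data to be locked ... */
    cacheDSetLockMode (0);
    }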
