⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 cache.s

📁 国产CPU-龙芯(loongson)BIOS源代码
💻 S
📖 第 1 页 / 共 2 页
字号:
 * Assumptions:
 *	If a cache is not direct mapped, line size is 32.
 *
 *----------------------------------------------------------------------------
 */
LEAF(CPU_FlushICache)
	lw	t0, CpuPrimaryInstSetSize	# t0 = size of one I-cache set
	addu	a1, 127				# round len up for 128-byte unroll
	and	a0, 0xffff80			# reduce addr to aligned cache index
	addu	a0, CACHED_MEMORY_ADDR		# a0 now new KSEG0 address (no TLB)
	srl	a1, a1, 7			# Number of unrolled loops

	# Main loop: each pass invalidates 128 bytes of index range in
	# every way of the cache.
1:	lw	v0, CpuNWayCache		# Cache properties (associativity)
	addiu	v0, -2				# <0 1way, 0 = two, >0 four
	bgez	v0, 2f				# N-way caches handled at 2:
	addu	a1, -1				# (delay slot) count down one pass

	# Direct mapped: assumed 16-byte lines, so do 16/48/80/112 here and
	# 0/32/64/96 at 3: below -- eight lines per 128-byte pass.
	cache	IndexInvalidate_I, 16(a0)	# direct mapped
	cache	IndexInvalidate_I, 48(a0)
	cache	IndexInvalidate_I, 80(a0)
	b	3f
	cache	IndexInvalidate_I, 112(a0)	# (delay slot)

	# N-way: each way ("set" here) is a consecutive block of t0 bytes.
	# Set A (index a0 itself) is done at 3: so the shared code there
	# doubles as this path's tail.
2:	addu	t1, t0, a0			# Nway cache, flush set B.
	cache	IndexInvalidate_I, 0(t1)
	cache	IndexInvalidate_I, 32(t1)
	cache	IndexInvalidate_I, 64(t1)
	cache	IndexInvalidate_I, 96(t1)
	beqz	v0, 3f				# two-way: only set A remains
	addu	t1, t0				# (delay slot) else step to set C.
	cache	IndexInvalidate_I, 0(t1)
	cache	IndexInvalidate_I, 32(t1)
	cache	IndexInvalidate_I, 64(t1)
	cache	IndexInvalidate_I, 96(t1)
	addu	t1, t0				# step to set D
	cache	IndexInvalidate_I, 0(t1)
	cache	IndexInvalidate_I, 32(t1)
	cache	IndexInvalidate_I, 64(t1)
	cache	IndexInvalidate_I, 96(t1)
	addiu	v0, -2				# 0 = 4-way, >0 = 8-way
	beqz	v0, 3f				# If just 4-way, go do set A
	addu	t1, t0				# (delay slot) else step to set E.
	# 8-way only: invalidate sets E through H as well.
	cache	IndexInvalidate_I, 0(t1)
	cache	IndexInvalidate_I, 32(t1)
	cache	IndexInvalidate_I, 64(t1)
	cache	IndexInvalidate_I, 96(t1)
	addu	t1, t0				# step to set F
	cache	IndexInvalidate_I, 0(t1)
	cache	IndexInvalidate_I, 32(t1)
	cache	IndexInvalidate_I, 64(t1)
	cache	IndexInvalidate_I, 96(t1)
	addu	t1, t0				# step to set G
	cache	IndexInvalidate_I, 0(t1)
	cache	IndexInvalidate_I, 32(t1)
	cache	IndexInvalidate_I, 64(t1)
	cache	IndexInvalidate_I, 96(t1)
	addu	t1, t0				# step to set H
	cache	IndexInvalidate_I, 0(t1)
	cache	IndexInvalidate_I, 32(t1)
	cache	IndexInvalidate_I, 64(t1)
	cache	IndexInvalidate_I, 96(t1)

	# All paths: invalidate set A (the only set if direct mapped).
3:	cache	IndexInvalidate_I, 0(a0)	# do set (A if NWay)
	cache	IndexInvalidate_I, 32(a0)
	cache	IndexInvalidate_I, 64(a0)
	cache	IndexInvalidate_I, 96(a0)
	bne	a1, zero, 1b
	addu	a0, 128				# (delay slot) next 128-byte chunk
	j	ra
	move	v0, zero			# (delay slot) suiword depends on this!!
END(CPU_FlushICache)

/*----------------------------------------------------------------------------
 *
 * CPU_FlushDCache --
 *
 *	void CPU_FlushDCache(addr, len)
 *		vm_offset_t addr, len;
 *
 *	Flush the L1 data cache for index range of at least
 *	addr to addr + len - 1.
 *	The address is reduced to a KSEG0 index to avoid TLB faults.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The contents of the cache is written back to primary memory.
 *	The cache line is invalidated.
 *
 *----------------------------------------------------------------------------
 */
LEAF(CPU_FlushDCache)
	lw	a2, CpuPrimaryDataSetSize	# a2 = size of one D-cache set
	addiu	a3, a2, -1
	and	a0, a3				# get cache index
	addu	a0, CACHED_MEMORY_ADDR		# a0 now new KSEG0 address
	andi	a3, a0, 127
	addu	a1, a3				# compute extra size from
	subu	a0, a3				# alignment of address
	addiu	a1, 127
	blt	a1, a2, 1f			# flushing more than cache?
	srl	a1, a1, 7			# (delay slot) number of cache lines
	srl	a1, a2, 7			# no need for more than size!

	# Main loop: one 128-byte index chunk, all ways, per pass.
1:	lw	v0, CpuNWayCache
	addiu	v0, -2				# <0 1way, 0 = two, >0 four
	bgez	v0, 2f				# NWay: go flush the extra sets
go flush sets	addu	a1, -1	cache	IndexWBInvalidate_D, 16(a0)	# direct mapped, assume 16 byte 	cache	IndexWBInvalidate_D, 48(a0)	# linesize.	cache	IndexWBInvalidate_D, 80(a0)	b	3f	cache	IndexWBInvalidate_D, 112(a0)2:	addu	t1, a0, a2			# flush set B.	cache	IndexWBInvalidate_D, 0(t1)	cache	IndexWBInvalidate_D, 32(t1)	cache	IndexWBInvalidate_D, 64(t1)	cache	IndexWBInvalidate_D, 96(t1)	lw      t5, CpuProcessorId	li      t6, (MIPS_GODSON1<<8)	beq     t5,t6,3f	nop	li      t6, (MIPS_GODSON2<<8)	bne     t5,t6,non_godson2_1	nop3:	cache	IndexWBInvalidate_D, 1(t1)	cache	IndexWBInvalidate_D, 33(t1)	cache	IndexWBInvalidate_D, 65(t1)	cache	IndexWBInvalidate_D, 97(t1)non_godson2_1:	beqz	v0, 3f				# Two way, do set A,	addu	t1, a2	cache	IndexWBInvalidate_D, 0(t1)	# do set C	cache	IndexWBInvalidate_D, 32(t1)	cache	IndexWBInvalidate_D, 64(t1)	cache	IndexWBInvalidate_D, 96(t1)	addu	t1, a2				# do set D	cache	IndexWBInvalidate_D, 0(t1)	cache	IndexWBInvalidate_D, 32(t1)	cache	IndexWBInvalidate_D, 64(t1)	cache	IndexWBInvalidate_D, 96(t1)	addiu	v0, -2				# 0 = 4-way, >0 8-way	bgez	v0, 3f				# Only 4-way... 
	addu	t1, a2				# (delay slot) step to set E
	# 8-way only: flush sets E through H.
	cache	IndexWBInvalidate_D, 0(t1)	# do set E
	cache	IndexWBInvalidate_D, 32(t1)
	cache	IndexWBInvalidate_D, 64(t1)
	cache	IndexWBInvalidate_D, 96(t1)
	addu	t1, a2				# do set F
	cache	IndexWBInvalidate_D, 0(t1)
	cache	IndexWBInvalidate_D, 32(t1)
	cache	IndexWBInvalidate_D, 64(t1)
	cache	IndexWBInvalidate_D, 96(t1)
	addu	t1, a2				# do set G
	cache	IndexWBInvalidate_D, 0(t1)
	cache	IndexWBInvalidate_D, 32(t1)
	cache	IndexWBInvalidate_D, 64(t1)
	cache	IndexWBInvalidate_D, 96(t1)
	addu	t1, a2				# do set H
	cache	IndexWBInvalidate_D, 0(t1)
	cache	IndexWBInvalidate_D, 32(t1)
	cache	IndexWBInvalidate_D, 64(t1)
	cache	IndexWBInvalidate_D, 96(t1)

	# All paths: flush set A (the only set if direct mapped).
3:	cache	IndexWBInvalidate_D, 0(a0)	# do set A
	cache	IndexWBInvalidate_D, 32(a0)
	cache	IndexWBInvalidate_D, 64(a0)
	cache	IndexWBInvalidate_D, 96(a0)
	bne	a1, zero, 1b
	addu	a0, 128				# (delay slot) next 128-byte chunk
	j	ra
	nop
END(CPU_FlushDCache)

/*----------------------------------------------------------------------------
 *
 * CPU_HitFlushDCache --
 *
 *	void CPU_HitFlushDCache(addr, len)
 *		vm_offset_t addr, len;
 *
 *	Flush data cache for range of addr to addr + len - 1.
 *	The address can be any valid viritual address as long
 *	as no TLB invalid traps occur. Only lines with matching
 *	addr are flushed.
 *
 *	Note: Use the CpuNWayCache flag to select 16 or 32 byte linesize.
 *	      All Nway cpu's now available have a fixed 32byte linesize.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The contents of the L1 cache is written back to primary memory.
 *	The cache line is invalidated.
 *
 *----------------------------------------------------------------------------
 */
LEAF(CPU_HitFlushDCache)
	mfc0	v1, COP_0_STATUS_REG		# Save the status register.
	li	v0, SR_DIAG_DE
	mtc0	v0, COP_0_STATUS_REG		# Disable interrupts
	lw	v0, CpuNWayCache
	beq	a1, zero, 3f			# size is zero!
	addu	a1, 127				# (delay slot) Round up
	addu	a1, a1, a0			# Add extra from address
	and	a0, a0, -128			# align address
	subu	a1, a1, a0
	srl	a1, a1, 7			# Compute number of cache lines
	addu	v0, -2				# negative if direct mapped
	# NOTE(review): t0 is loaded here but never read below in this
	# routine -- looks like a leftover from an L2-aware variant; confirm.
	lw	t0, CpuCacheType
	and	t0, CTYPE_HAS_L2

	# Per-128-byte pass; hit ops only touch lines whose tag matches.
1:	bgez	v0, 2f				# N-way: 32-byte lines only
	addu	a1, -1				# (delay slot) count down one pass
	cache	HitWBInvalidate_D, 16(a0)	# direct mapped, do 16 byte
	cache	HitWBInvalidate_D, 48(a0)	# line size.
	cache	HitWBInvalidate_D, 80(a0)
	cache	HitWBInvalidate_D, 112(a0)
2:	cache	HitWBInvalidate_D, 0(a0)
	cache	HitWBInvalidate_D, 32(a0)
	cache	HitWBInvalidate_D, 64(a0)
	cache	HitWBInvalidate_D, 96(a0)
	bne	a1, zero, 1b
	addu	a0, 128				# (delay slot) next chunk
3:	mtc0	v1, COP_0_STATUS_REG		# Restore the status register.
	NOP10
	j	ra
	nop
END(CPU_HitFlushDCache)

/*----------------------------------------------------------------------------
 *
 * CPU_HitFlushSCache --
 *
 *	void CPU_HitFlushSCache(addr, len)
 *		vm_offset_t addr, len;
 *
 *	Flush secondary cache for range of addr to addr + len - 1.
 *	The address can be any valid viritual address as long
 *	as no TLB invalid traps occur. Only lines with matching
 *	addr are flushed.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The contents of the L2 cache is written back to primary memory.
 *	The cache line is invalidated.
 *
 *----------------------------------------------------------------------------
 */
LEAF(CPU_HitFlushSCache)
	mfc0	v1, COP_0_STATUS_REG		# Save the status register.
	li	v0, SR_DIAG_DE
	mtc0	v0, COP_0_STATUS_REG		# Disable interrupts
	beq	a1, zero, 3f			# size is zero!
	addu	a1, 127				# (delay slot) Round up.
	addu	a1, a1, a0			# Add in extra from align
	and	a0, a0, -128			# Align address
	subu	a1, a1, a0
	srl	a1, a1, 7			# Compute number of cache lines

	# L2 always uses 32-byte steps here; four hit ops per pass.
1:	addu	a1, -1
	cache	HitWBInvalidate_S, 0(a0)
	cache	HitWBInvalidate_S, 32(a0)
	cache	HitWBInvalidate_S, 64(a0)
	cache	HitWBInvalidate_S, 96(a0)
	bne	a1, zero, 1b
	addu	a0, 128				# (delay slot) next chunk
3:	mtc0	v1, COP_0_STATUS_REG		# Restore the status register.
	NOP10
	j	ra
	nop
END(CPU_HitFlushSCache)

/*----------------------------------------------------------------------------
 *
 * CPU_HitInvalidateDCache --
 *
 *	void CPU_HitInvalidateDCache(addr, len)
 *		vm_offset_t addr, len;
 *
 *	Invalidate data cache for range of addr to addr + len - 1.
 *	The address can be any valid address as long as no TLB misses occur.
 *	(Be sure to use cached K0SEG kernel addresses or mapped addresses)
 *	Only lines with matching addresses are invalidated.
 *
 *	Note: Use the CpuNWayCache flag to select 16 or 32 byte linesize.
 *	      All Nway cpu's now available have a fixed 32byte linesize.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The L1 cache line is invalidated.
 *
 *----------------------------------------------------------------------------
 */
LEAF(CPU_HitInvalidateDCache)
	mfc0	v1, COP_0_STATUS_REG		# Save the status register.
	li	v0, SR_DIAG_DE
	mtc0	v0, COP_0_STATUS_REG		# Disable interrupts
	lw	v0, CpuNWayCache
	beq	a1, zero, 3f			# size is zero!
	addu	a1, 127				# (delay slot) Round up.
	addu	a1, a1, a0			# Add in extra from align
	and	a0, a0, -128			# Align address
	subu	a1, a1, a0
	srl	a1, a1, 7			# Compute number of cache lines
	addu	v0, -2				# negative if direct mapped
	# NOTE(review): t0 loaded but never read below, same dead pattern as
	# in CPU_HitFlushDCache -- probable leftover; confirm.
	lw	t0, CpuCacheType
	and	t0, CTYPE_HAS_L2

	# Per-128-byte pass; hit ops only touch lines whose tag matches.
1:	bgez	v0, 2f
	addu	a1, -1				# (delay slot) count down one pass
	cache	HitInvalidate_D, 16(a0)		# direct mapped, do 16 byte
	cache	HitInvalidate_D, 48(a0)		# line size.
	cache	HitInvalidate_D, 80(a0)
	cache	HitInvalidate_D, 112(a0)
2:	cache	HitInvalidate_D, 0(a0)
	cache	HitInvalidate_D, 32(a0)
	cache	HitInvalidate_D, 64(a0)
	cache	HitInvalidate_D, 96(a0)
	bne	a1, zero, 1b
	addu	a0, 128				# (delay slot) next chunk
3:	mtc0	v1, COP_0_STATUS_REG		# Restore the status register.
	NOP10
	j	ra
	nop
END(CPU_HitInvalidateDCache)

/*----------------------------------------------------------------------------
 *
 * CPU_HitInvalidateSCache --
 *
 *	void CPU_HitInvalidateSCache(addr, len)
 *		vm_offset_t addr, len;
 *
 *	Invalidate secondary cache for range of addr to addr + len - 1.
 *	The address can be any valid address as long as no TLB misses occur.
 *	(Be sure to use cached K0SEG kernel addresses or mapped addresses)
 *	Only lines with matching addresses are invalidated.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The L2 cache line is invalidated.
 *
 *----------------------------------------------------------------------------
 */
LEAF(CPU_HitInvalidateSCache)
	mfc0	v1, COP_0_STATUS_REG		# Save the status register.
	li	v0, SR_DIAG_DE
	mtc0	v0, COP_0_STATUS_REG		# Disable interrupts
	beq	a1, zero, 3f			# size is zero!
	addu	a1, 127				# (delay slot) Round up
	addu	a1, a1, a0			# Add in extra from align
	and	a0, a0, -128			# Align address
	subu	a1, a1, a0
	srl	a1, a1, 7			# Compute number of cache lines

	# L2 always uses 32-byte steps here; four hit ops per pass.
1:	addu	a1, -1
	cache	HitInvalidate_S, 0(a0)
	cache	HitInvalidate_S, 32(a0)
	cache	HitInvalidate_S, 64(a0)
	cache	HitInvalidate_S, 96(a0)
	bne	a1, zero, 1b
	addu	a0, 128				# (delay slot) next chunk
3:	mtc0	v1, COP_0_STATUS_REG		# Restore the status register.
	NOP10
	j	ra
	nop
END(CPU_HitInvalidateSCache)

/*----------------------------------------------------------------------------
 *
 * CPU_IOFlushDCache --
 *
 *	void CPU_IOFlushDCache(addr, len, rw)
 *		vm_offset_t addr;
 *		int  len, rw;
 *
 *	Invalidate or flush data cache for range of addr to addr + len - 1.
 *	The address can be any valid address as long as no TLB misses occur.
 *	(Be sure to use cached K0SEG kernel addresses or mapped addresses)
 *
 *	In case of the existence of an external cache we invalidate pages
 *	which are in the given range ONLY if transfer direction is READ.
 *	The assumption here is a 'write through' external cache which is
 *	true for all now supported processors.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	If rw == 0 (read), L1 cache is invalidated or flushed if the area
 *		does not match the alignment requirements. L2 and L3 cache
 *		is invalidated for the address range.
 *	If rw != 0 (write), L1 cache is written back to memory. L2 cache
 *		is left alone if R4K or R5K otherwise written back. L3
 *		cache is left alone (write through).
 *
 *----------------------------------------------------------------------------
 */
NON_LEAF(CPU_IOFlushDCache, STAND_FRAME_SIZE, ra)
	subu	sp, STAND_FRAME_SIZE
	sw	ra, STAND_RA_OFFSET(sp)
	sw	a0, STAND_FRAME_SIZE(sp)	# save args
	beqz	a2, FlushRD			# read operation
	sw	a1, STAND_FRAME_SIZE+4(sp)	# (delay slot) save len

# --- Flush for I/O Write --------
	lw	t0, CpuCacheType
	and	t0, CTYPE_HAS_L2		# Have internal L2?
	# Branch-likely: the delay-slot addu runs only if taken, so this is
	# a tail call to CPU_HitFlushDCache with the frame already popped
	# (ra was not clobbered yet, so the original return address is live).
	beqzl	t0, CPU_HitFlushDCache		# No L2: flush L1 and return
	addu	sp, STAND_FRAME_SIZE		# (delay slot, taken only) pop frame
	jal	CPU_HitFlushSCache		# Do internal L2 cache
	nop					# L1 done in parallel
	lw	a0, STAND_FRAME_SIZE(sp)	# reload addr
	jal	CPU_HitFlushDCache		# Do any orphans in L1
	lw	a1, STAND_FRAME_SIZE+4(sp)	# (delay slot) reload len
	b	FlushDone			# Any L3 is write through
	lw	ra, STAND_RA_OFFSET(sp)		# (delay slot) no need to flush

# --- Flush for I/O Read ---------
FlushRD:
	and	t0, a0, 127			# check if invalidate possible
	bnez	t0, FlushRDWB			# both address and size must
	nop
	and	t0, a1, 127			# be aligned at the cache loop
	bnez	t0, FlushRDWB			# unroll size
	nop
	lw	t0, CpuCacheType		# Aligned, do invalidate
	and	t0, CTYPE_HAS_L2		# Have internal L2?
	bnez	t0, FlushRDL2
	nop
	jal	CPU_HitInvalidateDCache		# External L2 or no L2. Do L1.
	nop
	b	FlushRDXL2
	lw	ra, STAND_RA_OFFSET(sp)		# (delay slot) External L2 if present
FlushRDL2:
	jal	CPU_HitInvalidateSCache		# Internal L2 cache
	nop					# L1 done in parallel
	lw	a0, STAND_FRAME_SIZE(sp)	# reload addr
	jal	CPU_HitInvalidateDCache		# Do any orphans in L1
	lw	a1, STAND_FRAME_SIZE+4(sp)	# (delay slot) reload len
	b	FlushRDL3
	lw	ra, STAND_RA_OFFSET(sp)		# (delay slot) L3 invalidate if present

	# Misaligned read range: must write back + invalidate instead, or
	# partial lines would lose data.
FlushRDWB:
	lw	t0, CpuCacheType
	and	t0, CTYPE_HAS_L2		# Have internal L2?
	bnez	t0, FlushRDWBL2			# Yes, do L2
	nop
	jal	CPU_HitFlushDCache		# No internal L2: flush L1
	nop
	b	FlushRDXL2
	lw	ra, STAND_RA_OFFSET(sp)		# (delay slot) External L2 if present
FlushRDWBL2:
	jal	CPU_HitFlushSCache		# Internal L2 cache
	nop					# L1 done in parallel
	lw	a0, STAND_FRAME_SIZE(sp)	# reload addr
	jal	CPU_HitFlushDCache		# Do any orphans in L1
	lw	a1, STAND_FRAME_SIZE+4(sp)	# (delay slot) reload len
	b	FlushRDL3
	lw	ra, STAND_RA_OFFSET(sp)		# (delay slot) L3 invalidate if present

	# Invalidate an external L2 (XL2), page at a time.
FlushRDXL2:
	lw	t0, CpuCacheType
	and	t0, CTYPE_HAS_XL2		# Have external L2?
	beqz	t0, FlushRDL3			# Nope.
	lw	a0, STAND_FRAME_SIZE(sp)	# (delay slot) reload addr
	lw	a1, STAND_FRAME_SIZE+4(sp)	# reload len
	and	a2, a0, 4095			# align on page size
	subu	a0, a2
	addu 	a1, a2
50:	blez	a1, FlushDone
	subu	a1, 4096			# (delay slot) Fixed cache page size.
	cache	InvalidateSecondaryPage, 0(a0)
	b	50b
	addu	a0, 4096			# (delay slot) next page

	# Invalidate a tertiary (L3) cache, page at a time.
FlushRDL3:
	lw	t0, CpuCacheType
	and	t0, CTYPE_HAS_XL3		# Have L3?
	beqz	t0, FlushDone			# Nope.
	lw	a0, STAND_FRAME_SIZE(sp)	# (delay slot) reload addr
	lw	a1, STAND_FRAME_SIZE+4(sp)	# reload len
	and	a2, a0, 4095			# align on page size
	subu	a0, a2
	addu 	a1, a2
40:	blez	a1, FlushDone
	subu	a1, 4096			# (delay slot) Fixed cache page size.
	cache	InvalidateTertiaryPage, 0(a0)
	b	40b
	addu	a0, 4096			# (delay slot) next page

FlushDone:
	NOP10
	j	ra
	addu	sp, STAND_FRAME_SIZE		# (delay slot) pop frame
END(CPU_IOFlushDCache)

	.set	pop

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -