⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 cacheasm.h

📁 linux-2.6.15.6
💻 H
📖 第 1 页 / 共 2 页
字号:
	.endm

/*
 *  Unlock entire instruction cache.
 *
 *  Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	icache_unlock_all	aa, ab
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	//  Instruction cache unlock:
	cache_index_all		iiu, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, 1, \aa, \ab
	icache_sync	\aa
	//  End of instruction cache unlock
#endif
	.endm


/***************************   DATA CACHE   ***************************/


/*
 *  Reset/initialize the data cache by simply invalidating it
 *  (need to unlock first also, if cache locking implemented):
 *
 *  Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	dcache_reset	aa, ab
	dcache_unlock_all	\aa, \ab
	dcache_invalidate_all	\aa, \ab
	.endm

/*
 * Synchronize after a data cache operation,
 * to be sure everything is in sync with memory as to be
 * expected following any previous data cache control operations.
 *
 * Parameters are:
 *	ar	an address register (temporary) (currently unused, but may be used in future)
 */
	.macro	dcache_sync	ar
#if XCHAL_DCACHE_SIZE > 0
	//  This previous sequence errs on the conservative side (too much so); a DSYNC should be sufficient:
	//memw		// synchronize data cache changes relative to subsequent memory accesses
	//isync		// be conservative and ISYNC as well (just to be sure)
	dsync
#endif
	.endm

/*
 * Synchronize after a data store operation,
 * to be sure the stored data is completely off the processor
 * (and assuming there is no buffering outside the processor,
 *  that the data is in memory).  This may be required to
 * ensure that the processor's write buffers are emptied.
 * A MEMW followed by a read guarantees this, by definition.
 * We also try to make sure the read itself completes.
 *
 * Parameters are:
 *	ar	an address register (temporary)
 */
	.macro	write_sync	ar
	memw			// ensure previous memory accesses are complete prior to subsequent memory accesses
	l32i	\ar, sp, 0	// completing this read ensures any previous write has completed, because of MEMW
	//slot
	add	\ar, \ar, \ar	// use the result of the read to help ensure the read completes (in future architectures)
	.endm

/*
 *  Invalidate a single line of the data cache.
 *  Parameters are:
 *	ar	address register that contains (virtual) address to invalidate
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	(optional) offset to add to \ar to compute effective address to invalidate
 *		(note: some number of lsbits are ignored)
 */
	.macro	dcache_invalidate_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0
	dhi	\ar, \offset
	dcache_sync	\ar
#endif
	.endm

/*
 *  Invalidate data cache entries that cache a specified portion of memory.
 *  Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	dcache_invalidate_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0
	//  Data cache region invalidation:
	cache_hit_region	dhi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	//  End of data cache region invalidation
#endif
	.endm

#if 0
/*
 *  This is a work-around for a bug in SiChip1 (???).
 *  There should be a proper mechanism for not outputting
 *  these instructions when not needed.
 *  To enable work-around, uncomment this and replace 'dii'
 *  with 'dii_s1' everywhere, eg. in dcache_invalidate_all
 *  macro below.
 */
	.macro	dii_s1	ar, offset
	dii	\ar, \offset
	or	\ar, \ar, \ar
	or	\ar, \ar, \ar
	or	\ar, \ar, \ar
	or	\ar, \ar, \ar
	.endm
#endif

/*
 *  Invalidate entire data cache.
 *
 *  Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	dcache_invalidate_all	aa, ab
#if XCHAL_DCACHE_SIZE > 0
	//  Data cache invalidation:
	cache_index_all		dii, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, \aa, \ab
	dcache_sync	\aa
	//  End of data cache invalidation
#endif
	.endm

/*
 *  Writeback a single line of the data cache.
 *  Parameters are:
 *	ar	address register that contains (virtual) address to writeback
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	offset to add to \ar to compute effective address to writeback
 *		(note: some number of lsbits are ignored)
 */
	.macro	dcache_writeback_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	\ar, \offset
	dcache_sync	\ar
#endif
	.endm

/*
 *  Writeback dirty data cache entries that cache a specified portion of memory.
 *  Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	dcache_writeback_region		astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
	//  Data cache region writeback:
	cache_hit_region	dhwb, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	//  End of data cache region writeback
#endif
	.endm

/*
 *  Writeback entire data cache.
 *  Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	dcache_writeback_all	aa, ab
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
	//  Data cache writeback:
	cache_index_all		diwb, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
	dcache_sync	\aa
	//  End of data cache writeback
#endif
	.endm

/*
 *  Writeback and invalidate a single line of the data cache.
 *  Parameters are:
 *	ar	address register that contains (virtual) address to writeback and invalidate
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	offset to add to \ar to compute effective address to writeback and invalidate
 *		(note: some number of lsbits are ignored)
 */
	.macro	dcache_writeback_inv_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0
	dhwbi	\ar, \offset	/* writeback and invalidate dcache line */
	dcache_sync	\ar
#endif
	.endm

/*
 *  Writeback and invalidate data cache entries that cache a specified portion of memory.
 *  Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	dcache_writeback_inv_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0
	//  Data cache region writeback and invalidate:
	cache_hit_region	dhwbi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	//  End of data cache region writeback and invalidate
#endif
	.endm

/*
 *  Writeback and invalidate entire data cache.
 *  Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	dcache_writeback_inv_all	aa, ab
#if XCHAL_DCACHE_SIZE > 0
	//  Data cache writeback and invalidate:
#if XCHAL_DCACHE_IS_WRITEBACK
	cache_index_all		diwbi, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
	dcache_sync	\aa
#else /*writeback*/
	//  Data cache does not support writeback, so just invalidate:
	dcache_invalidate_all	\aa, \ab
#endif /*writeback*/
	//  End of data cache writeback and invalidate
#endif
	.endm

/*
 *  Lock (prefetch & lock) a single line of the data cache.
 *
 *  Parameters are:
 *	ar	address register that contains (virtual) address to lock
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	offset to add to \ar to compute effective address to lock
 *		(note: some number of lsbits are ignored)
 */
	.macro	dcache_lock_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	dpfl	\ar, \offset	/* prefetch and lock dcache line */
	dcache_sync	\ar
#endif
	.endm

/*
 *  Lock (prefetch & lock) a specified portion of memory into the data cache.
 *  Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	dcache_lock_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	//  Data cache region lock:
	cache_hit_region	dpfl, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	//  End of data cache region lock
#endif
	.endm

/*
 *  Unlock a single line of the data cache.
 *
 *  Parameters are:
 *	ar	address register that contains (virtual) address to unlock
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	offset to add to \ar to compute effective address to unlock
 *		(note: some number of lsbits are ignored)
 */
	.macro	dcache_unlock_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	dhu	\ar, \offset	/* unlock dcache line */
	dcache_sync	\ar
#endif
	.endm

/*
 *  Unlock a specified portion of memory from the data cache.
 *  Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	dcache_unlock_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	//  Data cache region unlock:
	cache_hit_region	dhu, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	//  End of data cache region unlock
#endif
	.endm

/*
 *  Unlock entire data cache.
 *
 *  Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	dcache_unlock_all	aa, ab
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	//  Data cache unlock:
	cache_index_all		diu, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
	dcache_sync	\aa
	//  End of data cache unlock
#endif
	.endm

#endif /*XTENSA_CACHEASM_H*/

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -