
📄 cache-sh4.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = boot_cpu_data.dcache.alias_mask;

	/* We only need to flush D-cache when we have alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}
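The (address ^ phys) & alias_mask test in flush_cache_page() above is the heart of the alias handling: a virtual/physical pair only needs a D-cache flush when the two addresses map to different cache "colours". The following standalone sketch (not part of cache-sh4.c; the 16 KiB way size and 4 KiB page size are illustrative assumptions) demonstrates the same colour arithmetic:

/*
 * Illustrative sketch only.  With an assumed 16 KiB cache way and
 * 4 KiB pages, bits 12-13 of an address select the colour, giving
 * alias_mask = 0x3000.  A flush is needed exactly when the virtual
 * and physical colours differ.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define WAY_SIZE	(16 * 1024)			/* assumed way size */
#define ALIAS_MASK	(WAY_SIZE - (1 << PAGE_SHIFT))	/* 0x3000 here */

static int needs_dcache_flush(unsigned long vaddr, unsigned long paddr)
{
	return ((vaddr ^ paddr) & ALIAS_MASK) != 0;
}

int main(void)
{
	/* Same colour: no flush needed.  Prints 0. */
	printf("%d\n", needs_dcache_flush(0x10001000UL, 0x20001000UL));
	/* Different colour: a virtual alias exists.  Prints 1. */
	printf("%d\n", needs_dcache_flush(0x10001000UL, 0x20003000UL));
	return 0;
}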
/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
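Each inner-loop iteration of __flush_cache_4096() stores to two address-array entries (a and a+32) and then advances by 64 bytes, so with the SH-4's 32-byte cache lines one pass covers exactly one 4 KiB page per way. A standalone sanity check of that walk (simulation only; the real stores go to the memory-mapped cache address array):

/*
 * Illustrative simulation of the inner-loop walk above: two 32-byte
 * lines per iteration, stepping 64 bytes, gives 4096/64 = 64
 * iterations and 128 lines, i.e. one full 4 KiB page.
 */
#include <assert.h>

#define PAGE_SIZE 4096UL

static unsigned long count_lines_touched(unsigned long base)
{
	unsigned long a = base, ea = base + PAGE_SIZE, lines = 0;

	do {
		lines += 2;	/* the real loop stores to a and a+32 */
		a += 64;
	} while (a < ea);

	return lines;
}

int main(void)
{
	assert(count_lines_touched(0) == 128);	/* 128 * 32 bytes == 4 KiB */
	return 0;
}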
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}
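All of the segment flushers build their working address the same way: the address of empty_zero_page is rounded down to a 64 KiB boundary (a multiple of any plausible SH-4 way size, per the "-- RPC" comment in the 1-way variant), and the caller-supplied colour offset is OR'd in. A worked example of that arithmetic, using a hypothetical kernel address:

/*
 * Illustrative sketch only; the addresses are made up.
 * 0x8c012345 rounded down to 64 KiB is 0x8c010000; OR-ing in a
 * colour offset of 0x2000 yields 0x8c012000.
 */
#include <assert.h>

int main(void)
{
	unsigned long zero_page = 0x8c012345UL;	/* hypothetical address */
	unsigned long start     = 0x00002000UL;	/* colour offset in a way */
	unsigned long base      = (zero_page >> 16) << 16;

	base |= start;
	assert(base == 0x8c012000UL);
	return 0;
}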
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}
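The way-count-specific variants above are presumably selected once at cache probe time through a function pointer; that setup is not on this page of the listing. A hedged sketch, under that assumption, of what such a dispatch could look like (select_dcache_flusher and dcache_segment_fn are hypothetical names, not taken from cache-sh4.c):

/* Hypothetical dispatch sketch -- not part of the original file. */
typedef void (*dcache_segment_fn)(unsigned long start,
				  unsigned long extent_per_way);

static dcache_segment_fn select_dcache_flusher(int ways)
{
	switch (ways) {
	case 1:
		return __flush_dcache_segment_1way;
	case 2:
		return __flush_dcache_segment_2way;
	case 4:
		return __flush_dcache_segment_4way;
	default:
		return 0;	/* no other way counts expected on SH-4 */
	}
}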
