
📄 cache-sh4.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init p3_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	switch (boot_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();
}

/*
 * Write back the dirty D-caches, but not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * No write back please
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for signal trampoline.
 * .. which happens to be the same behavior as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY |
			(v & boot_cpu_data.icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < boot_cpu_data.icache.ways;
	     i++, index += boot_cpu_data.icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
}

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * another one!
	 */
	back_to_P1();
	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;
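
Note on the alias computation: compute_alias() works out how many page colours a virtually indexed cache way can produce, which in turn drives flush_dcache_page() (it flushes the page once per colour, addr += 4096) and the alias bitmap in __flush_cache_mm(). The following stand-alone sketch is purely illustrative and not part of the kernel; the geometry values are assumptions (an SH7750-style 16 KiB D-cache of 512 sets with 32-byte lines, an 8 KiB I-cache of 256 sets, and 4 KiB pages), but the arithmetic is the same expression used above.

/* Hypothetical user-space sketch of the compute_alias() arithmetic.
 * The cache geometry is an assumption, not probed hardware. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct cache_info_sketch {
	unsigned int sets;		/* sets per way */
	unsigned int entry_shift;	/* log2(line size in bytes) */
	unsigned long alias_mask;
	unsigned int n_aliases;
};

static void compute_alias_sketch(struct cache_info_sketch *c)
{
	/* Same expression as the kernel's compute_alias() */
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

int main(void)
{
	/* 512 sets x 32-byte lines = 16 KiB per way (assumed D-cache) */
	struct cache_info_sketch dcache = { .sets = 512, .entry_shift = 5 };
	/* 256 sets x 32-byte lines = 8 KiB per way (assumed I-cache) */
	struct cache_info_sketch icache = { .sets = 256, .entry_shift = 5 };

	compute_alias_sketch(&dcache);
	compute_alias_sketch(&icache);

	/* Expected output: D-cache alias_mask=0x3000, n_aliases=4;
	 *                  I-cache alias_mask=0x1000, n_aliases=2 */
	printf("D-cache: alias_mask=0x%08lx n_aliases=%u\n",
	       dcache.alias_mask, dcache.n_aliases);
	printf("I-cache: alias_mask=0x%08lx n_aliases=%u\n",
	       icache.alias_mask, icache.n_aliases);
	return 0;
}

With a 16 KiB way and 4 KiB pages there are four possible colours per physical page, which is why the n_aliases loops above iterate four times on such a configuration.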
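A second illustrative detail: __flush_wback_region(), __flush_purge_region() and __flush_invalidate_region() all round the requested region outwards to whole cache lines before issuing ocbwb/ocbp/ocbi line by line. The sketch below is a hypothetical host-side demo of that rounding only; L1_CACHE_BYTES = 32 is an assumption, and the cache instruction is replaced by a printf so it runs anywhere.

/* Hypothetical demo of the cache-line rounding used by the region flush
 * helpers above.  No real cache operation is performed. */
#include <stdio.h>

#define L1_CACHE_BYTES	32

static void flush_wback_region_sketch(unsigned long start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	/* Round the start down and the end up to a cache-line boundary,
	 * exactly as __flush_wback_region() does. */
	begin = start & ~(L1_CACHE_BYTES - 1);
	end = (start + size + L1_CACHE_BYTES - 1)
		& ~(L1_CACHE_BYTES - 1);

	for (v = begin; v < end; v += L1_CACHE_BYTES)
		printf("ocbwb @ 0x%08lx\n", v);	/* stand-in for the asm */
}

int main(void)
{
	/* A 100-byte region starting 19 bytes into a line: expect four
	 * lines, 0x10000000 through 0x10000060 inclusive. */
	flush_wback_region_sketch(0x10000013UL, 100);
	return 0;
}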
