
📄 cache.c

📁 Source code of LINUX 2.6.17.4
💻 C
📖 Page 1 of 3
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/cache.c
 *
 * Original version Copyright (C) 2000, 2001  Paolo Alberelli
 * Second version Copyright (C) benedict.gaster@superh.com 2002
 * Third version Copyright Richard.Curnow@superh.com 2003
 * Hacks to third version Copyright (C) 2003 Paul Mundt
 */

/****************************************************************************/

#include <linux/config.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h> /* for flush_itlb_range */

#include <linux/proc_fs.h>

/* This function is in entry.S */
extern unsigned long switch_and_save_asid(unsigned long new_asid);

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;

/**
 * sh64_cache_init()
 *
 * This is pretty much just a straightforward clone of the SH
 * detect_cpu_and_cache_system().
 *
 * This function is responsible for setting up all of the cache
 * info dynamically as well as taking care of CPU probing and
 * setting up the relevant subtype data.
 *
 * FIXME: For the time being, we only really support the SH5-101
 * out of the box, and don't support dynamic probing for things
 * like the SH5-103 or even cut2 of the SH5-101. Implement this
 * later!
 */
int __init sh64_cache_init(void)
{
	/*
	 * First, setup some sane values for the I-cache.
	 */
	cpu_data->icache.ways		= 4;
	cpu_data->icache.sets		= 256;
	cpu_data->icache.linesz		= L1_CACHE_BYTES;

	/*
	 * FIXME: This can probably be cleaned up a bit as well.. for example,
	 * do we really need the way shift _and_ the way_step_shift ?? Judging
	 * by the existing code, I would guess no.. is there any valid reason
	 * why we need to be tracking this around?
	 */
	cpu_data->icache.way_shift	= 13;
	cpu_data->icache.entry_shift	= 5;
	cpu_data->icache.set_shift	= 4;
	cpu_data->icache.way_step_shift	= 16;
	cpu_data->icache.asid_shift	= 2;

	/*
	 * way offset = cache size / associativity, so just don't factor in
	 * associativity in the first place..
	 */
	cpu_data->icache.way_ofs	= cpu_data->icache.sets *
					  cpu_data->icache.linesz;

	cpu_data->icache.asid_mask	= 0x3fc;
	cpu_data->icache.idx_mask	= 0x1fe0;
	cpu_data->icache.epn_mask	= 0xffffe000;
	cpu_data->icache.flags		= 0;

	/*
	 * Next, setup some sane values for the D-cache.
	 *
	 * On the SH5, these are pretty consistent with the I-cache settings,
	 * so we just copy over the existing definitions.. these can be fixed
	 * up later, especially if we add runtime CPU probing.
	 *
	 * Though in the meantime it saves us from having to duplicate all of
	 * the above definitions..
	 */
	cpu_data->dcache		= cpu_data->icache;

	/*
	 * Setup any cache-related flags here
	 */
#if defined(CONFIG_DCACHE_WRITE_THROUGH)
	set_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags));
#elif defined(CONFIG_DCACHE_WRITE_BACK)
	set_bit(SH_CACHE_MODE_WB, &(cpu_data->dcache.flags));
#endif

	/*
	 * We also need to reserve a slot for the D-cache in the DTLB, so we
	 * do this now ..
	 */
	dtlb_cache_slot			= sh64_get_wired_dtlb_entry();

	return 0;
}

#ifdef CONFIG_DCACHE_DISABLED
#define sh64_dcache_purge_all()					do { } while (0)
#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
#define sh64_dcache_purge_kernel_range(start, end)		do { } while (0)
#define sh64_dcache_wback_current_user_range(start, end)	do { } while (0)
#endif

/*##########################################################################*/

/* From here onwards, a rewrite of the implementation,
   by Richard.Curnow@superh.com.

   The major changes in this compared to the old version are;
   1. use more selective purging through OCBP instead of using ALLOCO to purge
      by natural replacement.  This avoids purging out unrelated cache lines
      that happen to be in the same set.
   2. exploit the APIs copy_user_page and clear_user_page better
   3. be more selective about I-cache purging, in particular use invalidate_all
      more sparingly.

   */

/*##########################################################################
			       SUPPORT FUNCTIONS
  ##########################################################################*/

/****************************************************************************/
/* The following group of functions deal with mapping and unmapping a temporary
   page into the DTLB slot that have been set aside for our exclusive use. */
/* In order to accomplish this, we use the generic interface for adding and
   removing a wired slot entry as defined in arch/sh64/mm/tlb.c */
/****************************************************************************/

static unsigned long slot_own_flags;

static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, unsigned long paddr)
{
	local_irq_save(slot_own_flags);
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_restore(slot_own_flags);
}

/****************************************************************************/

#ifndef CONFIG_ICACHE_DISABLED

static void __inline__ sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned int flags;

	addr=ICCR0;
	flag=ICCR0_ICI;
	data=0;

	/* Make this a critical section for safety (probably not strictly necessary.) */
	local_irq_save(flags);

	/* Without %1 it gets unexplicably wrong */
	asm volatile("getcfg	%3, 0, %0\n\t"
			"or	%0, %2, %0\n\t"
			"putcfg	%3, 0, %0\n\t"
			"synci"
			: "=&r" (data)
			: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	 * the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
#if (NEFF == 32)
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
#else
#error "NEFF != 32"
#endif
	aligned_start &= L1_CACHE_ALIGN_MASK;
	addr = aligned_start;
#if (NEFF == 32)
	ullend = (unsigned long long) (signed long long) (signed long) end;
#else
#error "NEFF != 32"
#endif
	while (addr <= ullend) {
		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */

	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;
	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here, (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}
	while (addr < end_addr) {
		/* Worth unrolling a little */
		asm __volatile__("icbi %0,  0" : : "r" (addr));
		asm __volatile__("icbi %0, 32" : : "r" (addr));
		asm __volatile__("icbi %0, 64" : : "r" (addr));
		asm __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}
	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}

/****************************************************************************/

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache option (2) isn't
	   available because there are no physical tags so aliases can't be
	   resolved.  The icbi instruction has to be used through the user
	   mapping.   Because icbi is cheaper than ocbp on a cache hit, it
	   would be cheaper to use the selective code for a large range than is
	   possible with the D-cache.  Just assume 64 for now as a working
	   figure.
	   */

	int n_pages;

	if (!mm) return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long long flags = 0ULL;

		mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;

			vma = find_vma(mm, aligned_start);
			if (!vma || (aligned_start <= vma->vm_end)) {
				/* Avoid getting stuck in an error condition */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}

static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
						unsigned long start, int len)
{
	/* Invalidate a small range of user context I-cache, not necessarily
	   page (or even cache-line) aligned. */

	unsigned long long eaddr = start;
	unsigned long long eaddr_end = start + len;
	unsigned long current_asid, mm_asid;
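The listing breaks off here; the remainder of sh64_icache_inv_user_small_range() and the D-cache purge routines referenced above continue on the later pages of this file. As a reading aid, below is a minimal, self-contained sketch of the ASID-switching pattern that sh64_icache_inv_user_page() uses: switch to the victim mm's ASID if it differs from the one currently loaded, invalidate the page one cache line at a time, then switch back. It is not part of the kernel file; every identifier ending in _stub is a hypothetical stand-in for the real primitive (get_asid(), switch_and_save_asid(), the icbi instruction), modelled in plain C so the control flow compiles and runs on an ordinary host.

/*
 * Illustrative sketch only -- NOT part of arch/sh64/mm/cache.c.
 * All *_stub identifiers are hypothetical stand-ins for the SH-5
 * primitives used in the listing above.
 */
#include <stdio.h>

#define PAGE_SIZE_STUB	4096UL	/* stands in for PAGE_SIZE */
#define CACHE_LINE_STUB	32UL	/* stands in for L1_CACHE_BYTES */

static unsigned long asid_reg_stub = 1;	/* models the currently loaded ASID */

static unsigned long get_asid_stub(void)
{
	return asid_reg_stub;
}

static unsigned long switch_asid_stub(unsigned long new_asid)
{
	/* the real switch_and_save_asid() lives in entry.S */
	unsigned long old = asid_reg_stub;
	asid_reg_stub = new_asid;
	return old;
}

static void icbi_stub(unsigned long eaddr)
{
	/* stands in for the icbi instruction (I-cache block invalidate) */
	printf("icbi 0x%08lx (ASID %lu)\n", eaddr, asid_reg_stub);
}

/* Invalidate one page worth of I-cache lines under the victim's ASID. */
static void inv_user_page_sketch(unsigned long eaddr, unsigned long vma_asid)
{
	unsigned long running_asid = get_asid_stub();
	unsigned long addr, end_addr = eaddr + PAGE_SIZE_STUB;

	if (running_asid != vma_asid)
		switch_asid_stub(vma_asid);	/* real code also disables IRQs here */

	for (addr = eaddr; addr < end_addr; addr += CACHE_LINE_STUB)
		icbi_stub(addr);		/* the kernel unrolls this loop 4x */

	if (running_asid != vma_asid)
		switch_asid_stub(running_asid);	/* restore caller's ASID, re-enable IRQs */
}

int main(void)
{
	/* e.g. invalidate the page at 0x00400000 belonging to ASID 5 */
	inv_user_page_sketch(0x00400000UL, 5UL);
	return 0;
}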
