⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 c-r4k.c

📁 LINUX 2.6.17.4的源码
💻 C
📖 第 1 页 / 共 3 页
字号:
	/*
	 * NOTE(review): this excerpt begins in the tail of
	 * local_r4k_flush_cache_page(); the function's head (which sets up
	 * mm, addr, paddr, exec and the pudp/pmdp/ptep walk) lies above
	 * this chunk and is not visible here.
	 */
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		/* Page is mapped in the current address space: hit-type
		 * flushes by virtual address are safe here. */
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(addr);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(addr);
		}
		if (exec)
			r4k_blast_icache_page(addr);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		/* Physically indexed dcaches are indexed by paddr,
		 * otherwise by the virtual address. */
		r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
					      paddr : addr);
		if (exec && !cpu_icache_snoops_remote_store) {
			r4k_blast_scache_page_indexed(paddr);
		}
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			/* Virtually tagged icache: invalidating by index is
			 * not possible, so retire the whole ASID instead. */
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}

/*
 * SMP entry point: bundle the arguments into a struct and run the local
 * flush on every CPU via r4k_on_each_cpu().
 */
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

/* Writeback/invalidate one page from the primary dcache on this CPU. */
static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

/* Flush one data-cache page on all CPUs. */
static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

/* Argument marshalling for the cross-CPU icache-range flush. */
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

/*
 * Make [start, end) coherent for instruction fetch on this CPU:
 * write the range back from the d-cache (and s-cache where the icache
 * does not snoop remote stores), then invalidate it from the i-cache.
 * Whole-cache blasts are used when the range exceeds the cache size.
 */
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start > dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

/* SMP wrapper; the trailing instruction_hazard() barrier orders the
 * flush against subsequent instruction fetches. */
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}

/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  Otoh we at
 * least know the kernel address of the page so we can flush it
 * selectively.
 */

struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got the
	 * choice of either invalidating the entire primary and secondary
	 * caches or invalidating the secondary caches also.  With the subset
	 * enforcement on R4000SC, R4400SC, R10000 and R12000 invalidating the
	 * secondary cache will result in any entries in the primary caches
	 * also getting invalidated which hopefully is a bit more economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);
		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no I-cache
	 * flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}


#ifdef CONFIG_DMA_NONCOHERENT

/*
 * Write back and invalidate [addr, addr + size) ahead of a DMA transfer
 * on non-coherent hardware.  On CPUs with the primary-subset property a
 * secondary-cache flush suffices; otherwise the primary dcache must be
 * flushed explicitly.  Finishes with a board-level writeback/invalidate.
 */
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

/*
 * Invalidate [addr, addr + size) after a device-to-memory DMA transfer.
 * Same cache-selection logic as r4k_dma_cache_wback_inv(), but ends with
 * a board-level invalidate only.
 */
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	/* Push the line holding the signal trampoline out of the d-cache
	 * (and s-cache if needed), then invalidate it from the i-cache.
	 * Addresses are rounded down to their cache-line base. */
	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		/* Workaround: issue an extra Hit_Invalidate_I on a local
		 * label so the erratum-affected refill is flushed too. */
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

/* Only virtually tagged icaches need a full flush here; physically
 * tagged ones stay coherent without it. */
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	/* Walk 4KB of each of the four ways, store clean tags, fill, and
	 * store tags again to bring the icache into a sane state. */
	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

/* Human-readable associativity names; presumably indexed by the ways
 * count for boot-time reporting — the user is outside this excerpt. */
static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

/*
 * Probe the geometry (size, line size, associativity) of the primary
 * caches from the CP0 Config register, per CPU type, and record it in
 * current_cpu_data.  NOTE(review): this function is truncated at the
 * end of the excerpt, inside the VR4133 case of the switch.
 */
static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		/* Decode the IC/IB and DC/DB fields of Config. */
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit= __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0; 	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		/* R10K-family Config register uses different field
		 * positions; line sizes are fixed. */
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		/* Clear the CONF_EB bit before probing — TODO confirm
		 * intent; the rest of this case is beyond the excerpt. */
		write_c0_config(config & ~CONF_EB);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -