⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 c-r4k.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 3 页
字号:
	/*
	 * NOTE(review): this chunk opens mid-function.  The statements below
	 * are the tail of local_r4k_flush_cache_page(); 'addr', 'mm' and
	 * 'exec' are declared in the part of that function that precedes
	 * this chunk — confirm against the full file.
	 */
	addr = INDEX_BASE + (addr & (dcache_size - 1));

	/* Index-flush the D-cache (and S-cache if the I-cache cannot snoop
	 * remote stores) when aliases are possible or the page is executable
	 * on a core whose I-cache does not fill from the D-cache. */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page_indexed(addr);
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			/* Virtually tagged I-cache: dropping the MMU context
			 * invalidates its entries for this mm. */
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}

/* SMP entry point: run the local page flush on every CPU. */
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;

	on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

/* Per-CPU worker: write back and invalidate one D-cache page. */
static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

/* SMP entry point for the per-page D-cache flush. */
static void r4k_flush_data_cache_page(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

/* Argument bundle passed through on_each_cpu() for the range flush. */
struct flush_icache_range_args {
	unsigned long __user start;
	unsigned long __user end;
};

/*
 * Per-CPU worker: make instructions in [start, end) visible to the
 * I-cache.  Writes the range back from the D-cache (and the S-cache when
 * the I-cache cannot snoop remote stores), then invalidates it from the
 * I-cache.  Whole-cache blasts are used when the range exceeds the
 * respective cache size, as a line-by-line loop would cost more.
 */
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;
	unsigned long addr, aend;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start > dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			/* aend is the last line, inclusive; the loop tests
			 * after the cacheop so the final line is covered. */
			addr = start & ~(dc_lsize - 1);
			aend = (end - 1) & ~(dc_lsize - 1);

			while (1) {
				/* Hit_Writeback_Inv_D */
				protected_writeback_dcache_line(addr);
				if (addr == aend)
					break;
				addr += dc_lsize;
			}
		}

		if (!cpu_icache_snoops_remote_store) {
			if (end - start > scache_size) {
				r4k_blast_scache();
			} else {
				addr = start & ~(sc_lsize - 1);
				aend = (end - 1) & ~(sc_lsize - 1);

				while (1) {
					/* Hit_Writeback_Inv_SD */
					protected_writeback_scache_line(addr);
					if (addr == aend)
						break;
					addr += sc_lsize;
				}
			}
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		addr = start & ~(ic_lsize - 1);
		aend = (end - 1) & ~(ic_lsize - 1);

		while (1) {
			/* Hit_Invalidate_I */
			protected_flush_icache_line(addr);
			if (addr == aend)
				break;
			addr += ic_lsize;
		}
	}
}

/* SMP entry point; the trailing instruction_hazard() barriers against
 * the just-invalidated I-cache contents still being in flight. */
static void r4k_flush_icache_range(unsigned long __user start,
	unsigned long __user end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}

/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  Otoh we at
 * least know the kernel address of the page so we can flush it
 * selectivly.
 */
struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

/* Per-CPU worker: flush one page prior to its execution. */
static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got the
	 * choice of either invalidating the entire primary and secondary
	 * caches or invalidating the secondary caches also.  With the subset
	 * enforcment on R4000SC, R4400SC, R10000 and R12000 invalidating the
	 * secondary cache will result in any entries in the primary caches
	 * also getting invalidated which hopefully is a bit more economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);
		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

/* SMP entry point; skips the flush entirely for non-executable VMAs. */
static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no I-cache
	 * flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}


#ifdef CONFIG_DMA_NONCOHERENT

/*
 * Write back and invalidate [addr, addr + size) ahead of a DMA transfer
 * on cache-incoherent hardware.  With inclusive (subset) caches flushing
 * the S-cache alone suffices; otherwise the primary D-cache is flushed
 * and bc_wback_inv() handles any external board cache.
 */
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = cpu_scache_line_size();

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = cpu_dcache_line_size();

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_wback_inv(addr, size);
}

/*
 * Invalidate [addr, addr + size) after a device-to-memory DMA transfer.
 * NOTE(review): the per-line ops used are still the writeback-invalidate
 * flavours (Hit_Writeback_Inv_*), matching the stock 2.6.15 code, with
 * only the trailing board-cache call differing (bc_inv vs bc_wback_inv).
 */
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = cpu_scache_line_size();

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = cpu_dcache_line_size();

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	/* Push the single line holding the signal trampoline out of the
	 * D-cache (and S-cache if needed), then invalidate it from the
	 * I-cache so the freshly written code is fetched. */
	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		/* Workaround: invalidate the I-cache line containing the
		 * code at label 1 so the following nops refill it. */
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

/* SMP entry point for the signal-trampoline flush. */
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

/* Only virtually tagged I-caches ever need an unconditional full flush. */
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	/* Store an invalid tag, force a Fill, then store the invalid tag
	 * again, across all four 4KB-apart ways of each index. */
	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

/* Indexed by way count; used when printing the probed cache geometry. */
static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

/*
 * Probe the primary cache geometry (size, line size, ways) for the
 * running CPU and record it in current_cpu_data.
 * NOTE(review): this function continues past the end of this chunk;
 * the switch below is not closed here.
 */
static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit= ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -