
r4xx0.c

From the upload: source code for linux-jx2410
Language: C
Page 1 of 5
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache32_page_indexed(page);
		blast_scache32_page_indexed(page);
	} else
		blast_scache32_page(page);
out:
	__restore_flags(flags);
}

static void r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma,
					   unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	__save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache32_page_indexed(page);
		blast_scache64_page_indexed(page);
	} else
		blast_scache64_page(page);
out:
	__restore_flags(flags);
}

static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma,
					    unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	__save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/* Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache32_page_indexed(page);
		blast_scache128_page_indexed(page);
	} else
		blast_scache128_page(page);
out:
	__restore_flags(flags);
}

static void r4k_flush_cache_page_d16i16(struct vm_area_struct *vma,
					unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	__save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm == current->active_mm) {
		blast_dcache16_page(page);
	} else {
		/* Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (dcache_size - 1)));
		blast_dcache16_page_indexed(page);
	}
out:
	__restore_flags(flags);
}

static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma,
					unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	__save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		goto out;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		blast_dcache32_page(page);
	} else {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (dcache_size - 1)));
		blast_dcache32_page_indexed(page);
	}
out:
	__restore_flags(flags);
}

static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
					      unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	__save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		goto out;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		blast_dcache32_page(page);
	} else {
		/* Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (dcache_size - 1)));
		blast_dcache32_page_indexed(page);
		blast_dcache32_page_indexed(page ^ dcache_waybit);
	}
out:
	__restore_flags(flags);
}

/* If the addresses passed to these routines are valid, they are
 * either:
 *
 * 1) In KSEG0, so we can do a direct flush of the page.
 * 2) In KSEG2, and since every process can translate those
 *    addresses all the time in kernel mode we can do a direct
 *    flush.
 * 3) In KSEG1, no flush necessary.
 */
static void r4k_flush_page_to_ram_s16(struct page *page)
{
	blast_scache16_page((unsigned long)page_address(page));
}

static void r4k_flush_page_to_ram_s32(struct page *page)
{
	blast_scache32_page((unsigned long)page_address(page));
}

static void r4k_flush_page_to_ram_s64(struct page *page)
{
	blast_scache64_page((unsigned long)page_address(page));
}

static void r4k_flush_page_to_ram_s128(struct page *page)
{
	blast_scache128_page((unsigned long)page_address(page));
}

static void r4k_flush_page_to_ram_d16(struct page *page)
{
	blast_dcache16_page((unsigned long)page_address(page));
}

static void r4k_flush_page_to_ram_d32(struct page *page)
{
	blast_dcache32_page((unsigned long)page_address(page));
}

static void r4k_flush_page_to_ram_d32_r4600(struct page *page)
{
	unsigned long flags;

	__save_and_cli(flags);			/* For R4600 v1.7 bug.  */
	blast_dcache32_page((unsigned long)page_address(page));
	__restore_flags(flags);
}

static void
r4k_flush_icache_page_s(struct vm_area_struct *vma, struct page *page)
{
	/*
	 * We did an scache flush therefore PI is already clean.
	 */
}

static void
r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.
 */
static void
r4k_flush_icache_page_p(struct vm_area_struct *vma, struct page *page)
{
	if (!(vma->vm_flags & VM_EXEC))
		return;

	flush_cache_all();
}

/*
 * Writeback and invalidate the primary cache dcache before DMA.
 *
 * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
 * Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Exclusive_D will only
 * operate correctly if the internal data cache refill buffer is empty.  These
 * CACHE instructions should be separated from any potential data cache miss
 * by a load instruction to an uncached address to empty the response buffer."
 * (Revision 2.0 device errata from IDT available on http://www.idt.com/
 * in .pdf format.)
 */
static void
r4k_dma_cache_wback_inv_pc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;
	unsigned int flags;

	if (size >= dcache_size) {
		flush_cache_all();
	} else {
		/* Workaround for R4600 bug.  See comment above. */
		__save_and_cli(flags);
		*(volatile unsigned long *)KSEG1;

		a = addr & ~(dc_lsize - 1);
		end = (addr + size) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a); /* Hit_Writeback_Inv_D */
			if (a == end) break;
			a += dc_lsize;
		}
		__restore_flags(flags);
	}
	bc_wback_inv(addr, size);
}

static void
r4k_dma_cache_wback_inv_sc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (size >= scache_size) {
		flush_cache_all();
		return;
	}

	a = addr & ~(sc_lsize - 1);
	end = (addr + size) & ~(sc_lsize - 1);
	while (1) {
		flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
		if (a == end) break;
		a += sc_lsize;
	}
}

static void
r4k_dma_cache_inv_pc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;
	unsigned int flags;

	if (size >= dcache_size) {
		flush_cache_all();
	} else {
		/* Workaround for R4600 bug.  See comment above. */
		__save_and_cli(flags);
		*(volatile unsigned long *)KSEG1;

		a = addr & ~(dc_lsize - 1);
		end = (addr + size) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a); /* Hit_Writeback_Inv_D */
			if (a == end) break;
			a += dc_lsize;
		}
		__restore_flags(flags);
	}

	bc_inv(addr, size);
}

static void
r4k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (size >= scache_size) {
		flush_cache_all();
		return;
	}

	a = addr & ~(sc_lsize - 1);
	end = (addr + size) & ~(sc_lsize - 1);
	while (1) {
		flush_scache_line(a); /* Hit_Writeback_Inv_SD */
		if (a == end) break;
		a += sc_lsize;
	}
}

static void
r4k_dma_cache_wback(unsigned long addr, unsigned long size)
{
	panic("r4k_dma_cache called - should not happen.\n");
}

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__("nop;nop;nop;nop");	/* R4600 V1.7 */

	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
}

static void r4600v20k_flush_cache_sigtramp(unsigned long addr)
{
	unsigned int flags;

	__save_and_cli(flags);

	/* Clear internal cache refill buffer */
	*(volatile unsigned int *)KSEG1;

	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	__restore_flags(flags);
}

#undef DEBUG_TLB
#undef DEBUG_TLBUPDATE

void flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	__save_and_cli(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = (get_entryhi() & 0xff);
	set_entryhi(KSEG0);
	set_entrylo0(0);
	set_entrylo1(0);
	BARRIER;

	entry = get_wired();

	/* Blast 'em all away. */
	while(entry < mips_cpu.tlbsize) {
		set_index(entry);
		BARRIER;
		tlb_write_indexed();
		BARRIER;
		entry++;
	}
	BARRIER;
	set_entryhi(old_ctx);
	__restore_flags(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != 0) {
		unsigned long flags;

#ifdef DEBUG_TLB
		printk("[tlbmm<%d>]", mm->context);
#endif
		__save_and_cli(flags);
		get_new_mmu_context(mm, asid_cache);
		if (mm == current->active_mm)
			set_entryhi(mm->context & 0xff);
		__restore_flags(flags);
	}
}

void flush_tlb_range(struct mm_struct *mm, unsigned long start,
				unsigned long end)
{
	if(mm->context != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & 0xff),
		       start, end);
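A note on the pattern above, before the listing continues on the next page: each routine is specialized for exactly one cache geometry (d16i16, d32i32, s64d32i32, and so on), and the kernel installs the matching variant into a function pointer once at boot, so the hot flush paths never re-test the cache parameters. Below is a minimal, self-contained sketch of that dispatch idea; the names (setup_cache_ops, flush_page_d16i16, flush_page_d32i32) are hypothetical stand-ins for illustration, not the kernel's actual symbols.

#include <stdio.h>

/* Hypothetical stand-ins for the geometry-specific flush routines. */
static void flush_page_d16i16(unsigned long page)
{
	printf("16-byte-line flush of %#lx\n", page);
}

static void flush_page_d32i32(unsigned long page)
{
	printf("32-byte-line flush of %#lx\n", page);
}

/* Bound once at startup; callers pay only an indirect call. */
static void (*flush_cache_page)(unsigned long page);

static void setup_cache_ops(unsigned int dc_lsize)
{
	switch (dc_lsize) {
	case 16:
		flush_cache_page = flush_page_d16i16;
		break;
	case 32:
		flush_cache_page = flush_page_d32i32;
		break;
	default:
		break;	/* unsupported geometry in this sketch */
	}
}

int main(void)
{
	setup_cache_ops(32);		/* pretend probing found 32-byte lines */
	flush_cache_page(0x80004000UL);	/* no geometry test on the hot path */
	return 0;
}

Binding the pointer once keeps the per-flush cost to a single indirect call, which is why the file carries many near-identical variants instead of one routine parameterized at run time.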
