
📄 c-mips32.c

📁 This file is part of rt_linux
💻 C
📖 Page 1 of 2
/*
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * MIPS32 CPU variant specific MMU/Cache routines.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/bcache.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mmu_context.h>

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
                                     "nop; nop; nop; nop; nop; nop;\n\t" \
                                     ".set reorder\n\t")

/* Primary cache parameters. */
int icache_size, dcache_size;           /* Size in bytes */
int ic_lsize, dc_lsize;                 /* LineSize in bytes */

/* Secondary cache (if present) parameters. */
unsigned int scache_size, sc_lsize;     /* Again, in bytes */

#include <asm/cacheops.h>
#include <asm/mips32_cache.h>

#undef DEBUG_CACHE

/*
 * Dummy cache handling routines for machines without boardcaches
 */
static void no_sc_noop(void) {}

static struct bcache_ops no_sc_ops = {
        (void *)no_sc_noop, (void *)no_sc_noop,
        (void *)no_sc_noop, (void *)no_sc_noop
};

struct bcache_ops *bcops = &no_sc_ops;

static inline void mips32_flush_cache_all_sc(void)
{
        unsigned long flags;

        __save_and_cli(flags);
        blast_dcache(); blast_icache(); blast_scache();
        __restore_flags(flags);
}

static inline void mips32_flush_cache_all_pc(void)
{
        unsigned long flags;

        __save_and_cli(flags);
        blast_dcache(); blast_icache();
        __restore_flags(flags);
}

static void
mips32_flush_cache_range_sc(struct mm_struct *mm,
                         unsigned long start,
                         unsigned long end)
{
        struct vm_area_struct *vma;
        unsigned long flags;

        if(mm->context == 0)
                return;

        start &= PAGE_MASK;
#ifdef DEBUG_CACHE
        printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
        vma = find_vma(mm, start);
        if(vma) {
                if(mm->context != current->mm->context) {
                        mips32_flush_cache_all_sc();
                } else {
                        pgd_t *pgd;
                        pmd_t *pmd;
                        pte_t *pte;

                        __save_and_cli(flags);
                        while(start < end) {
                                pgd = pgd_offset(mm, start);
                                pmd = pmd_offset(pgd, start);
                                pte = pte_offset(pmd, start);

                                if(pte_val(*pte) & _PAGE_VALID)
                                        blast_scache_page(start);

                                start += PAGE_SIZE;
                        }
                        __restore_flags(flags);
                }
        }
}

static void mips32_flush_cache_range_pc(struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        if(mm->context != 0) {
                unsigned long flags;

#ifdef DEBUG_CACHE
                printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
                __save_and_cli(flags);
                blast_dcache(); blast_icache();
                __restore_flags(flags);
        }
}

/*
 * On architectures like the Sparc, we could get rid of lines in
 * the cache created only by a certain context, but on the MIPS
 * (and actually certain Sparcs) we cannot.
 */
static void mips32_flush_cache_mm_sc(struct mm_struct *mm)
{
        if(mm->context != 0) {
#ifdef DEBUG_CACHE
                printk("cmm[%d]", (int)mm->context);
#endif
                mips32_flush_cache_all_sc();
        }
}

static void mips32_flush_cache_mm_pc(struct mm_struct *mm)
{
        if(mm->context != 0) {
#ifdef DEBUG_CACHE
                printk("cmm[%d]", (int)mm->context);
#endif
                mips32_flush_cache_all_pc();
        }
}

static void mips32_flush_cache_page_sc(struct vm_area_struct *vma,
                                    unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long flags;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;

        /*
         * If it owns no valid ASID yet, it cannot possibly have gotten
         * this page into the cache.
         */
        if (mm->context == 0)
                return;

#ifdef DEBUG_CACHE
        printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
        __save_and_cli(flags);
        page &= PAGE_MASK;
        pgdp = pgd_offset(mm, page);
        pmdp = pmd_offset(pgdp, page);
        ptep = pte_offset(pmdp, page);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_val(*ptep) & _PAGE_VALID))
                goto out;

        /*
         * Doing flushes for another ASID than the current one is
         * too difficult since R4k caches do a TLB translation
         * for every cache flush operation.  So we do indexed flushes
         * in that case, which doesn't overly flush the cache too much.
         */
        if (mm->context != current->active_mm->context) {
                /*
                 * Do indexed flush, too much work to get the (possible)
                 * tlb refills to work correctly.
                 */
                page = (KSEG0 + (page & (scache_size - 1)));
                blast_dcache_page_indexed(page);
                blast_scache_page_indexed(page);
        } else
                blast_scache_page(page);
out:
        __restore_flags(flags);
}

static void mips32_flush_cache_page_pc(struct vm_area_struct *vma,
                                    unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long flags;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;

        /*
         * If it owns no valid ASID yet, it cannot possibly have gotten
         * this page into the cache.
         */
        if (mm->context == 0)
                return;

#ifdef DEBUG_CACHE
        printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
        __save_and_cli(flags);
        page &= PAGE_MASK;
        pgdp = pgd_offset(mm, page);
        pmdp = pmd_offset(pgdp, page);
        ptep = pte_offset(pmdp, page);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_val(*ptep) & _PAGE_VALID))
                goto out;

        /*
         * Doing flushes for another ASID than the current one is
         * too difficult since Mips32 caches do a TLB translation
         * for every cache flush operation.  So we do indexed flushes
         * in that case, which doesn't overly flush the cache too much.
         */
        if (mm == current->active_mm) {
                blast_dcache_page(page);
        } else {
                /* Do indexed flush, too much work to get the (possible)
                 * tlb refills to work correctly.
                 */
                page = (KSEG0 + (page & (dcache_size - 1)));
                blast_dcache_page_indexed(page);
        }
out:
        __restore_flags(flags);
}

/* If the addresses passed to these routines are valid, they are
 * either:
 *
 * 1) In KSEG0, so we can do a direct flush of the page.
 * 2) In KSEG2, and since every process can translate those
 *    addresses all the time in kernel mode we can do a direct
 *    flush.
 * 3) In KSEG1, no flush necessary.
 */
static void mips32_flush_page_to_ram_sc(struct page *page)
{
        blast_scache_page((unsigned long)page_address(page));
}

static void mips32_flush_page_to_ram_pc(struct page *page)
{
        blast_dcache_page((unsigned long)page_address(page));
}

static void
mips32_flush_icache_page_s(struct vm_area_struct *vma, struct page *page)
{
        /*
         * We did an scache flush therefore PI is already clean.
         */
}

static void
mips32_flush_icache_range(unsigned long start, unsigned long end)
{
        flush_cache_all();
}

static void
mips32_flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        int address;

        if (!(vma->vm_flags & VM_EXEC))
                return;

        address = KSEG0 + ((unsigned long)page_address(page) & PAGE_MASK & (dcache_size - 1));

        blast_icache_page_indexed(address);
}

/*
 * Writeback and invalidate the primary cache dcache before DMA.
 */
static void
mips32_dma_cache_wback_inv_pc(unsigned long addr, unsigned long size)
{
        unsigned long end, a;
        unsigned int flags;

        if (size >= dcache_size) {
                flush_cache_all();
        } else {
                __save_and_cli(flags);
                a = addr & ~(dc_lsize - 1);
                end = (addr + size) & ~(dc_lsize - 1);
                while (1) {
                        flush_dcache_line(a); /* Hit_Writeback_Inv_D */
                        if (a == end) break;
                        a += dc_lsize;
                }
                __restore_flags(flags);
        }

        bc_wback_inv(addr, size);
}

static void
mips32_dma_cache_wback_inv_sc(unsigned long addr, unsigned long size)
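mips32_dma_cache_wback_inv_pc() above writes a DMA buffer back to memory and invalidates it one D-cache line at a time; the listing breaks off at the start of the matching secondary-cache variant, which continues on page 2. The standalone sketch below only illustrates the line-alignment arithmetic used by that loop: it is plain user-space C, and the line size, start address, and buffer length are made-up values chosen for illustration, not taken from the kernel.

/*
 * Standalone sketch (not kernel code): shows how the range [addr, addr+size)
 * is rounded to cache-line boundaries and walked one line at a time, mirroring
 * the loop in mips32_dma_cache_wback_inv_pc().  dc_lsize, addr and size are
 * assumed example values.
 */
#include <stdio.h>

int main(void)
{
        unsigned long dc_lsize = 32;            /* assumed D-cache line size */
        unsigned long addr = 0x80123456UL;      /* assumed buffer start */
        unsigned long size = 100;               /* assumed buffer length */
        unsigned long a, end, lines = 0;

        /* Align both ends down to a line boundary, then step by one line,
         * including the line that holds the (aligned) end address. */
        a = addr & ~(dc_lsize - 1);
        end = (addr + size) & ~(dc_lsize - 1);

        while (1) {
                lines++;                        /* the kernel would flush line 'a' here */
                if (a == end)
                        break;
                a += dc_lsize;
        }

        printf("would flush %lu lines from 0x%08lx through 0x%08lx\n",
               lines, addr & ~(dc_lsize - 1), end);
        return 0;
}

With the example values, the 100-byte buffer starting at 0x80123456 touches four 32-byte lines (0x80123440 through 0x801234a0), so the loop runs four times.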
