
r4xx0.c

Linux kernel source code, from the compressed source archive accompanying the book "The Linux Kernel".

Language: C
Page 1 of 5
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache32_page_indexed(page);
		if (text)
			blast_icache32_page_indexed(page);
		blast_scache32_page_indexed(page);
	} else
		blast_scache32_page(page);
out:
	restore_flags(flags);
}

static void r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma,
					   unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int text;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache32_page_indexed(page);
		if (text)
			blast_icache32_page_indexed(page);
		blast_scache64_page_indexed(page);
	} else
		blast_scache64_page(page);
out:
	restore_flags(flags);
}

static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma,
					    unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int text;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/* If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/* Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache32_page_indexed(page);
		if (text)
			blast_icache32_page_indexed(page);
		blast_scache128_page_indexed(page);
	} else
		blast_scache128_page(page);
out:
	restore_flags(flags);
}
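/*
 * Editor's sketch, not part of the original file: the indexed flushes
 * above avoid the TLB entirely by folding the user virtual page into a
 * KSEG0 alias that selects the same cache lines.  Assuming a 512 KB
 * secondary cache (scache_size == 0x80000) and KSEG0 == 0x80000000, a
 * page at 0x0044f000 becomes 0x8004f000: the low index bits survive, so
 * the Index_* CACHE operations hit the right sets with no translation.
 * The hypothetical helper below just names that computation.
 */
static inline unsigned long scache_index_alias(unsigned long page)
{
	/* keep only the cache index bits, rebase into cached unmapped space */
	return KSEG0 + (page & (scache_size - 1));
}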
static void r4k_flush_cache_page_d16i16(struct vm_area_struct *vma,
					unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int text;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/* If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm == current->active_mm) {
		blast_dcache16_page(page);
		if (text)
			blast_icache16_page(page);
	} else {
		/* Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (dcache_size - 1)));
		blast_dcache16_page_indexed(page);
		if (text)
			blast_icache16_page_indexed(page);
	}
out:
	restore_flags(flags);
}

static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma,
					unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int text;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		goto out;

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		blast_dcache32_page(page);
		if (text)
			blast_icache32_page(page);
	} else {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (dcache_size - 1)));
		blast_dcache32_page_indexed(page);
		if (text)
			blast_icache32_page_indexed(page);
	}
out:
	restore_flags(flags);
}

static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
					      unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int text;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		goto out;

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		blast_dcache32_page(page);
		if (text)
			blast_icache32_page(page);
	} else {
		/* Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (dcache_size - 1)));
		blast_dcache32_page_indexed(page);
		blast_dcache32_page_indexed(page ^ dcache_waybit);
		if (text) {
			blast_icache32_page_indexed(page);
			blast_icache32_page_indexed(page ^ icache_waybit);
		}
	}
out:
	restore_flags(flags);
}
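/*
 * Editor's note, not part of the original file: the r4600 variant above
 * issues every indexed flush twice because the R4600's primary caches
 * are two-way set associative; dcache_waybit and icache_waybit hold the
 * address bit that selects the way, so XOR-ing it reaches the same index
 * in the other way.  As a hypothetical helper:
 */
static inline void blast_dcache32_page_indexed_2way(unsigned long page)
{
	blast_dcache32_page_indexed(page);			/* way 0 */
	blast_dcache32_page_indexed(page ^ dcache_waybit);	/* way 1 */
}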
/* If the addresses passed to these routines are valid, they are
 * either:
 *
 * 1) In KSEG0, so we can do a direct flush of the page.
 * 2) In KSEG2, and since every process can translate those
 *    addresses all the time in kernel mode we can do a direct
 *    flush.
 * 3) In KSEG1, no flush necessary.
 */
static void r4k_flush_page_to_ram_s16d16i16(struct page * page)
{
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
		printk("cram[%08lx]", addr);
#endif
		blast_scache16_page(addr);
	}
}

static void r4k_flush_page_to_ram_s32d16i16(struct page * page)
{
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
		printk("cram[%08lx]", addr);
#endif
		blast_scache32_page(addr);
	}
}

static void r4k_flush_page_to_ram_s64d16i16(struct page * page)
{
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
		printk("cram[%08lx]", addr);
#endif
		blast_scache64_page(addr);
	}
}

static void r4k_flush_page_to_ram_s128d16i16(struct page * page)
{
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
		printk("cram[%08lx]", addr);
#endif
		blast_scache128_page(addr);
	}
}

static void r4k_flush_page_to_ram_s32d32i32(struct page * page)
{
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
		printk("cram[%08lx]", addr);
#endif
		blast_scache32_page(addr);
	}
}

static void r4k_flush_page_to_ram_s64d32i32(struct page * page)
{
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
		printk("cram[%08lx]", addr);
#endif
		blast_scache64_page(addr);
	}
}

static void r4k_flush_page_to_ram_s128d32i32(struct page * page)
{
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
		printk("cram[%08lx]", addr);
#endif
		blast_scache128_page(addr);
	}
}

static void r4k_flush_page_to_ram_d16i16(struct page * page)
{
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		unsigned long flags;

#ifdef DEBUG_CACHE
		printk("cram[%08lx]", addr);
#endif
		__save_and_cli(flags);
		blast_dcache16_page(addr);
		__restore_flags(flags);
	}
}

static void r4k_flush_page_to_ram_d32i32(struct page * page)
{
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		unsigned long flags;

#ifdef DEBUG_CACHE
		printk("cram[%08lx]", addr);
#endif
		__save_and_cli(flags);
		blast_dcache32_page(addr);
		__restore_flags(flags);
	}
}
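/*
 * Editor's note, not part of the original file: on 32-bit MIPS the
 * segments tested above are fixed ranges:
 *
 *   KSEG0  0x80000000 - 0x9fffffff   cached, unmapped
 *   KSEG1  0xa0000000 - 0xbfffffff   uncached, unmapped
 *   KSEG2  0xc0000000 and up         cached, TLB-mapped
 *
 * so the condition skips exactly KSEG1, through which no access is ever
 * cached.  Spelled out as a hypothetical predicate:
 */
static inline int addr_needs_flush(unsigned long addr)
{
	return (addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2);
}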
/*
 * Writeback and invalidate the primary cache dcache before DMA.
 *
 * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
 * Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Exclusive_D will only
 * operate correctly if the internal data cache refill buffer is empty.  These
 * CACHE instructions should be separated from any potential data cache miss
 * by a load instruction to an uncached address to empty the response buffer."
 * (Revision 2.0 device errata from IDT available on http://www.idt.com/
 * in .pdf format.)
 */
static void
r4k_dma_cache_wback_inv_pc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;
	unsigned int flags;

	if (size >= dcache_size) {
		flush_cache_all();
	} else {
		/* Workaround for R4600 bug.  See comment above. */
		save_and_cli(flags);
		*(volatile unsigned long *)KSEG1;

		a = addr & ~(dc_lsize - 1);
		end = (addr + size) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a); /* Hit_Writeback_Inv_D */
			if (a == end) break;
			a += dc_lsize;
		}
		restore_flags(flags);
	}
	bcops->bc_wback_inv(addr, size);
}

static void
r4k_dma_cache_wback_inv_sc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (size >= scache_size) {
		flush_cache_all();
		return;
	}

	a = addr & ~(sc_lsize - 1);
	end = (addr + size) & ~(sc_lsize - 1);
	while (1) {
		flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
		if (a == end) break;
		a += sc_lsize;
	}
}

static void
r4k_dma_cache_inv_pc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;
	unsigned int flags;

	if (size >= dcache_size) {
		flush_cache_all();
	} else {
		/* Workaround for R4600 bug.  See comment above. */
		save_and_cli(flags);
		*(volatile unsigned long *)KSEG1;

		a = addr & ~(dc_lsize - 1);
		end = (addr + size) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a); /* Hit_Writeback_Inv_D */
			if (a == end) break;
			a += dc_lsize;
		}
		restore_flags(flags);
	}
	bcops->bc_inv(addr, size);
}

static void
r4k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (size >= scache_size) {
		flush_cache_all();
		return;
	}

	a = addr & ~(sc_lsize - 1);
	end = (addr + size) & ~(sc_lsize - 1);
	while (1) {
		flush_scache_line(a); /* Hit_Writeback_Inv_SD */
		if (a == end) break;
		a += sc_lsize;
	}
}

static void
r4k_dma_cache_wback(unsigned long addr, unsigned long size)
{
	panic("r4k_dma_cache called - should not happen.\n");
}

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
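/*
 * Editor's sketch, not part of the original file: the DMA routines above
 * combine two ideas.  The bare "*(volatile unsigned long *)KSEG1;" load
 * is the R4600 v2.0 workaround quoted earlier - a single uncached read
 * empties the data cache refill buffer so the following Hit_* CACHE
 * operations take effect - and the loop then walks the range one cache
 * line at a time.  A minimal equivalent walk over [addr, addr + size),
 * assuming dc_lsize is a power of two and size is nonzero, would be:
 */
static inline void dma_wback_inv_sketch(unsigned long addr, unsigned long size)
{
	unsigned long a = addr & ~(dc_lsize - 1);		 /* first line */
	unsigned long end = (addr + size - 1) & ~(dc_lsize - 1); /* last line */

	*(volatile unsigned long *)KSEG1;	/* drain the refill buffer */
	for (; a <= end; a += dc_lsize)
		flush_dcache_line(a);		/* Hit_Writeback_Inv_D */
}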
