
r4xx0.c

Linux kernel source code (provided as a compressed archive); this is the source code accompanying the book "Linux内核" (The Linux Kernel).

Language: C
Page 1 of 5
			text = vma->vm_flags & VM_EXEC;
			while(start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if(pte_val(*pte) & _PAGE_VALID)
					blast_scache64_page(start);
				start += PAGE_SIZE;
			}
			restore_flags(flags);
		}
	}
}

static void r4k_flush_cache_range_s128d16i16(struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long flags;

	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context != current->active_mm->context) {
			r4k_flush_cache_all_s128d16i16();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;
			int text;

			save_and_cli(flags);
			text = vma->vm_flags & VM_EXEC;
			while(start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if(pte_val(*pte) & _PAGE_VALID)
					blast_scache128_page(start);
				start += PAGE_SIZE;
			}
			restore_flags(flags);
		}
	}
}

static void r4k_flush_cache_range_s32d32i32(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long flags;

	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context != current->active_mm->context) {
			r4k_flush_cache_all_s32d32i32();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;
			int text;

			save_and_cli(flags);
			text = vma->vm_flags & VM_EXEC;
			while(start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if(pte_val(*pte) & _PAGE_VALID)
					blast_scache32_page(start);
				start += PAGE_SIZE;
			}
			restore_flags(flags);
		}
	}
}

static void r4k_flush_cache_range_s64d32i32(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long flags;

	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context != current->active_mm->context) {
			r4k_flush_cache_all_s64d32i32();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;
			int text;

			save_and_cli(flags);
			text = vma->vm_flags & VM_EXEC;
			while(start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if(pte_val(*pte) & _PAGE_VALID)
					blast_scache64_page(start);
				start += PAGE_SIZE;
			}
			restore_flags(flags);
		}
	}
}

static void r4k_flush_cache_range_s128d32i32(struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long flags;

	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context != current->active_mm->context) {
			r4k_flush_cache_all_s128d32i32();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;
			int text;

			save_and_cli(flags);
			text = vma->vm_flags & VM_EXEC;
			while(start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if(pte_val(*pte) & _PAGE_VALID)
					blast_scache128_page(start);
				start += PAGE_SIZE;
			}
			restore_flags(flags);
		}
	}
}

static void r4k_flush_cache_range_d16i16(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	if (mm->context != 0) {
		unsigned long flags;

#ifdef DEBUG_CACHE
		printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
		save_and_cli(flags);
		blast_dcache16(); blast_icache16();
		restore_flags(flags);
	}
}

static void r4k_flush_cache_range_d32i32(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	if (mm->context != 0) {
		unsigned long flags;

#ifdef DEBUG_CACHE
		printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
		save_and_cli(flags);
		blast_dcache32(); blast_icache32();
		restore_flags(flags);
	}
}

/*
 * On architectures like the Sparc, we could get rid of lines in
 * the cache created only by a certain context, but on the MIPS
 * (and actually certain Sparc's) we cannot.
 */
static void r4k_flush_cache_mm_s16d16i16(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s16d16i16();
	}
}

static void r4k_flush_cache_mm_s32d16i16(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s32d16i16();
	}
}

static void r4k_flush_cache_mm_s64d16i16(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s64d16i16();
	}
}

static void r4k_flush_cache_mm_s128d16i16(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s128d16i16();
	}
}

static void r4k_flush_cache_mm_s32d32i32(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s32d32i32();
	}
}

static void r4k_flush_cache_mm_s64d32i32(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s64d32i32();
	}
}

static void r4k_flush_cache_mm_s128d32i32(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s128d32i32();
	}
}

static void r4k_flush_cache_mm_d16i16(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_d16i16();
	}
}

static void r4k_flush_cache_mm_d32i32(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_d32i32();
	}
}

static void r4k_flush_cache_page_s16d16i16(struct vm_area_struct *vma,
					   unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int text;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	text = (vma->vm_flags & VM_EXEC);
	/* Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/* Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache16_page_indexed(page);
		if(text)
			blast_icache16_page_indexed(page);
		blast_scache16_page_indexed(page);
	} else
		blast_scache16_page(page);
out:
	restore_flags(flags);
}

static void r4k_flush_cache_page_s32d16i16(struct vm_area_struct *vma,
					   unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int text;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/* If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	text = (vma->vm_flags & VM_EXEC);
	/* Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/* Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache16_page_indexed(page);
		if(text)
			blast_icache16_page_indexed(page);
		blast_scache32_page_indexed(page);
	} else
		blast_scache32_page(page);
out:
	restore_flags(flags);
}

static void r4k_flush_cache_page_s64d16i16(struct vm_area_struct *vma,
					   unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int text;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/* If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/* Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache16_page_indexed(page);
		if(text)
			blast_icache16_page_indexed(page);
		blast_scache64_page_indexed(page);
	} else
		blast_scache64_page(page);
out:
	restore_flags(flags);
}

static void r4k_flush_cache_page_s128d16i16(struct vm_area_struct *vma,
					    unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int text;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))
		goto out;

	text = (vma->vm_flags & VM_EXEC);
	/* Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * tlb refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache16_page_indexed(page);
		if(text)
			blast_icache16_page_indexed(page);
		blast_scache128_page_indexed(page);
	} else
		blast_scache128_page(page);
out:
	restore_flags(flags);
}

static void r4k_flush_cache_page_s32d32i32(struct vm_area_struct *vma,
					   unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int text;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	save_and_cli(flags);
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
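The variants above differ only in the secondary-cache (sNN), data-cache (dNN) and instruction-cache (iNN) line sizes encoded in their names; the 2.4-era MIPS port probes the cache geometry at boot and installs one implementation of each cache operation into global function pointers. The following is an editorial sketch of that selection step, not part of this listing: it assumes hypothetical variables dc_lsize and sc_lsize holding the probed line sizes and pointer names _flush_cache_all, _flush_cache_mm, _flush_cache_range and _flush_cache_page; the actual names used elsewhere in r4xx0.c may differ.

/*
 * Editorial sketch (assumed names): pick one variant per cache
 * operation from the probed S-cache and D-cache line sizes.
 */
static void __init setup_scache_funcs(void)
{
	switch (sc_lsize) {
	case 16:
		if (dc_lsize == 16) {
			_flush_cache_all   = r4k_flush_cache_all_s16d16i16;
			_flush_cache_mm    = r4k_flush_cache_mm_s16d16i16;
			_flush_cache_range = r4k_flush_cache_range_s16d16i16;
			_flush_cache_page  = r4k_flush_cache_page_s16d16i16;
		}
		break;
	case 32:
		if (dc_lsize == 16) {
			_flush_cache_all   = r4k_flush_cache_all_s32d16i16;
			_flush_cache_mm    = r4k_flush_cache_mm_s32d16i16;
			_flush_cache_range = r4k_flush_cache_range_s32d16i16;
			_flush_cache_page  = r4k_flush_cache_page_s32d16i16;
		} else {
			_flush_cache_all   = r4k_flush_cache_all_s32d32i32;
			_flush_cache_mm    = r4k_flush_cache_mm_s32d32i32;
			_flush_cache_range = r4k_flush_cache_range_s32d32i32;
			_flush_cache_page  = r4k_flush_cache_page_s32d32i32;
		}
		break;
	/* The 64- and 128-byte secondary-cache cases follow the same pattern. */
	}
}

Generating one specialized function per cache configuration trades source size for hot flush paths that contain no per-call line-size branches.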
