⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 r5432.c

📁 上传linux-jx2410的源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * r5432.c: NEC Vr5432 processor.  We cannot use r4xx0.c because of
 *      its unique way-selection method for indexed operations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000 Jun Sun (jsun@mvista.com)
 */
/*
 * [jsun]
 * In a sense, this is really silly.  We cannot re-use r4xx0.c because
 * the lowest-level indexed cache operation does not take way-selection
 * into account.  So all what I am doing here is to copy all the funcs
 * and macros (in r4kcache.h) relevant to R5432 to this file, and then
 * modify a few indexed cache operations.  *sigh*
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/bcache.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>

/*
 * CP0 hazard avoidance: six nops inside a noreorder region so a CP0
 * state change has settled before the next dependent instruction.
 */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")

#include <asm/asm.h>
#include <asm/cacheops.h>

/* Keep the DEBUG_CACHE printk tracing below compiled out. */
#undef DEBUG_CACHE

/* Primary cache parameters.
*/
static int icache_size, dcache_size; /* Size in bytes */
static int ic_lsize, dc_lsize;       /* LineSize in bytes */

/* -------------------------------------------------------------------- */
/* #include <asm/r4kcache.h> */

/*
 * Index-invalidate one I-cache line.  On the Vr5432 the low address bit
 * of an indexed cache op selects the cache way (see the file header), so
 * the op is issued at both addr and addr+1 to cover both ways.
 */
extern inline void flush_icache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		"cache %1, 1(%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Index_Invalidate_I));
}

/*
 * Index-writeback-invalidate one D-cache line; both ways, as above.
 */
extern inline void flush_dcache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		"cache %1, 1(%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Index_Writeback_Inv_D));
}

/*
 * Hit-invalidate the I-cache line containing addr.  Hit ops match by
 * address, so no way-selection trick is needed here.
 */
extern inline void flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_I));
}

/*
 * Hit-writeback-invalidate the D-cache line containing addr.
 */
extern inline void flush_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_Inv_D));
}

/*
 * Hit-invalidate the D-cache line containing addr WITHOUT writing back
 * dirty data -- caller must know the line contents are dead.
 */
extern inline void invalidate_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_D));
}

/*
 * The next two are for badland addresses like signal trampolines.
*/
/*
 * Hit-invalidate an I-cache line at a possibly-bad user address.  The
 * __ex_table entry maps a fault at label 1 to the continuation at label
 * 2, so a bad address is silently skipped instead of oopsing.
 */
extern inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %1,(%0)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_I));
}

/*
 * Write back (without invalidating) a D-cache line at a possibly-bad
 * address; faults are absorbed via the exception table, as above.
 */
extern inline void protected_writeback_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %1,(%0)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_D));
}

/*
 * Issue 32 cache ops at a 32-byte stride, covering 0x400 bytes starting
 * at base.  Unrolled to avoid per-line loop overhead.
 */
#define cache32_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		.set mips3;					\
		cache %1, 0x000(%0); cache %1, 0x020(%0);	\
		cache %1, 0x040(%0); cache %1, 0x060(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x120(%0);	\
		cache %1, 0x140(%0); cache %1, 0x160(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x220(%0);	\
		cache %1, 0x240(%0); cache %1, 0x260(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\
		cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x320(%0);	\
		cache %1, 0x340(%0); cache %1, 0x360(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\
		cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\
		.set mips0;					\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

/*
 * Writeback-invalidate the whole D-cache by index.  dcache_size/2 is one
 * way's worth of indices; the second unroll at start+1 selects the other
 * way (Vr5432 indexed-op way selection -- see file header).
 */
extern inline void blast_dcache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + dcache_size/2);

	while(start < end) {
		cache32_unroll32(start,Index_Writeback_Inv_D);
		cache32_unroll32(start+1,Index_Writeback_Inv_D);
		start += 0x400;
	}
}

/*
 * Writeback-invalidate one page from the D-cache using hit ops; hit ops
 * match by address, so no second-way pass is needed.
 */
extern inline void blast_dcache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache32_unroll32(start,Hit_Writeback_Inv_D);
		start += 0x400;
	}
}

/*
 * Writeback-invalidate one page's worth of D-cache indices (both ways).
 */
extern inline void blast_dcache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache32_unroll32(start,Index_Writeback_Inv_D);
		cache32_unroll32(start+1,Index_Writeback_Inv_D);
		start += 0x400;
	}
}

/*
 * Invalidate the whole I-cache by index; same two-way scheme as
 * blast_dcache32().
 */
extern inline void blast_icache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + icache_size/2);

	while(start < end) {
		cache32_unroll32(start,Index_Invalidate_I);
		cache32_unroll32(start+1,Index_Invalidate_I);
		start += 0x400;
	}
}

/*
 * Invalidate one page from the I-cache using hit ops.
 */
extern inline void blast_icache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache32_unroll32(start,Hit_Invalidate_I);
		start += 0x400;
	}
}

/*
 * Invalidate one page's worth of I-cache indices (both ways).
 */
extern inline void blast_icache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache32_unroll32(start,Index_Invalidate_I);
		cache32_unroll32(start+1,Index_Invalidate_I);
		start += 0x400;
	}
}

/* -------------------------------------------------------------------- */

/*
 * Zero one page, 64 bytes per iteration.  Create_Dirty_Excl_D creates
 * each 32-byte line dirty-exclusive in the D-cache before the stores, so
 * the stores never fetch the old contents from memory.  The final sd sits
 * in the bne delay slot ($1 holds the end address; noat protects $1).
 */
static void r5432_clear_page_d32(void * page)
{
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		".set\tmips3\n\t"
		"daddiu\t$1,%0,%2\n"
		"1:\tcache\t%3,(%0)\n\t"
		"sd\t$0,(%0)\n\t"
		"sd\t$0,8(%0)\n\t"
		"sd\t$0,16(%0)\n\t"
		"sd\t$0,24(%0)\n\t"
		"daddiu\t%0,64\n\t"
		"cache\t%3,-32(%0)\n\t"
		"sd\t$0,-32(%0)\n\t"
		"sd\t$0,-24(%0)\n\t"
		"sd\t$0,-16(%0)\n\t"
		"bne\t$1,%0,1b\n\t"
		"sd\t$0,-8(%0)\n\t"
		".set\tmips0\n\t"
		".set\tat\n\t"
		".set\treorder"
		:"=r" (page)
		:"0" (page),
		 "I" (PAGE_SIZE),
		 "i" (Create_Dirty_Excl_D)
		:"$1","memory");
}

/*
 * This is still inefficient.  We only can do better if we know the
 * virtual address where the copy will be accessed.
*/
/*
 * Copy one page, 64 bytes per iteration, four 32-bit words at a time.
 * Each destination line is made dirty-exclusive (Create_Dirty_Excl_D)
 * before being written, so destination lines are never fetched from
 * memory.  The last sw sits in the bne delay slot; $1 holds the end
 * address of the destination (noat protects $1).
 */
static void r5432_copy_page_d32(void * to, void * from)
{
	unsigned long dummy1, dummy2;
	unsigned long reg1, reg2, reg3, reg4;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		".set\tmips3\n\t"
		"daddiu\t$1,%0,%8\n"
		"1:\tcache\t%9,(%0)\n\t"
		"lw\t%2,(%1)\n\t"
		"lw\t%3,4(%1)\n\t"
		"lw\t%4,8(%1)\n\t"
		"lw\t%5,12(%1)\n\t"
		"sw\t%2,(%0)\n\t"
		"sw\t%3,4(%0)\n\t"
		"sw\t%4,8(%0)\n\t"
		"sw\t%5,12(%0)\n\t"
		"lw\t%2,16(%1)\n\t"
		"lw\t%3,20(%1)\n\t"
		"lw\t%4,24(%1)\n\t"
		"lw\t%5,28(%1)\n\t"
		"sw\t%2,16(%0)\n\t"
		"sw\t%3,20(%0)\n\t"
		"sw\t%4,24(%0)\n\t"
		"sw\t%5,28(%0)\n\t"
		"cache\t%9,32(%0)\n\t"
		"daddiu\t%0,64\n\t"
		"daddiu\t%1,64\n\t"
		"lw\t%2,-32(%1)\n\t"
		"lw\t%3,-28(%1)\n\t"
		"lw\t%4,-24(%1)\n\t"
		"lw\t%5,-20(%1)\n\t"
		"sw\t%2,-32(%0)\n\t"
		"sw\t%3,-28(%0)\n\t"
		"sw\t%4,-24(%0)\n\t"
		"sw\t%5,-20(%0)\n\t"
		"lw\t%2,-16(%1)\n\t"
		"lw\t%3,-12(%1)\n\t"
		"lw\t%4,-8(%1)\n\t"
		"lw\t%5,-4(%1)\n\t"
		"sw\t%2,-16(%0)\n\t"
		"sw\t%3,-12(%0)\n\t"
		"sw\t%4,-8(%0)\n\t"
		"bne\t$1,%0,1b\n\t"
		"sw\t%5,-4(%0)\n\t"
		".set\tmips0\n\t"
		".set\tat\n\t"
		".set\treorder"
		:"=r" (dummy1), "=r" (dummy2),
		 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
		:"0" (to), "1" (from),
		 "I" (PAGE_SIZE),
		 "i" (Create_Dirty_Excl_D));
}

/*
 * If you think for one second that this stuff coming up is a lot
 * of bulky code eating too many kernel cache lines.  Think _again_.
 *
 * Consider:
 * 1) Taken branches have a 3 cycle penalty on R4k
 * 2) The branch itself is a real dead cycle on even R4600/R5000.
 * 3) Only one of the following variants of each type is even used by
 *    the kernel based upon the cache parameters we detect at boot time.
 *
 * QED.
*/
/* Flush both primary caches completely (32-byte line variants). */
static inline void r5432_flush_cache_all_d32i32(void)
{
	blast_dcache32(); blast_icache32();
}

/*
 * Flush caches for an address range.  Range information is unusable for
 * indexed flushes, so a full blast is done -- but only if the mm owns a
 * live ASID (context != 0); otherwise nothing of it can be cached yet.
 */
static void r5432_flush_cache_range_d32i32(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
		blast_dcache32(); blast_icache32();
	}
}

/*
 * On architectures like the Sparc, we could get rid of lines in
 * the cache created only by a certain context, but on the MIPS
 * (and actually certain Sparc's) we cannot.
 */
static void r5432_flush_cache_mm_d32i32(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r5432_flush_cache_all_d32i32();
	}
}

/*
 * Flush caches for a single user page.  Walks the page tables to find
 * the pte; bails out early when the page cannot be cached (no ASID, or
 * pte not present).
 */
static void r5432_flush_cache_page_d32i32(struct vm_area_struct *vma,
					unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If owns no valid ASID yet, cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
	printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		/* NOTE(review): truncated here -- this excerpt is page 1 of 2;
		   the remainder of this function is on the next page. */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -