rm7k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * rm7k.c: RM7000 processor variant specific MMU/Cache routines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998 Ralf Baechle ralf@gnu.org
 *
 * To do:
 *
 *  - this code is an overbloated pig
 *  - many of the bug workarounds are not efficient at all, but at
 *    least they are functional ...
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")

/* Primary cache parameters. */
static int icache_size, dcache_size;	/* Size in bytes */

#define ic_lsize	32	/* Fixed to 32 bytes on RM7000 */
#define dc_lsize	32	/* Fixed to 32 bytes on RM7000 */
#define sc_lsize	32	/* Fixed to 32 bytes on RM7000 */
#define tc_pagesize	(32*128)

/* Secondary cache parameters. */
#define scache_size	(256*1024)	/* Fixed to 256KiB on RM7000 */

#include <asm/cacheops.h>
#include <asm/r4kcache.h>

int rm7k_tcache_enabled = 0;

/*
 * Not added to asm/r4kcache.h because it seems to be RM7000-specific.
 */
#define Page_Invalidate_T 0x16

static inline void invalidate_tcache_page(unsigned long addr)
{
	__asm__ __volatile__(
		".set\tnoreorder\t\t\t# invalidate_tcache_page\n\t"
		".set\tmips3\n\t"
		"cache\t%1, (%0)\n\t"
		".set\tmips0\n\t"
		".set\treorder"
		:
		: "r" (addr), "i" (Page_Invalidate_T));
}

/*
 * Zero an entire page.  Note that while the RM7000 has a second level cache
 * it doesn't have a Create_Dirty_Excl_SD operation.
 */
static void rm7k_clear_page(void * page)
{
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		".set\tmips3\n\t"
		"daddiu\t$1,%0,%2\n"
		"1:\tcache\t%3,(%0)\n\t"
		"sd\t$0,(%0)\n\t"
		"sd\t$0,8(%0)\n\t"
		"sd\t$0,16(%0)\n\t"
		"sd\t$0,24(%0)\n\t"
		"daddiu\t%0,64\n\t"
		"cache\t%3,-32(%0)\n\t"
		"sd\t$0,-32(%0)\n\t"
		"sd\t$0,-24(%0)\n\t"
		"sd\t$0,-16(%0)\n\t"
		"bne\t$1,%0,1b\n\t"
		"sd\t$0,-8(%0)\n\t"
		".set\tmips0\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "=r" (page)
		: "0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_D)
		: "$1", "memory");
}
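/*
 * A minimal plain-C sketch of the loop above (illustration only;
 * rm7k_clear_page_ref is a hypothetical name, not part of this file).
 * Create_Dirty_Excl_D claims each 32-byte dcache line as dirty-exclusive
 * before it is written, so the zeroing stores never stall fetching the
 * stale line contents from memory.  The cacheop has no C equivalent and
 * appears as a comment; the asm additionally unrolls two lines per branch.
 */
#if 0	/* illustration only */
static void rm7k_clear_page_ref(void *page)
{
	unsigned long long *p = page;
	unsigned long long *end = (void *)((char *)page + PAGE_SIZE);

	while (p < end) {
		/* cache Create_Dirty_Excl_D,0(p) -- claim the line */
		p[0] = 0;	/* sd $0,(%0)   */
		p[1] = 0;	/* sd $0,8(%0)  */
		p[2] = 0;	/* sd $0,16(%0) */
		p[3] = 0;	/* sd $0,24(%0) */
		p += 4;		/* next 32-byte dcache line */
	}
}
#endif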
/*
 * Copy an entire page.  Note that while the RM7000 has a second level cache
 * it doesn't have a Create_Dirty_Excl_SD operation.
 */
static void rm7k_copy_page(void * to, void * from)
{
	unsigned long dummy1, dummy2;
	unsigned long reg1, reg2, reg3, reg4;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		".set\tmips3\n\t"
		"daddiu\t$1,%0,%8\n"
		"1:\tcache\t%9,(%0)\n\t"
		"lw\t%2,(%1)\n\t"
		"lw\t%3,4(%1)\n\t"
		"lw\t%4,8(%1)\n\t"
		"lw\t%5,12(%1)\n\t"
		"sw\t%2,(%0)\n\t"
		"sw\t%3,4(%0)\n\t"
		"sw\t%4,8(%0)\n\t"
		"sw\t%5,12(%0)\n\t"
		"lw\t%2,16(%1)\n\t"
		"lw\t%3,20(%1)\n\t"
		"lw\t%4,24(%1)\n\t"
		"lw\t%5,28(%1)\n\t"
		"sw\t%2,16(%0)\n\t"
		"sw\t%3,20(%0)\n\t"
		"sw\t%4,24(%0)\n\t"
		"sw\t%5,28(%0)\n\t"
		"cache\t%9,32(%0)\n\t"
		"daddiu\t%0,64\n\t"
		"daddiu\t%1,64\n\t"
		"lw\t%2,-32(%1)\n\t"
		"lw\t%3,-28(%1)\n\t"
		"lw\t%4,-24(%1)\n\t"
		"lw\t%5,-20(%1)\n\t"
		"sw\t%2,-32(%0)\n\t"
		"sw\t%3,-28(%0)\n\t"
		"sw\t%4,-24(%0)\n\t"
		"sw\t%5,-20(%0)\n\t"
		"lw\t%2,-16(%1)\n\t"
		"lw\t%3,-12(%1)\n\t"
		"lw\t%4,-8(%1)\n\t"
		"lw\t%5,-4(%1)\n\t"
		"sw\t%2,-16(%0)\n\t"
		"sw\t%3,-12(%0)\n\t"
		"sw\t%4,-8(%0)\n\t"
		"bne\t$1,%0,1b\n\t"
		"sw\t%5,-4(%0)\n\t"
		".set\tmips0\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "=r" (dummy1), "=r" (dummy2),
		  "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
		: "0" (to), "1" (from), "I" (PAGE_SIZE),
		  "i" (Create_Dirty_Excl_D));
}

static void __flush_cache_all_d32i32(void)
{
	blast_dcache32();
	blast_icache32();
}

static inline void rm7k_flush_cache_all_d32i32(void)
{
	/* Yes!  Caches that don't suck ... */
}

static void rm7k_flush_cache_range_d32i32(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	/* RM7000 caches are sane ... */
}

static void rm7k_flush_cache_mm_d32i32(struct mm_struct *mm)
{
	/* RM7000 caches are sane ... */
}

static void rm7k_flush_cache_page_d32i32(struct vm_area_struct *vma,
					 unsigned long page)
{
	/* RM7000 caches are sane ... */
}

static void rm7k_flush_page_to_ram_d32i32(struct page * page)
{
	/* Yes!  Caches that don't suck! */
}

static void rm7k_flush_icache_range(unsigned long start, unsigned long end)
{
	/*
	 * FIXME: This is overdoing things and harms performance.
	 */
	__flush_cache_all_d32i32();
}

static void rm7k_flush_icache_page(struct vm_area_struct *vma,
				   struct page *page)
{
	/*
	 * FIXME: We should not flush the entire cache but establish some
	 * temporary mapping and use hit_invalidate operation to flush out
	 * the line from the cache.
	 */
	__flush_cache_all_d32i32();
}
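/*
 * The two DMA routines below walk the buffer one 32-byte line at a time,
 * from the line containing the first byte to the line containing
 * addr + size -- inclusively, hence the "if (a == end) break" shape.
 * A minimal sketch of that walk (illustration only; walk_range() and the
 * cacheop callback are hypothetical names, not part of this file):
 */
#if 0	/* illustration only */
static void walk_range(unsigned long addr, unsigned long size,
		       void (*cacheop)(unsigned long))
{
	unsigned long a   = addr & ~(sc_lsize - 1);		/* first line */
	unsigned long end = (addr + size) & ~(sc_lsize - 1);	/* last line  */

	/*
	 * Note: when addr + size is exactly line-aligned this also touches
	 * the line just past the buffer; (addr + size - 1) would be the
	 * tighter bound -- possibly what the "XXX" note below alludes to.
	 */
	while (1) {
		cacheop(a);
		if (a == end)
			break;
		a += sc_lsize;
	}
}
#endif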
/*
 * Writeback and invalidate the primary dcache before DMA.
 * (XXX These need to be fixed ...)
 */
static void rm7k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	a = addr & ~(sc_lsize - 1);
	end = (addr + size) & ~(sc_lsize - 1);
	while (1) {
		flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
		flush_icache_line(a);	/* Hit_Invalidate_I */
		flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
		if (a == end)
			break;
		a += sc_lsize;
	}

	if (!rm7k_tcache_enabled)
		return;

	a = addr & ~(tc_pagesize - 1);
	end = (addr + size) & ~(tc_pagesize - 1);
	while (1) {
		invalidate_tcache_page(a);	/* Page_Invalidate_T */
		if (a == end)
			break;
		a += tc_pagesize;
	}
}

static void rm7k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	a = addr & ~(sc_lsize - 1);
	end = (addr + size) & ~(sc_lsize - 1);
	while (1) {
		invalidate_dcache_line(a);	/* Hit_Invalidate_D */
		flush_icache_line(a);		/* Hit_Invalidate_I */
		invalidate_scache_line(a);	/* Hit_Invalidate_SD */
		if (a == end)
			break;
		a += sc_lsize;
	}

	if (!rm7k_tcache_enabled)
		return;

	a = addr & ~(tc_pagesize - 1);
	end = (addr + size) & ~(tc_pagesize - 1);
	while (1) {
		invalidate_tcache_page(a);	/* Page_Invalidate_T */
		if (a == end)
			break;
		a += tc_pagesize;
	}
}

static void rm7k_dma_cache_wback(unsigned long addr, unsigned long size)
{
	panic("rm7k_dma_cache_wback called - should not happen.\n");
}

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void rm7k_flush_cache_sigtramp(unsigned long addr)
{
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
}

/*
 * Undocumented RM7000: Bit 29 in the info register of the RM7000 v2.0
 * indicates if the TLB has 48 or 64 entries.
 *
 * 29	1 => 64 entry JTLB
 *	0 => 48 entry JTLB
 */
static inline int __attribute__((const)) ntlb_entries(void)
{
	if (get_info() & (1 << 29))
		return 64;

	return 48;
}

void flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	__save_and_cli(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = get_entryhi() & 0xff;
	set_entryhi(KSEG0);
	set_entrylo0(0);
	set_entrylo1(0);
	BARRIER;

	entry = get_wired();

	/* Blast 'em all away. */
	while (entry < ntlb_entries()) {
		set_index(entry);
		BARRIER;
		tlb_write_indexed();
		BARRIER;
		entry++;
	}
	BARRIER;
	set_entryhi(old_ctx);
	__restore_flags(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != 0) {
		unsigned long flags;

		__save_and_cli(flags);
		get_new_mmu_context(mm, asid_cache);
		if (mm == current->mm)
			set_entryhi(mm->context & 0xff);
		__restore_flags(flags);
	}
}

void flush_tlb_range(struct mm_struct *mm, unsigned long start,
		     unsigned long end)
{
	if (mm->context != 0) {
		unsigned long flags;
		int size;

		__save_and_cli(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= (ntlb_entries() / 2)) {
			int oldpid = (get_entryhi() & 0xff);
			int newpid = (mm->context & 0xff);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				set_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				BARRIER;
				tlb_probe();
				BARRIER;
				idx = get_index();
				set_entrylo0(0);
				set_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				set_entryhi(KSEG0 + idx*0x2000);
				BARRIER;
				tlb_write_indexed();
				BARRIER;
			}
			set_entryhi(oldpid);
		} else {
			get_new_mmu_context(mm, asid_cache);
			if (mm == current->mm)
				set_entryhi(mm->context & 0xff);
		}
		__restore_flags(flags);
	}
}
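/*
 * Note on the arithmetic in flush_tlb_range() above: each MIPS JTLB entry
 * maps an even/odd pair of pages under a single VPN2, so the routine halves
 * the page count ((size + 1) >> 1) and aligns addresses to the double-page
 * boundary (PAGE_MASK << 1) before probing.  Worked example with
 * hypothetical numbers, assuming 4 KiB pages:
 */
#if 0	/* illustration only */
	/* start = 0x1234, end = 0x5678:
	 *   pages = (0x5678 - 0x1234 + 0xfff) >> 12   = 5
	 *   pairs = (5 + 1) >> 1                      = 3  (<= JTLB/2, probe)
	 *   start &= PAGE_MASK << 1                  -> 0x0000 (8 KiB aligned)
	 *   end = (end + 0x1fff) & (PAGE_MASK << 1)  -> 0x6000
	 * Three probes, at VPN2 values 0x0000, 0x2000 and 0x4000.
	 */
#endif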