📄 c-r4k.c
字号:
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * r4xx0.c: R4000 processor variant specific MMU/Cache routines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 *
 * To do:
 *
 *  - this code is a overbloated pig
 *  - many of the bug workarounds are not efficient at all, but at
 *    least they are functional ...
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/bcache.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

/* Primary cache parameters. */
static int icache_size, dcache_size;	/* Size in bytes */
static int ic_lsize, dc_lsize;		/* LineSize in bytes */

/* Secondary cache (if present) parameters. */
static unsigned int scache_size, sc_lsize;	/* Again, in bytes */

/* Pulled in after the size/lsize variables above, which the cache-op
 * helpers in <asm/r4kcache.h> presumably reference -- TODO confirm. */
#include <asm/cacheops.h>
#include <asm/r4kcache.h>

#undef DEBUG_CACHE

/*
 * Dummy cache handling routines for machines without boardcaches
 */
static void no_sc_noop(void) {}

static struct bcache_ops no_sc_ops = {
	(void *)no_sc_noop, (void *)no_sc_noop,
	(void *)no_sc_noop, (void *)no_sc_noop
};

/* Active board-cache operations; defaults to the no-op set above. */
struct bcache_ops *bcops = &no_sc_ops;

/*
 * On processors with QED R4600 style two set associative cache
 * this is the bit which selects the way in the cache for the
 * indexed cacheops.
 */
#define icache_waybit (icache_size >> 1)
#define dcache_waybit (dcache_size >> 1)

/*
 * If you think for one second that this stuff coming up is a lot
 * of bulky code eating too many kernel cache lines. Think _again_.
 *
 * Consider:
 * 1) Taken branches have a 3 cycle penalty on R4k
 * 2) The branch itself is a real dead cycle on even R4600/R5000.
* 3) Only one of the following variants of each type is even used by
 *    the kernel based upon the cache parameters we detect at boot time.
 *
 * QED.
 */

/*
 * flush_cache_all variants.  The suffix encodes the cache line sizes
 * the variant is built for: sNN = secondary cache, dNN = primary data,
 * iNN = primary instruction cache line size in bytes.  Each one blasts
 * the primary caches and, where configured, the secondary cache via the
 * blast_* helpers from <asm/r4kcache.h>.
 */
static inline void r4k_flush_cache_all_s16d16i16(void)
{
	blast_dcache16(); blast_icache16(); blast_scache16();
}

static inline void r4k_flush_cache_all_s32d16i16(void)
{
	blast_dcache16(); blast_icache16(); blast_scache32();
}

static inline void r4k_flush_cache_all_s64d16i16(void)
{
	blast_dcache16(); blast_icache16(); blast_scache64();
}

static inline void r4k_flush_cache_all_s128d16i16(void)
{
	blast_dcache16(); blast_icache16(); blast_scache128();
}

static inline void r4k_flush_cache_all_s32d32i32(void)
{
	blast_dcache32(); blast_icache32(); blast_scache32();
}

static inline void r4k_flush_cache_all_s64d32i32(void)
{
	blast_dcache32(); blast_icache32(); blast_scache64();
}

static inline void r4k_flush_cache_all_s128d32i32(void)
{
	blast_dcache32(); blast_icache32(); blast_scache128();
}

/* No secondary cache configured: only the primaries get blasted. */
static inline void r4k_flush_cache_all_d16i16(void)
{
	blast_dcache16(); blast_icache16();
}

static inline void r4k_flush_cache_all_d32i32(void)
{
	blast_dcache32(); blast_icache32();
}

/*
 * flush_cache_range variants.  If the mm is not the one currently
 * active we fall back to a full flush; otherwise we walk the page
 * tables and blast the secondary cache lines only of pages that are
 * actually valid.
 */
static void
r4k_flush_cache_range_s16d16i16(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;

	/* Context 0: mm never got an ASID, nothing cached for it. */
	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context != current->active_mm->context) {
			/* Not the active mm: flush everything instead. */
			r4k_flush_cache_all_s16d16i16();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;

			while (start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if (pte_val(*pte) & _PAGE_VALID)
					blast_scache16_page(start);
				start += PAGE_SIZE;
			}
		}
	}
}

static void
r4k_flush_cache_range_s32d16i16(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;

	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start,
end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context != current->active_mm->context) {
			/* Not the active mm: flush everything instead. */
			r4k_flush_cache_all_s32d16i16();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;

			/* Blast only s-cache lines of valid pages. */
			while (start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if (pte_val(*pte) & _PAGE_VALID)
					blast_scache32_page(start);
				start += PAGE_SIZE;
			}
		}
	}
}

/* As above, for a 64 byte secondary cache line. */
static void
r4k_flush_cache_range_s64d16i16(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;

	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context != current->active_mm->context) {
			r4k_flush_cache_all_s64d16i16();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;

			while (start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if (pte_val(*pte) & _PAGE_VALID)
					blast_scache64_page(start);
				start += PAGE_SIZE;
			}
		}
	}
}

/* As above, for a 128 byte secondary cache line. */
static void
r4k_flush_cache_range_s128d16i16(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;

	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context != current->active_mm->context) {
			r4k_flush_cache_all_s128d16i16();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;

			while (start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if (pte_val(*pte) & _PAGE_VALID)
					blast_scache128_page(start);
				start += PAGE_SIZE;
			}
		}
	}
}

/* 32 byte primary lines from here on. */
static void
r4k_flush_cache_range_s32d32i32(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;

	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context !=
current->active_mm->context) {
			/* Not the active mm: flush everything instead. */
			r4k_flush_cache_all_s32d32i32();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;

			/* Blast only s-cache lines of valid pages. */
			while (start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if (pte_val(*pte) & _PAGE_VALID)
					blast_scache32_page(start);
				start += PAGE_SIZE;
			}
		}
	}
}

/* As above, for a 64 byte secondary cache line. */
static void
r4k_flush_cache_range_s64d32i32(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;

	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context != current->active_mm->context) {
			r4k_flush_cache_all_s64d32i32();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;

			while (start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if (pte_val(*pte) & _PAGE_VALID)
					blast_scache64_page(start);
				start += PAGE_SIZE;
			}
		}
	}
}

/* As above, for a 128 byte secondary cache line. */
static void
r4k_flush_cache_range_s128d32i32(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;

	if (mm->context == 0)
		return;

	start &= PAGE_MASK;
#ifdef DEBUG_CACHE
	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
	vma = find_vma(mm, start);
	if (vma) {
		if (mm->context != current->active_mm->context) {
			r4k_flush_cache_all_s128d32i32();
		} else {
			pgd_t *pgd;
			pmd_t *pmd;
			pte_t *pte;

			while (start < end) {
				pgd = pgd_offset(mm, start);
				pmd = pmd_offset(pgd, start);
				pte = pte_offset(pmd, start);
				if (pte_val(*pte) & _PAGE_VALID)
					blast_scache128_page(start);
				start += PAGE_SIZE;
			}
		}
	}
}

/*
 * No secondary cache: the primaries are blasted wholesale; start/end
 * are only used for the debug trace.
 */
static void
r4k_flush_cache_range_d16i16(struct mm_struct *mm,
			     unsigned long start, unsigned long end)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
		blast_dcache16(); blast_icache16();
	}
}

static void
r4k_flush_cache_range_d32i32(struct mm_struct *mm,
			     unsigned long start, unsigned long end)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
		blast_dcache32(); blast_icache32();
	}
}

/*
 * On architectures like the Sparc, we could get rid of lines in
 * the cache created only by a certain context, but on the MIPS
 * (and actually certain Sparc's) we cannot.
 *
 * So every flush_cache_mm variant below simply performs the full
 * flush_cache_all of its configuration, provided the mm ever got a
 * context (ASID) at all.
 */
static void r4k_flush_cache_mm_s16d16i16(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s16d16i16();
	}
}

static void r4k_flush_cache_mm_s32d16i16(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s32d16i16();
	}
}

static void r4k_flush_cache_mm_s64d16i16(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s64d16i16();
	}
}

static void r4k_flush_cache_mm_s128d16i16(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s128d16i16();
	}
}

static void r4k_flush_cache_mm_s32d32i32(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s32d32i32();
	}
}

static void r4k_flush_cache_mm_s64d32i32(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s64d32i32();
	}
}

static void r4k_flush_cache_mm_s128d32i32(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_s128d32i32();
	}
}

static void r4k_flush_cache_mm_d16i16(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_d16i16();
	}
}

static void r4k_flush_cache_mm_d32i32(struct mm_struct *mm)
{
	if (mm->context != 0) {
#ifdef DEBUG_CACHE
		printk("cmm[%d]", (int)mm->context);
#endif
		r4k_flush_cache_all_d32i32();
	}
}

static void r4k_flush_cache_page_s16d16i16(struct vm_area_struct
*vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)
		return;

#ifdef DEBUG_CACHE
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -