/*
 * r4xx0.c -- excerpt.  (Web code-viewer banner text removed; it was not
 * part of the source file.)
 */
"ld\t%3,8(%1)\n\t" "sd\t%2,(%0)\n\t" "sd\t%3,8(%0)\n\t" "ld\t%2,16(%1)\n\t" "ld\t%3,24(%1)\n\t" "sd\t%2,16(%0)\n\t" "sd\t%3,24(%0)\n\t" "daddiu\t%0,64\n\t" "daddiu\t%1,64\n\t" "ld\t%2,-32(%1)\n\t" "ld\t%3,-24(%1)\n\t" "sd\t%2,-32(%0)\n\t" "sd\t%3,-24(%0)\n\t" "ld\t%2,-16(%1)\n\t" "ld\t%3,-8(%1)\n\t" "sd\t%2,-16(%0)\n\t" "bne\t$1,%0,1b\n\t" " sd\t%3,-8(%0)\n\t" ".set\tat\n\t" ".set\treorder" :"=r" (dummy1), "=r" (dummy2), "=&r" (reg1), "=&r" (reg2) :"0" (to), "1" (from), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_SD));}static void r4k_copy_page_s128(void * to, void * from){ unsigned long dummy1, dummy2; unsigned long reg1, reg2, reg3, reg4; __asm__ __volatile__( ".set\tnoreorder\n\t" ".set\tnoat\n\t" "daddiu\t$1,%0,%8\n" "1:\tcache\t%9,(%0)\n\t" "ld\t%2,(%1)\n\t" "ld\t%3,8(%1)\n\t" "ld\t%4,16(%1)\n\t" "ld\t%5,24(%1)\n\t" "sd\t%2,(%0)\n\t" "sd\t%3,8(%0)\n\t" "sd\t%4,16(%0)\n\t" "sd\t%5,24(%0)\n\t" "ld\t%2,32(%1)\n\t" "ld\t%3,40(%1)\n\t" "ld\t%4,48(%1)\n\t" "ld\t%5,56(%1)\n\t" "sd\t%2,32(%0)\n\t" "sd\t%3,40(%0)\n\t" "sd\t%4,48(%0)\n\t" "sd\t%5,56(%0)\n\t" "daddiu\t%0,128\n\t" "daddiu\t%1,128\n\t" "ld\t%2,-64(%1)\n\t" "ld\t%3,-56(%1)\n\t" "ld\t%4,-48(%1)\n\t" "ld\t%5,-40(%1)\n\t" "sd\t%2,-64(%0)\n\t" "sd\t%3,-56(%0)\n\t" "sd\t%4,-48(%0)\n\t" "sd\t%5,-40(%0)\n\t" "ld\t%2,-32(%1)\n\t" "ld\t%3,-24(%1)\n\t" "ld\t%4,-16(%1)\n\t" "ld\t%5,-8(%1)\n\t" "sd\t%2,-32(%0)\n\t" "sd\t%3,-24(%0)\n\t" "sd\t%4,-16(%0)\n\t" "bne\t$1,%0,1b\n\t" " sd\t%5,-8(%0)\n\t" ".set\tat\n\t" ".set\treorder" :"=r" (dummy1), "=r" (dummy2), "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4) :"0" (to), "1" (from), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_SD));}/* * If you think for one second that this stuff coming up is a lot * of bulky code eating too many kernel cache lines. Think _again_. * * Consider: * 1) Taken branches have a 3 cycle penalty on R4k * 2) The branch itself is a real dead cycle on even R4600/R5000. 
* 3) Only one of the following variants of each type is even used by * the kernel based upon the cache parameters we detect at boot time. * * QED. */static inline void r4k_flush_cache_all_s16d16i16(void){ unsigned long flags; __save_and_cli(flags); blast_dcache16(); blast_icache16(); blast_scache16(); __restore_flags(flags);}static inline void r4k_flush_cache_all_s32d16i16(void){ unsigned long flags; __save_and_cli(flags); blast_dcache16(); blast_icache16(); blast_scache32(); __restore_flags(flags);}static inline void r4k_flush_cache_all_s64d16i16(void){ unsigned long flags; __save_and_cli(flags); blast_dcache16(); blast_icache16(); blast_scache64(); __restore_flags(flags);}static inline void r4k_flush_cache_all_s128d16i16(void){ unsigned long flags; __save_and_cli(flags); blast_dcache16(); blast_icache16(); blast_scache128(); __restore_flags(flags);}static inline void r4k_flush_cache_all_s32d32i32(void){ unsigned long flags; __save_and_cli(flags); blast_dcache32(); blast_icache32(); blast_scache32(); __restore_flags(flags);}static inline void r4k_flush_cache_all_s64d32i32(void){ unsigned long flags; __save_and_cli(flags); blast_dcache32(); blast_icache32(); blast_scache64(); __restore_flags(flags);}static inline void r4k_flush_cache_all_s128d32i32(void){ unsigned long flags; __save_and_cli(flags); blast_dcache32(); blast_icache32(); blast_scache128(); __restore_flags(flags);}static inline void r4k_flush_cache_all_d16i16(void){ unsigned long flags; __save_and_cli(flags); blast_dcache16(); blast_icache16(); __restore_flags(flags);}static inline void r4k_flush_cache_all_d32i32(void){ unsigned long flags; __save_and_cli(flags); blast_dcache32(); blast_icache32(); __restore_flags(flags);}static void r4k_flush_cache_range_s16d16i16(struct mm_struct *mm, unsigned long start, unsigned long end){ struct vm_area_struct *vma; unsigned long flags; if (CPU_CONTEXT(smp_processor_id(), mm) == 0) return; start &= PAGE_MASK;#ifdef DEBUG_CACHE printk("crange[%d,%08lx,%08lx]", 
(int)mm->context, start, end);#endif vma = find_vma(mm, start); if(vma) { if (CPU_CONTEXT(smp_processor_id(), mm) != CPU_CONTEXT(smp_processor_id(), current->mm)) { r4k_flush_cache_all_s16d16i16(); } else { pgd_t *pgd; pmd_t *pmd; pte_t *pte; __save_and_cli(flags); while(start < end) { pgd = pgd_offset(mm, start); pmd = pmd_offset(pgd, start); pte = pte_offset(pmd, start); if(pte_val(*pte) & _PAGE_VALID) blast_scache16_page(start); start += PAGE_SIZE; } __restore_flags(flags); } }}static void r4k_flush_cache_range_s32d16i16(struct mm_struct *mm, unsigned long start, unsigned long end){ struct vm_area_struct *vma; unsigned long flags; if (CPU_CONTEXT(smp_processor_id(), mm) == 0) return; start &= PAGE_MASK;#ifdef DEBUG_CACHE printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);#endif vma = find_vma(mm, start); if(vma) { if (CPU_CONTEXT(smp_processor_id(), mm) != CPU_CONTEXT(smp_processor_id(), current->mm)) { r4k_flush_cache_all_s32d16i16(); } else { pgd_t *pgd; pmd_t *pmd; pte_t *pte; __save_and_cli(flags); while(start < end) { pgd = pgd_offset(mm, start); pmd = pmd_offset(pgd, start); pte = pte_offset(pmd, start); if(pte_val(*pte) & _PAGE_VALID) blast_scache32_page(start); start += PAGE_SIZE; } __restore_flags(flags); } }}static void r4k_flush_cache_range_s64d16i16(struct mm_struct *mm, unsigned long start, unsigned long end){ struct vm_area_struct *vma; unsigned long flags; if (CPU_CONTEXT(smp_processor_id(), mm) == 0) return; start &= PAGE_MASK;#ifdef DEBUG_CACHE printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);#endif vma = find_vma(mm, start); if(vma) { if (CPU_CONTEXT(smp_processor_id(), mm) != CPU_CONTEXT(smp_processor_id(), current->mm)) { r4k_flush_cache_all_s64d16i16(); } else { pgd_t *pgd; pmd_t *pmd; pte_t *pte; __save_and_cli(flags); while(start < end) { pgd = pgd_offset(mm, start); pmd = pmd_offset(pgd, start); pte = pte_offset(pmd, start); if(pte_val(*pte) & _PAGE_VALID) blast_scache64_page(start); start += PAGE_SIZE; } 
__restore_flags(flags); } }}static void r4k_flush_cache_range_s128d16i16(struct mm_struct *mm, unsigned long start, unsigned long end){ struct vm_area_struct *vma; unsigned long flags; if (CPU_CONTEXT(smp_processor_id(), mm) == 0) return; start &= PAGE_MASK;#ifdef DEBUG_CACHE printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);#endif vma = find_vma(mm, start); if(vma) { if (CPU_CONTEXT(smp_processor_id(), mm) != CPU_CONTEXT(smp_processor_id(), current->mm)) { r4k_flush_cache_all_s128d16i16(); } else { pgd_t *pgd; pmd_t *pmd; pte_t *pte; __save_and_cli(flags); while(start < end) { pgd = pgd_offset(mm, start); pmd = pmd_offset(pgd, start); pte = pte_offset(pmd, start); if(pte_val(*pte) & _PAGE_VALID) blast_scache128_page(start); start += PAGE_SIZE; } __restore_flags(flags); } }}static void r4k_flush_cache_range_s32d32i32(struct mm_struct *mm, unsigned long start, unsigned long end){ struct vm_area_struct *vma; unsigned long flags; if (CPU_CONTEXT(smp_processor_id(), mm) == 0) return; start &= PAGE_MASK;#ifdef DEBUG_CACHE printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);#endif vma = find_vma(mm, start); if(vma) { if (CPU_CONTEXT(smp_processor_id(), mm) != CPU_CONTEXT(smp_processor_id(), current->mm)) { r4k_flush_cache_all_s32d32i32(); } else { pgd_t *pgd; pmd_t *pmd; pte_t *pte; __save_and_cli(flags); while(start < end) { pgd = pgd_offset(mm, start); pmd = pmd_offset(pgd, start); pte = pte_offset(pmd, start); if(pte_val(*pte) & _PAGE_VALID) blast_scache32_page(start); start += PAGE_SIZE; } __restore_flags(flags); } }}static void r4k_flush_cache_range_s64d32i32(struct mm_struct *mm, unsigned long start, unsigned long end){ struct vm_area_struct *vma; unsigned long flags; if (CPU_CONTEXT(smp_processor_id(), mm) == 0) return; start &= PAGE_MASK;#ifdef DEBUG_CACHE printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);#endif vma = find_vma(mm, start); if(vma) { if (CPU_CONTEXT(smp_processor_id(), mm) != CPU_CONTEXT(smp_processor_id(), 
current->mm)) { r4k_flush_cache_all_s64d32i32(); } else { pgd_t *pgd; pmd_t *pmd; pte_t *pte; __save_and_cli(flags); while(start < end) { pgd = pgd_offset(mm, start); pmd = pmd_offset(pgd, start); pte = pte_offset(pmd, start); if(pte_val(*pte) & _PAGE_VALID) blast_scache64_page(start); start += PAGE_SIZE; } __restore_flags(flags); } }}static void r4k_flush_cache_range_s128d32i32(struct mm_struct *mm, unsigned long start, unsigned long end){ struct vm_area_struct *vma; unsigned long flags; if (CPU_CONTEXT(smp_processor_id(), mm) != 0) return; start &= PAGE_MASK;#ifdef DEBUG_CACHE printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);#endif vma = find_vma(mm, start); if(vma) { if (CPU_CONTEXT(smp_processor_id(), mm) != CPU_CONTEXT(smp_processor_id(), current->mm)) { r4k_flush_cache_all_s128d32i32(); } else { pgd_t *pgd; pmd_t *pmd; pte_t *pte; __save_and_cli(flags); while(start < end) { pgd = pgd_offset(mm, start); pmd = pmd_offset(pgd, start); pte = pte_offset(pmd, start); if(pte_val(*pte) & _PAGE_VALID) blast_scache128_page(start); start += PAGE_SIZE; } __restore_flags(flags); } }}static void r4k_flush_cache_range_d16i16(struct mm_struct *mm, unsigned long start, unsigned long end){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) { unsigned long flags;#ifdef DEBUG_CACHE printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);#endif __save_and_cli(flags); blast_dcache16(); blast_icache16(); __restore_flags(flags); }}static void r4k_flush_cache_range_d32i32(struct mm_struct *mm, unsigned long start, unsigned long end){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) { unsigned long flags;#ifdef DEBUG_CACHE printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);#endif __save_and_cli(flags); blast_dcache32(); blast_icache32(); __restore_flags(flags); }}/* * On architectures like the Sparc, we could get rid of lines in * the cache created only by a certain context, but on the MIPS * (and actually certain Sparc's) we cannot. 
*/static void r4k_flush_cache_mm_s16d16i16(struct mm_struct *mm){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {#ifdef DEBUG_CACHE printk("cmm[%d]", (int)mm->context);#endif r4k_flush_cache_all_s16d16i16(); }}static void r4k_flush_cache_mm_s32d16i16(struct mm_struct *mm){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {#ifdef DEBUG_CACHE printk("cmm[%d]", (int)mm->context);#endif r4k_flush_cache_all_s32d16i16(); }}static void r4k_flush_cache_mm_s64d16i16(struct mm_struct *mm){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {#ifdef DEBUG_CACHE printk("cmm[%d]", (int)mm->context);#endif r4k_flush_cache_all_s64d16i16(); }}static void r4k_flush_cache_mm_s128d16i16(struct mm_struct *mm){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {#ifdef DEBUG_CACHE printk("cmm[%d]", (int)mm->context);#endif r4k_flush_cache_all_s128d16i16(); }}static void r4k_flush_cache_mm_s32d32i32(struct mm_struct *mm){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {#ifdef DEBUG_CACHE printk("cmm[%d]", (int)mm->context);#endif r4k_flush_cache_all_s32d32i32(); }}static void r4k_flush_cache_mm_s64d32i32(struct mm_struct *mm){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {#ifdef DEBUG_CACHE printk("cmm[%d]", (int)mm->context);#endif r4k_flush_cache_all_s64d32i32(); }}static void r4k_flush_cache_mm_s128d32i32(struct mm_struct *mm){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {#ifdef DEBUG_CACHE printk("cmm[%d]", (int)mm->context);#endif r4k_flush_cache_all_s128d32i32(); }}static void r4k_flush_cache_mm_d16i16(struct mm_struct *mm){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {#ifdef DEBUG_CACHE printk("cmm[%d]", (int)mm->context);#endif r4k_flush_cache_all_d16i16(); }}static void r4k_flush_cache_mm_d32i32(struct mm_struct *mm){ if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {#ifdef DEBUG_CACHE printk("cmm[%d]", (int)mm->context);#endif r4k_flush_cache_all_d32i32(); }}static void r4k_flush_cache_page_s16d16i16(struct vm_area_struct *vma, unsigned long page){ struct mm_struct *mm = vma->vm_mm; 
unsigned long flags; pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; /* * If ownes no valid ASID yet, cannot possibly have gotten * this page into the cache. */ if (CPU_CONTEXT(smp_processor_id(), mm) == 0) return;#ifdef DEBUG_CACHE printk("cpage[%d,%08lx]", (int)mm->context, page);#endif __save_and_cli(flags); page &= PAGE_MASK; pgdp = pgd_offset(mm, page); pmdp = pmd_offset(pgdp, page); ptep = pte_offset(pmdp, page); /* * If the page isn't marked valid, the page cannot possibly be * in the cache. */ if(!(pte_val(*ptep) & _PAGE_VALID))
/*
 * (Web code-viewer keyboard-shortcut help removed here -- it was page
 * chrome from the hosting site, not part of the source file.)
 */