cache-sh4.c
void flush_cache_mm(struct mm_struct *mm)
{
	/* Is there any good way? */
	/* XXX: possibly call flush_cache_range for each vm area */
	/*
	 * FIXME: Really, the optimal solution here would be able to flush out
	 * individual lines created by the specified context, but this isn't
	 * feasible for a number of architectures (such as MIPS, and some
	 * SPARC) .. is this possible for SuperH?
	 *
	 * In the meantime, we'll just flush all of the caches.. this
	 * seems to be the simplest way to avoid at least a few wasted
	 * cache flushes. -Lethal
	 */
	flush_cache_all();
}

static void __flush_cache_page(struct vm_area_struct *vma,
			       unsigned long address, unsigned long phys)
{
	/* We only need to flush D-cache when we have alias */
	if ((address ^ phys) & CACHE_ALIAS) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS),
			phys);
	}
	if (vma->vm_flags & VM_EXEC)
		/* Loop 4K (half) of the I-cache */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & 0x1000),
			phys);
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end)
{
	extern void flush_cache_4096_all(unsigned long start);
	unsigned long p = start & PAGE_MASK;
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	unsigned long phys;
	unsigned long d = 0;

	dir = pgd_offset(mm, p);
	pmd = pmd_offset(dir, p);
	do {
		if (pmd_none(*pmd) || pmd_bad(*pmd)) {
			p &= ~((1 << PMD_SHIFT) - 1);
			p += (1 << PMD_SHIFT);
			pmd++;
			continue;
		}
		pte = pte_offset(pmd, p);
		do {
			entry = *pte;
			if ((pte_val(entry) & _PAGE_PRESENT)) {
				phys = pte_val(entry) & PTE_PHYS_MASK;
				if ((p ^ phys) & CACHE_ALIAS) {
					d |= 1 << ((p & CACHE_ALIAS) >> 12);
					d |= 1 << ((phys & CACHE_ALIAS) >> 12);
					if (d == 0x0f)
						goto loop_exit;
				}
			}
			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & PAGE_MASK));
		pmd++;
	} while (p < end);
loop_exit:
	if (d & 1)
		flush_cache_4096_all(0);
	if (d & 2)
		flush_cache_4096_all(0x1000);
	if (d & 4)
		flush_cache_4096_all(0x2000);
	if (d & 8)
		flush_cache_4096_all(0x3000);
	flush_icache_all();
}
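/*
 * Worked example of the alias bookkeeping above (illustrative assumption,
 * not from the original source): with a 16KB direct-mapped operand cache
 * and 4KB pages, CACHE_ALIAS would cover address bits 12-13, giving four
 * 4KB cache "colours".  For p = 0x00403000 and phys = 0x0c001000:
 *
 *     (p ^ phys) & CACHE_ALIAS   = 0x2000  -> the two mappings alias
 *     (p    & CACHE_ALIAS) >> 12 = 3       -> d |= 1 << 3
 *     (phys & CACHE_ALIAS) >> 12 = 1       -> d |= 1 << 1
 *
 * Once d == 0x0f every colour needs flushing anyway, so the page-table
 * walk can stop early and fall through to the four
 * flush_cache_4096_all() calls.
 */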
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	unsigned long phys;

	dir = pgd_offset(vma->vm_mm, address);
	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;
	pte = pte_offset(pmd, address);
	entry = *pte;
	if (!(pte_val(entry) & _PAGE_PRESENT))
		return;
	phys = pte_val(entry) & PTE_PHYS_MASK;
	__flush_cache_page(vma, address, phys);
}

/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 */
void clear_user_page(void *to, unsigned long address)
{
	struct page *page = virt_to_page(to);

	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED |
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = mk_pte_phys(phys_addr, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS) >> 12]);
		set_pte(pte, entry);
		save_and_cli(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		restore_flags(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__clear_user_page((void *)p3_addr, to);
		pte_clear(pte);
		up(&p3map_sem[(address & CACHE_ALIAS) >> 12]);
	}
}

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 */
void copy_user_page(void *to, void *from, unsigned long address)
{
	struct page *page = virt_to_page(to);

	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED |
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = mk_pte_phys(phys_addr, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS) >> 12]);
		set_pte(pte, entry);
		save_and_cli(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		restore_flags(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(pte);
		up(&p3map_sem[(address & CACHE_ALIAS) >> 12]);
	}
}
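/*
 * Note on the P3 window used by clear_user_page()/copy_user_page() above
 * (explanatory addition, not in the original source): the kernel's P1
 * alias of the page may index a different cache colour than the user's
 * U0 address, so writing through P1 would leave the data in cache lines
 * the user mapping never hits.  Mapping the page at
 * P3SEG + (address & CACHE_ALIAS) gives a kernel-writable address whose
 * index bits match the user address, so the stores land in exactly the
 * lines the user will later read.
 */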
/****************************************************************************/

#if defined(CONFIG_SH_CACHE_ASSOC)
/*
 * It is not possible to use the approach implemented in clear_page.S when
 * we are in 2-way set associative mode, as it would in general only clear
 * half the cache.  For the moment we simply implement it as an iteration
 * through the cache, flushing both ways; this in itself is not optimal,
 * as the interrupt latency is probably longer than necessary.
 *
 * benedict.gaster.superh.com
 */
void __flush_dcache_all(void)
{
	unsigned long flags;
	unsigned long addr;
	unsigned long way;

	save_and_cli(flags);
#if !defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
	jump_to_P2();
#endif
	/* Clear the U and V bits for each line and each way.  On SH-4, this
	 * causes write-back if both U and V are set before the address write.
	 */
	for (way = 0; way <= 1; ++way) {
		unsigned long waybit = way << CACHE_OC_WAY_SHIFT;

		/* Loop all the D-cache */
		for (addr = CACHE_OC_ADDRESS_ARRAY + waybit;
		     addr < (CACHE_OC_ADDRESS_ARRAY + waybit +
			     (CACHE_OC_NUM_ENTRIES << CACHE_OC_ENTRY_SHIFT));
		     addr += (1 << CACHE_OC_ENTRY_SHIFT)) {
			ctrl_outl(0, addr);
		}
	}
#if !defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
	back_to_P1();
#endif
	restore_flags(flags);
}

void flush_cache_4096_all(unsigned long start)
{
	unsigned long phys = PHYSADDR(start);

	/* Loop all the D-cache */
	flush_cache_4096(CACHE_OC_ADDRESS_ARRAY, phys);
}
#endif
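/*
 * Minimal sketch (hypothetical helper, not part of the original file):
 * how a single operand-cache address-array write is composed, using the
 * same macros as the loop in __flush_dcache_all() above.  On SH-4,
 * writing 0 to the address array clears the selected line's U (dirty)
 * and V (valid) bits, and the hardware writes the line back first if
 * both were set.
 */
static inline void __oc_invalidate_line(unsigned long way, unsigned long entry)
{
	unsigned long addr = CACHE_OC_ADDRESS_ARRAY
			     + (way << CACHE_OC_WAY_SHIFT)
			     + (entry << CACHE_OC_ENTRY_SHIFT);

	ctrl_outl(0, addr);	/* U = V = 0: invalidate, write back if dirty */
}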