📄 cache-sh4.c
/* $Id: cache-sh4.c,v 1.1.1.1.2.8 2003/07/09 09:59:30 trent Exp $
 *
 * linux/arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

#define CCR              0xff00001c  /* Address of Cache Control Register */

#define CCR_CACHE_OCE    0x0001      /* Operand Cache Enable */
#define CCR_CACHE_WT     0x0002      /* Write-Through (for P0,U0,P3) (else write-back) */
#define CCR_CACHE_CB     0x0004      /* Copy-Back (for P1) (else write-through) */
#define CCR_CACHE_OCI    0x0008      /* OC Invalidate */
#define CCR_CACHE_ORA    0x0020      /* OC RAM Mode */
#define CCR_CACHE_OIX    0x0080      /* OC Index Enable */
#define CCR_CACHE_ICE    0x0100      /* Instruction Cache Enable */
#define CCR_CACHE_ICI    0x0800      /* IC Invalidate */
#define CCR_CACHE_IIX    0x8000      /* IC Index Enable */

#if defined(CONFIG_SH_CACHE_ASSOC)
#define CCR_CACHE_EMODE  0x80000000
/* CCR setup for associative mode: 16k+32k 2-way, P1 copy-back, enable */
#define CCR_CACHE_VAL    (CCR_CACHE_EMODE|CCR_CACHE_ENABLE|CCR_CACHE_CB)
#else
/* Default CCR setup: 8k+16k-byte cache, P1 copy-back, enable */
#define CCR_CACHE_VAL    (CCR_CACHE_ENABLE|CCR_CACHE_CB)
#endif

#define CCR_CACHE_INIT   (CCR_CACHE_VAL|CCR_CACHE_OCI|CCR_CACHE_ICI)
#define CCR_CACHE_ENABLE (CCR_CACHE_OCE|CCR_CACHE_ICE)

#define CACHE_IC_ADDRESS_ARRAY   0xf0000000
#define CACHE_OC_ADDRESS_ARRAY   0xf4000000

#define CACHE_VALID              1
#define CACHE_UPDATED            2
#define CACHE_ASSOC              8

#define CACHE_OC_WAY_SHIFT       14
#define CACHE_IC_WAY_SHIFT       13
#define CACHE_OC_ENTRY_SHIFT     5
#define CACHE_IC_ENTRY_SHIFT     5
#define CACHE_OC_ENTRY_MASK      0x3fe0
#define CACHE_OC_ENTRY_PHYS_MASK 0x0fe0
#define CACHE_IC_ENTRY_MASK      0x1fe0
#define CACHE_IC_NUM_ENTRIES     256
#define CACHE_OC_NUM_ENTRIES     512
#define CACHE_NUM_WAYS           2

static void __init
detect_cpu_and_cache_system(void)
{
#ifdef CONFIG_CPU_SUBTYPE_ST40
        cpu_data->type = CPU_ST40;
#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751)
        cpu_data->type = CPU_SH7750;
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
        cpu_data->type = CPU_SH4202;
#else
#error Unknown SH4 CPU type
#endif
}

void __init cache_init(void)
{
        unsigned long ccr;

        detect_cpu_and_cache_system();

        jump_to_P2();
        ccr = ctrl_inl(CCR);
        if (ccr & CCR_CACHE_ENABLE) {
                /*
                 * XXX: Should check RA here.
                 * If RA was 1, we only need to flush half of the caches.
                 */
                unsigned long addr, data;
#if defined(CONFIG_SH_CACHE_ASSOC)
                unsigned long way;

                /* Write back every dirty line in each way of the 2-way OC. */
                for (way = 0; way < CACHE_NUM_WAYS; ++way) {
                        unsigned long waybit = way << CACHE_OC_WAY_SHIFT;

                        for (addr = CACHE_OC_ADDRESS_ARRAY + waybit;
                             addr < (CACHE_OC_ADDRESS_ARRAY + waybit +
                                     (CACHE_OC_NUM_ENTRIES << CACHE_OC_ENTRY_SHIFT));
                             addr += (1 << CACHE_OC_ENTRY_SHIFT)) {
                                data = ctrl_inl(addr);
                                if ((data & (CACHE_UPDATED|CACHE_VALID))
                                    == (CACHE_UPDATED|CACHE_VALID))
                                        ctrl_outl(data & ~CACHE_UPDATED, addr);
                        }
                }
#else
                for (addr = CACHE_OC_ADDRESS_ARRAY;
                     addr < (CACHE_OC_ADDRESS_ARRAY +
                             (CACHE_OC_NUM_ENTRIES << CACHE_OC_ENTRY_SHIFT));
                     addr += (1 << CACHE_OC_ENTRY_SHIFT)) {
                        data = ctrl_inl(addr);
                        if ((data & (CACHE_UPDATED|CACHE_VALID))
                            == (CACHE_UPDATED|CACHE_VALID))
                                ctrl_outl(data & ~CACHE_UPDATED, addr);
                }
#endif
        }

        ctrl_outl(CCR_CACHE_INIT, CCR);
        back_to_P1();
}
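/*
 * Reading aid (not in the original file): with CACHE_OC_ENTRY_SHIFT == 5 a
 * cache line is 32 bytes, so the write-back loop in cache_init() above walks
 * CACHE_OC_NUM_ENTRIES * 32 bytes = 512 * 32 = 16KB of operand cache, and the
 * I-cache is 256 * 32 = 8KB, matching the "8k+16k-byte cache" CCR comment.
 * With CONFIG_SH_CACHE_ASSOC (EMODE) each cache gains a second way (the
 * "16k+32k 2-way" setup), which is why the associative branch repeats the
 * walk once per way, selecting the way via bit CACHE_OC_WAY_SHIFT (14).
 */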
/*
 * SH-4 has a virtually indexed, physically tagged cache.
 */

static struct semaphore p3map_sem[4];

void __init p3_cache_init(void)
{
        /* In ioremap.c */
        extern int remap_area_pages(unsigned long address, unsigned long phys_addr,
                                    unsigned long size, unsigned long flags);

        if (remap_area_pages(P3SEG, 0, PAGE_SIZE*4, _PAGE_CACHABLE))
                panic("%s failed.", __FUNCTION__);

        sema_init(&p3map_sem[0], 1);
        sema_init(&p3map_sem[1], 1);
        sema_init(&p3map_sem[2], 1);
        sema_init(&p3map_sem[3], 1);
}

/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE:  Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);
        for (v = begin; v < end; v += L1_CACHE_BYTES) {
                asm volatile("ocbwb %0"
                             : /* no output */
                             : "m" (__m(v)));
        }
}

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE:  Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);
        for (v = begin; v < end; v += L1_CACHE_BYTES) {
                asm volatile("ocbp %0"
                             : /* no output */
                             : "m" (__m(v)));
        }
}

/*
 * Invalidate only; no write-back.
 */
void __flush_invalidate_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);
        for (v = begin; v < end; v += L1_CACHE_BYTES) {
                asm volatile("ocbi %0"
                             : /* no output */
                             : "m" (__m(v)));
        }
}

void __flush_icache_all(void)
{
        unsigned long flags;

        save_and_cli(flags);
        jump_to_P2();
        ctrl_outl(CCR_CACHE_VAL|CCR_CACHE_ICI, CCR);
        back_to_P1();
        restore_flags(flags);
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and the routine for the a.out format.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
        flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for the signal trampoline.
 */
void flush_cache_sigtramp(unsigned long addr)
{
        unsigned long v, index;
        unsigned long flags;

        v = addr & ~(L1_CACHE_BYTES-1);
        asm volatile("ocbwb %0"
                     : /* no output */
                     : "m" (__m(v)));

        index = CACHE_IC_ADDRESS_ARRAY | (v & CACHE_IC_ENTRY_MASK);

        save_and_cli(flags);
        jump_to_P2();
        ctrl_outl(0, index);    /* Clear out Valid-bit */
#if defined(CONFIG_SH_CACHE_ASSOC)
        /* Must invalidate both ways for the associative cache */
        ctrl_outl(0, index | (1 << CACHE_IC_WAY_SHIFT));
#endif
        back_to_P1();
        restore_flags(flags);
}

static inline void flush_cache_4096(unsigned long start, unsigned long phys)
{
        unsigned long flags;
        extern void __flush_cache_4096(unsigned long addr, unsigned long phys,
                                       unsigned long exec_offset);

#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_ST40) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
        if (start >= CACHE_OC_ADDRESS_ARRAY) {
                /*
                 * SH7751, ST40 and SH4-202 have no restriction on where the
                 * cache address arrays may be accessed from
                 * (while SH7750 must do it from the P2 area).
                 */
                __flush_cache_4096(start | CACHE_ASSOC, phys | 0x80000000, 0);
        } else
#endif
        {
                save_and_cli(flags);
                __flush_cache_4096(start | CACHE_ASSOC, phys | 0x80000000, 0x20000000);
                restore_flags(flags);
        }
}
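/*
 * Usage sketch (not part of the original file): how the __flush_*_region()
 * primitives defined earlier in this file are typically paired with DMA.
 * The function and parameter names below are hypothetical illustrations;
 * only the __flush_wback_region()/__flush_invalidate_region() calls are real.
 */
#if 0
static void example_dma_sync(void *buf, int len, int cpu_to_device)
{
        if (cpu_to_device)
                /* CPU filled buf: write dirty lines back so the device sees them */
                __flush_wback_region(buf, len);
        else
                /* Device will fill buf: drop cached lines so the CPU re-reads memory
                 * (a conservative caller could use __flush_purge_region() instead) */
                __flush_invalidate_region(buf, len);
}
#endif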
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
        if (test_bit(PG_mapped, &page->flags)) {
                unsigned long phys = PHYSADDR(page_address(page));

                /* Loop all the D-cache */
                flush_cache_4096(CACHE_OC_ADDRESS_ARRAY,          phys);
                flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys);
                flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys);
                flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys);
        }
}

static inline void flush_icache_all(void)
{
        unsigned long flags;

        save_and_cli(flags);
        jump_to_P2();

        /* Flush I-cache */
        ctrl_outl(CCR_CACHE_VAL|CCR_CACHE_ICI, CCR);

        back_to_P1();
        restore_flags(flags);
}

void flush_cache_all(void)
{
        extern void __flush_dcache_all(void);

        __flush_dcache_all();
        flush_icache_all();
}
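/*
 * Note on flush_dcache_page() above (added reading aid): the operand cache is
 * virtually indexed, and in the default 16KB configuration it spans four 4KB
 * pages of index space, so a physical page may be cached at any of four
 * "colours".  The four flush_cache_4096() calls at offsets 0x0000, 0x1000,
 * 0x2000 and 0x3000 cover every possible alias position of the page.
 */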