cache.c
					/* The next line needs some explanation.
					   The virtual tags encode bits [31:13] of
					   the virtual address, bit [12] of the 'tag'
					   being implied by the cache set index. */
					epn = (tag0 & cpu_data->dcache.epn_mask) |
					      ((set & 0x80) << cpu_data->dcache.entry_shift);

					if ((cache_asid == mm_asid) &&
					    (start <= epn) && (epn < end)) {
						/* TODO : could optimise this call by
						   batching multiple adjacent sets
						   together. */
						sh64_dcache_purge_sets(set, 1);
						break; /* Don't waste time inspecting other ways for this set */
					}
				}
			}
		}
#endif
	} else {
		/* 'Small' range */
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long last_page_start;

		aligned_start = start & PAGE_MASK;
		/* 'end' is 1 byte beyond the end of the range */
		last_page_start = (end - 1) & PAGE_MASK;

		eaddr = aligned_start;
		while (eaddr <= last_page_start) {
			sh64_dcache_purge_user_page(mm, eaddr);
			eaddr += PAGE_SIZE;
		}
	}
	return;
}

static void sh64_dcache_wback_current_user_range(unsigned long start, unsigned long end)
{
	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just wback over the range using the natural addresses.  TLB miss
	   handling will be OK (TBC) : the range has just been written to by
	   the signal frame setup code, so the PTEs must exist.

	   Note, if we have CONFIG_PREEMPT and get preempted inside this loop,
	   it doesn't matter, even if the pid->ASID mapping changes whilst
	   we're away.  In that case the cache will have been flushed when the
	   mapping was renewed.  So the writebacks below will be nugatory (and
	   we'll doubtless have to fault the TLB entry/ies in again with the
	   new ASID), but it's a rare case.
	   */
	aligned_start = start & L1_CACHE_ALIGN_MASK;
	addr = aligned_start;
	while (addr < ull_end) {
		asm __volatile__ ("ocbwb %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

#endif /* !CONFIG_DCACHE_DISABLED */

/****************************************************************************/

/* These *MUST* lie in an area of virtual address space that's otherwise
   unused. */
#define UNIQUE_EADDR_START 0xe0000000UL
#define UNIQUE_EADDR_END   0xe8000000UL

static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, unsigned long paddr)
{
	/* Given a physical address paddr, and a user virtual address
	   user_eaddr which will eventually be mapped to it, create a one-off
	   kernel-private eaddr mapped to the same paddr.  This is used for
	   creating special destination pages for copy_user_page and
	   clear_user_page. */

	static unsigned long current_pointer = UNIQUE_EADDR_START;
	unsigned long coloured_pointer;

	if (current_pointer == UNIQUE_EADDR_END) {
		sh64_dcache_purge_all();
		current_pointer = UNIQUE_EADDR_START;
	}

	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
			   (user_eaddr & CACHE_OC_SYN_MASK);

	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);

	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);

	return coloured_pointer;
}
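/* Illustration only (not in the original source): what the colouring
   arithmetic in sh64_make_unique_eaddr() above works out to.  Assume, purely
   for the example, a 4 KiB page and a single synonym bit, i.e.
   CACHE_OC_N_SYNBITS == 1 and CACHE_OC_SYN_MASK == 0x1000 (the real values
   depend on the configured cache geometry):

	current_pointer = 0xe0000000
	user_eaddr      = 0x00403000	(colour bit [12] set)

	coloured_pointer = (0xe0000000 & ~0x1000) | (0x00403000 & 0x1000)
			 = 0xe0001000

   so the one-off kernel mapping lands in the same cache sets as the eventual
   user mapping, and current_pointer then advances by
   PAGE_SIZE << CACHE_OC_N_SYNBITS == 0x2000, i.e. one whole colour group. */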
/****************************************************************************/

static void sh64_copy_user_page_coloured(void *to, void *from, unsigned long address)
{
	void *coloured_to;

	/* Discard any existing cache entries of the wrong colour.  These are
	   present quite often, if the kernel has recently used the page
	   internally, then given it up, then it's been allocated to the user.
	   */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);

	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
	sh64_page_copy(from, coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

static void sh64_clear_user_page_coloured(void *to, unsigned long address)
{
	void *coloured_to;

	/* Discard any existing kernel-originated lines of the wrong colour
	   (as above) */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);

	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
	sh64_page_clear(coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

/****************************************************************************/

/*##########################################################################
			    EXTERNALLY CALLABLE API.
  ##########################################################################*/

/* These functions are described in Documentation/cachetlb.txt.
   Each one of these functions varies in behaviour depending on whether the
   I-cache and/or D-cache are configured out.

   Note that the Linux term 'flush' corresponds to what is termed 'purge' in
   the sh/sh64 jargon for the D-cache, i.e. write back dirty data then
   invalidate the cache lines, and 'invalidate' for the I-cache.
   */

#undef FLUSH_TRACE

void flush_cache_all(void)
{
	/* Invalidate the entire contents of both caches, after writing back
	   to memory any dirty data from the D-cache. */
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}

/****************************************************************************/

void flush_cache_mm(struct mm_struct *mm)
{
	/* Invalidate an entire user-address space from both caches, after
	   writing back dirty data (e.g. for shared mmap etc). */

	/* This could be coded selectively by inspecting all the tags then
	   doing 4*alloco on any set containing a match (as for
	   flush_cache_range), but fork/exit/execve (where this is called
	   from) are expensive anyway. */

	/* Have to do a purge here, despite the comments re I-cache below.
	   There could be odd-coloured dirty data associated with the mm still
	   in the cache - if this gets written out through natural eviction
	   after the kernel has reused the page there will be chaos.
	   */
	sh64_dcache_purge_all();

	/* The mm being torn down won't ever be active again, so any Icache
	   lines tagged with its ASID won't be visible for the rest of the
	   lifetime of this ASID cycle.  Before the ASID gets reused, there
	   will be a flush_cache_all.  Hence we don't need to touch the
	   I-cache.  This is similar to the lack of action needed in
	   flush_tlb_mm - see fault.c. */
}

/****************************************************************************/

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	/* Invalidate (from both caches) the range [start,end) of virtual
	   addresses from the user address space specified by mm, after
	   writing back any dirty data.

	   Note(1), 'end' is 1 byte beyond the end of the range to flush.

	   Note(2), this is called with mm->page_table_lock held.*/

	sh64_dcache_purge_user_range(mm, start, end);
	sh64_icache_inv_user_page_range(mm, start, end);
}
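/* Worked example (illustration only, not in the original source) of the
   half-open [start, end) convention used by the purge/flush routines above,
   assuming PAGE_SIZE == 0x1000: for start = 0x400000, end = 0x402001

	aligned_start   = start & PAGE_MASK     = 0x400000
	last_page_start = (end - 1) & PAGE_MASK = 0x402000

   i.e. three pages are visited (0x400000, 0x401000, 0x402000).  Because 'end'
   is one byte beyond the range, a range ending exactly on a page boundary
   (say end = 0x402000) stops at last_page_start = 0x401000 and does not drag
   in the following page. */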
/****************************************************************************/

void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* Invalidate any entries in either cache for the vma within the user
	   address space vma->vm_mm for the page starting at virtual address
	   'eaddr'.  This seems to be used primarily in breaking COW.  Note,
	   the I-cache must be searched too in case the page in question is
	   both writable and being executed from (e.g. stack trampolines).

	   Note(1), this is called with mm->page_table_lock held.
	   */
	sh64_dcache_purge_virt_page(vma->vm_mm, eaddr);

	if (vma->vm_flags & VM_EXEC) {
		sh64_icache_inv_user_page(vma, eaddr);
	}
}

/****************************************************************************/

#ifndef CONFIG_DCACHE_DISABLED

void copy_user_page(void *to, void *from, unsigned long address, struct page *page)
{
	/* 'from' and 'to' are kernel virtual addresses (within the superpage
	   mapping of the physical RAM).  'address' is the user virtual
	   address where the copy 'to' will be mapped afterwards.  This allows
	   a custom mapping to be used to ensure that the new copy is placed
	   in the right cache sets for the user to see it without having to
	   bounce it out via memory.  Note however : the call to
	   flush_page_to_ram in (generic)/mm/memory.c:(break_cow) undoes all
	   this good work in that one very important case!

	   TBD : can we guarantee that on every call, any cache entries for
	   'from' are in the same colour sets as 'address' also?  i.e. is this
	   always used just to deal with COW?  (I suspect not). */

	/* There are two possibilities here for when the page 'from' was last
	   accessed:
	   * by the kernel : this is OK, no purge required.
	   * by the/a user (e.g. for break_COW) : need to purge.

	   If the potential user mapping at 'address' is the same colour as
	   'from' there is no need to purge any cache lines from the 'from'
	   page mapped into cache sets of colour 'address'.  (The copy will be
	   accessing the page through 'from').
	   */
	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) {
		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
	}

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
		/* No synonym problem on destination */
		sh64_page_copy(from, to);
	} else {
		sh64_copy_user_page_coloured(to, from, address);
	}

	/* Note, don't need to flush 'from' page from the cache again - it's
	   done anyway by the generic code */
}

void clear_user_page(void *to, unsigned long address, struct page *page)
{
	/* 'to' is a kernel virtual address (within the superpage mapping of
	   the physical RAM).  'address' is the user virtual address where the
	   'to' page will be mapped afterwards.  This allows a custom mapping
	   to be used to ensure that the new copy is placed in the right cache
	   sets for the user to see it without having to bounce it out via
	   memory. */

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
		/* No synonym problem on destination */
		sh64_page_clear(to);
	} else {
		sh64_clear_user_page_coloured(to, address);
	}
}

#endif /* !CONFIG_DCACHE_DISABLED */
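/* Illustration only (not in the original source): the synonym check used by
   copy_user_page() and clear_user_page() above.  Two virtual mappings of the
   same physical page can only land in the same operand-cache sets when they
   agree in the synonym bits, so the kernel address 'to' and the future user
   mapping 'address' alias exactly when

	((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0

   Assuming, for the example, CACHE_OC_SYN_MASK == 0x1000:

	address = 0x00403000, to = 0x80005000 : bit [12] set in both,
		XOR & mask == 0      -> same colour, plain sh64_page_copy/clear.
	address = 0x00403000, to = 0x80004000 : bit [12] differs,
		XOR & mask == 0x1000 -> different colour, go via the one-off
		coloured mapping from sh64_make_unique_eaddr().
   */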
/****************************************************************************/

void flush_dcache_page(struct page *page)
{
	sh64_dcache_purge_phy_page(page_to_phys(page));
	wmb();
}

/****************************************************************************/

void flush_icache_range(unsigned long start, unsigned long end)
{
	/* Flush the range [start,end] of kernel virtual address space from
	   the I-cache.  The corresponding range must be purged from the
	   D-cache also because the SH-5 doesn't have cache snooping between
	   the caches.  The addresses will be visible through the superpage
	   mapping, therefore it's guaranteed that there are no cache entries
	   for the range in cache sets of the wrong colour.

	   Primarily used for cohering the I-cache after a module has been
	   loaded. */

	/* We also make sure to purge the same range from the D-cache since
	   flush_page_to_ram() won't be doing this for us! */

	sh64_dcache_purge_kernel_range(start, end);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}

/****************************************************************************/

void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	/* Flush the range of user (defined by vma->vm_mm) address space
	   starting at 'addr' for 'len' bytes from the cache.  The range does
	   not straddle a page boundary, the unique physical page containing
	   the range is 'page'.  This seems to be used mainly for invalidating
	   an address range following a poke into the program text through the
	   ptrace() call from another process (e.g. for BRK instruction
	   insertion). */

	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
	mb();

	if (vma->vm_flags & VM_EXEC) {
		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
	}
}

/*##########################################################################
			ARCH/SH64 PRIVATE CALLABLE API.
  ##########################################################################*/

void flush_cache_sigtramp(unsigned long start, unsigned long end)
{
	/* For the address range [start,end), write back the data from the
	   D-cache and invalidate the corresponding region of the I-cache for
	   the current process.  Used to flush signal trampolines on the stack
	   to make them executable. */

	sh64_dcache_wback_current_user_range(start, end);
	wmb();
	sh64_icache_inv_current_user_range(start, end);
}
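/* For comparison only (not part of the original source): the user-space
   analogue of the write-back + invalidate sequence above.  Portable code
   that generates or patches instructions at run time - the situation
   flush_cache_sigtramp() handles for signal trampolines - typically uses
   GCC's __builtin___clear_cache() over the written range, which on a
   non-snooping part like the SH-5 ends up requiring the same kind of D-cache
   write-back followed by I-cache invalidation.  Sketch, assuming 'buf' is an
   executable buffer that has just been filled with 'len' bytes of code:

	memcpy(buf, generated_code, len);
	__builtin___clear_cache((char *) buf, (char *) buf + len);
	((void (*)(void)) buf)();	// only now safe to execute
   */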