cacheops-vipt.c

来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 276 行

C
276
字号
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *  Fixes for the VIPT caches by ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/pagemap.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "cacheops-vipt.h"

#if SHMLBA > 16384
#error Unsupported
#endif

/*
 * Kernel virtual window used to create temporary aliases of physical
 * pages, one slot per cache colour (SHMLBA <= 16K, checked above).
 */
#define map_address	(0xffff4000)
#define map_pgprot	PAGE_KERNEL

static pte_t *map_pte;
static spinlock_t cacheops_lock = SPIN_LOCK_UNLOCKED;
int vipt_cache_aliasing = 0;

/*
 * Return 1 if the page containing user_addr is currently present in
 * vma's page tables, 0 otherwise.  Walks pgd -> pmd -> pte by hand.
 */
static inline int user_page_present(struct vm_area_struct *vma,
				    unsigned long user_addr)
{
	pgd_t	*pgd;
	pmd_t	*pmd;
	pte_t	*pte, entry;
	int	present;

	pgd = pgd_offset(vma->vm_mm, user_addr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pmd = pmd_offset(pgd, user_addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	/*
	 * BUG FIX: pte_offset_map() may kmap the PTE page; it must be
	 * paired with pte_unmap().  The original returned without
	 * unmapping, leaking the atomic kmap on every call.
	 */
	pte = pte_offset_map(pmd, user_addr);
	entry = *pte;
	present = pte_present(entry);
	pte_unmap(pte);

	return present ? 1 : 0;
}

/*
 * Flush the given cache colour of a physical address by mapping the
 * page into the matching colour slot of the kernel alias window and
 * flushing through that alias.
 * (locking done in the calling function)
 */
static inline void __flush_cache_page_colour(unsigned long paddr,
					     unsigned int colour,
					     unsigned long flags)
{
	unsigned long addr;

	set_pte(map_pte + colour, pfn_pte(paddr >> PAGE_SHIFT, map_pgprot));
	addr = map_address + (colour << PAGE_SHIFT);
	flush_tlb_kernel_page(addr);
	__cpuc_flush_user_range(addr, addr + PAGE_SIZE, flags);
}

void flush_cache_page_colour(unsigned long paddr, unsigned int colour)
{
	/* BUG FIX: spin_lock() takes a spinlock_t *; the original passed
	 * the lock by value (spin_lock(cacheops_lock)). */
	spin_lock(&cacheops_lock);
	__flush_cache_page_colour(paddr & PAGE_MASK, colour, 0);
	spin_unlock(&cacheops_lock);
}

/*
 * Flush a user page that may not be mapped in the current context.
 * If it is mapped and we are running in its mm, flush it directly;
 * otherwise flush via a kernel alias of the matching cache colour.
 */
static void flush_unmapped_cache_page(struct vm_area_struct *vma,
				      unsigned long vaddr, unsigned long paddr)
{
	vaddr &= PAGE_MASK;

	if (current->active_mm == vma->vm_mm && user_page_present(vma, vaddr))
		__cpuc_flush_user_range(vaddr, vaddr + PAGE_SIZE,
					vma->vm_flags);
	else {
		/* BUG FIX: pass the lock's address, not the lock itself. */
		spin_lock(&cacheops_lock);
		__flush_cache_page_colour(paddr & PAGE_MASK,
					  DCACHE_COLOUR(vaddr),
					  vma->vm_flags);
		spin_unlock(&cacheops_lock);
	}
}

void flush_cache_mm(struct mm_struct *mm)
{
	/* Nothing to do on a non-aliasing VIPT cache. */
	if (vipt_cache_aliasing)
		__cpuc_flush_user_all();
}

/*
 * Only flush the range if the corresponding pages are present.
 * For a VIPT cache, it is assumed that the TLB is coherent.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	unsigned long addr;

	if (current->active_mm != vma->vm_mm)
		return;

	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (addr = start; addr < end; addr += PAGE_SIZE)
		if (user_page_present(vma, addr))
			__cpuc_flush_user_range(addr, addr + PAGE_SIZE,
						vma->vm_flags);
}

/*
 * Only flush the range if the corresponding pages are present.
 * For a VIPT cache, it is assumed that the TLB is coherent.
 */
void flush_cache_user_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	unsigned long addr;

	if (current->active_mm != vma->vm_mm)
		return;

	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (addr = start; addr < end; addr += PAGE_SIZE)
		if (user_page_present(vma, addr))
			__cpuc_coherent_user_range(addr,
					PAGE_ALIGN(addr + PAGE_SIZE));
}

/*
 * Only flush the page if it is present. Don't worry about unmapped pages
 * since they are flushed in update_mmu_cache() when faulted in.
 * For a VIPT cache, it is assumed that the TLB is coherent.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
{
	if (current->active_mm != vma->vm_mm)
		return;

	user_addr &= PAGE_MASK;
	if (user_page_present(vma, user_addr))
		__cpuc_flush_user_range(user_addr, user_addr + PAGE_SIZE,
					vma->vm_flags);
}

static inline void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt = NULL;
	struct prio_tree_iter iter;
	unsigned long offset, paddr, vaddr;
	pgoff_t pgoff;

	__cpuc_flush_dcache_page(page_address(page));

	if (!vipt_cache_aliasing)
		return;

	if (!mapping)
		return;

	/*
	 * With a VIPT cache, we need to also write back
	 * and invalidate any user data if there are cache aliases.
	 * Flushing one mapping is enough since arch_get_unmapped_area
	 * maps pages with the same colour
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Skip VMAs that cannot be shared.
		 *
		 * BUG FIX: the original read "!mpnt->vm_flags & VM_MAYSHARE";
		 * unary ! binds tighter than &, so the test was always false
		 * and unshareable VMAs were never skipped.  Parenthesised to
		 * test the VM_MAYSHARE bit as intended.
		 */
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		paddr = page_to_phys(page);
		vaddr = mpnt->vm_start + offset;
		if (CACHE_ALIASES(paddr, vaddr))
			flush_unmapped_cache_page(mpnt, vaddr, paddr);
		/* One mapping is enough (same colour), stop here. */
		break;
	}
	flush_dcache_mmap_unlock(mapping);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/*
	 * Defer the flush for pagecache pages with no user mappings;
	 * update_mmu_cache() will pick up PG_dcache_dirty when the page
	 * is faulted in.
	 */
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(page);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernels virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the page_table_lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		if (dirty)
			__cpuc_flush_dcache_page(page_address(page));
	}
}

void __init check_writebuffer_bugs(void)
{
	/* no checks for VIPT caches */
}

int __init cacheops_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	unsigned int cache_type;

	/*
	 * BUG FIX: the original took cacheops_lock (by value, which is
	 * itself wrong) around pmd_alloc()/pte_alloc_kernel(); those may
	 * sleep, and sleeping with a spinlock held is illegal.  Initcalls
	 * run single-threaded before any flush path can race with us, so
	 * no locking is needed here at all.
	 */

	/* Cache Type register: detect whether the D-cache can alias. */
	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_type));
	vipt_cache_aliasing = (cache_type | cache_type >> 12) & (1 << 11);

	/* Set up the PTEs backing the kernel alias window. */
	pgd = pgd_offset_k(map_address);
	pmd = pmd_alloc(&init_mm, pgd, map_address);
	if (!pmd)
		BUG();
	map_pte = pte_alloc_kernel(&init_mm, pmd, map_address);
	if (!map_pte)
		BUG();

	return 0;
}

__initcall(cacheops_init);

EXPORT_SYMBOL(flush_dcache_page);
EXPORT_SYMBOL(flush_cache_page);
EXPORT_SYMBOL(flush_cache_range);
EXPORT_SYMBOL(flush_cache_user_range);

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?