pgtable.h
From "Linux Kernel 2.6.9 for OMAP1710" · C header · 738 lines · part 1 of 2
 * so to write-protect kernel memory we must turn on user access */
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
#else
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
#endif

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)

#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH)
/* We want the debuggers to be able to set breakpoints anywhere, so
 * don't write protect the kernel text */
#define _PAGE_RAM_TEXT	_PAGE_RAM
#else
#define _PAGE_RAM_TEXT	(_PAGE_KERNEL_RO | _PAGE_HWEXEC)
#endif

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL		__pgprot(_PAGE_RAM)
#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__
/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
 * kernel without large page PMD support */
extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);

/*
 * Conversions between PTE values and page frame numbers.
 */
#define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(ptep)		do { set_pte((ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)
#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
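/*
 * Editor's note: an illustrative sketch, not part of this header, of how
 * the generic VM layer consumes the __Pxxx/__Sxxx macros above.  In
 * 2.6-era kernels, mm/mmap.c builds a 16-entry protection_map[] indexed
 * by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vm_flags:
 *
 *	pgprot_t protection_map[16] = {
 *		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
 *		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 *	};
 *
 * So a private PROT_READ|PROT_WRITE mapping gets __P011 == PAGE_COPY_X
 * here: no _PAGE_RW, so the first write faults and triggers copy-on-write.
 */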
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }

static inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
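/*
 * Editor's note: an illustrative sketch, not part of this header, of the
 * pte_update() primitive above.  The read-modify-write helpers that
 * follow each reduce to a single call; e.g. clearing both the Linux and
 * hardware write-enable bits:
 *
 *	old = pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, 0);
 *
 * The lwarx/stwcx. reservation pair retries until the store succeeds, so
 * the clear-then-set is atomic even against concurrent updates of bits
 * such as _PAGE_ACCESSED or _PAGE_HASHPTE in the same word.
 */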
/*
 * set_pte stores a linux PTE into the linux page table.
 * On machines which use an MMU hash table we avoid changing the
 * _PAGE_HASHPTE bit.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
#if _PAGE_HASHPTE != 0
	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
#else
	*ptep = pte;
#endif
}

extern void flush_hash_one_pte(pte_t *ptep);

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	unsigned long old;
	old = (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE)
		flush_hash_one_pte(ptep);
#endif
	return old != 0;
}

static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

static inline void ptep_set_wrprotect(pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}

static inline void ptep_mkdirty(pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
	pte_update(ptep, 0, bits);
}

#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								  \
		__ptep_set_access_flags(__ptep, __entry, __dirty);	  \
		flush_tlb_page_nohash(__vma, __address);		  \
	} while(0)

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_kernel(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_kernel(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
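/*
 * Editor's note: an illustrative sketch, not part of this header, of a
 * full walk using the helpers above, in the spirit of get_pteptr()
 * declared further down -- find, map, and inspect the PTE for a user
 * address:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	if (!pgd_none(*pgd)) {
 *		pmd_t *pmd = pmd_offset(pgd, addr);
 *		if (pmd_present(*pmd)) {
 *			pte_t *pte = pte_offset_map(pmd, addr);
 *			if (pte_present(*pte))
 *				... examine *pte here ...
 *			pte_unmap(pte);
 *		}
 *	}
 *
 * pte_offset_map() may kmap_atomic() the pte page on highmem
 * configurations, hence the matching pte_unmap().
 */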
/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/* CONFIG_APUS */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk (unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent compilation errors. */
#define	KERNELMAP_FULL_CACHING		0
#define	KERNELMAP_NOCACHE_SER		1
#define	KERNELMAP_NOCACHE_NONSER	2
#define	KERNELMAP_NO_COPYBACK		3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp );

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode (unsigned long address, unsigned long size,
				  unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);

#endif /* !__ASSEMBLY__ */

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _PPC_PGTABLE_H */
#endif /* __KERNEL__ */
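/*
 * Editor's note: a worked example, not part of the original header, of the
 * swap-entry encoding above.  __swp_entry_to_pte() shifts the entry left
 * by 3, placing the 5-bit swap type in PTE bits 3..7 and the offset in
 * bits 8 and up, leaving the low three PTE bits clear -- which is where
 * the flag bits named in the comment above must live for this to be safe.
 * With a hypothetical type 2 and offset 0x1000:
 *
 *	swp_entry_t e = __swp_entry(2, 0x1000);
 *	e.val == (2 | (0x1000 << 5)) == 0x20002
 *	pte_val(__swp_entry_to_pte(e)) == (0x20002 << 3) == 0x100010
 *	__swp_type(__pte_to_swp_entry(__swp_entry_to_pte(e)))   == 2
 *	__swp_offset(__pte_to_swp_entry(__swp_entry_to_pte(e))) == 0x1000
 */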