pgtable.h
来自「Linux Kernel 2.6.9 · include/asm-ppc64/pgtable.h (PPC64)」· C头文件 代码 · 共 561 行 · 第 1/2 页
H
561 行
/*
 * PTE bit accessors: each returns a modified copy of the PTE value
 * passed in; none of these write to the page table itself.
 */
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/* Atomic PTE updates */
/*
 * Atomically clear the bits in 'clr' from the PTE at *p and return the
 * previous PTE value.  The ldarx/stdcx. reservation pair retries until
 * the store succeeds; while _PAGE_BUSY is set in the PTE (it is being
 * manipulated elsewhere) the loop spins at label 1 until it clears.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
	: "cc" );

	return old;
}

/* PTE updating functions, this function puts the PTE in the
 * batch, doesn't actually triggers the hash flush immediately,
 * you need to call flush_tlb_pending() to do that.
 */
extern void hpte_update(pte_t *ptep, unsigned long pte, int wrprot);

/*
 * Clear _PAGE_ACCESSED and report whether it was set.  If the PTE had a
 * hash-table entry (_PAGE_HASHPTE), queue its update via hpte_update()
 * and flush the pending batch immediately.
 */
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	unsigned long old;

	/* Fast path: nothing to clear and no hash entry to maintain. */
	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_ACCESSED);
	if (old & _PAGE_HASHPTE) {
		hpte_update(ptep, old, 0);
		flush_tlb_pending();
	}
	return (old & _PAGE_ACCESSED) != 0;
}

/*
 * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
 * moment we always flush but we need to fix hpte_update and test if the
 * optimisation is worth it.
 */
/*
 * Clear _PAGE_DIRTY and report whether it was set.  A hash-table update
 * is queued (hpte_update) but, unlike the "young" variant, not flushed
 * here.
 */
static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_DIRTY);
	if (old & _PAGE_HASHPTE)
		hpte_update(ptep, old, 0);
	return (old & _PAGE_DIRTY) != 0;
}

/* Atomically clear _PAGE_RW, write-protecting the page. */
static inline void ptep_set_wrprotect(pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;
	old = pte_update(ptep, _PAGE_RW);
	if (old & _PAGE_HASHPTE)
		hpte_update(ptep, old, 0);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
/* No explicit flush_tlb_page() here: ptep_test_and_clear_young() already
 * calls flush_tlb_pending() when a hash entry existed. */
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = ptep_test_and_clear_young(__ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty = ptep_test_and_clear_dirty(__ptep);		\
	flush_tlb_page(__vma, __address);				\
	__dirty;							\
})

/* Atomically read and zero a PTE, returning the previous value. */
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(ptep, old, 0);
	return __pte(old);
}

/* Zero a PTE, queueing a hash-table update if it had a hash entry. */
static inline void pte_clear(pte_t * ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(ptep, old, 0);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
/*
 * Store a linux PTE into the linux page table.  If a valid PTE is
 * already present it is cleared first and the pending hash flush is
 * completed before the new value goes in.  The hash-management bits
 * (_PAGE_HPTEFLAGS) are always stripped from the stored value.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		pte_clear(ptep);
		flush_tlb_pending();
	}
	/* NOTE(review): masking outside __pte() only compiles while pte_t
	 * is a plain integer type; under STRICT_MM_TYPECHECKS this would
	 * need to read __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS) -- confirm. */
	*ptep = __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS;
}

/* Set the dirty and/or accessed bits atomically in a linux PTE, this
 * function doesn't need to flush the hash entry
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * OR the DIRTY/ACCESSED/RW bits of 'entry' into *ptep with a
 * ldarx/stdcx. retry loop, spinning while _PAGE_BUSY is set.  Bits are
 * only ever added here, never removed.
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	or	%0,%3,%0\n\
	stdcx.	%0,0,%4\n\
	bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do { \
		__ptep_set_access_flags(__ptep, __entry, __dirty); \
		flush_tlb_page_nohash(__vma, __address); \
	} while(0)

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

/* PTEs are equal if they differ only in the hash-management bits. */
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

extern unsigned long ioremap_bot, ioremap_base;

/* Split of the PGD between user and kernel halves of the address space. */
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

/* Diagnostics for corrupted page-table entries. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[1024];
extern pgd_t ioremap_dir[1024];

extern void paging_init(void);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry: the type occupies bits 1-6 of the
 * arch-independent swp_entry_t value and the offset starts at bit 8. */
#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
#define __swp_offset(entry) ((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> PTE_SHIFT })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_SHIFT })
/* Non-linear file mappings: file offset is stored above PTE_SHIFT,
 * tagged with _PAGE_FILE. */
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 * The only use is in fs/ncpfs/dir.c
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range

void pgtable_cache_init(void);

/* Hash-table backend initialisers, one per platform flavour. */
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);

/* imalloc region types */
#define IM_REGION_UNUSED	0x1
#define IM_REGION_SUBSET	0x2
#define IM_REGION_EXISTS	0x4
#define IM_REGION_OVERLAP	0x8
#define IM_REGION_SUPERSET	0x10

extern struct vm_struct * im_get_free_area(unsigned long size);
extern struct vm_struct * im_get_area(unsigned long v_addr,
				unsigned long size, int region_type);
unsigned long im_free(void *addr);

extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				unsigned long va, unsigned long prpn,
				int secondary, unsigned long hpteflags,
				int bolted, int large);

extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
				unsigned long prpn, int secondary,
				unsigned long hpteflags, int bolted, int large);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory. If not found, it returns zero.
*/static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea){ pgd_t *pg; pmd_t *pm; pte_t *pt = NULL; pte_t pte; pg = pgdir + pgd_index(ea); if (!pgd_none(*pg)) { pm = pmd_offset(pg, ea); if (pmd_present(*pm)) { pt = pte_offset_kernel(pm, ea); pte = *pt; if (!pte_present(pte)) pt = NULL; } } return pt;}#endif /* __ASSEMBLY__ */#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY#define __HAVE_ARCH_PTEP_GET_AND_CLEAR#define __HAVE_ARCH_PTEP_SET_WRPROTECT#define __HAVE_ARCH_PTEP_MKDIRTY#define __HAVE_ARCH_PTE_SAME#include <asm-generic/pgtable.h>#endif /* _PPC64_PGTABLE_H */
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?