pgtable.h
来自「Linux Kernel 2.6.9 · arch PA-RISC (include/asm-parisc/pgtable.h)」· C头文件 代码 · 共 523 行 · 第 1/2 页
H
523 行
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)#define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)#define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)#ifdef CONFIG_64BIT/* The first entry of the permanent pmd is not there if it contains * the gateway marker */#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)#else#define pmd_none(x) (!pmd_val(x))#endif#define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID))#define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)static inline void pmd_clear(pmd_t *pmd) {#ifdef CONFIG_64BIT if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) /* This is the entry pointing to the permanent pmd * attached to the pgd; cannot clear it */ __pmd_val_set(*pmd, PxD_FLAG_ATTACHED); else#endif __pmd_val_set(*pmd, 0);}#if PT_NLEVELS == 3#define pgd_page(pgd) ((unsigned long) __va(pgd_address(pgd)))/* For 64 bit we have three level tables */#define pgd_none(x) (!pgd_val(x))#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID))#define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT)static inline void pgd_clear(pgd_t *pgd) {#ifdef CONFIG_64BIT if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED) /* This is the permanent pmd attached to the pgd; cannot * free it */ return;#endif __pgd_val_set(*pgd, 0);}#else/* * The "pgd_xxx()" functions here are trivial for a folded two-level * setup: the pgd is never bad, and a pmd always exists (as it's folded * into the pgd entry) */extern inline int pgd_none(pgd_t pgd) { return 0; }extern inline int pgd_bad(pgd_t pgd) { return 0; }extern inline int pgd_present(pgd_t pgd) { return 1; }extern inline void pgd_clear(pgd_t * pgdp) { }#endif/* * The following only work if pte_present() is true. * Undefined behaviour if not.. 
*/extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }extern inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; }extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_READ; return pte; }extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; return pte; }extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_READ; return pte; }extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }/* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. */#define __mk_pte(addr,pgprot) \({ \ pte_t __pte; \ \ pte_val(__pte) = ((addr)+pgprot_val(pgprot)); \ \ __pte; \})#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot){ pte_t pte; pte_val(pte) = (pfn << PAGE_SHIFT) | pgprot_val(pgprot); return pte;}/* This takes a physical page address that is used by the remapping functions */#define mk_pte_phys(physpage, pgprot) \({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot){ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }/* Permanent address of a page. 
On parisc we don't have highmem. */#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_address(pmd)))#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))#define pgd_index(address) ((address) >> PGDIR_SHIFT)/* to find an entry in a page-table-directory */#define pgd_offset(mm, address) \((mm)->pgd + ((address) >> PGDIR_SHIFT))/* to find an entry in a kernel page-table-directory */#define pgd_offset_k(address) pgd_offset(&init_mm, address)/* Find an entry in the second-level page table.. */#if PT_NLEVELS == 3#define pmd_offset(dir,address) \((pmd_t *) pgd_page(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))#else#define pmd_offset(dir,addr) ((pmd_t *) dir)#endif/* Find an entry in the third-level page table.. */ #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))#define pte_offset_kernel(pmd, address) \ ((pte_t *) pmd_page_kernel(*(pmd)) + pte_index(address))#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)#define pte_unmap(pte) do { } while (0)#define pte_unmap_nested(pte) do { } while (0)#define pte_unmap(pte) do { } while (0)#define pte_unmap_nested(pte) do { } while (0)extern void paging_init (void);/* Used for deferring calls to flush_dcache_page() */#define PG_dcache_dirty PG_arch_1struct vm_area_struct; /* forward declaration (include/linux/mm.h) */extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);/* Encode and de-code a swap entry */#define __swp_type(x) ((x).val & 0x1f)#define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \ (((x).val >> 8) & ~0x7) )#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \ ((offset & 0x7) << 6) | \ ((offset & ~0x7) << 8) })#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })#define __swp_entry_to_pte(x) 
((pte_t) { (x).val })static inline int ptep_test_and_clear_young(pte_t *ptep){#ifdef CONFIG_SMP if (!pte_young(*ptep)) return 0; return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));#else pte_t pte = *ptep; if (!pte_young(pte)) return 0; set_pte(ptep, pte_mkold(pte)); return 1;#endif}static inline int ptep_test_and_clear_dirty(pte_t *ptep){#ifdef CONFIG_SMP if (!pte_dirty(*ptep)) return 0; return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));#else pte_t pte = *ptep; if (!pte_dirty(pte)) return 0; set_pte(ptep, pte_mkclean(pte)); return 1;#endif}extern spinlock_t pa_dbit_lock;static inline pte_t ptep_get_and_clear(pte_t *ptep){ pte_t old_pte; pte_t pte; spin_lock(&pa_dbit_lock); pte = old_pte = *ptep; pte_val(pte) &= ~_PAGE_PRESENT; pte_val(pte) |= _PAGE_FLUSH; set_pte(ptep,pte); spin_unlock(&pa_dbit_lock); return old_pte;}static inline void ptep_set_wrprotect(pte_t *ptep){#ifdef CONFIG_SMP unsigned long new, old; do { old = pte_val(*ptep); new = pte_val(pte_wrprotect(__pte (old))); } while (cmpxchg((unsigned long *) ptep, old, new) != old);#else pte_t old_pte = *ptep; set_pte(ptep, pte_wrprotect(old_pte));#endif}static inline void ptep_mkdirty(pte_t *ptep){#ifdef CONFIG_SMP set_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));#else pte_t old_pte = *ptep; set_pte(ptep, pte_mkdirty(old_pte));#endif}#define pte_same(A,B) (pte_val(A) == pte_val(B))#endif /* !__ASSEMBLY__ */#define io_remap_page_range remap_page_range/* We provide our own get_unmapped_area to provide cache coherency */#define HAVE_ARCH_UNMAPPED_AREA#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY#define __HAVE_ARCH_PTEP_GET_AND_CLEAR#define __HAVE_ARCH_PTEP_SET_WRPROTECT#define __HAVE_ARCH_PTEP_MKDIRTY#define __HAVE_ARCH_PTE_SAME#include <asm-generic/pgtable.h>#endif /* _PARISC_PGTABLE_H */
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?