📄 pgtable.h
#define pmd_page(pmd)           ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))

#define pgd_none(pgd)           (!pgd_val(pgd))
#define pgd_bad(pgd)            (!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd)        (pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)         (pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd)           ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))

/*
 * The following have defined behavior only if pte_present() is true.
 */
#define pte_read(pte)           (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte)          ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)           ((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)          ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)          ((pte_val(pte) & _PAGE_A) != 0)

/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)      (__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)        (__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkexec(pte)         (__pte(pte_val(pte) | _PAGE_AR_RX))
#define pte_mkold(pte)          (__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)        (__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)        (__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)        (__pte(pte_val(pte) | _PAGE_D))
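/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * the modifier macros above take and return a pte *value*, so they compose
 * freely before the result is written back with set_pte().  A hypothetical
 * helper that downgrades a pte to a clean, read-only mapping -- roughly the
 * shape of a copy-on-write setup -- could look like this:
 */
static inline pte_t
pte_make_readonly_clean (pte_t pte)
{
        /* clearing _PAGE_AR_RW turns AR_RWX into AR_RX and AR_RW into AR_R */
        pte = pte_wrprotect(pte);
        /* drop the dirty bit so the next write faults and redirties the page */
        pte = pte_mkclean(pte);
        return pte;
}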
/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached(prot)          __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)

/*
 * Macro to mark a page protection value as "write-combining".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.  Accesses through a write-combining translation bypass
 * the caches, but do allow consecutive writes to be combined into
 * single (but larger) write transactions.
 */
#ifdef CONFIG_MCKINLEY_A0_SPECIFIC
# define pgprot_writecombine(prot)      prot
#else
# define pgprot_writecombine(prot)      __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
#endif

/*
 * Return the region index for virtual address ADDRESS.
 */
static inline unsigned long
rgn_index (unsigned long address)
{
        ia64_va a;

        a.l = address;
        return a.f.reg;
}

/*
 * Return the region offset for virtual address ADDRESS.
 */
static inline unsigned long
rgn_offset (unsigned long address)
{
        ia64_va a;

        a.l = address;
        return a.f.off;
}

static inline unsigned long
pgd_index (unsigned long address)
{
        unsigned long region = address >> 61;
        unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

        return (region << (PAGE_SHIFT - 6)) | l1index;
}

/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the seven level-1 bits (33-39).  */
static inline pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
        return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we have a full 43 bit space available and
   completely ignore the region number (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
        (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
        ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) \
        ((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

/* atomic versions of some of the PTE manipulations: */
static inline int
ptep_test_and_clear_young (pte_t *ptep)
{
#ifdef CONFIG_SMP
        return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
        pte_t pte = *ptep;
        if (!pte_young(pte))
                return 0;
        set_pte(ptep, pte_mkold(pte));
        return 1;
#endif
}

static inline int
ptep_test_and_clear_dirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
        return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
        pte_t pte = *ptep;
        if (!pte_dirty(pte))
                return 0;
        set_pte(ptep, pte_mkclean(pte));
        return 1;
#endif
}

static inline pte_t
ptep_get_and_clear (pte_t *ptep)
{
#ifdef CONFIG_SMP
        return __pte(xchg((long *) ptep, 0));
#else
        pte_t pte = *ptep;
        pte_clear(ptep);
        return pte;
#endif
}

static inline void
ptep_set_wrprotect (pte_t *ptep)
{
#ifdef CONFIG_SMP
        unsigned long new, old;

        do {
                old = pte_val(*ptep);
                new = pte_val(pte_wrprotect(__pte (old)));
        } while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
        pte_t old_pte = *ptep;

        set_pte(ptep, pte_wrprotect(old_pte));
#endif
}

static inline void
ptep_mkdirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
        set_bit(_PAGE_D_BIT, ptep);
#else
        pte_t old_pte = *ptep;

        set_pte(ptep, pte_mkdirty(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
        return pte_val(a) == pte_val(b);
}

/*
 * Macros to check the type of access that triggered a page fault.
 */
static inline int
is_write_access (int access_type)
{
        return (access_type & 0x2);
}

static inline int
is_exec_access (int access_type)
{
        return (access_type & 0x4);
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

#define SWP_TYPE(entry)                 (((entry).val >> 1) & 0xff)
#define SWP_OFFSET(entry)               (((entry).val << 1) >> 10)
#define SWP_ENTRY(type,offset)          ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 9) })
#define pte_to_swp_entry(pte)           ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)             ((pte_t) { (x).val })

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)          (0)
#define io_remap_page_range remap_page_range    /* XXX is this right? */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT     _PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT     _PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE       (1 << IA64_GRANULE_SHIFT)

/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT    _PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE     (1 << KERNEL_TR_PAGE_SHIFT)
#define KERNEL_TR_PAGE_NUM      ((KERNEL_START - PAGE_OFFSET) / KERNEL_TR_PAGE_SIZE)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

#endif /* _ASM_IA64_PGTABLE_H */
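/*
 * Usage sketch (editorial addition, appended after the original file): a
 * manual three-level walk from an mm and a virtual address down to the pte,
 * using the pgd_offset()/pmd_offset()/pte_offset() macros defined above.
 * The helper name is hypothetical; pmd_none()/pmd_bad() are defined earlier
 * in the full header (not shown in this excerpt), and the caller is assumed
 * to hold whatever locking the page tables require (e.g. page_table_lock).
 */
static inline pte_t *
lookup_pte_sketch (struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd;
        pmd_t *pmd;

        /* level 1: index the pgd by region bits plus level-1 bits */
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;

        /* level 2: pgd_page() gives the virtual address of the pmd table */
        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;

        /* level 3: pmd_page() gives the pte table; index it by PAGE_SHIFT bits */
        return pte_offset(pmd, address);
}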