pgtable.h
来自「Linux Kernel 2.6.9 for OMAP1710」· C头文件 代码 · 共 564 行 · 第 1/2 页
H
564 行
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))

/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here as the protection value contains
 * the memory attribute bits, dirty bits, and various other bits as well.
 */
#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)

/*
 * Macro to mark a page protection value as "write-combining".  Note that
 * "protection" is really a misnomer here as the protection value contains
 * the memory attribute bits, dirty bits, and various other bits as well.
 * Accesses through a write-combining translation bypass the caches, but do
 * allow for consecutive writes to be combined into single (but larger)
 * write transactions.
 */
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)

/*
 * Compute the page-directory index for ADDRESS: the 3 region bits (61..63)
 * are placed above the level-1 index bits, so each region gets its own
 * slice of the (single) page directory.
 */
static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;		/* region number: bits 61..63 */
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}

/*
 * The offset in the 1-level directory is given by the 3 region bits
 * (61..63) and the seven level-1 bits (33-39).
 */
static inline pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/*
 * In the kernel's mapped region we have a full 43 bit space available and
 * completely ignore the region number (since we know it's in region number 5).
 */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/*
 * Look up a pgd entry in the gate area.  On IA-64, the gate-area resides
 * in the kernel-mapped segment, hence we use pgd_offset_k() here.
 */
#define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)

/* Find an entry in the second-level page table.  */
#define pmd_offset(dir,addr) \
	((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/*
 * Find an entry in the third-level page table.
 * This looks more complicated than it should be because some platforms place
 * page tables in high memory.
 */
#define pte_index(addr)			(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr)	pte_offset_map(dir, addr)
/* pte_offset_map() above does no mapping, so unmapping is a no-op here: */
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

/* atomic versions of some of the PTE manipulations: */

/*
 * Test and clear the Accessed ("young") bit in *PTEP; returns non-zero iff
 * the bit was set.  On SMP the bit is cleared atomically so a concurrent
 * update cannot be lost; on UP a plain read-modify-write suffices.
 */
static inline int
ptep_test_and_clear_young (pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte(ptep, pte_mkold(pte));
	return 1;
#endif
}

/*
 * Test and clear the Dirty bit in *PTEP; returns non-zero iff the bit was
 * set.  Same SMP/UP split as ptep_test_and_clear_young().
 */
static inline int
ptep_test_and_clear_dirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte(ptep, pte_mkclean(pte));
	return 1;
#endif
}

/*
 * Atomically read and clear *PTEP, returning the old PTE.  On SMP this uses
 * xchg so no concurrent update of the entry can be lost.
 */
static inline pte_t
ptep_get_and_clear (pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(ptep);
	return pte;
#endif
}

/*
 * Remove write permission from *PTEP.  On SMP a cmpxchg loop is used so
 * that concurrent updates of other bits in the entry are not lost.
 */
static inline void
ptep_set_wrprotect (pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;

	set_pte(ptep, pte_wrprotect(old_pte));
#endif
}

/*
 * Set the Dirty bit in *PTEP.  On SMP this is a single atomic set_bit.
 */
static inline void
ptep_mkdirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
	set_bit(_PAGE_D_BIT, ptep);
#else
	pte_t old_pte = *ptep;

	set_pte(ptep, pte_mkdirty(old_pte));
#endif
}

/* Two PTEs are the same iff their raw values are equal. */
static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 * bits in the swap-type field of the swap pte.
 * It would be nice to enforce that, but we can't easily include <linux/swap.h>
 * here.  (Of course, better still would be to define MAX_SWAPFILES_SHIFT
 * here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be zero)
 *	bits  2- 8: swap-type
 *	bits  9-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 *
 * Format of file pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be one)
 *	bits  2-62: file_offset/PAGE_SIZE
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)		(((entry).val >> 2) & 0x7f)
/* shift left first to drop bit 63 (_PAGE_PROTNONE), then right to bit 0: */
#define __swp_offset(entry)		(((entry).val << 1) >> 10)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS		61
#define pte_to_pgoff(pte)		((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off)		((pte_t) { ((off) << 2) | _PAGE_FILE })

#define io_remap_page_range remap_page_range	/* XXX is this right? */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
struct mmu_gather;
extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
				  struct vm_area_struct * prev,
				  unsigned long start, unsigned long end);
#endif

/*
 * IA-64 doesn't have any external MMU info: the page tables contain all the
 * necessary information.  However, we use this routine to take care of any
 * (delayed) i-cache flushing that may be necessary.
 */
extern void update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)	\
do {											\
	if (__safely_writable) {							\
		set_pte(__ptep, __entry);						\
		flush_tlb_page(__vma, __addr);						\
	}										\
} while (0)
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)	\
	ptep_establish(__vma, __addr, __ptep, __entry)
#endif

# ifdef CONFIG_VIRTUAL_MEM_MAP
  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
#  define __HAVE_ARCH_MEMMAP_INIT
  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */

# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)

/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

/* These tell get_user_pages() that the first gate page is accessible from user-level.  */
#define FIXADDR_USER_START	GATE_ADDR
#define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)

/* Architecture-specific PTE helpers defined above replace the generic ones: */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE

#include <asm-generic/pgtable.h>

#endif /* _ASM_IA64_PGTABLE_H */
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?