📄 pgtable_32.h
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		(ptep)->pte_low = (entry).pte_low;			\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__changed;							\
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_young(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
					   &(ptep)->pte_low);		\
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
	if (__young)							\
		flush_tlb_page(vma, address);				\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);

	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte;

	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

/*
 * Macro to mark a page protection value as "uncacheable".
 * On processors which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))		\
	 : (prot))
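/*
 * Illustrative sketch, not part of the original header: how generic mm
 * code is expected to drive the accessors above.  The helper name is
 * hypothetical; real callers (fork's copy-on-write setup, for instance)
 * also hold the page table lock and handle non-present entries.
 */
static inline pte_t example_wrprotect_pte(struct mm_struct *mm,
					  unsigned long addr, pte_t *ptep)
{
	/*
	 * Drop the RW bit atomically, let any paravirt backend see the
	 * change via pte_update(), then re-read the entry.
	 */
	ptep_set_wrprotect(mm, addr, ptep);
	return *ptep;
}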
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);
#ifdef CONFIG_X86_PAE
	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	pte.pte_high |= (pgprot_val(newprot) >> 32) &
			(__supported_pte_mask >> 32);
#endif
	return pte;
}

#define pmd_large(pmd) \
	((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *);
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, address) \
	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))

#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd) \
	((unsigned long)__va(pmd_val(pmd) & PAGE_MASK))

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'.  NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address);
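/*
 * Illustrative sketch, not part of the original header: a minimal
 * software walk of the kernel page tables built from the index/offset
 * helpers above.  The function name is hypothetical; it relies on the
 * folded pud/pmd helpers (pud_offset() etc. from the generic headers)
 * and, unlike the real lookup_address(), ignores large (PSE) pmds.
 */
static inline pte_t *example_walk_kernel_pagetable(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);	/* slot in init_mm's pgd */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);		/* folded away on i386 */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}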
/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
#ifdef CONFIG_X86_PAE
extern int set_kernel_exec(unsigned long vaddr, int enable);
#else
static inline int set_kernel_exec(unsigned long vaddr, int enable)
{
	return 0;
}
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)				\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +	\
	 pte_index(address))
#define pte_offset_map_nested(dir, address)			\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +	\
	 pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, vaddr, ptep);	\
	__flush_tlb_one(vaddr);			\
} while (0)

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)

void native_pagetable_setup_start(pgd_t *base);
void native_pagetable_setup_done(pgd_t *base);

#ifndef CONFIG_PARAVIRT
static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* !CONFIG_PARAVIRT */

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

#endif /* _I386_PGTABLE_H */
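A usage note, not part of the header itself: with CONFIG_HIGHPTE the pte pages may live in highmem, so pte_offset_map() takes a kmap_atomic slot (KM_PTE0) that must be released with pte_unmap() before sleeping or returning. A minimal sketch of the pairing, with a hypothetical helper name and the page table lock omitted for brevity:

/*
 * Hypothetical helper: sample one user pte through the mapping pair
 * above.  The _nested variants (KM_PTE1) exist so two pte pages can
 * be mapped at once, as copy_pte_range() needs during fork.
 */
static pte_t example_read_user_pte(pmd_t *pmd, unsigned long address)
{
	pte_t *ptep = pte_offset_map(pmd, address);
	pte_t pte = *ptep;		/* snapshot while mapped */

	pte_unmap(ptep);		/* kunmap_atomic(..., KM_PTE0) */
	return pte;
}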