/*
 * pgtable.h — SuperH (sh) page-table definitions, from the Linux kernel
 * source tree ("include/asm-sh/pgtable.h").  This is page 1 of a 2-page
 * extract (593 lines total); the file's opening guard and the start of
 * the PAGE_KERNEL definition lie above this chunk.
 */
				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)	/* tail of PAGE_KERNEL; its opening lines are above this chunk */

/* Kernel mapping with caching disabled (device/uncached accesses). */
#define PAGE_KERNEL_NOCACHE \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
		 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
		 _PAGE_FLAGS_HARD)

/* Read-only kernel mapping (no _PAGE_RW). */
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | \
				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)

/*
 * Kernel mapping selecting PCC area 5 or 6 by 'slot', OR-ing in extra
 * 'type' bits.  NOTE(review): the _PAGE_PCC_AREA5/6 names suggest the
 * on-chip PCMCIA controller windows — confirm against the PCC driver.
 */
#define PAGE_KERNEL_PCC(slot, type) \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
		 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
		 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
		 (type))

#else /* no mmu */

/* Without an MMU there is no protection hardware: everything is 0. */
#define PAGE_NONE		__pgprot(0)
#define PAGE_SHARED		__pgprot(0)
#define PAGE_COPY		__pgprot(0)
#define PAGE_EXECREAD		__pgprot(0)
#define PAGE_RWX		__pgprot(0)
#define PAGE_READONLY		__pgprot(0)
#define PAGE_WRITEONLY		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define PAGE_KERNEL_NOCACHE	__pgprot(0)
#define PAGE_KERNEL_RO		__pgprot(0)
#define PAGE_KERNEL_PCC(slot, type) \
				__pgprot(0)

#endif

#endif /* __ASSEMBLY__ */

/*
 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
 * protection for execute, and considers it the same as a read. Also, write
 * permission implies read permission. This is the closest we can get..
 *
 * SH-X2 (SH7785) and later parts take this to the opposite end of the extreme,
 * not only supporting separate execute, read, and write bits, but having
 * completely separate permission bits for user and kernel space.
 */

/* mmap protection-to-pgprot tables: __Pxxx = private, __Sxxx = shared. */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXECREAD
#define __P101	PAGE_EXECREAD
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_WRITEONLY
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXECREAD
#define __S101	PAGE_EXECREAD
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

#ifndef __ASSEMBLY__

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#ifdef CONFIG_X2TLB
/*
 * 64-bit (extended-mode) PTE: store the high word first, fence, then the
 * low word.  pte_present() below tests pte_low, so ordering the low-word
 * store last presumably keeps a half-written entry from looking valid —
 * TODO confirm against the SH-X2 TLB refill path.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
#else
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#endif

/* The mm/address arguments are unused on sh; defer to set_pte(). */
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * (pmds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

/* Page-frame-number <-> pte/pmd conversions (pfn lives above PAGE_SHIFT). */
#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
#define pfn_pte(pfn, prot) \
	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) \
	__pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(x)	(!pte_val(x))
/* PROT_NONE pages keep _PAGE_PROTNONE set, so they still count as present. */
#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x))
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
/* A pmd is bad if anything is set outside the page-aligned pointer. */
#define pmd_bad(x)	(pmd_val(x) & ~PAGE_MASK)

#define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
#define pte_page(x)	pfn_to_page(pte_pfn(x))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_not_present(pte)	(!((pte).pte_low & _PAGE_PRESENT))
#define pte_dirty(pte)		((pte).pte_low & _PAGE_DIRTY)
#define pte_young(pte)		((pte).pte_low & _PAGE_ACCESSED)
#define pte_file(pte)		((pte).pte_low & _PAGE_FILE)

/* Writability lives in the extended (high) word on SH-X2, low word otherwise. */
#ifdef CONFIG_X2TLB
#define pte_write(pte)		((pte).pte_high & _PAGE_EXT_USER_WRITE)
#else
#define pte_write(pte)		((pte).pte_low & _PAGE_RW)
#endif

/*
 * Generator for the pte_<fn>() bit-twiddling helpers: 'h' picks the
 * pte_low/pte_high word, 'op' is the compound assignment applied to it.
 */
#define PTE_BIT_FUNC(h,fn,op)	\
static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }

#ifdef CONFIG_X2TLB
/*
 * We cheat a bit in the SH-X2 TLB case. As the permission bits are
 * individually toggled (and user permissions are entirely decoupled from
 * kernel permissions), we attempt to couple them a bit more sanely here.
 */
/* mkwrite grants both user and kernel write; wrprotect revokes only user. */
PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
#else
PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
#endif

/* Dirty/accessed bookkeeping always lives in the low word. */
PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);

/*
 * Macro and implementation to make a page protection as uncachable.
 */
#define pgprot_writecombine(prot) \
	__pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)

/* No separate non-cached mode here: both just clear _PAGE_CACHABLE. */
#define pgprot_noncached	 pgprot_writecombine

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * Replace a pte's protection bits with 'newprot', keeping the pfn and
 * the bits covered by _PAGE_CHG_MASK.  On X2TLB the extended permission
 * bits are carried in the upper 32 bits of the pgprot value.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);

#ifdef CONFIG_X2TLB
	pte.pte_high |= pgprot_val(newprot) >> 32;
#endif

	return pte;
}

/* A pmd holds the (virtual) address of its pte page directly. */
#define pmd_page_vaddr(pmd)	((unsigned long)pmd_val(pmd))
#define pmd_page(pmd)		(virt_to_page(pmd_val(pmd)))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* Find an entry in the third-level page table..
 */
#define pte_index(address)	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
/* No highmem pte pages on sh: map/map_nested are plain kernel lookups. */
#define pte_offset_map(dir, address)		pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address)	pte_offset_kernel(dir, address)
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

/* Diagnostics for corrupt table entries; format differs for 64-bit PTEs. */
#ifdef CONFIG_X2TLB
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
	       &(e), (e).pte_high, (e).pte_low)
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
#endif

struct vm_area_struct;

/* Arch hook: refresh TLB/caches after a PTE for 'address' changes. */
extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t pte);

/*
 * Encode and de-code a swap entry
 *
 * Constraints:
 *	_PAGE_FILE at bit 0
 *	_PAGE_PRESENT at bit 8
 *	_PAGE_PROTNONE at bit 9
 *
 * For the normal case, we encode the swap type into bits 0:7 and the
 * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
 * preserved bits in the low 32-bits and use the upper 32 as the swap
 * offset (along with a 5-bit type), following the same approach as x86
 * PAE. This keeps the logic quite simple, and allows for a full 32
 * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
 * in the pte_low case.
 *
 * As is evident by the Alpha code, if we ever get a 64-bit unsigned
 * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
 * much cleaner..
 *
 * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
 *       and _PAGE_PROTNONE bits
 */
#ifdef CONFIG_X2TLB
/* 64-bit PTE: swap state lives entirely in pte_high (5-bit type, 27-bit offset). */
#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){ (type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })

/*
 * Encode and decode a nonlinear file mapping entry
 */
#define pte_to_pgoff(pte)		((pte).pte_high)
#define pgoff_to_pte(off)		((pte_t) { _PAGE_FILE, (off) })

#define PTE_FILE_MAX_BITS		32
#else
/* 32-bit PTE: shift by 1 keeps bit 0 (_PAGE_FILE) clear in the stored pte. */
#define __swp_type(x)			((x).val & 0xff)
#define __swp_offset(x)			((x).val >> 10)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) <<10})

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 1 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 1 })

/*
 * Encode and decode a nonlinear file mapping entry
 */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 1)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 1) | _PAGE_FILE })
#endif

typedef pte_t *pte_addr_t;

/* All kernel addresses are considered valid. */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

struct mm_struct;

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#ifndef CONFIG_MMU
extern unsigned int kobjsize(const void *objp);
#endif /* !CONFIG_MMU */

/* Parts with aliasing caches need a flushing ptep_get_and_clear(). */
#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
	defined(CONFIG_SH7705_CACHE_32KB))
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */
/*
 * NOTE(review): guard comment says __ASM_SH_PAGE_H in a pgtable header;
 * likely a stale copy from page.h — verify against the opening #ifndef,
 * which is above this chunk.
 */
#endif /* __ASM_SH_PAGE_H */