📄 pgtable.h
字号:
#define __S111 PAGE_SHARED

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 2

/* to find an entry in a page-table: scale the address down to a
 * pointer-sized index, aligned and masked to stay within one page */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* to set the page-dir -- intentionally a no-op on this architecture */
#define SET_PAGE_DIR(tsk,pgdir)

/* pte_none: true iff the entry is all zeroes (no mapping recorded) */
#define pte_none(x) (!pte_val(x))
/* pte_present: true iff the present bit is set in the entry */
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
/* pte_clear: wipe the entry in place */
#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)

#define pmd_none(x) (!pmd_val(x))
/* by removing the _PAGE_KERNEL bit from the comparison, the same pmd_bad
 * works for both _PAGE_TABLE and _KERNPG_TABLE pmd entries.
 */
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_KERNEL)) != _PAGE_TABLE)
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline void pgd_clear(pgd_t * pgdp) { }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
*/
/* Per-bit pte predicates (valid only when pte_present(), see above). */
static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
/* exec permission is carried by the read bit on this architecture */
static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

/* Remove write permission, together with its silent-write companion bit. */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

/* Remove read permission (and silent read). */
static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ);
	return pte;
}

/* Same bits as pte_rdprotect(): exec permission rides on the read bit. */
static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ);
	return pte;
}

/* Clear the dirty state; a clean page must not be silently writable. */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

/* Clear the accessed state; an old page must not be silently readable. */
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

/* Grant write permission; if the page is already dirty, also set the
 * silent-write bit. */
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

/* Grant read permission; if already accessed, set silent read too. */
static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	if (pte_val(pte) & _PAGE_ACCESSED)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

/* Identical to pte_mkread(): exec permission rides on the read bit. */
static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	if (pte_val(pte) & _PAGE_ACCESSED)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

/* Mark dirty; if writable, set the silent-write bit as well. */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

/* Mark accessed; propagate to the silent bits where the matching
 * permission (and, for writes, the dirty state) is already present. */
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ) {
		pte_val(pte) |= _PAGE_SILENT_READ;
		if ((pte_val(pte) & (_PAGE_WRITE | _PAGE_MODIFIED)) ==
		    (_PAGE_WRITE | _PAGE_MODIFIED))
			pte_val(pte) |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
*/

/* What actually goes as arguments to the various functions is less than
 * obvious, but a rule of thumb is that struct page's goes as struct page *,
 * really physical DRAM addresses are unsigned long's, and DRAM "virtual"
 * addresses (the 0xc0xxxxxx's) goes as void *'s.
 */

/* Build a pte from a kernel-virtual page address and a protection value;
 * the stored value is the physical address, hence the __pa(). */
static inline pte_t __mk_pte(void * page, pgprot_t pgprot)
{
	pte_t pte;
	/* the PTE needs a physical address */
	pte_val(pte) = __pa(page) | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))

/* Same, but from an already-physical page address.
 * NOTE(review): combines with '+' where __mk_pte uses '|'; equivalent only
 * while physpage is page-aligned and the protection bits fit below the page
 * frame bits -- confirm before reusing elsewhere. */
#define mk_pte_phys(physpage, pgprot) \
({ \
	pte_t __pte; \
	\
	pte_val(__pte) = (physpage) + pgprot_val(pgprot); \
	__pte; \
})

/* Replace the protection bits of a pte, keeping the bits selected by
 * _PAGE_CHG_MASK unchanged. */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

/* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval
 * __pte_page(pte_val) refers to the "virtual" DRAM interval
 * pte_pagenr refers to the page-number counted starting from the virtual
 * DRAM start
 */
static inline unsigned long __pte_page(pte_t pte)
{
	/* the PTE contains a physical address */
	return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
}

#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)

/* permanent address of a page */
#define page_address(page) ((page)->virtual)
#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
#define pte_page(pte) (mem_map+pte_pagenr(pte))

/* only the pte's themselves need to point to physical DRAM (see above)
 * the pagetable links are purely handled within the kernel SW and thus
 * don't need the __pa and __va transformations.
 */
static inline unsigned long pmd_page(pmd_t pmd)
{ return pmd_val(pmd) & PAGE_MASK; }

/* Install a pte page into a pmd entry (kernel-virtual pointer, no __pa). */
static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | (unsigned long) ptep; }

/* to find an entry in a page-table-directory.
*/#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))/* to find an entry in a page-table-directory */static inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address){ return mm->pgd + pgd_index(address);}/* to find an entry in a kernel page-table-directory */#define pgd_offset_k(address) pgd_offset(&init_mm, address)/* Find an entry in the second-level page table.. */static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address){ return (pmd_t *) dir;}/* Find an entry in the third-level page table.. */ static inline pte_t * pte_offset(pmd_t * dir, unsigned long address){ return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));}#define pte_ERROR(e) \ printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e))#define pmd_ERROR(e) \ printk("%s:%d: bad pmd %p(%08lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))#define pgd_ERROR(e) \ printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S *//* * CRIS doesn't have any external MMU info: the kernel page * tables contain all the necessary information. * * Actually I am not sure on what this could be used for. 
*/
/* No-op: no external MMU state to refresh after a pte change. */
static inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above:
 * 7-bit swap type at bits 5..11, swap offset from bit 12 upward. */
#define SWP_TYPE(x) (((x).val >> 5) & 0x7f)
#define SWP_OFFSET(x) ((x).val >> 12)
#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 5) | ((offset) << 12) })
#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x) ((pte_t) { (x).val })

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init() do { } while (0)

#endif /* _CRIS_PGTABLE_H */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -