📄 pgtable.h
{
	pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ);
	return pte;
}

extern inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
	return pte;
}

extern inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
	return pte;
}

extern inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

extern inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	if (pte_val(pte) & _PAGE_ACCESSED)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

extern inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

extern inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#ifndef CONFIG_DISCONTIGMEM
#define PAGE_TO_PA(page)	((page - mem_map) << PAGE_SHIFT)
#else
#define PAGE_TO_PA(page) \
		((((page)-(page)->zone->zone_mem_map) << PAGE_SHIFT) \
		 + ((page)->zone->zone_start_paddr))
#endif

#define mk_pte(page, pgprot)						\
({									\
	pte_t	__pte;							\
									\
	pte_val(__pte) = ((unsigned long)(PAGE_TO_PA(page))) |		\
			 pgprot_val(pgprot);				\
									\
	__pte;								\
})

extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	return __pte(physpage | pgprot_val(pgprot));
}

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define page_pte(page) page_pte_prot(page, __pgprot(0))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, 0)

#define pgd_index(address)	((address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* to find an entry in a page-table-directory */
extern inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) +
	       ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t *pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) (pmd_page(*dir)) +
	       ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

extern void (*update_mmu_cache)(struct vm_area_struct *vma,
				unsigned long address, pte_t pte);
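/*
 * Illustrative sketch (not part of the original header): walking the
 * three-level table with the helpers above, the way a fault handler
 * might.  It assumes the pgd/pmd/pte levels for `address` are already
 * populated and uses set_pte() from earlier in this header; the
 * function name is hypothetical and error handling is omitted, so it
 * is kept under #if 0.
 */
#if 0
static inline pte_t walk_and_touch_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* first level  */
	pmd_t *pmd = pmd_offset(pgd, address);	/* second level */
	pte_t *pte = pte_offset(pmd, address);	/* third level  */

	/* Mark the page accessed and dirty; the pte_mk* helpers also set
	   the matching _PAGE_SILENT_* bit when the permission allows it. */
	set_pte(pte, pte_mkdirty(pte_mkyoung(*pte)));
	return *pte;
}
#endif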
/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(x)		(((x).val >> 32) & 0xff)
#define SWP_OFFSET(x)		((x).val >> 40)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)	((pte_t) { (x).val })

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif

/* TLB operations. */
extern inline void tlb_probe(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbp\n\t"
		".set reorder");
}

extern inline void tlb_read(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbr\n\t"
		".set reorder");
}

extern inline void tlb_write_indexed(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbwi\n\t"
		".set reorder");
}

extern inline void tlb_write_random(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbwr\n\t"
		".set reorder");
}

/* Dealing with various CP0 mmu/cache related registers. */

/* CP0_PAGEMASK register */
extern inline unsigned long get_pagemask(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $5\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_pagemask(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %z0, $5\n\t"
		".set reorder"
		: : "Jr" (val));
}

/* CP0_ENTRYLO0 and CP0_ENTRYLO1 registers */
extern inline unsigned long get_entrylo0(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmfc0 %0, $2\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_entrylo0(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmtc0 %z0, $2\n\t"
		".set reorder"
		: : "Jr" (val));
}

extern inline unsigned long get_entrylo1(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmfc0 %0, $3\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_entrylo1(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmtc0 %z0, $3\n\t"
		".set reorder"
		: : "Jr" (val));
}

/* CP0_ENTRYHI register */
extern inline unsigned long get_entryhi(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmfc0 %0, $10\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_entryhi(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmtc0 %z0, $10\n\t"
		".set reorder"
		: : "Jr" (val));
}

/* CP0_INDEX register */
extern inline unsigned int get_index(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $0\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_index(unsigned int val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %z0, $0\n\t"
		".set reorder\n\t"
		: : "Jr" (val));
}

/* CP0_WIRED register */
extern inline unsigned long get_wired(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $6\n\t"
		".set reorder\n\t"
		: "=r" (val));
	return val;
}

extern inline void set_wired(unsigned long val)
{
	__asm__ __volatile__(
		"\n\t.set noreorder\n\t"
		"mtc0 %z0, $6\n\t"
		".set reorder"
		: : "Jr" (val));
}

extern inline unsigned long get_info(void)
{
	unsigned long val;

	__asm__(
		".set push\n\t"
		".set reorder\n\t"
		"mfc0 %0, $7\n\t"
		".set pop"
		: "=r" (val));
	return val;
}

/* CP0_TAGLO and CP0_TAGHI registers */
extern inline unsigned long get_taglo(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $28\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}
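/*
 * Illustrative sketch (not part of the original header): the classic
 * probe-and-replace idiom built from the accessors above, roughly what
 * the mips64 local_flush_tlb_page() code does.  It assumes 4K pages,
 * that the caller has disabled interrupts, and that KSEG0, PAGE_MASK
 * and PAGE_SHIFT come from the usual headers; the function name and
 * the asid argument are hypothetical, so it is kept under #if 0.
 */
#if 0
static inline void probe_and_invalidate_example(unsigned long address, unsigned long asid)
{
	unsigned long oldhi = get_entryhi();
	int idx;

	/* Probe for the even/odd page pair covering `address` under `asid`. */
	set_entryhi((address & (PAGE_MASK << 1)) | asid);
	tlb_probe();
	idx = get_index();

	/* Invalidate both halves of the entry. */
	set_entrylo0(0);
	set_entrylo1(0);
	if (idx >= 0) {
		/* Hit: retarget EntryHi at a unique KSEG0 address and rewrite. */
		set_entryhi(KSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write_indexed();
	}
	set_entryhi(oldhi);
}
#endif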
extern inline void set_taglo(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %z0, $28\n\t"
		".set reorder"
		: : "Jr" (val));
}

extern inline unsigned long get_taghi(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $29\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_taghi(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %z0, $29\n\t"
		".set reorder"
		: : "Jr" (val));
}

/* CP0_CONTEXT register */
extern inline unsigned long get_context(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmfc0 %0, $4\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_context(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmtc0 %z0, $4\n\t"
		".set reorder"
		: : "Jr" (val));
}

#include <asm-generic/pgtable.h>

#endif /* !defined (_LANGUAGE_ASSEMBLY) */

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */
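/*
 * Illustrative appendix (not part of the original header): a worked
 * example of the swap-entry layout defined above.  The numbers are
 * hypothetical; SWP_TYPE()/SWP_OFFSET() simply undo mk_swap_pte(),
 * so it is kept under #if 0.
 */
#if 0
static inline void swap_entry_example(void)
{
	swp_entry_t entry;
	pte_t pte;

	/* type 5, offset 0x1234 -> pte_val == 0x0012340500000000 */
	entry = SWP_ENTRY(5, 0x1234);
	pte = swp_entry_to_pte(entry);

	/* SWP_TYPE(pte_to_swp_entry(pte)) == 5,
	   SWP_OFFSET(pte_to_swp_entry(pte)) == 0x1234 */
	(void) pte;
}
#endif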