📄 pgtable.h
#define pte_exec(pte)			(1)

extern __inline__ int pte_write(pte_t pte)
{
	return pte_val(pte) & PTE_AP_WRITE;
}

extern __inline__ int pte_cacheable(pte_t pte)
{
	return pte_val(pte) & PTE_CACHEABLE;
}

extern __inline__ int pte_dirty(pte_t pte)
{
	return pte_val(pte) & PTE_BUFFERABLE;
}

extern __inline__ int pte_young(pte_t pte)
{
	return pte_val(pte) & PTE_AP_READ;
}

extern __inline__ pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~PTE_AP_WRITE;
	return pte;
}

extern __inline__ pte_t pte_nocache(pte_t pte)
{
	pte_val(pte) &= ~PTE_CACHEABLE;
	return pte;
}

extern __inline__ pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~PTE_BUFFERABLE;
	return pte;
}

extern __inline__ pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~PTE_AP_READ;
	return pte;
}

extern __inline__ pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_AP_WRITE;
	return pte;
}

extern __inline__ pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= PTE_BUFFERABLE;
	return pte;
}

extern __inline__ pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= PTE_AP_READ;
	return pte;
}

/*
 * The following are unable to be implemented on this MMU
 */
#if 0
extern __inline__ pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(PTE_CACHEABLE|PTE_AP_READ);
	return pte;
}

extern __inline__ pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~(PTE_CACHEABLE|PTE_AP_READ);
	return pte;
}

extern __inline__ pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= PTE_CACHEABLE;
	return pte;
}

extern __inline__ pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= PTE_CACHEABLE;
	return pte;
}
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern __inline__ pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = __virt_to_phys(page) | pgprot_val(pgprot);
	return pte;
}

extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

extern __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	*pteptr = pteval;
	__flush_pte_to_ram(pteptr);
}

extern __inline__ unsigned long pte_page(pte_t pte)
{
	return __phys_to_virt(pte_val(pte) & PAGE_MASK);
}

extern __inline__ pmd_t mk_pmd(pte_t *ptep)
{
	pmd_t pmd;
	pmd_val(pmd) = __virt_to_phys((unsigned long)ptep) | _PAGE_TABLE;
	return pmd;
}

#if 1
#define set_pmd(pmdp,pmd)	processor.u.armv3v4._set_pmd(pmdp,pmd)
#else
extern __inline__ void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	__flush_pte_to_ram(pmdp);
}
#endif

extern __inline__ unsigned long pmd_page(pmd_t pmd)
{
	return __phys_to_virt(pmd_val(pmd) & 0xfffffc00);
}

/* to find an entry in a page-table-directory */
extern __inline__ pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
extern __inline__ pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

extern unsigned long get_small_page(int priority);
extern void free_small_page(unsigned long page);

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
extern __inline__ void pte_free_kernel(pte_t * pte)
{
	free_small_page((unsigned long) pte);
}

extern const char bad_pmd_string[];

extern __inline__ pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_small_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				memzero(page, PTRS_PER_PTE * BYTES_PER_PTR);
				set_pmd(pmd, mk_pmd(page));
				return page + address;
			}
			set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
			return NULL;
		}
		free_small_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk(bad_pmd_string, pmd_val(*pmd));
		set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pmd_free_kernel(pmdp)		pmd_val(*(pmdp)) = 0;
#define pmd_alloc_kernel(pgdp, address)	((pmd_t *)(pgdp))

extern __inline__ void pte_free(pte_t * pte)
{
	free_small_page((unsigned long) pte);
}

extern __inline__ pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_small_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				memzero(page, PTRS_PER_PTE * BYTES_PER_PTR);
				set_pmd(pmd, mk_pmd(page));
				return page + address;
			}
			set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
			return NULL;
		}
		free_small_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk(bad_pmd_string, pmd_val(*pmd));
		set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pmd_free(pmdp)			pmd_val(*(pmdp)) = 0;
#define pmd_alloc(pgdp, address)	((pmd_t *)(pgdp))

/*
 * Free a page directory. Takes the virtual address.
 */
extern __inline__ void pgd_free(pgd_t * pgd)
{
	free_pages((unsigned long) pgd, 2);
}

/*
 * Allocate a new page directory. Return the virtual address of it.
 */
extern __inline__ pgd_t * pgd_alloc(void)
{
	unsigned long pgd;

	/*
	 * need to get a 16k page for level 1
	 */
	pgd = __get_free_pages(GFP_KERNEL, 2, 0);
	if (pgd)
		memzero((void *)pgd, PTRS_PER_PGD * BYTES_PER_PTR);
	return (pgd_t *)pgd;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * The sa110 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern __inline__ void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

#define SWP_TYPE(entry)			(((entry) >> 2) & 0x7f)
#define SWP_OFFSET(entry)		((entry) >> 9)
#define SWP_ENTRY(type,offset)		(((type) << 2) | ((offset) << 9))

#else

extern inline void flush_cache_mm(struct mm_struct *mm)
{
}

extern inline void flush_cache_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
}

/* Push the page at kernel virtual address and clear the icache */
extern inline void flush_page_to_ram(unsigned long address)
{
}

/* Push n pages at kernel virtual address and clear the icache */
extern inline void flush_pages_to_ram(unsigned long address, int n)
{
}

#define __flush_entry_to_ram(entry)

#endif
#endif /* __ASM_PROC_PAGE_H */
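
For context, here is a minimal sketch (not part of the original header) of how these helpers are typically chained by the generic memory-management code: walk the directory with pgd_offset()/pmd_offset(), get or create the page table with pte_alloc(), then build and install an entry with mk_pte() and set_pte(). The function map_one_page() and its parameter names are hypothetical, for illustration only.

/*
 * Hypothetical usage sketch: map the kernel page at virtual address
 * `page` into `mm` at `vaddr` with protection `prot`, using only the
 * helpers defined in this header. Assumes kernel context (errno
 * constants available).
 */
static int map_one_page(struct mm_struct *mm, unsigned long vaddr,
			unsigned long page, pgprot_t prot)
{
	pgd_t *pgd = pgd_offset(mm, vaddr);	/* level-1 entry for vaddr */
	pmd_t *pmd = pmd_offset(pgd, vaddr);	/* identity on this MMU: pmd == pgd */
	pte_t *pte = pte_alloc(pmd, vaddr);	/* allocates a page table if none */

	if (!pte)
		return -ENOMEM;			/* pmd now points at BAD_PAGETABLE */

	/* Build the entry, mark it writable, and write it back to RAM. */
	set_pte(pte, pte_mkwrite(mk_pte(page, prot)));
	return 0;
}

Note the design of set_pte()/set_pmd(): because the ARM MMU walks the tables in physical memory, every update is pushed out with __flush_pte_to_ram() (or via the processor-specific _set_pmd hook) rather than left in the data cache.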