📄 pgtable.h
/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* to set the page-dir */
#define SET_PAGE_DIR(tsk,pgdir) \
do { \
	unsigned long __pgdir = __pa(pgdir); \
	(tsk)->tss.cr3 = __pgdir; \
	if ((tsk) == current) \
		__asm__ __volatile__("movl %0,%%cr3": :"r" (__pgdir)); \
} while (0)

#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = 0; } while (0)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
extern inline int pgd_none(pgd_t pgd)		{ return 0; }
extern inline int pgd_bad(pgd_t pgd)		{ return 0; }
extern inline int pgd_present(pgd_t pgd)	{ return 1; }
extern inline void pgd_clear(pgd_t * pgdp)	{ }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }

extern inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_RW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_RW; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) \
({ pte_t __pte; pte_val(__pte) = __pa(page) + pgprot_val(pgprot); __pte; })

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

#define pte_page(pte) \
((unsigned long) __va(pte_val(pte) & PAGE_MASK))

#define pmd_page(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \
((mm)->pgd + ((address) >> PGDIR_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
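Because every helper above takes and returns a pte_t by value, the accessors and constructors chain freely. A minimal sketch of that composition, not part of the header: the function name is hypothetical, and PAGE_SHARED is assumed to be defined elsewhere in this header family.

/*
 * Illustrative only -- not part of pgtable.h. Builds a present, writable,
 * user-accessible entry for a kernel-virtual page and pre-marks it
 * accessed and dirty, much as a write-fault path would.
 */
static inline pte_t mk_dirty_shared_pte(unsigned long page)
{
	pte_t pte = mk_pte(page, PAGE_SHARED);	/* __pa(page) + protection bits */
	return pte_mkdirty(pte_mkyoung(pte));	/* sets _PAGE_DIRTY and _PAGE_ACCESSED */
}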
/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
#define pte_offset(pmd, address) \
((pte_t *) (pmd_page(*pmd) + ((address>>10) & ((PTRS_PER_PTE-1)<<2))))

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

/*
 * Per-CPU quicklists of recycled page-table pages; the first word of
 * each free page links to the next.
 */
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)

extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;

	if (ret) {
		init = pgd_offset(&init_mm, 0);
		memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}

/* Pop a recycled pgd off the quicklist; fall back to a fresh, initialised page. */
extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];	/* word 0 held the freelist link; restore it from word 1 */
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

/* Push the pgd onto the quicklist, using its first word as the link. */
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);

/* Pop a recycled page table, or return NULL if the quicklist is empty. */
extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];	/* word 0 held the freelist link; restore it from word 1 */
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

/* We don't use pmd cache, so these are dummy routines */
extern __inline__ pmd_t *get_pmd_fast(void)
{
	return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

extern void __bad_pte(pmd_t *pmd);
extern void __bad_pte_kernel(pmd_t *pmd);

#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc()		get_pgd_fast()

extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_kernel_slow(pmd, address);
		pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__bad_pte_kernel(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> (PAGE_SHIFT-2)) & 4*(PTRS_PER_PTE - 1);

	if (pmd_none(*pmd))
		goto getnew;
	if (pmd_bad(*pmd))
		goto fix;
	return (pte_t *) (pmd_page(*pmd) + address);
getnew:
{
	unsigned long page = (unsigned long) get_pte_fast();

	if (!page)
		return get_pte_slow(pmd, address);
	pmd_val(*pmd) = _PAGE_TABLE + __pa(page);
	return (pte_t *) (page + address);
}
fix:
	__bad_pte(pmd);
	return NULL;
}
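Taken together, pgd_offset(), pmd_offset() and pte_offset() implement a software walk of the folded two-level i386 page table. A minimal sketch, not part of the header: the function name is hypothetical, and __pte() is assumed from asm/page.h.

/*
 * Illustrative only -- not part of pgtable.h. Returns the pte mapping
 * `address` in `mm`, or an empty pte if no page table is present.
 */
static inline pte_t lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* index the page directory */
	pmd_t *pmd;

	if (pgd_none(*pgd))			/* always false on folded i386 */
		return __pte(0);
	pmd = pmd_offset(pgd, address);		/* folded: the pmd is the pgd entry */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return __pte(0);
	return *pte_offset(pmd, address);	/* index the page table itself */
}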
/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_free_kernel		pmd_free
#define pmd_alloc_kernel	pmd_alloc

extern int do_check_pgt_cache(int, int);

extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	pgd_t *pgd;
#ifdef __SMP__
	int i;
#endif

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm, address) = entry;
	}
	read_unlock(&tasklist_lock);
#ifndef __SMP__
	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
		pgd[address >> PGDIR_SHIFT] = entry;
#else
	/* To pgd_alloc/pgd_free, one holds master kernel lock and so does our
	   callee, so we can modify pgd caches of other CPUs as well. -jj */
	for (i = 0; i < NR_CPUS; i++)
		for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
			pgd[address >> PGDIR_SHIFT] = entry;
#endif
}

extern pgd_t swapper_pg_dir[1024];

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

#define SWP_TYPE(entry)		(((entry) >> 1) & 0x3f)
#define SWP_OFFSET(entry)	((entry) >> 8)
#define SWP_ENTRY(type,offset)	(((type) << 1) | ((offset) << 8))

#define module_map	vmalloc
#define module_unmap	vfree

#endif /* !__ASSEMBLY__ */
#endif /* !OSKIT */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#endif /* _I386_PAGE_H */
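For reference, the SWP_* macros pack a 6-bit swap type at bit 1 and the swap offset from bit 8 upward, leaving bit 0 (_PAGE_PRESENT) clear (and, assuming the usual 0x080 value for _PAGE_PROTNONE, bit 7 as well), so a swapped-out entry never satisfies pte_present(). A minimal round-trip sketch, not part of the header; the function name is hypothetical.

/*
 * Illustrative only -- not part of pgtable.h. Encodes and decodes a
 * (type, offset) pair through the swap macros above.
 */
static inline int swp_roundtrip_ok(void)
{
	unsigned long entry = SWP_ENTRY(5, 0x1234);	/* (5 << 1) | (0x1234 << 8) */

	return SWP_TYPE(entry) == 5 && SWP_OFFSET(entry) == 0x1234;
}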