pgtable.h
From Linux Kernel 2.6.9, sparc64 architecture (include/asm-sparc64/pgtable.h — the "OMAP1710" label is a misattribution; the content guards on _SPARC64_PGTABLE_H) · C header file · 443 lines total · page 1 of 2
H
443 行
#define __S111	PAGE_SHARED

#ifndef __ASSEMBLY__

/* Base physical address of memory and the first valid page frame number
 * (see the PFN comment below).
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/* One shared pre-zeroed page; every ZERO_PAGE() mapping points at it. */
extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* PFNs are real physical page numbers.  However, mem_map only begins to
 * record per-page information starting at pfn_base.  This is to handle
 * systems where the first physical page in the machine is at some huge
 * physical address, such as 4GB.  This is common on a partitioned
 * E10000, for example.
 */

/* Compose a PTE from a page frame number and protection bits. */
#define pfn_pte(pfn, prot)	\
	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot) | _PAGE_SZBITS)
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* Extract the page frame number / struct page back out of a PTE. */
#define pte_pfn(x)		((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT)
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define page_pte_prot(page, prot)	mk_pte(page, prot)
#define page_pte(page)		page_pte_prot(page, __pgprot(0))

/* Install new protection bits in a PTE while preserving the physical
 * frame, the accessed/modified state, cacheability, side-effect (E),
 * present and page-size bits.
 */
static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
{
	pte_t __pte;
	const unsigned long preserve_mask = (_PFN_MASK | _PAGE_MODIFIED |
					     _PAGE_ACCESSED | _PAGE_CACHE |
					     _PAGE_E | _PAGE_PRESENT |
					     _PAGE_SZBITS);

	pte_val(__pte) = (pte_val(orig_pte) & preserve_mask) |
		(pgprot_val(new_prot) & ~preserve_mask);

	return __pte;
}

/* pmd/pgd entries hold the physical address of the next-level table
 * shifted right by 11 bits; __pmd_page/pgd_page undo the shift.
 * (NOTE(review): presumably so the entry fits in a narrower word —
 * confirm against pmd_t/pgd_t definitions, not visible here.)
 */
#define pmd_set(pmdp, ptep)	\
	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
#define pgd_set(pgdp, pmdp)	\
	(pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
#define __pmd_page(pmd)	\
	((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL)))
#define pmd_page(pmd)		virt_to_page((void *)__pmd_page(pmd))
#define pgd_page(pgd)	\
	((unsigned long) __va((((unsigned long)pgd_val(pgd))<<11UL)))

/* Validity tests and clearing for each table level.  There is no "bad"
 * encoding at the pmd/pgd level on this port, hence the constant 0.
 */
#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(0)
#define pmd_present(pmd)	(pmd_val(pmd) != 0U)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0U)
#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(pgd_val(pgd) != 0U)
#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0U)

/* The following
only work if pte_present() is true. * Undefined behaviour if not.. */#define pte_read(pte) (pte_val(pte) & _PAGE_READ)#define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC)#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W)))#define pte_rdprotect(pte) \ (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ))#define pte_mkclean(pte) \ (__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W)))#define pte_mkold(pte) \ (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))/* Permanent address of a page. */#define __page_address(page) page_address(page)/* Be very careful when you change these three, they are delicate. */#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_WRITE))#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W))/* to find an entry in a page-table-directory. */#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD))#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))/* to find an entry in a kernel page-table-directory */#define pgd_offset_k(address) pgd_offset(&init_mm, address)/* Find an entry in the second-level page table.. */#define pmd_offset(dir, address) \ ((pmd_t *) pgd_page(*(dir)) + \ ((address >> PMD_SHIFT) & (REAL_PTRS_PER_PMD-1)))/* Find an entry in the third-level page table.. */#define pte_index(dir, address) \ ((pte_t *) __pmd_page(*(dir)) + \ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))#define pte_offset_kernel pte_index#define pte_offset_map pte_index#define pte_offset_map_nested pte_index#define pte_unmap(pte) do { } while (0)#define pte_unmap_nested(pte) do { } while (0)/* Actual page table PTE updates. 
 */
extern void tlb_batch_add(pte_t *ptep, pte_t orig);

/* Store a new PTE; if the old one was present, hand it to the TLB
 * flush batching machinery so the stale translation gets shot down.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	pte_t orig = *ptep;

	*ptep = pte;
	if (pte_present(orig))
		tlb_batch_add(ptep, orig);
}

#define pte_clear(ptep)		set_pte((ptep), __pte(0UL))

extern pgd_t swapper_pg_dir[1];

/* These do nothing with the way I have things setup. */
#define mmu_lockarea(vaddr, len)	(vaddr)
#define mmu_unlockarea(vaddr, len)	do { } while(0)

struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Make a non-present pseudo-TTE: side-effect (E) bit set, caching
 * disabled, with the I/O space number stored in the upper 32 bits.
 */
static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
{
	pte_t pte;

	pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
			~(unsigned long)_PAGE_CACHE);
	pte_val(pte) |= (((unsigned long)space) << 32);

	return pte;
}

/* Encode and de-code a swap entry: 8 bits of swap type immediately
 * above PAGE_SHIFT, the swap offset above that.
 */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t) \
	  { \
		(((long)(type) << PAGE_SHIFT) | \
		 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
	  } )
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

/* File offset in PTE support.
 */
/* A non-present PTE for a file mapping carries the file page offset in
 * its upper bits; bit 63 is unusable, hence the "- 1" in MAX_BITS.
 */
#define pte_file(pte)		(pte_val(pte) & _PAGE_FILE)
#define pte_to_pgoff(pte)	(pte_val(pte) >> PAGE_SHIFT)
#define pgoff_to_pte(off)	(__pte(((off) << PAGE_SHIFT) | _PAGE_FILE))
#define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)

extern unsigned long prom_virt_to_phys(unsigned long, int *);

/* Resolve a kernel virtual address to a physical address:
 *  - linear-mapped addresses (>= PAGE_OFFSET) translate directly,
 *  - OBP (PROM) addresses are resolved by the firmware helper,
 *  - anything else is looked up by walking the kernel page tables.
 * NOTE(review): assumes the final PTE is valid — the walk does not
 * check pte_present(); confirm callers only pass mapped addresses.
 */
static __inline__ unsigned long
sun4u_get_pte (unsigned long addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (addr >= PAGE_OFFSET)
		return addr & _PAGE_PADDR;
	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
		return prom_virt_to_phys(addr, NULL);
	pgdp = pgd_offset_k(addr);
	pmdp = pmd_offset(pgdp, addr);
	ptep = pte_offset_kernel(pmdp, addr);
	return pte_val(*ptep) & _PAGE_PADDR;
}

/* Physical address of a kernel virtual address. */
static __inline__ unsigned long
__get_phys (unsigned long addr)
{
	return sun4u_get_pte (addr);
}

/* I/O space number: bits 31:28 of the translated physical address. */
static __inline__ int
__get_iospace (unsigned long addr)
{
	return ((sun4u_get_pte (addr) & 0xf0000000) >> 28);
}

extern unsigned long *sparc64_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent.
 * One bitmap bit covers a 4MB (1 << 22) chunk of physical memory.
 */
#define kern_addr_valid(addr)	\
	(test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))

extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
			       unsigned long offset,
			       unsigned long size, pgprot_t prot, int space);

#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/* We provide a special get_unmapped_area for framebuffer mmaps to try and
 * use the largest alignment possible such that larger PTEs can be used.
 */
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)
extern void check_pgt_cache(void);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?