pgtable.h
From the Linux kernel source · C header file · 770 lines total · page 1 of 2
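Most of the accessors below exist in two hardware flavours. The inline asm emits the sun4u (pre-hypervisor UltraSPARC) instructions at local labels 661/662 and records, in the .sun4v_1insn_patch / .sun4v_2insn_patch ELF sections, both the address of that site (".word 661b") and the replacement sun4v instructions. At boot on sun4v machines the kernel walks these sections and overwrites each site in place, so the right _PAGE_*_4U vs. _PAGE_*_4V constants are baked in once instead of being branched on for every PTE operation. The sketch below models that patch step in plain C; the entry layout mirrors the kernel's sun4v_2insn_patch_entry, but the names and the free-standing function are illustrative assumptions, not kernel API.

/* Minimal model of the boot-time instruction patching assumed by the
 * 661/662 labels below.  Field and function names are illustrative. */
#include <stdint.h>

struct insn_patch_entry {
        uint32_t addr;          /* site recorded by ".word 661b" */
        uint32_t insns[2];      /* replacement sun4v instructions */
};

static void apply_2insn_patches(struct insn_patch_entry *start,
                                struct insn_patch_entry *end)
{
        struct insn_patch_entry *p;

        for (p = start; p < end; p++) {
                uint32_t *site = (uint32_t *)(uintptr_t)p->addr;

                site[0] = p->insns[0];  /* overwrite sun4u instruction #1 */
                site[1] = p->insns[1];  /* overwrite sun4u instruction #2 */
                /* the real kernel also flushes the I-cache for the site */
        }
}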
"\n662: nop\n" " nop\n" " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" " sethi %%uhi(%4), %1\n" " sllx %1, 32, %1\n" " .word 662b\n" " or %1, %%lo(%4), %1\n" " or %0, %1, %0\n" " .previous\n" : "=r" (val), "=r" (tmp) : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U), "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V)); return __pte(val);}static inline pte_t pte_mkclean(pte_t pte){ unsigned long val = pte_val(pte), tmp; __asm__ __volatile__( "\n661: andn %0, %3, %0\n" " nop\n" "\n662: nop\n" " nop\n" " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" " sethi %%uhi(%4), %1\n" " sllx %1, 32, %1\n" " .word 662b\n" " or %1, %%lo(%4), %1\n" " andn %0, %1, %0\n" " .previous\n" : "=r" (val), "=r" (tmp) : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U), "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V)); return __pte(val);}static inline pte_t pte_mkwrite(pte_t pte){ unsigned long val = pte_val(pte), mask; __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" " sethi %%uhi(%2), %0\n" " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V)); return __pte(val | mask);}static inline pte_t pte_wrprotect(pte_t pte){ unsigned long val = pte_val(pte), tmp; __asm__ __volatile__( "\n661: andn %0, %3, %0\n" " nop\n" "\n662: nop\n" " nop\n" " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" " sethi %%uhi(%4), %1\n" " sllx %1, 32, %1\n" " .word 662b\n" " or %1, %%lo(%4), %1\n" " andn %0, %1, %0\n" " .previous\n" : "=r" (val), "=r" (tmp) : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U), "i" (_PAGE_WRITE_4V | _PAGE_W_4V)); return __pte(val);}static inline pte_t pte_mkold(pte_t pte){ unsigned long mask; __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" " sethi %%uhi(%2), %0\n" " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); mask |= _PAGE_R; return __pte(pte_val(pte) & ~mask);}static inline pte_t pte_mkyoung(pte_t pte){ unsigned long mask; __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" " sethi %%uhi(%2), %0\n" " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); mask |= _PAGE_R; return __pte(pte_val(pte) | mask);}static inline unsigned long pte_young(pte_t pte){ unsigned long mask; __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" " sethi %%uhi(%2), %0\n" " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); return (pte_val(pte) & mask);}static inline unsigned long pte_dirty(pte_t pte){ unsigned long mask; __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" " sethi %%uhi(%2), %0\n" " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V)); return (pte_val(pte) & mask);}static inline unsigned long pte_write(pte_t pte){ unsigned long mask; __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" " sethi %%uhi(%2), %0\n" " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V)); return (pte_val(pte) & mask);}static inline unsigned long pte_exec(pte_t pte){ unsigned long mask; __asm__ __volatile__( "\n661: sethi %%hi(%1), %0\n" " .section .sun4v_1insn_patch, \"ax\"\n" " .word 661b\n" " mov %2, %0\n" " 
.previous\n" : "=r" (mask) : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V)); return (pte_val(pte) & mask);}static inline unsigned long pte_file(pte_t pte){ unsigned long val = pte_val(pte); __asm__ __volatile__( "\n661: and %0, %2, %0\n" " .section .sun4v_1insn_patch, \"ax\"\n" " .word 661b\n" " and %0, %3, %0\n" " .previous\n" : "=r" (val) : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V)); return val;}static inline unsigned long pte_present(pte_t pte){ unsigned long val = pte_val(pte); __asm__ __volatile__( "\n661: and %0, %2, %0\n" " .section .sun4v_1insn_patch, \"ax\"\n" " .word 661b\n" " and %0, %3, %0\n" " .previous\n" : "=r" (val) : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V)); return val;}#define pmd_set(pmdp, ptep) \ (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))#define pud_set(pudp, pmdp) \ (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL))#define __pmd_page(pmd) \ ((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL)))#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))#define pud_page_vaddr(pud) \ ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))#define pmd_none(pmd) (!pmd_val(pmd))#define pmd_bad(pmd) (0)#define pmd_present(pmd) (pmd_val(pmd) != 0U)#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0U)#define pud_none(pud) (!pud_val(pud))#define pud_bad(pud) (0)#define pud_present(pud) (pud_val(pud) != 0U)#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)/* Same in both SUN4V and SUN4U. */#define pte_none(pte) (!pte_val(pte))/* to find an entry in a page-table-directory. */#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))/* to find an entry in a kernel page-table-directory */#define pgd_offset_k(address) pgd_offset(&init_mm, address)/* Find an entry in the second-level page table.. */#define pmd_offset(pudp, address) \ ((pmd_t *) pud_page_vaddr(*(pudp)) + \ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))/* Find an entry in the third-level page table.. */#define pte_index(dir, address) \ ((pte_t *) __pmd_page(*(dir)) + \ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))#define pte_offset_kernel pte_index#define pte_offset_map pte_index#define pte_offset_map_nested pte_index#define pte_unmap(pte) do { } while (0)#define pte_unmap_nested(pte) do { } while (0)/* Actual page table PTE updates. */extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte){ pte_t orig = *ptep; *ptep = pte; /* It is more efficient to let flush_tlb_kernel_range() * handle init_mm tlb flushes. * * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U * and SUN4V pte layout, so this inline test is fine. 
/* Actual page table PTE updates.  */
extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
			  pte_t *ptep, pte_t orig);

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	pte_t orig = *ptep;

	*ptep = pte;

	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 *             and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
		tlb_batch_add(mm, addr, ptep, orig);
}

#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_page_all(current->mm,		\
					      pfn_to_page(this_pfn));	\
	}								\
	newpte;								\
})
#endif

extern pgd_t swapper_pg_dir[2048];
extern pmd_t swapper_low_pmd_dir[2048];

extern void paging_init(void);
extern unsigned long find_ecache_flush_span(unsigned long size);

/* These do nothing with the way I have things setup. */
#define mmu_lockarea(vaddr, len)	(vaddr)
#define mmu_unlockarea(vaddr, len)	do { } while(0)

struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t)			\
	  {				\
		(((long)(type) << PAGE_SHIFT) |			\
		 ((long)(offset) << (PAGE_SHIFT + 8UL)))	\
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

/* File offset in PTE support. */
extern unsigned long pte_file(pte_t);
#define pte_to_pgoff(pte)	(pte_val(pte) >> PAGE_SHIFT)
extern pte_t pgoff_to_pte(unsigned long);
#define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)

extern unsigned long *sparc64_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	\
	(test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))

extern int page_in_phys_avail(unsigned long paddr);

extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
			      unsigned long pfn,
			      unsigned long size, pgprot_t prot);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)

#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

extern void pgtable_cache_init(void);
extern void sun4v_register_fault_status(void);
extern void sun4v_ktsb_register(void);

extern unsigned long cmdline_memory_size;

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */
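As a closing illustration, the swap-entry and IO-space encodings above are plain shift-and-mask packings: the swap type occupies 8 bits just above the page offset bits, the swap offset sits above that, and the iospace tag takes the top 4 bits of the pfn argument. The standalone snippet below round-trips both, assuming sparc64's 8KB pages (PAGE_SHIFT == 13) and BITS_PER_LONG == 64; swp_entry_t is re-declared locally so the example compiles on its own.

/* Standalone check of the __swp_entry()/__swp_type()/__swp_offset() and
 * MK_IOSPACE_PFN()/GET_IOSPACE()/GET_PFN() packings above.  Assumes
 * PAGE_SHIFT == 13 (sparc64 8KB pages) and BITS_PER_LONG == 64. */
#include <assert.h>

#define PAGE_SHIFT	13
#define BITS_PER_LONG	64

typedef struct { unsigned long val; } swp_entry_t;

#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset) \
	((swp_entry_t) { (((long)(type) << PAGE_SHIFT) | \
			  ((long)(offset) << (PAGE_SHIFT + 8UL))) })

#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)

int main(void)
{
	/* type lives in bits [13,21), offset in the bits above them */
	swp_entry_t e = __swp_entry(0x5, 0x1234);
	unsigned long p;

	assert(__swp_type(e) == 0x5 && __swp_offset(e) == 0x1234);

	/* the iospace tag occupies the top 4 bits of the pfn argument */
	p = MK_IOSPACE_PFN(2UL, 0x1234UL);
	assert(GET_IOSPACE(p) == 2 && GET_PFN(p) == 0x1234);
	return 0;
}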