
📄 pgtable.h

📁 Linux kernel source code, distributed as a compressed archive: the source code accompanying the book «Linux Kernel».
💻 C header
📖 Page 1 of 2
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define page_pte_prot(page,prot)	mk_pte(page, prot)
#define page_pte(page)			page_pte_prot(page, __pgprot(0))

#define pte_none(pte)			(!pte_val(pte))
#define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(pte)			(pte_val(*(pte)) = 0UL)

/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)			(mem_map + (unsigned long) ((pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT))

#define pmd_set(pmdp, ptep)		(pmd_val(*(pmdp)) = __pa(ptep))
#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pmd_page(pmd)			((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))

#define pgd_set(pgdp, pmdp)		(pgd_val(*(pgdp)) = __pa(pmdp))
#define pgd_none(pgd)			(!pgd_val(pgd))
#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd)			((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))

/*
 * The following have defined behavior only if pte_present() is true.
 */
#define pte_read(pte)		(((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
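/*
 * Editor's note, not part of the original header: pte_write() above uses an
 * unsigned-wraparound trick to range-check the 3-bit access-rights (AR) field.
 * Assuming the AR encodings defined earlier in this file, where values 2
 * through 6 are exactly the ones that permit write access, the expression
 * "(unsigned) (ar - 2) <= 4" tests "2 <= ar && ar <= 6" with a single
 * comparison.  Worked examples: ar == 3 (read/write/execute) gives
 * (unsigned) 1 <= 4, i.e. writable; ar == 0 (read-only) wraps around to
 * (unsigned) -2 == 0xfffffffe > 4, i.e. not writable.
 */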
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit
 * in the access rights:
 */
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkexec(pte)		(__pte(pte_val(pte) | _PAGE_AR_RX))
#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))

/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)

/*
 * Macro to mark a page protection value as "write-combining".
 * Note that "protection" is really a misnomer here as the protection
 * value contains the memory attribute bits, dirty bits, and various
 * other bits as well.  Accesses through a write-combining translation
 * bypass the caches, but do allow consecutive writes to be combined
 * into single (but larger) write transactions.
 */
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)

/*
 * Return the region index for virtual address ADDRESS.
 */
static inline unsigned long
rgn_index (unsigned long address)
{
	ia64_va a;

	a.l = address;
	return a.f.reg;
}

/*
 * Return the region offset for virtual address ADDRESS.
 */
static inline unsigned long
rgn_offset (unsigned long address)
{
	ia64_va a;

	a.l = address;
	return a.f.off;
}

static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}

/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the seven level-1 bits (33-39).  */
static inline pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we have a full 43 bit space available and completely
   ignore the region number (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) \
	((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

/* atomic versions of some of the PTE manipulations: */

static inline int
ptep_test_and_clear_young (pte_t *ptep)
{
#ifdef CONFIG_SMP
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte(ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline int
ptep_test_and_clear_dirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
	return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte(ptep, pte_mkclean(pte));
	return 1;
#endif
}

static inline pte_t
ptep_get_and_clear (pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect (pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;

	set_pte(ptep, pte_wrprotect(old_pte));
#endif
}

static inline void
ptep_mkdirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
	set_bit(_PAGE_D_BIT, ptep);
#else
	pte_t old_pte = *ptep;

	set_pte(ptep, pte_mkdirty(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

/*
 * Macros to check the type of access that triggered a page fault.
 */
static inline int
is_write_access (int access_type)
{
	return (access_type & 0x2);
}

static inline int
is_exec_access (int access_type)
{
	return (access_type & 0x4);
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

#define SWP_TYPE(entry)			(((entry).val >> 1) & 0xff)
#define SWP_OFFSET(entry)		(((entry).val << 1) >> 10)
#define SWP_ENTRY(type,offset)		((swp_entry_t) { ((type) << 1) | ((offset) << 9) })
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)		((pte_t) { (x).val })

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)

#define io_remap_page_range remap_page_range	/* XXX is this right? */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

# endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PGTABLE_H */
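A note on the swap-entry encoding near the end of the header: SWP_ENTRY() packs a swap entry into a non-present PTE by leaving bit 0 clear (so pte_present() is false), storing the 8-bit swap type in bits 1-8 and the swap offset in bits 9-62; the "<< 1 >> 10" in SWP_OFFSET() discards bit 63 before extracting the offset. The standalone sketch below is an editor's illustration rather than part of pgtable.h: it copies the three macros verbatim, mirrors the kernel's swp_entry_t as a plain struct, and round-trips arbitrary example values through the encoding.

/* Editor's illustration of the SWP_* bit layout; not part of pgtable.h. */
#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;	/* mirrors the kernel type */

#define SWP_TYPE(entry)		(((entry).val >> 1) & 0xff)
#define SWP_OFFSET(entry)	(((entry).val << 1) >> 10)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 9) })

int main (void)
{
	unsigned long type = 0x5a, offset = 0x12345;	/* arbitrary example values */
	swp_entry_t entry = SWP_ENTRY(type, offset);

	assert(SWP_TYPE(entry) == type);	/* the type survives the round trip */
	assert(SWP_OFFSET(entry) == offset);	/* and so does the offset */
	assert((entry.val & 1) == 0);		/* bit 0 clear: never "present" */

	printf("val=%#lx type=%#lx offset=%#lx\n",
	       entry.val, SWP_TYPE(entry), SWP_OFFSET(entry));
	return 0;
}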
