pgtable.h

From the Linux kernel source tree · C header · 823 lines · page 1/2

#ifdef CONFIG_44x
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
#else
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#endif

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)

#ifdef CONFIG_PPC_STD_MMU
/* On standard PPC MMU, no user access implies kernel read/write access,
 * so to write-protect kernel memory we must turn on user access */
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
#else
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
#endif

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)

#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH)
/* We want the debuggers to be able to set breakpoints anywhere, so
 * don't write protect the kernel text */
#define _PAGE_RAM_TEXT	_PAGE_RAM
#else
#define _PAGE_RAM_TEXT	(_PAGE_KERNEL_RO | _PAGE_HWEXEC)
#endif

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL		__pgprot(_PAGE_RAM)
#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__
/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
 * kernel without large page PMD support */
extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);

/*
 * Conversions between PTE values and page frame numbers.
 */

/* in some cases we want to additionally adjust where the pfn is in the pte to
 * allow room for more flags */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT + 8)
#else
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)
#endif

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
					pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
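/*
 * Illustrative sketch, not part of the original header: a round trip
 * through pfn_pte()/pte_pfn().  With 4K pages (PAGE_SHIFT == 12) on
 * FSL Book-E with 64-bit PTEs, PFN_SHIFT_OFFSET is 12 + 8 == 20, so
 * the low 20 bits of the PTE stay free for flag bits:
 *
 *	pte_t pte = pfn_pte(0x12345, PAGE_KERNEL);
 *	pte_val(pte) == ((pte_basic_t)0x12345 << 20) | pgprot_val(PAGE_KERNEL)
 *	pte_pfn(pte) == 0x12345, the flags having been shifted back out
 */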
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define	pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)
#define pgd_page_vaddr(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }

static inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.  In the 64-bit PTE case we lock around the
 * low PTE word since we expect ALL flag bits to be there.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#else
static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#endif
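/*
 * Illustrative sketch, not part of the original header: the lwarx/stwcx.
 * reservation loop above atomically computes (old & ~clr) | set and
 * retries if the PTE changed under it.  Assuming 32-bit PTEs, the same
 * semantics could be written (less efficiently) with the GCC
 * compare-and-swap builtin; pte_update_cas is a hypothetical name.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update_cas(pte_t *p, unsigned long clr,
					   unsigned long set)
{
	unsigned long old, new;

	do {
		old = pte_val(*p);		/* snapshot the old PTE */
		new = (old & ~clr) | set;	/* clear, then set, flag bits */
	} while (__sync_val_compare_and_swap((unsigned long *)p, old, new)
		 != old);
	return old;				/* old value, as pte_update does */
}
#endif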
/*
 * set_pte stores a linux PTE into the linux page table.
 * On machines which use an MMU hash table we avoid changing the
 * _PAGE_HASHPTE bit.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
#if _PAGE_HASHPTE != 0
	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
#else
	*ptep = pte;
#endif
}

/*
 * 2.6 calls this without flushing the TLB entry, which is wrong
 * for our hash-based implementation; we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
	pte_update(ptep, 0, bits);
}

#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									   \
	int __changed = !pte_same(*(__ptep), __entry);			   \
	if (__changed) {						   \
		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
		flush_tlb_page_nohash(__vma, __address);		   \
	}								   \
	__changed;							   \
})

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
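/*
 * Illustrative sketch, not part of the original header: walking from a
 * kernel virtual address down to its PTE with the folded two-level
 * layout above.  example_kernel_pte is a hypothetical helper name.
 */
static inline pte_t *example_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* entry in the kernel pgd */
	pmd_t *pmd = pmd_offset(pgd, addr);	/* folded: same entry as pgd */

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;			/* no pte page mapped here */
	return pte_offset_kernel(pmd, addr);	/* slot in the pte page */
}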
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
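/*
 * Illustrative worked example, not part of the original header: a swap
 * entry of type 2 at offset 0x100 packs as
 *
 *	__swp_entry(2, 0x100).val == 2 | (0x100 << 5) == 0x2002
 *
 * and is stored in the PTE shifted left by 3, keeping the low bits that
 * the comment above reserves (_PAGE_PRESENT, _PAGE_FILE, _PAGE_HASHPTE)
 * clear:
 *
 *	pte_val(__swp_entry_to_pte(__swp_entry(2, 0x100))) == 0x10010
 */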
/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
   compilation errors. */
#define	KERNELMAP_FULL_CACHING		0
#define	KERNELMAP_NOCACHE_SER		1
#define	KERNELMAP_NOCACHE_NONSER	2
#define	KERNELMAP_NO_COPYBACK		3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp);

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode(unsigned long address, unsigned long size,
				 unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
			   unsigned long paddr, unsigned long size,
			   pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long vaddr,
				     unsigned long pfn,
				     unsigned long size,
				     pgprot_t prot)
{
	phys_addr_t paddr64 = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot);
}
#else
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */
#endif /* _PPC_PGTABLE_H */
#endif /* __KERNEL__ */