pgtable.h

来自「Linux Kernel 2.6.9 · include/asm-s390/pgtable.h (s390/s390x)」· C头文件 代码 · 共 803 行 · 第 1/2 页

H
803
字号
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_and_clear_dirty for
	 * details.
	 */
	return 0;
}

extern inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

/* 31 bit: nothing to do — the pgd level is folded away; pmd_offset()
 * below simply casts the pgd pointer, so there is no separate pgd
 * entry to invalidate. */
extern inline void pgd_clear(pgd_t * pgdp)      { }

/* 31 bit: a Linux pmd covers four consecutive 1 MB hardware segment
 * table entries (4 MB emulation — see the comment in ptep_clear_flush),
 * so all four must be marked invalid. */
extern inline void pmd_clear(pmd_t * pmdp)
{
	pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
	pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
	pmd_val(pmdp[2]) = _PAGE_TABLE_INV;
	pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
}

#else /* __s390x__ */

extern inline void pgd_clear(pgd_t * pgdp)
{
	pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
}

/* 64 bit: a pmd is two words (pmd_val and pmd_val1); invalidate both. */
extern inline void pmd_clear(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
	pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
}

#endif /* __s390x__ */

extern inline void pte_clear(pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */

/* Keep the page frame address, replace the protection bits. */
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= PAGE_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

extern inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_INVALID_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

extern inline pte_t pte_mkwrite(pte_t pte) 
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

extern inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}

extern inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

extern inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

extern inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

/* Always 0: the referenced bit lives in the storage key, not the pte
 * (see page_test_and_clear_young). */
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	return 0;
}

static inline int
ptep_clear_flush_young(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB; bits are in storage key */
	return ptep_test_and_clear_young(ptep);
}

/* Always 0: the dirty bit lives in the storage key, not the pte
 * (see page_test_and_clear_dirty). */
static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	return 0;
}

static inline int
ptep_clear_flush_dirty(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB; bits are in storage key */
	return ptep_test_and_clear_dirty(ptep);
}

/* Read the old pte value and invalidate the entry, without any TLB
 * flush — callers that need the flush use ptep_clear_flush below. */
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(ptep);
	return pte;
}

/* Read the old pte, invalidate it with IPTE (which also purges the
 * matching TLB entries), then mark the slot empty.  IPTE is only
 * issued for valid entries; invalid ones have nothing in the TLB. */
static inline pte_t
ptep_clear_flush(struct vm_area_struct *vma,
		 unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
#ifndef __s390x__
	if (!(pte_val(pte) & _PAGE_INVALID)) {
		/* S390 has 1mb segments, we are emulating 4MB segments */
		/* NOTE(review): the mask presumably rounds ptep down to the
		 * origin of the 1 KB hardware page table fragment that IPTE
		 * expects as its first operand — confirm against the s390
		 * page table layout. */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
		__asm__ __volatile__ ("ipte %2,%3"
				      : "=m" (*ptep) : "m" (*ptep),
				        "a" (pto), "a" (address) );
	}
#else /* __s390x__ */
	if (!(pte_val(pte) & _PAGE_INVALID)) 
		__asm__ __volatile__ ("ipte %2,%3"
				      : "=m" (*ptep) : "m" (*ptep),
				        "a" (ptep), "a" (address) );
#endif /* __s390x__ */
	pte_clear(ptep);
	return pte;
}

static inline void ptep_set_wrprotect(pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte(ptep, pte_wrprotect(old_pte));
}

/* Effectively a no-op: pte_mkdirty() returns its argument unchanged
 * (see above) and the result is discarded — the dirty bit is tracked
 * in the storage key instead. */
static inline void ptep_mkdirty(pte_t *ptep)
{
	pte_mkdirty(*ptep);
}

/* Replace a pte: flush the old mapping first so no stale TLB entry
 * survives, then install the new entry. */
static inline void
ptep_establish(struct vm_area_struct *vma, 
	       unsigned long address, pte_t *ptep,
	       pte_t entry)
{
	ptep_clear_flush(vma, address, ptep);
	set_pte(ptep, entry);
}

/* Access-flag updates need no special handling beyond re-establishing
 * the pte; __dirty is ignored because the dirty bit is in the storage
 * key. */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	ptep_establish(__vma, __address, __ptep, __entry)

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if it is not mapped in any
 * address space.
 */
#define page_test_and_clear_dirty(_page)				  \
({									  \
	struct page *__page = (_page);					  \
	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);  \
	int __skey = page_get_storage_key(__physpage);			  \
	if (__skey & _PAGE_CHANGED)					  \
		page_set_storage_key(__physpage, __skey & ~_PAGE_CHANGED);\
	(__skey & _PAGE_CHANGED);					  \
})

/*
 * Test and clear referenced bit in storage key.
 * RRBE resets the reference bit and returns its old state in the
 * condition code; IPM/SRL extract the cc, and (cc & 2) is nonzero
 * iff the page had been referenced.
 */
#define page_test_and_clear_young(page)					  \
({									  \
	struct page *__page = (page);					  \
	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);  \
	int __ccode;							  \
	asm volatile ("rrbe 0,%1\n\t"					  \
		      "ipm  %0\n\t"					  \
		      "srl  %0,28\n\t" 					  \
                      : "=d" (__ccode) : "a" (__physpage) : "cc" );	  \
	(__ccode & 2);							  \
})

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

#define mk_pte(pg, pgprot)                                                \
({                                                                        \
	struct page *__page = (pg);                                       \
	pgprot_t __pgprot = (pgprot);					  \
	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);  \
	pte_t __pte = mk_pte_phys(__physpage, __pgprot);                  \
	__pte;                                                            \
})

#define pfn_pte(pfn, pgprot)                                              \
({                                                                        \
	pgprot_t __pgprot = (pgprot);					  \
	unsigned long __physpage = __pa((pfn) << PAGE_SHIFT);             \
	pte_t __pte = mk_pte_phys(__physpage, __pgprot);                  \
	__pte;                                                            \
})

/* NOTE(review): overrides the generic SetPageUptodate — presumably so
 * the storage-key change bit set while reading the page in does not
 * make a freshly-uptodate page look dirty; confirm against callers. */
#define SetPageUptodate(_page) \
	do {								      \
		struct page *__page = (_page);				      \
		if (!test_and_set_bit(PG_uptodate, &__page->flags))	      \
			page_test_and_clear_dirty(_page);		      \
	} while (0)

#ifdef __s390x__
#define pfn_pmd(pfn, pgprot)                                              \
({                                                                        \
	pgprot_t __pgprot = (pgprot);                                     \
	unsigned long __physpage = __pa((pfn) << PAGE_SHIFT);             \
	pmd_t __pmd = __pmd(__physpage + pgprot_val(__pgprot));           \
	__pmd;                                                            \
})
#endif /* __s390x__ */

#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page_kernel(pmd) (pmd_val(pmd) & PAGE_MASK)

#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))

#define pgd_page_kernel(pgd) (pgd_val(pgd) & PAGE_MASK)

/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

/* Find an entry in the second-level page table.. */
/* 31 bit: two-level paging — the pmd level is folded into the pgd,
 * so the "pmd" is just the pgd entry itself. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}

#else /* __s390x__ */

/* Find an entry in the second-level page table.. */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir,addr) \
	((pmd_t *) pgd_page_kernel(*(dir)) + pmd_index(addr))

#endif /* __s390x__ */

/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
	((pte_t *) pmd_page_kernel(*(pmd)) + pte_index(address))
/* No highmem on s390: "mapped" pte access is identical to kernel
 * access and unmapping is a no-op. */
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise an specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise an specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */

/* Encode (type, offset) into an invalid pte per the layout above:
 * type in bits 2-6, offset low bit at bit 7 ("o"), remaining offset
 * bits starting at bit 12. */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	pte_val(pte) = _PAGE_INVALID_SWAP | ((type & 0x1f) << 2) |
		((offset & 1) << 7) | ((offset & 0xffffe) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

/* Nonlinear file mapping ptes: the offset is split around the invalid
 * and protection bits — low 7 bits at bit 1, the rest from bit 12 up. */
#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_INVALID_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* Tell asm-generic/pgtable.h which helpers this architecture
 * provides itself, so the generic fallbacks are not defined. */
#define __HAVE_ARCH_PTEP_ESTABLISH
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG

#include <asm-generic/pgtable.h>

#endif /* _S390_PAGE_H */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?