
pgtable.h

This source package contains the header files needed to compile the MIZI vivi bootloader.
        return pmd_val(pmd) & _PMD_ENTRY_INV;
}

extern inline int pmd_bad(pmd_t pmd)
{
        return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY;
}

extern inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

extern inline int pte_none(pte_t pte)
{
        return ((pte_val(pte) &
                 (_PAGE_INVALID | _PAGE_RO | _PAGE_PRESENT)) == _PAGE_INVALID);
}

#define pte_same(a,b)   (pte_val(a) == pte_val(b))

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)
{
        return (pte_val(pte) & _PAGE_RO) == 0;
}

extern inline int pte_dirty(pte_t pte)
{
        int skey;

        asm volatile ("iske %0,%1" : "=d" (skey) : "a" (pte_val(pte)));
        return skey & _PAGE_CHANGED;
}

extern inline int pte_young(pte_t pte)
{
        int skey;

        asm volatile ("iske %0,%1" : "=d" (skey) : "a" (pte_val(pte)));
        return skey & _PAGE_REFERENCED;
}

/*
 * pgd/pmd/pte modification functions
 */
extern inline void pgd_clear(pgd_t * pgdp)
{
        pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
}

extern inline void pmd_clear(pmd_t * pmdp)
{
        pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
        pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
}

extern inline void pte_clear(pte_t *ptep)
{
        pte_val(*ptep) = _PAGE_INVALID;
}

#define PTE_INIT(x) pte_clear(x)

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & PAGE_MASK) | pgprot_val(newprot);
        return pte;
}

extern inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) |= _PAGE_RO;
        return pte;
}

extern inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_RO;
        return pte;
}

extern inline pte_t pte_mkclean(pte_t pte)
{
        /* The only user of pte_mkclean is the fork() code.
           We must *not* clear the *physical* page dirty bit
           just because fork() wants to clear the dirty bit in
           *one* of the page's mappings.  So we just do nothing. */
        return pte;
}

extern inline pte_t pte_mkdirty(pte_t pte)
{
        /* We can't set the changed bit atomically either. For now we
         * set (!) the page referenced bit. */
        asm volatile ("sske %0,%1"
                      : : "d" (_PAGE_CHANGED|_PAGE_REFERENCED),
                          "a" (pte_val(pte)));

        pte_val(pte) &= ~_PAGE_MKCLEAR;
        return pte;
}

extern inline pte_t pte_mkold(pte_t pte)
{
        asm volatile ("rrbe 0,%0" : : "a" (pte_val(pte)) : "cc" );
        return pte;
}

extern inline pte_t pte_mkyoung(pte_t pte)
{
        /* To set the referenced bit we read the first word from the real
         * page with a special instruction: load using real address (lura).
         * Isn't S/390 a nice architecture ?! */
        asm volatile ("lura 0,%0" : : "a" (pte_val(pte) & PAGE_MASK) : "0" );
        return pte;
}

static inline int ptep_test_and_clear_young(pte_t *ptep)
{
        int ccode;

        asm volatile ("rrbe 0,%1\n\t"
                      "ipm  %0\n\t"
                      "srl  %0,28\n\t"
                      : "=d" (ccode) : "a" (pte_val(*ptep)) : "cc" );
        return ccode & 2;
}

static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
        int skey;

        asm volatile ("iske %0,%1" : "=d" (skey) : "a" (*ptep));
        if ((skey & _PAGE_CHANGED) == 0)
                return 0;
        /* We can't clear the changed bit atomically. For now we
         * clear (!) the page referenced bit. */
        asm volatile ("sske %0,%1"
                      : : "d" (0), "a" (*ptep));
        return 1;
}

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
        pte_t pte = *ptep;
        pte_clear(ptep);
        return pte;
}

static inline void ptep_set_wrprotect(pte_t *ptep)
{
        pte_t old_pte = *ptep;
        set_pte(ptep, pte_wrprotect(old_pte));
}

static inline void ptep_mkdirty(pte_t *ptep)
{
        pte_mkdirty(*ptep);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t __pte;
        pte_val(__pte) = physpage + pgprot_val(pgprot);
        return __pte;
}

#define mk_pte(pg, pgprot)                                                \
({                                                                        \
        struct page *__page = (pg);                                       \
        unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);  \
        pte_t __pte = mk_pte_phys(__physpage, (pgprot));                  \
                                                                          \
        if (__page != ZERO_PAGE(__physpage)) {                            \
                int __users = page_count(__page);                         \
                __users -= !!__page->buffers + !!__page->mapping;         \
                                                                          \
                if (__users == 1)                                         \
                        pte_val(__pte) |= _PAGE_MKCLEAR;                  \
        }                                                                 \
                                                                          \
        __pte;                                                            \
})

#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))

#define pmd_page(pmd) \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

#define pgd_page(pmd) \
        ((unsigned long) __va(pgd_val(pmd) & PAGE_MASK))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
        ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) \
        ((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

/*
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 63 is used as the software page present bit. If a page is
 * swapped this obviously has to be zero.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 7 bits from 56-62 for the type and the 52 bits from 0-51
 * for the offset.
 * |                     offset                       |0110|type |0
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;
        pte_val(pte) = (type << 1) | (offset << 12) | _PAGE_INVALID | _PAGE_RO;
        pte_val(pte) &= 0xfffffffffffff6fe;  /* better to be paranoid */
        return pte;
}

#define SWP_TYPE(entry)         (((entry).val >> 1) & 0x3f)
#define SWP_OFFSET(entry)       ((entry).val >> 12)
#define SWP_ENTRY(type,offset)  ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define pte_to_swp_entry(pte)   ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)     ((pte_t) { (x).val })

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)          (0)
#define kern_addr_valid(addr)   (1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

#endif /* _S390_PAGE_H */
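The pgd_offset/pmd_offset/pte_offset macros defined above are the building blocks for walking the page tables by hand. Below is a minimal sketch of such a walk; the function name lookup_pte_example is hypothetical, and pmd_none() is assumed to be provided by the part of this header not shown on this page.

/* Illustrative sketch, not part of the header: walk from an mm and a
 * virtual address down to the third-level pte using the macros above. */
static pte_t *lookup_pte_example(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd = pgd_offset(mm, address);   /* first-level (pgd) entry  */
        pmd_t *pmd = pmd_offset(pgd, address);  /* second-level (pmd) entry */

        /* Bail out if the segment-table entry is missing or malformed. */
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;

        return pte_offset(pmd, address);        /* third-level (pte) entry  */
}

A caller would then typically check pte_present(*pte) before using pte_page() to reach the corresponding struct page.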
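The swap-entry encoding described in the comment above can be made concrete with a short round-trip example. The type and offset values are arbitrary and chosen only for illustration:

/* Illustrative sketch: encode a swap entry and read its fields back. */
swp_entry_t entry  = SWP_ENTRY(3, 0x1234);   /* encode type 3, offset 0x1234   */
unsigned long type   = SWP_TYPE(entry);      /* recovers the type (3)          */
unsigned long offset = SWP_OFFSET(entry);    /* recovers the offset (0x1234)   */
pte_t pte = swp_entry_to_pte(entry);         /* value stored in the pte slot   */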
