
📄 pgtable.h

📁 Describes the Linux initialization process
/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#endif /* __ASSEMBLY__ */

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*) == 1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2	2

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(ptep)		do { pte_val(*(ptep)) = 0; } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & ~PAGE_MASK) != 0)
#define pmd_present(pmd)	((pmd_val(pmd) & PAGE_MASK) != 0)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * Permanent address of a page.
 */
#define page_address(page)	((page)->virtual)
#define pages_to_mb(x)		((x) >> (20-PAGE_SHIFT))
#define pte_page(x)		(mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
extern inline int pgd_none(pgd_t pgd)		{ return 0; }
extern inline int pgd_bad(pgd_t pgd)		{ return 0; }
extern inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)

#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
extern inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
extern inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }

extern inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
extern inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
extern inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

extern inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_RW;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) |= _PAGE_HWWRITE;
	return pte;
}
extern inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_RW)
		pte_val(pte) |= _PAGE_HWWRITE;
	return pte;
}
extern inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page,pgprot) \
({									\
	pte_t pte;							\
	pte_val(pte) = ((page - mem_map) << PAGE_SHIFT) | pgprot_val(pgprot); \
	pte;								\
})

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

#define pmd_page(pmd)	(pmd_val(pmd))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/*
 * Page tables may have changed.  We don't need to do anything here
 * as entries are faulted into the hash table by the low-level
 * data/instruction access exception handlers.
 */
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

/*
 * When flushing the tlb entry for a page, we also need to flush the
 * hash table entry.  flush_hash_page is assembler (for speed) in head.S.
 */
extern void flush_hash_segments(unsigned low_vsid, unsigned high_vsid);
extern void flush_hash_page(unsigned context, unsigned long va);

/* Encode and de-code a swap entry */
#define SWP_TYPE(entry)			(((entry).val >> 1) & 0x3f)
#define SWP_OFFSET(entry)		((entry).val >> 8)
#define SWP_ENTRY(type, offset)		((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)		((pte_t) { (x).val })

/* CONFIG_APUS */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk(unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
   compilation errors. */
#define	KERNELMAP_FULL_CACHING		0
#define	KERNELMAP_NOCACHE_SER		1
#define	KERNELMAP_NOCACHE_NONSER	2
#define	KERNELMAP_NO_COPYBACK		3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp);

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode(unsigned long address, unsigned long size,
				 unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range

#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */
#endif /* _PPC_PGTABLE_H */
#endif /* __KERNEL__ */
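
The pgd/pmd/pte accessors in this header fold the middle level away: pmd_offset() simply casts the pgd slot, so a table walk touches only two real levels. Below is a minimal sketch of a lookup composed from those macros, assuming init_mm and the declarations above are in scope; lookup_kernel_pte is a hypothetical helper, not part of the header.

/* Hypothetical helper: resolve a kernel virtual address to its PTE slot. */
static pte_t *lookup_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);	/* index init_mm's pgd (swapper_pg_dir) */
	pmd_t *pmd = pmd_offset(pgd, address);	/* folded level: just casts the pgd slot */

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;			/* no page table mapped here */
	return pte_offset(pmd, address);	/* index into the PTE page */
}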
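pte_mkwrite() and pte_mkdirty() cooperate to maintain _PAGE_HWWRITE, the bit the hash-table fault path actually honours: it is set only once a PTE is both RW and dirty, so the first store to a clean writable page still faults and lets the kernel record the dirty state. A hedged sketch of the transitions follows; the values are for demonstration only, and PAGE_READONLY is assumed to come from earlier in the full header.

/* Illustrative only: watch _PAGE_HWWRITE appear when RW and DIRTY meet. */
static void hwwrite_demo(void)
{
	pte_t pte = mk_pte_phys(0x100000, PAGE_READONLY);

	pte = pte_mkwrite(pte);	/* sets _PAGE_RW; HWWRITE stays clear (not dirty yet) */
	pte = pte_mkdirty(pte);	/* sets _PAGE_DIRTY; RW already set, so HWWRITE too */
	/* pte_wrprotect()/pte_mkclean() clear HWWRITE again along with RW/DIRTY */
}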
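The SWP_* macros pack a 6-bit swap type and a page offset into one word that can sit in a not-present PTE; the type field starts at bit 1, which appears to keep bit 0 (_PAGE_PRESENT) clear so pte_present() fails on a swapped-out entry. A round-trip sketch with made-up values:

/* Illustrative only: encode a swap entry, park it in a PTE, decode it. */
static void swap_entry_demo(void)
{
	swp_entry_t entry = SWP_ENTRY(3, 0x1234);	/* type 3, offset 0x1234 */
	pte_t pte = swp_entry_to_pte(entry);		/* store in the PTE slot */

	entry = pte_to_swp_entry(pte);
	/* SWP_TYPE(entry) == 3 and SWP_OFFSET(entry) == 0x1234 again */
}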
