
📄 pgtable.h

📁 Collection: source code for "Embedded System Design and Case Development, Lab Textbook II" — multithreaded application programming, serial port programming, A/D interface lab, CAN bus communication lab, GPS communication lab, Linux kernel porting and build lab, IC card read/write lab, SD driver…
📖 Page 1 of 2
#define _PAGE_WRENABLE	_PAGE_RW | _PAGE_DIRTY
#define _PAGE_KERNEL	_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED
#define _PAGE_IO	_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(ptep)		do { set_pte((ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		((pmd_val(pmd) & ~PAGE_MASK) != 0)
#define	pmd_present(pmd)	((pmd_val(pmd) & PAGE_MASK) != 0)
#define	pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * Permanent address of a page.
 */
#define page_address(page)	((page)->virtual)
#define pte_page(x)		(mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)

#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline void pte_uncache(pte_t pte)       { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)         { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page,pgprot) \
({									\
	pte_t pte;							\
	pte_val(pte) = ((page - mem_map) << PAGE_SHIFT) | pgprot_val(pgprot); \
	pte;								\
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bit atomically, and returns
 * the old pte value.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n\
	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}

/*
 * Writing a new value into the PTE doesn't disturb the state of the
 * _PAGE_HASHPTE bit, on those machines which use an MMU hash table.
 */
extern void set_pte(pte_t *ptep, pte_t pte);

static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

static inline void ptep_set_wrprotect(pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}

static inline void ptep_mkdirty(pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_page(pmd)	(pmd_val(pmd))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
static inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_page is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used).  -- paulus
 */
#define SWP_TYPE(entry)			((entry).val & 0x3f)
#define SWP_OFFSET(entry)		((entry).val >> 6)
#define SWP_ENTRY(type, offset)		((swp_entry_t) { (type) | ((offset) << 6) })
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 2 })
#define swp_entry_to_pte(x)		((pte_t) { (x).val << 2 })

/* CONFIG_APUS */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk (unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevents
   compilation errors. */
#define	KERNELMAP_FULL_CACHING		0
#define	KERNELMAP_NOCACHE_SER		1
#define	KERNELMAP_NOCACHE_NONSER	2
#define	KERNELMAP_NO_COPYBACK		3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp );

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode (unsigned long address, unsigned long size,
                                 unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* __ASSEMBLY__ */
#endif /* _PPC_PGTABLE_H */
#endif /* __KERNEL__ */
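
The __P000…__P111 and __S000…__S111 macros above populate the 16-entry table the kernel indexes with a mapping's read/write/execute bits plus a shared-vs-private bit to pick its page protection. A minimal user-space sketch of that lookup, assuming the generic index convention (read = 1, write = 2, exec = 4, shared = 8); the table contents mirror the macros above, while the names and main() are illustrative, not kernel code:

/* Hedged sketch: models how the kernel's protection table is indexed.
 * Entries 0-7 mirror __P000..__P111 (private, copy-on-write);
 * entries 8-15 mirror __S000..__S111 (shared). */
#include <stdio.h>

enum { XWR_R = 1, XWR_W = 2, XWR_X = 4, MAP_IS_SHARED = 8 };

static const char *protection_map_names[16] = {
	"PAGE_NONE",     "PAGE_READONLY_X", "PAGE_COPY",   "PAGE_COPY_X",
	"PAGE_READONLY", "PAGE_READONLY_X", "PAGE_COPY",   "PAGE_COPY_X",
	"PAGE_NONE",     "PAGE_READONLY_X", "PAGE_SHARED", "PAGE_SHARED_X",
	"PAGE_READONLY", "PAGE_READONLY_X", "PAGE_SHARED", "PAGE_SHARED_X",
};

int main(void)
{
	/* A private writable mapping gets copy-on-write protection
	 * (no _PAGE_RW in PAGE_COPY_X)... */
	printf("private rw-: %s\n", protection_map_names[XWR_R | XWR_W]);
	/* ...while a shared writable mapping is directly writable. */
	printf("shared  rw-: %s\n",
	       protection_map_names[MAP_IS_SHARED | XWR_R | XWR_W]);
	return 0;
}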
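
pte_update() above is an atomic read-modify-write: lwarx takes a reservation on the PTE word, andc clears the clr bits, or sets the set bits, and stwcx. stores only if the reservation still holds, looping (bne- 1b) otherwise. A rough user-space analogue, with C11 atomics standing in for the reservation pair; pte_update_sim and the bit values are made up for the demo:

/* Hedged sketch: emulates pte_update()'s clear-then-set semantics with a
 * C11 compare-and-swap loop.  The lwarx/stwcx. reservation is modeled by
 * atomic_compare_exchange_weak, which likewise retries on interference. */
#include <stdatomic.h>
#include <stdio.h>

static unsigned long pte_update_sim(_Atomic unsigned long *p,
				    unsigned long clr, unsigned long set)
{
	unsigned long old = atomic_load(p);
	unsigned long new;

	do {
		new = (old & ~clr) | set;	/* andc %1,%0,%4 ; or %1,%1,%5 */
	} while (!atomic_compare_exchange_weak(p, &old, new));

	return old;	/* caller sees the pre-update value, like pte_update */
}

int main(void)
{
	/* frame bits | a made-up ACCESSED bit */
	_Atomic unsigned long pte = 0x1000 | 0x004;
	unsigned long old = pte_update_sim(&pte, 0x004, 0x002);

	printf("old=%#lx new=%#lx\n", old, atomic_load(&pte));
	return 0;
}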
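
The walk from a virtual address to its PTE is pgd_offset() (top bits index the page directory), pmd_offset() (just a cast, since the middle level is folded into the pgd), then pte_offset() (middle bits index the page table). A standalone sketch of the index arithmetic, assuming the usual 4KB-page PPC32 values PAGE_SHIFT = 12 and PGDIR_SHIFT = 22; the real values live in headers not shown here:

/* Hedged sketch: the index arithmetic of pgd_index()/pte_offset() above,
 * outside the kernel.  Shift values assume a 4KB-page, two-level layout. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22
#define PTRS_PER_PTE	(1 << (PGDIR_SHIFT - PAGE_SHIFT))	/* 1024 */

int main(void)
{
	unsigned long address = 0xC0123456UL;

	unsigned long pgd_idx = address >> PGDIR_SHIFT;
	unsigned long pte_idx = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	unsigned long offset  = address & ((1UL << PAGE_SHIFT) - 1);

	/* pgd_offset(mm, addr) adds pgd_idx to mm->pgd; pmd_offset() is a
	 * cast; pte_offset() adds pte_idx to the page-table base. */
	printf("pgd index %lu, pte index %lu, page offset %#lx\n",
	       pgd_idx, pte_idx, offset);
	return 0;
}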
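
The swap-entry macros pack a 6-bit type and a shifted offset into one word, and pte_to_swp_entry()/swp_entry_to_pte() shift by two so the stored value stays clear of the low PTE status bits, where _PAGE_PRESENT typically lives (per the comment above). A small round-trip demo, with swp_entry_t re-declared locally just for the sketch:

/* Hedged sketch: round-trips the swap-entry encoding defined above. */
#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

#define SWP_TYPE(entry)		((entry).val & 0x3f)
#define SWP_OFFSET(entry)	((entry).val >> 6)
#define SWP_ENTRY(type, offset)	((swp_entry_t) { (type) | ((offset) << 6) })

int main(void)
{
	swp_entry_t e = SWP_ENTRY(5UL, 12345UL);

	/* In a PTE the whole entry sits two bits up, so the low status
	 * bits (commonly including _PAGE_PRESENT) can never be set. */
	unsigned long as_pte = e.val << 2;

	assert(SWP_TYPE(e) == 5 && SWP_OFFSET(e) == 12345);
	assert((as_pte & 3) == 0);	/* low bits stay clear */
	printf("type=%lu offset=%lu pte=%#lx\n",
	       SWP_TYPE(e), SWP_OFFSET(e), as_pte);
	return 0;
}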
