pgtable.h
来自「Linux Kernel 2.6.9 · include/asm-parisc/pgtable.h（PA-RISC 架构）」· C头文件 代码 · 共 523 行 · 第 1/2 页
H
523 行
#ifndef _PARISC_PGTABLE_H#define _PARISC_PGTABLE_H#include <linux/config.h>#include <asm/fixmap.h>#ifndef __ASSEMBLY__/* * we simulate an x86-style page table for the linux mm code */#include <linux/spinlock.h>#include <asm/processor.h>#include <asm/cache.h>#include <asm/bitops.h>/* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. For the return value to be meaningful, ADDR must be >= * PAGE_OFFSET. This operation can be relatively expensive (e.g., * require a hash-, or multi-level tree-lookup or something of that * sort) but it guarantees to return TRUE only if accessing the page * at that address does not cause an error. Note that there may be * addresses for which kern_addr_valid() returns FALSE even though an * access would not cause an error (e.g., this is typically true for * memory mapped I/O regions. * * XXX Need to implement this for parisc. */#define kern_addr_valid(addr) (1)/* Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following * hook is made available. */#define set_pte(pteptr, pteval) \ do{ \ *(pteptr) = (pteval); \ } while(0)#endif /* !__ASSEMBLY__ */#define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))#define pmd_ERROR(e) \ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))#define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) /* Note: If you change ISTACK_SIZE, you need to change the corresponding * values in vmlinux.lds and vmlinux64.lds (init_istack section). Also, * the "order" and size need to agree. */#define ISTACK_SIZE 32768 /* Interrupt Stack Size */#define ISTACK_ORDER 3/* This is the size of the initially mapped kernel memory (i.e. 
currently * 0 to 1<<23 == 8MB */#ifdef CONFIG_64BIT#define KERNEL_INITIAL_ORDER 24#else#define KERNEL_INITIAL_ORDER 23#endif#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)#ifdef CONFIG_64BIT#define PT_NLEVELS 3#define PGD_ORDER 1 /* Number of pages per pgd */#define PMD_ORDER 1 /* Number of pages per pmd */#define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */#else#define PT_NLEVELS 2#define PGD_ORDER 1 /* Number of pages per pgd */#define PGD_ALLOC_ORDER PGD_ORDER#endif/* Definitions for 3rd level (we use PLD here for Page Lower directory * because PTE_SHIFT is used lower down to mean shift that has to be * done to get usable bits out of the PTE) */#define PLD_SHIFT PAGE_SHIFT#define PLD_SIZE PAGE_SIZE#define BITS_PER_PTE (PAGE_SHIFT - BITS_PER_PTE_ENTRY)#define PTRS_PER_PTE (1UL << BITS_PER_PTE)/* Definitions for 2nd level */#define pgtable_cache_init() do { } while (0)#define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE)#define PMD_SIZE (1UL << PMD_SHIFT)#define PMD_MASK (~(PMD_SIZE-1))#if PT_NLEVELS == 3#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)#else#define BITS_PER_PMD 0#endif#define PTRS_PER_PMD (1UL << BITS_PER_PMD)/* Definitions for 1st level */#define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD)#define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)#define PGDIR_SIZE (1UL << PGDIR_SHIFT)#define PGDIR_MASK (~(PGDIR_SIZE-1))#define PTRS_PER_PGD (1UL << BITS_PER_PGD)#define USER_PTRS_PER_PGD PTRS_PER_PGD#define MAX_ADDRBITS (PGDIR_SHIFT + BITS_PER_PGD)#define MAX_ADDRESS (1UL << MAX_ADDRBITS)#define SPACEID_SHIFT (MAX_ADDRBITS - 32)/* This calculates the number of initial pages we need for the initial * page tables */#define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))/* * pgd entries used up by user/kernel: */#define FIRST_USER_PGD_NR 0#ifndef __ASSEMBLY__extern void *vmalloc_start;#define PCXL_DMA_MAP_SIZE (8*1024*1024)#define VMALLOC_START ((unsigned long)vmalloc_start)/* this is a fixmap remnant, see fixmap.h */#define 
VMALLOC_END (KERNEL_MAP_END)#endif/* NB: The tlb miss handlers make certain assumptions about the order *//* of the following bits, so be careful (One example, bits 25-31 *//* are moved together in one instruction). */#define _PAGE_READ_BIT 31 /* (0x001) read access allowed */#define _PAGE_WRITE_BIT 30 /* (0x002) write access allowed */#define _PAGE_EXEC_BIT 29 /* (0x004) execute access allowed */#define _PAGE_GATEWAY_BIT 28 /* (0x008) privilege promotion allowed */#define _PAGE_DMB_BIT 27 /* (0x010) Data Memory Break enable (B bit) */#define _PAGE_DIRTY_BIT 26 /* (0x020) Page Dirty (D bit) */#define _PAGE_FILE_BIT _PAGE_DIRTY_BIT /* overload this bit */#define _PAGE_REFTRAP_BIT 25 /* (0x040) Page Ref. Trap enable (T bit) */#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */#define _PAGE_FLUSH_BIT 21 /* (0x400) Software: translation valid */ /* for cache flushing only */#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page *//* N.B. The bits are defined in terms of a 32 bit word above, so the *//* following macro is ok for both 32 and 64 bit. 
*/#define xlate_pabit(x) (31 - x)/* this defines the shift to the usable bits in the PTE it is set so * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set * to zero */#define PTE_SHIFT xlate_pabit(_PAGE_USER_BIT)/* this is how many bits may be used by the file functions */#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_SHIFT)#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_SHIFT)#define pgoff_to_pte(off) ((pte_t) { ((off) << PTE_SHIFT) | _PAGE_FILE })#define _PAGE_READ (1 << xlate_pabit(_PAGE_READ_BIT))#define _PAGE_WRITE (1 << xlate_pabit(_PAGE_WRITE_BIT))#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)#define _PAGE_EXEC (1 << xlate_pabit(_PAGE_EXEC_BIT))#define _PAGE_GATEWAY (1 << xlate_pabit(_PAGE_GATEWAY_BIT))#define _PAGE_DMB (1 << xlate_pabit(_PAGE_DMB_BIT))#define _PAGE_DIRTY (1 << xlate_pabit(_PAGE_DIRTY_BIT))#define _PAGE_REFTRAP (1 << xlate_pabit(_PAGE_REFTRAP_BIT))#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))#define _PAGE_FLUSH (1 << xlate_pabit(_PAGE_FLUSH_BIT))#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))#define _PAGE_FILE (1 << xlate_pabit(_PAGE_FILE_BIT))#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)#define _PAGE_KERNEL (_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds * are page-aligned, we don't care about the PAGE_OFFSET bits, except * for a few meta-information bits, so we shift the address to be * able to effectively address 40-bits of physical address space. 
*/#define _PxD_PRESENT_BIT 31#define _PxD_ATTACHED_BIT 30#define _PxD_VALID_BIT 29#define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT))#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))#define PxD_FLAG_MASK (0xf)#define PxD_FLAG_SHIFT (4)#define PxD_VALUE_SHIFT (8)#ifndef __ASSEMBLY__#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)/* Others seem to make this executable, I don't know if that's correct or not. The stack is mapped this way though so this is necessary in the short term - dhd@linuxcare.com, 2000-08-08 */#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)#define PAGE_COPY PAGE_EXECREAD#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)#define PAGE_FLUSH __pgprot(_PAGE_FLUSH)/* * We could have an execute only page using "gateway - promote to priv * level 3", but that is kind of silly. So, the way things are defined * now, we must always have read permission for pages with execute * permission. For the fun of it we'll go ahead and support write only * pages. 
*/ /*xwr*/#define __P000 PAGE_NONE#define __P001 PAGE_READONLY#define __P010 __P000 /* copy on write */#define __P011 __P001 /* copy on write */#define __P100 PAGE_EXECREAD#define __P101 PAGE_EXECREAD#define __P110 __P100 /* copy on write */#define __P111 __P101 /* copy on write */#define __S000 PAGE_NONE#define __S001 PAGE_READONLY#define __S010 PAGE_WRITEONLY#define __S011 PAGE_SHARED#define __S100 PAGE_EXECREAD#define __S101 PAGE_EXECREAD#define __S110 PAGE_RWX#define __S111 PAGE_RWXextern pgd_t swapper_pg_dir[]; /* declared in init_task.c *//* initial page tables for 0-8MB for kernel */extern pte_t pg0[];/* zero page used for uninitialized stuff */extern unsigned long *empty_zero_page;/* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))#define pte_none(x) ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH))
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?