/*
 * Capture note: "Linux Kernel 2.6.9 for OMAP1710" — C header file,
 * 499 lines total; this is page 1 of 2 of the original capture.
 */
#ifndef __ASM_SH64_PGTABLE_H
#define __ASM_SH64_PGTABLE_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/pgtable.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 */

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/threads.h>
#include <linux/config.h>

extern void paging_init(void);

/* We provide our own get_unmapped_area to avoid cache synonym issue */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * Basically we have the same two-level (which is the logical three level
 * Linux page table layout folded) page tables as the i386.
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * NEFF and NPHYS related defines.
 * NEFF/NPHYS give the number of implemented effective (virtual) and
 * physical address bits; the *_SIGN/*_MASK pairs are used to sign-extend
 * values beyond that width (see set_pte() later in this file).
 * FIXME : These need to be model-dependent. For now this is OK, SH5-101 and
 * SH5-103 implement 32 bits effective and 32 bits physical. But future
 * implementations may extend beyond this.
 */
#define NEFF		32
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

#define NPHYS		32
#define NPHYS_SIGN	(1LL << (NPHYS - 1))
#define NPHYS_MASK	(-1LL << NPHYS)

/*
 * Typically 2-level is sufficient up to 32 bits of virtual address space,
 * beyond that 3-level would be appropriate.
 */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
/* For 4k pages, this contains 512 entries, i.e. 9 bits worth of address. */
#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE	3	/* sizeof(unsigned long long) magnit. */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)

/* top level: PGD (the middle PMD level is folded away in the 2-level case).
*/
#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PGD_BITS	(NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD	(1<<PGD_BITS)

/* middle level: PMD. This doesn't do anything for the 2-level case. */
#define PTRS_PER_PMD	(1)

/* In the folded 2-level layout the PMD constants alias the PGD ones. */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PMD_SHIFT	PGDIR_SHIFT
#define PMD_SIZE	PGDIR_SIZE
#define PMD_MASK	PGDIR_MASK

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
/*
 * three-level asymmetric paging structure: PGD is top level.
 * The asymmetry comes from 32-bit pointers and 64-bit PTEs.
 */
/* bottom level: PTE. It's 9 bits = 512 pointers */
#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE	3	/* sizeof(unsigned long long) magnit. */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)

/* middle level: PMD. It's 10 bits = 1024 pointers */
#define PTRS_PER_PMD	((1<<PAGE_SHIFT)/sizeof(unsigned long long *))
#define PMD_MAGNITUDE	2	/* sizeof(unsigned long long *) magnit. */
#define PMD_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PMD_BITS	(PAGE_SHIFT - PMD_MAGNITUDE)

/* top level: PGD. It's 1 bit = 2 pointers (NEFF - PGDIR_SHIFT bits) */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PGD_BITS	(NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD	(1<<PGD_BITS)

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#else
#error "No defined number of page table levels"
#endif

/*
 * Error outputs: report a corrupt table entry with its file/line origin.
 * Note the PTE is 64-bit (%016Lx) while PMD/PGD entries are longs (%08lx).
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
*/
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

/*
 * Store a PTE value, sign-extending it from NPHYS bits: if the top
 * physical-address bit (NPHYS_SIGN) is set, the bits above NPHYS are
 * filled with ones (NPHYS_MASK) before the 64-bit store.
 */
static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long long x = ((unsigned long long) pteval.pte);
	unsigned long long *xp = (unsigned long long *) pteptr;
	/*
	 * Sign-extend based on NPHYS.
	 */
	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}

/* Point a PMD entry at a PTE table (stored as a kernel virtual pointer). */
static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long) ptep;
}

/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * PGD level access routines.
 *
 * Note1:
 * There's no need to use physical addresses since the tree walk is all
 * performed in software, until the PTE translation.
 *
 * Note 2:
 * A PGD entry can be uninitialized (_PGD_UNUSED), generically bad,
 * clear (_PGD_EMPTY), present. When present, lower 3 nibbles contain
 * _KERNPG_TABLE. Being a kernel virtual pointer also bit 31 must
 * be 1. Assuming an arbitrary clear value of bit 31 set to 0 and
 * lower 3 nibbles set to 0xFFF (_PGD_EMPTY) any other value is a
 * bad pgd that must be notified via printk().
 *
 */
#define _PGD_EMPTY	0x0

#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
/* 2-level: the PGD is folded, so its entries are never absent or bad. */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
#define pgd_present(pgd) ((pgd_val(pgd) & _PAGE_PRESENT) ? 1 : 0)
#define pgd_clear(xx)				do { } while(0)

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define pgd_present(pgd_entry)	(1)
#define pgd_none(pgd_entry)	(pgd_val((pgd_entry)) == _PGD_EMPTY)
/* TODO: Think later about what a useful definition of 'bad' would be now.
*/#define pgd_bad(pgd_entry) (0)#define pgd_clear(pgd_entry_p) (set_pgd((pgd_entry_p), __pgd(_PGD_EMPTY)))#endif#define pgd_page(pgd_entry) ((unsigned long) (pgd_val(pgd_entry) & PAGE_MASK))/* * PMD defines. Middle level. *//* PGD to PMD dereferencing */#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address){ return (pmd_t *) dir;}#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)#define __pmd_offset(address) \ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))#define pmd_offset(dir, addr) \ ((pmd_t *) ((pgd_val(*(dir))) & PAGE_MASK) + __pmd_offset((addr)))#endif/* * PMD level access routines. Same notes as above. */#define _PMD_EMPTY 0x0/* Either the PMD is empty or present, it's not paged out */#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)#define pmd_page_kernel(pmd_entry) \ ((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))#define pmd_page(pmd) \ (virt_to_page(pmd_val(pmd)))/* PMD to PTE dereferencing */#define pte_index(address) \ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))#define pte_offset_kernel(dir, addr) \ ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)#define pte_offset_map_nested(dir,addr) pte_offset_kernel(dir, addr)#define pte_unmap(pte) do { } while (0)#define pte_unmap_nested(pte) do { } while (0)/* Round it up ! */#define USER_PTRS_PER_PGD ((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)#define FIRST_USER_PGD_NR 0#ifndef __ASSEMBLY__#define VMALLOC_END 0xff000000#define VMALLOC_START 0xf0000000#define VMALLOC_VMADDR(x) ((unsigned long)(x))#define IOBASE_VADDR 0xff000000#define IOBASE_END 0xffffffff/* * PTEL coherent flags. * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
 * (Source capture ends here: this comment and the remaining PTEL flag
 * definitions continue on page 2 of the original 499-line file.)