pgtable.h
/*
 * BK Id: SCCS/s.pgtable.h 1.15 09/22/01 11:26:52 trini
 */
#ifdef __KERNEL__
#ifndef _PPC_PGTABLE_H
#define _PPC_PGTABLE_H

#include <linux/config.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#if defined(CONFIG_4xx)
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				  unsigned long end);
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

#elif defined(CONFIG_8xx)
#define __tlbia()	asm volatile ("tlbia" : : )

static inline void local_flush_tlb_all(void)
	{ __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
	{ __tlbia(); }
static inline void local_flush_tlb_range(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
	{ __tlbia(); }
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

#else	/* 6xx, 7xx, 7xxx cpus */
struct mm_struct;
struct vm_area_struct;
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				  unsigned long end);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#endif

#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_range		local_flush_tlb_range

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.  -- paulus
 * Also, when SMP we use the coherency (M) bit of the
 * BATs and PTEs.  -- Cort
 */
#define flush_cache_all()		do { } while (0)
#define flush_cache_mm(mm)		do { } while (0)
#define flush_cache_range(mm, a, b)	do { } while (0)
#define flush_cache_page(vma, p)	do { } while (0)
#define flush_icache_page(vma, page)	do { } while (0)

extern void flush_icache_range(unsigned long, unsigned long);
extern void __flush_page_to_ram(unsigned long page_va);
extern void flush_page_to_ram(struct page *page);

#define flush_dcache_page(page)		do { } while (0)

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
#endif /* __ASSEMBLY__ */
/*
 * The PowerPC MMU uses a hash table containing PTEs, together with
 * a set of 16 segment registers (on 32-bit implementations), to define
 * the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The PowerPC MPC8xx uses a TLB with a hardware-assisted, software tablewalk.
 * We also use the two-level tables, but we can put the real bits in them
 * needed for the TLB and tablewalk.  These definitions require Mx_CTR.PPM = 0,
 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
 * additional page protection (when Mx_CTR.PPCS = 1) that allows a TLB hit
 * based upon user/super access.  The TLB does not have accessed or write
 * protect bits.  We assume that if the TLB gets loaded with an entry it is
 * accessed, and overload the changed bit for write protect.  We use
 * two bits in the software pte that are supposed to be set to zero in
 * the TLB entry (24 and 25) for these indicators.  Although the level 1
 * descriptor contains the guarded and writethrough/copyback bits, we can
 * set these at the page level since they get copied from the Mx_TWC
 * register when the TLB entry is loaded.  We will use bit 27 for guard, since
 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
 * These will get masked from the level 2 descriptor at TLB load time, and
 * copied to the MD_TWC before it gets loaded.
 */

/*
 * At present, all PowerPC 400-class processors share a similar TLB
 * architecture.  The instruction and data sides share a unified,
 * 64-entry, fully-associative TLB which is maintained totally under
 * software control.  In addition, the instruction side has a
 * hardware-managed, 4-entry, fully-associative TLB which serves as a
 * first level to the shared TLB.  These two TLBs are known as the UTLB
 * and ITLB, respectively (see "mmu.h" for definitions).
 */

/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS		(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - USER_PGD_PTRS)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
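With the 4 KB page size this port uses (PAGE_SHIFT = 12 comes from <asm/page.h> and is an assumption not visible in this listing) and PGDIR_SHIFT = 22, a 32-bit virtual address splits into a 10-bit PGD index, a 10-bit PTE index and a 12-bit page offset, which is exactly why PTRS_PER_PGD and PTRS_PER_PTE are both 1024. A minimal standalone sketch of that split (the SK_* constants mirror the defines above; this is illustrative user-space code, not part of this header):

#include <stdio.h>

/* Constants mirroring the header above; PAGE_SHIFT = 12 (4 KB pages) is assumed. */
#define SK_PAGE_SHIFT	12
#define SK_PGDIR_SHIFT	22
#define SK_PTRS_PER_PTE	1024

int main(void)
{
	unsigned long va = 0x10345678UL;	/* an arbitrary 32-bit virtual address */

	/* Top 10 bits select one of the 1024 PGD entries. */
	unsigned long pgd_index = va >> SK_PGDIR_SHIFT;

	/* Next 10 bits select one of the 1024 PTEs in that page table. */
	unsigned long pte_index = (va >> SK_PAGE_SHIFT) & (SK_PTRS_PER_PTE - 1);

	/* Low 12 bits are the byte offset inside the 4 KB page. */
	unsigned long offset = va & ((1UL << SK_PAGE_SHIFT) - 1);

	printf("va 0x%08lx -> pgd index %lu, pte index %lu, offset 0x%03lx\n",
	       va, pgd_index, pte_index, offset);
	return 0;
}

USER_PTRS_PER_PGD above follows the same arithmetic: TASK_SIZE divided by the 4 MB PGDIR_SIZE gives the number of those 1024 PGD slots that cover user space.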
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET	(0x1000000)	/* 16M */
#define VMALLOC_START	((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define VMALLOC_END	ioremap_bot

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#if defined(CONFIG_4xx)
/* Definitions for 4xx embedded chips. */
#define _PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_COHERENT	0x002	/* M: enforce memory coherence */
#define _PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define _PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define _PAGE_USER	0x010	/* matches one of the zone permission bits */
#define _PAGE_EXEC	0x020	/* software: i-cache coherency required */
#define _PAGE_PRESENT	0x040	/* software: PTE contains a translation */
#define _PAGE_DIRTY	0x100	/* C: page changed */
#define _PAGE_RW	0x200	/* writes permitted */
#define _PAGE_ACCESSED	0x400	/* R: page referenced */

#elif defined(CONFIG_8xx)
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT	0x0001	/* Page is valid */
#define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
#define _PAGE_SHARED	0x0004	/* No ASID (context) compare */

/* These five software bits must be masked out when the entry is loaded
 * into the TLB.
 */
#define _PAGE_EXEC	0x0008	/* software: i-cache coherency required */
#define _PAGE_GUARDED	0x0010	/* software: guarded access */
#define _PAGE_WRITETHRU	0x0020	/* software: use writethrough cache */
#define _PAGE_RW	0x0040	/* software: user write access allowed */
#define _PAGE_ACCESSED	0x0080	/* software: page referenced */
#define _PAGE_HWWRITE	0x0100	/* h/w write enable: never set in Linux PTE */
#define _PAGE_DIRTY	0x0200	/* software: page changed */
#define _PAGE_USER	0x0800	/* One of the PP bits, the other is USER&~RW */

#else	/* CONFIG_6xx */
/* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
#define _PAGE_USER	0x004	/* usermode access allowed */
#define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
#define _PAGE_DIRTY	0x080	/* C: page changed */
#define _PAGE_ACCESSED	0x100	/* R: page referenced */
#define _PAGE_EXEC	0x200	/* software: i-cache coherency required */
#define _PAGE_RW	0x400	/* software: user write access allowed */
#endif
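Since the _PAGE_* values above are plain bit masks in the software (Linux) PTE, protection information is composed and queried with ordinary bitwise operations. A small illustrative sketch, assuming the 6xx values from the #else branch above (the SK_* names are invented for the example and are not kernel API):

#include <stdio.h>

/* 6xx-style Linux PTE bits, copied from the #else branch above. */
#define SK_PAGE_PRESENT		0x001
#define SK_PAGE_USER		0x004
#define SK_PAGE_DIRTY		0x080
#define SK_PAGE_ACCESSED	0x100
#define SK_PAGE_RW		0x400

int main(void)
{
	/* A writable, referenced user mapping: OR the individual bits together. */
	unsigned long pte_flags = SK_PAGE_PRESENT | SK_PAGE_USER |
				  SK_PAGE_RW | SK_PAGE_ACCESSED;

	/* Querying a property is a simple mask test. */
	printf("dirty?     %s\n", (pte_flags & SK_PAGE_DIRTY) ? "yes" : "no");
	printf("user page? %s\n", (pte_flags & SK_PAGE_USER) ? "yes" : "no");

	/* Write-protecting clears the RW bit without disturbing the others. */
	pte_flags &= ~(unsigned long)SK_PAGE_RW;
	printf("flags after write-protect: 0x%03lx\n", pte_flags);
	return 0;
}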
/* The non-standard PowerPC MMUs, which include the 4xx and 8xx (and
 * maybe 603e), have TLB miss handlers that unconditionally set the
 * _PAGE_ACCESSED flag as a performance optimization.  This causes
 * problems for the pte_none() macro, just like the HASHPTE flag does
 * for the standard PowerPC MMUs.  Depending upon the MMU configuration,
 * either HASHPTE or ACCESSED will have to be masked to give us a
 * proper pte_none() condition.
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#define _PTE_NONE_MASK	_PAGE_ACCESSED
#else
#define _PTE_NONE_MASK	_PAGE_HASHPTE
#endif

#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif

#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif

/* We can't use _PAGE_HWWRITE on any SMP system due to the lack of the ability
 * to atomically manage _PAGE_HWWRITE and its coordination flags,
 * _PAGE_DIRTY or _PAGE_RW.  SMP systems must manage HWWRITE
 * or its logical equivalent in the MMU management software.
 */
#if CONFIG_SMP && _PAGE_HWWRITE
#error "You can't configure SMP and HWWRITE"
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose.  -- paulus.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
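The rest of this header (not shown in this listing) is where the pte_none()-style accessors live; the whole point of _PTE_NONE_MASK is that a PTE carrying only the ACCESSED or HASHPTE bookkeeping bit must still be treated as empty. A minimal sketch of such a check under that assumption (sk_pte_none and the SK_* constants are illustrative, not the kernel's actual definitions):

#include <stdio.h>

/* Model the 6xx case: _PAGE_HASHPTE == 0x002 and _PTE_NONE_MASK == _PAGE_HASHPTE. */
#define SK_PAGE_HASHPTE		0x002UL
#define SK_PTE_NONE_MASK	SK_PAGE_HASHPTE

static int sk_pte_none(unsigned long pte_val)
{
	/* Ignore the bookkeeping bit; everything else must be zero. */
	return (pte_val & ~SK_PTE_NONE_MASK) == 0;
}

int main(void)
{
	printf("%d\n", sk_pte_none(0x000UL));	/* 1: genuinely empty */
	printf("%d\n", sk_pte_none(0x002UL));	/* 1: only a stale HASHPTE bit left behind */
	printf("%d\n", sk_pte_none(0x401UL));	/* 0: present and writable */
	return 0;
}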