/* pgtable.h -- i386 page table definitions (Linux kernel header) */
#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H
#include <linux/config.h>
/*
 * Define USE_PENTIUM_MM if you want the 4MB page table optimizations.
 * This works only on an intel Pentium.
 */
#define USE_PENTIUM_MM 1
/*
* The Linux memory management assumes a three-level page table setup. On
* the i386, we use that, but "fold" the mid level into the top-level page
* table, so that we physically have the same two-level page table as the
* i386 mmu expects.
*
* This file contains the functions and defines necessary to modify and use
* the i386 page table tree.
*/
/* Caches aren't brain-dead on the intel.
 * All cache-flush operations are therefore no-ops on this architecture;
 * the generic MM code calls them, but there is nothing to do. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(mm, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_pages_to_ram(page,n) do { } while (0)
/*
 * TLB flushing:
 *
 * - flush_tlb() flushes the current mm struct TLBs
 * - flush_tlb_all() flushes all processes TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(mm, start, end) flushes a range of pages
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
/*
 * Reading %cr3 and writing the same value back invalidates the entire
 * TLB.  The "memory" clobber keeps the compiler from reordering memory
 * accesses across the flush.
 */
#define __flush_tlb() \
do { unsigned long tmpreg; __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3":"=r" (tmpreg) : :"memory"); } while (0)
/*
 * NOTE! The intel "invlpg" semantics are extremely strange. The
 * chip will add the segment base to the memory address, even though
 * no segment checking is done. We correct for this by using an
 * offset of 0x40000000 that will wrap around the kernel segment base
 * of 0xC0000000 to get the correct address (it will always be outside
 * the kernel segment, but we're only interested in the final linear
 * address).
 */
/* Forge an lvalue whose effective address, after the 0xC0000000 kernel
 * segment base is added by the CPU, wraps around to 'addr' itself. */
#define __invlpg_mem(addr) \
(((char *)(addr))[0x40000000])
/* Invalidate the single TLB entry mapping 'addr' (i486 and up only). */
#define __invlpg(addr) \
__asm__ __volatile__("invlpg %0": :"m" (__invlpg_mem(addr)))
/*
 * The i386 doesn't have a page-granular invalidate. Invalidate
 * everything for it.
 */
#ifdef CONFIG_M386
#define __flush_tlb_one(addr) __flush_tlb() /* plain 386: no invlpg, flush all */
#else
#define __flush_tlb_one(addr) __invlpg(addr)
#endif
#ifndef __SMP__
/* Uniprocessor: every flush affects only this CPU's TLB. */
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb()
#define local_flush_tlb() __flush_tlb()
/*
 * Flush the TLB entries belonging to 'mm'.  On a uniprocessor only the
 * currently active mm can have entries loaded, so anything else needs
 * no work at all.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm != current->mm)
		return;
	__flush_tlb();
}
/*
 * Flush the TLB entry for a single page.  Nothing to do unless the
 * page belongs to the mm that is live on this (only) CPU.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	if (vma->vm_mm != current->mm)
		return;
	__flush_tlb_one(addr);
}
/*
 * Flush a range of pages.  There is no ranged invalidate primitive
 * here, so the whole TLB is dropped -- but only when the range belongs
 * to the currently active mm.
 */
static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	if (mm != current->mm)
		return;
	__flush_tlb();
}
#else
/*
 * We aren't very clever about this yet - SMP could certainly
 * avoid some global flushes..
 */
#include <asm/smp.h>
/* Flush only this CPU's TLB; other CPUs are untouched. */
#define local_flush_tlb() \
__flush_tlb()
#define CLEVER_SMP_INVALIDATE
#ifdef CLEVER_SMP_INVALIDATE
/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (Its not allowed anyway).
 */
/*
 * Flush the TLBs for the current task's mm.  If the mm is shared with
 * anyone else (count > 1) some other CPU might hold stale entries, so
 * every CPU has to flush; otherwise a local flush is enough.
 */
static inline void flush_tlb_current_task(void)
{
	if (current->mm->count != 1) {
		smp_flush_tlb();
		return;
	}
	local_flush_tlb();	/* sole user of this mm, and that's us */
}
/* flush_tlb() may be able to stay CPU-local; flush_tlb_all() never can. */
#define flush_tlb() flush_tlb_current_task()
#define flush_tlb_all() smp_flush_tlb()
/*
 * Flush the TLB entries for 'mm'.  A local flush suffices only when
 * the mm is the one running here and no other task shares it;
 * otherwise all CPUs must flush.
 */
static inline void flush_tlb_mm(struct mm_struct * mm)
{
	int local_is_enough = (mm == current->mm && mm->count == 1);

	if (local_is_enough)
		local_flush_tlb();
	else
		smp_flush_tlb();
}
/*
 * Flush one page's TLB entry.  A page-granular flush is safe only when
 * no other CPU can hold the mapping (our mm, unshared); otherwise fall
 * back to a global flush.
 */
static inline void flush_tlb_page(struct vm_area_struct * vma,
	unsigned long va)
{
	if (vma->vm_mm != current->mm || current->mm->count != 1) {
		smp_flush_tlb();
		return;
	}
	__flush_tlb_one(va);
}
/*
 * No ranged-invalidate primitive exists here: flushing a range simply
 * degrades to flushing the whole mm ('start'/'end' are ignored).
 */
static inline void flush_tlb_range(struct mm_struct * mm,
unsigned long start, unsigned long end)
{
flush_tlb_mm(mm);
}
#else
/*
 * Dumb fallback (CLEVER_SMP_INVALIDATE undefined): every TLB
 * operation becomes a global, all-CPU flush.
 */
#define flush_tlb() \
smp_flush_tlb()
#define flush_tlb_all() flush_tlb()
static inline void flush_tlb_mm(struct mm_struct *mm)
{
flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
flush_tlb();
}
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
flush_tlb();
}
#endif
#endif
/* Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.  On i386 a pte store is just an assignment.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT 22
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
/* PGDIR_SHIFT determines what a third-level page table entry can map.
 * Same as PMD_SHIFT: the mid level is folded into the top level here. */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/*
 * entries per page directory level: the i386 is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be a 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
/* vmalloc address -> linear address (adds the TASK_SIZE segment base). */
#define VMALLOC_VMADDR(x) (TASK_SIZE + (unsigned long)(x))
/*
 * The 4MB page is guessing.. Detailed in the infamous "Chapter H"
 * of the Pentium details, but assuming intel did the straightforward
 * thing, this bit set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
/* Hardware bits in a page table / page directory entry. */
#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
#define _PAGE_USER 0x004
#define _PAGE_PCD 0x010 /* cache disable */
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
#define _PAGE_4M 0x080 /* 4 MB page, Pentium+.. */
/* Bits used for entries pointing at a lower-level page table. */
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
/* Bits preserved when changing a pte's protection. */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
/* Canonical protection combinations used by the protection map below. */
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
/*
 * The i386 can't do page protection for execute, and considers it the
 * same as read.  Also, write permissions imply read permissions.  This
 * is the closest we can get..
 *
 * __Pxyz = private mapping, __Sxyz = shared mapping, with x=read,
 * y=write, z=execute requested.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
/* NOTE(review): extract ends here -- the remainder of the original
 * header (including the rest of the __Sxxx protection map) is not
 * shown in this excerpt. */