
📄 pgtable.h

📁 OSKIT source code for component-based operating system development
📖 Page 1 of 2
#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <linux/config.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/tasks.h>

/* Caches aren't brain-dead on the intel. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)			do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)

#ifndef OSKIT
/* We do not have to worry about context switch TLB flushes */

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

#define __flush_tlb() \
do { unsigned long tmpreg; __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3":"=r" (tmpreg) : :"memory"); } while (0)

#ifndef CONFIG_X86_INVLPG
#define __flush_tlb_one(addr) flush_tlb()
#else
#define __flush_tlb_one(addr) \
__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
#endif

#ifndef __SMP__

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	if (vma->vm_mm == current->mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	if (mm == current->mm)
		__flush_tlb();
}

#else

/*
 * We aren't very clever about this yet - SMP could certainly
 * avoid some global flushes..
 */
#include <asm/smp.h>

#define local_flush_tlb() \
	__flush_tlb()

#define CLEVER_SMP_INVALIDATE
#ifdef CLEVER_SMP_INVALIDATE

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 */
static inline void flush_tlb_current_task(void)
{
	/* just one copy of this mm? */
	if (atomic_read(&current->mm->count) == 1)
		local_flush_tlb();	/* and that's us, so.. */
	else
		smp_flush_tlb();
}

#define flush_tlb() flush_tlb_current_task()

#define flush_tlb_all() smp_flush_tlb()

static inline void flush_tlb_mm(struct mm_struct * mm)
{
	if (mm == current->mm && atomic_read(&mm->count) == 1)
		local_flush_tlb();
	else
		smp_flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct * vma,
	unsigned long va)
{
	if (vma->vm_mm == current->mm && atomic_read(&current->mm->count) == 1)
		__flush_tlb_one(va);
	else
		smp_flush_tlb();
}

static inline void flush_tlb_range(struct mm_struct * mm,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(mm);
}

#else

#define flush_tlb() \
	smp_flush_tlb()

#define flush_tlb_all() flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	flush_tlb();
}

static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	flush_tlb();
}
#endif
#endif
#endif /* !OSKIT */
#endif /* !__ASSEMBLY__ */

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the i386 is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)

/*
 * pgd entries used up by user/kernel:
 */
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
#define __USER_PGD_PTRS ((__PAGE_OFFSET >> PGDIR_SHIFT) & 0x3ff)
#define __KERNEL_PGD_PTRS (PTRS_PER_PGD-__USER_PGD_PTRS)

#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	(FIXADDR_START)

/*
 * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
 * of the Pentium details, but assuming intel did the straightforward
 * thing, this bit set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PWT	0x008
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_4M	0x080	/* 4 MB page, Pentium+, if present.. */
#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */

#define _PAGE_PROTNONE	0x080	/* If not present */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

#ifndef OSKIT
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];
/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page[1024];

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) ((unsigned long) empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2			2
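
A few standalone sketches may make the arithmetic the header encodes more concrete. First, the two-level split: with PGDIR_SHIFT at 22 and 4kB pages, a 32-bit linear address breaks into a 10-bit directory index, a 10-bit table index, and a 12-bit page offset. This sketch is not part of the header; PAGE_SHIFT is assumed to be 12, as defined elsewhere on i386, and the sample address is arbitrary.

/* Sketch: splitting a linear address under the two-level layout above. */
#include <stdio.h>

#define PAGE_SHIFT   12		/* assumed, from asm/page.h on i386 */
#define PGDIR_SHIFT  22
#define PTRS_PER_PTE 1024

int main(void)
{
	unsigned long addr = 0xC0123456UL;	/* arbitrary example address */
	unsigned long pgd_index = addr >> PGDIR_SHIFT;
	unsigned long pte_index = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	unsigned long offset    = addr & ((1UL << PAGE_SHIFT) - 1);

	/* prints: pgd 768, pte 291, offset 0x456 */
	printf("pgd %lu, pte %lu, offset 0x%lx\n", pgd_index, pte_index, offset);
	return 0;
}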
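Second, the VMALLOC_START expression is round-to-boundary arithmetic: adding VMALLOC_OFFSET and masking with ~(VMALLOC_OFFSET-1) places the vmalloc area at the next 8MB boundary above high_memory, leaving unmapped space in between. In the kernel, high_memory is a variable set at boot; a made-up value stands in for it in this sketch.

/* Sketch: where VMALLOC_START lands for a hypothetical high_memory. */
#include <stdio.h>

#define VMALLOC_OFFSET	(8*1024*1024)

int main(void)
{
	unsigned long high_memory = 0xC3123000UL;	/* hypothetical */
	unsigned long vmalloc_start =
		(high_memory + VMALLOC_OFFSET) & ~(unsigned long)(VMALLOC_OFFSET - 1);

	printf("high_memory   = 0x%08lx\n", high_memory);
	printf("VMALLOC_START = 0x%08lx\n", vmalloc_start);	/* 0xc3800000 */
	return 0;
}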
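Finally, the PAGE_* protection combinations are nothing more than ORed _PAGE_* bits, and the __Pxwr/__Sxwr tables map the three permission bits (read, write, execute, for private and shared mappings) onto those combinations. A minimal sketch that prints two of the raw values; plain unsigned longs stand in for the kernel's pgprot_t wrapper.

/* Sketch: the raw bit patterns behind PAGE_SHARED and PAGE_READONLY. */
#include <stdio.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_ACCESSED	0x020

int main(void)
{
	unsigned long shared   = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED;
	unsigned long readonly = _PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED;

	printf("PAGE_SHARED   = 0x%03lx\n", shared);	/* 0x027 */
	printf("PAGE_READONLY = 0x%03lx\n", readonly);	/* 0x025 */
	return 0;
}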
