📄 init.c
字号:
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>

/* Page-frame-number bounds of high memory (presumably set during
 * mem_init — not visible in this file chunk, TODO confirm). */
unsigned long highstart_pfn, highend_pfn;

static unsigned long totalram_pages;
static unsigned long totalhigh_pages;

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */

/*
 * These are allocated in head.S so that we get proper page alignment.
 * If you change the size of these then change head.S as well.
 */
extern char empty_bad_page[PAGE_SIZE];
#if CONFIG_X86_PAE
extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];
#endif
extern pte_t empty_bad_pte_table[PTRS_PER_PTE];

/*
 * We init them before every return and make them writable-shared.
 * This guarantees we get out of the kernel in some more or less sane
 * way.
*/#if CONFIG_X86_PAEstatic pmd_t * get_bad_pmd_table(void){ pmd_t v; int i; set_pmd(&v, __pmd(_PAGE_TABLE + __pa(empty_bad_pte_table))); for (i = 0; i < PAGE_SIZE/sizeof(pmd_t); i++) empty_bad_pmd_table[i] = v; return empty_bad_pmd_table;}#endifstatic pte_t * get_bad_pte_table(void){ pte_t v; int i; v = pte_mkdirty(mk_pte_phys(__pa(empty_bad_page), PAGE_SHARED)); for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++) empty_bad_pte_table[i] = v; return empty_bad_pte_table;}void __handle_bad_pmd(pmd_t *pmd){ pmd_ERROR(*pmd); set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(get_bad_pte_table())));}void __handle_bad_pmd_kernel(pmd_t *pmd){ pmd_ERROR(*pmd); set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(get_bad_pte_table())));}pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset){ pte_t *pte; pte = (pte_t *) __get_free_page(GFP_KERNEL); if (pmd_none(*pmd)) { if (pte) { clear_page(pte); set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte))); return pte + offset; } set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(get_bad_pte_table()))); return NULL; } free_page((unsigned long)pte); if (pmd_bad(*pmd)) { __handle_bad_pmd_kernel(pmd); return NULL; } return (pte_t *) pmd_page(*pmd) + offset;}pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset){ unsigned long pte; pte = (unsigned long) __get_free_page(GFP_KERNEL); if (pmd_none(*pmd)) { if (pte) { clear_page((void *)pte); set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); return (pte_t *)pte + offset; } set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(get_bad_pte_table()))); return NULL; } free_page(pte); if (pmd_bad(*pmd)) { __handle_bad_pmd(pmd); return NULL; } return (pte_t *) pmd_page(*pmd) + offset;}int do_check_pgt_cache(int low, int high){ int freed = 0; if(pgtable_cache_size > high) { do { if(pgd_quicklist) free_pgd_slow(get_pgd_fast()), freed++; if(pmd_quicklist) free_pmd_slow(get_pmd_fast()), freed++; if(pte_quicklist) free_pte_slow(get_pte_fast()), freed++; } while(pgtable_cache_size > low); } return freed;}/* * NOTE: pagetable_init alloc all the fixmap pagetables 
contiguous on the physical space so we can cache the place of the first
 * one and move around without checking the pgd every time.
 */

#if CONFIG_HIGHMEM
pte_t *kmap_pte;		/* cached pte of the first kmap fixmap slot */
pgprot_t kmap_prot;		/* protection used for kmap mappings */

/* Walk swapper page tables to the pte that maps @vaddr. */
#define kmap_get_fixmap_pte(vaddr) \
	pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

/* Cache the pte of the first kmap slot so kmap need not re-walk the
 * page tables every time (the fixmap ptes are contiguous, see above). */
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
	kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

/* Dump a summary of memory usage (free areas, swap, page counts) to
 * the kernel log. */
void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
	/* Classify every page in mem_map. */
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageHighMem(mem_map+i))
			highmem++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (page_count(mem_map+i))
			/* count mappings beyond the first as "shared" */
			shared += page_count(mem_map+i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d pages of HIGHMEM\n",highmem);
	printk("%d reserved pages\n",reserved);
	printk("%d pages shared\n",shared);
	printk("%d pages swap cached\n",cached);
	printk("%ld pages in page table cache\n",pgtable_cache_size);
	show_buffers();
}

/* References to section boundaries */
extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

/* Install a single kernel mapping: point the pte for @vaddr at physical
 * address @phys with PAGE_KERNEL plus @flags.  The pgd/pmd levels must
 * already be populated (hence the "PAE BUG" complaints if not). */
static inline void set_pte_phys (unsigned long vaddr, unsigned long phys, pgprot_t flags)
{
	pgprot_t prot;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	if (pgd_none(*pgd)) {
		printk("PAE BUG #00!\n");
		return;
	}
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none(*pmd)) {
		printk("PAE BUG #01!\n");
		return;
	}
	pte = pte_offset(pmd, vaddr);
	/* Complain if the slot was already mapped. */
	if (pte_val(*pte))
		pte_ERROR(*pte);
	pgprot_val(prot) = pgprot_val(PAGE_KERNEL) | pgprot_val(flags);
	set_pte(pte, mk_pte_phys(phys, prot));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* Map fixmap slot @idx to physical address @phys with extra @flags. */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, flags);
}

/* Allocate (from bootmem) and wire up pmd/pte pages covering the
 * virtual range [start, end) under @pgd_base.  Only creates the table
 * pages — does not install final ptes. */
static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
#if CONFIG_X86_PAE
		if (pgd_none(*pgd)) {
			pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
			/* +0x1: present bit in the pgd entry */
			set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
			if (pmd != pmd_offset(pgd, 0))
				printk("PAE BUG #02!\n");
		}
		pmd = pmd_offset(pgd, vaddr);
#else
		/* Two-level paging: the pgd entry IS the pmd. */
		pmd = (pmd_t *)pgd;
#endif
		for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
			if (pmd_none(*pmd)) {
				pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
				set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
				if (pte != pte_offset(pmd, 0))
					BUG();
			}
			vaddr += PMD_SIZE;
		}
		j = 0;	/* subsequent pgds start at pmd slot 0 */
	}
}

/* Build the kernel page tables for low memory in swapper_pg_dir. */
static void __init pagetable_init (void)
{
	unsigned long vaddr, end;
	pgd_t *pgd, *pgd_base;
	int i, j, k;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * This can be zero as well - no problem, in that case we exit
	 * the loops anyway due to the PTRS_PER_* conditions.
	 */
	end = (unsigned long)__va(max_low_pfn*PAGE_SIZE);

	pgd_base = swapper_pg_dir;
#if CONFIG_X86_PAE
	for (i = 0; i < PTRS_PER_PGD; i++) {
		pgd = pgd_base + i;
		__pgd_clear(pgd);
	}
#endif
	i = __pgd_offset(PAGE_OFFSET);
	pgd = pgd_base + i;

	for (; i < PTRS_PER_PGD; pgd++, i++) {
		vaddr = i*PGDIR_SIZE;
		if (end && (vaddr >= end))
			break;
#if CONFIG_X86_PAE
		pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
#else
		pmd = (pmd_t *)pgd;
#endif
		if (pmd != pmd_offset(pgd, 0))
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -