📄 init_32.c
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/paravirt.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		if (pmd_table != pmd_offset(pud, 0))
			BUG();
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table)
			page_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
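/*
 * Illustrative sketch, not part of the original init_32.c: how a kernel
 * virtual address is split into the table indices that the helpers above
 * and page_table_range_init() below walk.  Assumes the usual non-PAE
 * 2-level layout (10-bit pgd index, 10-bit pte index, 12-bit page offset)
 * with the pmd folded away, and the default PAGE_OFFSET of 0xC0000000.
 * The DEMO_* and demo_* names are made up for this sketch and are not
 * kernel symbols.
 */
#define DEMO_PAGE_SHIFT		12	/* 4 KiB pages */
#define DEMO_PGDIR_SHIFT	22	/* each pgd entry covers 4 MiB */
#define DEMO_PTRS_PER_PGD	1024
#define DEMO_PTRS_PER_PTE	1024

static unsigned long demo_pgd_index(unsigned long vaddr)
{
	return (vaddr >> DEMO_PGDIR_SHIFT) & (DEMO_PTRS_PER_PGD - 1);
}

static unsigned long demo_pte_index(unsigned long vaddr)
{
	return (vaddr >> DEMO_PAGE_SHIFT) & (DEMO_PTRS_PER_PTE - 1);
}

/*
 * Worked example for vaddr = 0xC0500123 (PAGE_OFFSET + 5 MiB + 0x123):
 *   demo_pgd_index() -> 769  (entry 768 maps the first 4 MiB above
 *                             PAGE_OFFSET, entry 769 the next 4 MiB)
 *   demo_pte_index() -> 256  (1 MiB into that 4 MiB extent = 256 pages)
 *   byte offset within the page -> 0x123
 */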
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));

				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				for (pte_ofs = 0;
				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
			}
		}
	}
}
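/*
 * Illustrative sketch, not part of the original init_32.c: the extent
 * arithmetic used on the PSE path of kernel_physical_mapping_init()
 * above.  One 4 MB pmd entry covers PTRS_PER_PTE (1024) small pages, so
 * both ends of the extent are checked against the kernel text before
 * PAGE_KERNEL_LARGE_EXEC is chosen, and pfn then advances by 1024.
 * Assumes 4 KiB pages and PAGE_OFFSET == 0xC0000000; the demo_* name is
 * made up for this sketch.
 */
static void demo_pse_extent(unsigned long pfn,
			    unsigned long *first_vaddr,
			    unsigned long *last_vaddr)
{
	*first_vaddr = pfn * 4096UL + 0xC0000000UL;
	*last_vaddr  = (pfn + 1024 - 1) * 4096UL + 0xC0000000UL + 4096UL - 1;
}

/*
 * Worked example for pfn = 1024 (the second 4 MB of physical memory):
 *   first_vaddr = 0xC0400000
 *   last_vaddr  = 0xC07FFFFF
 */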
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;
		void *p;

		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
			md = p;
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
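/*
 * Illustrative sketch, not part of the original init_32.c: the rounding
 * that page_is_ram() above applies to each E820 RAM range.  The start of
 * the range is rounded up and the end rounded down to whole page frames,
 * so a page is only reported as RAM when it lies entirely inside the
 * range.  Assumes 4 KiB pages; the demo_* name is made up for this
 * sketch.
 */
static int demo_pfn_in_ram_range(unsigned long pagenr,
				 unsigned long long base,
				 unsigned long long size)
{
	unsigned long first = (unsigned long)((base + 4096 - 1) >> 12);	/* round start up */
	unsigned long end   = (unsigned long)((base + size) >> 12);	/* round end down */

	return pagenr >= first && pagenr < end;
}

/*
 * Worked example for a hypothetical range base = 0x1000, size = 0x2F00
 * (covering 0x1000 .. 0x3EFF): first = 1, end = 3, so page frames 1 and 2
 * count as RAM while the partially covered frame 3 does not.
 */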
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;
	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assuming single node and all memory that
 * has been added dynamically that would be
 * onlined here is in HIGHMEM
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might not have mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}
	totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_FLATMEM */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	int i;

	/*
	 * Init entries of the first-level page table to the
	 * zero page, if they haven't already been set up.
	 *
	 * In a normal native boot, we'll be running on a
	 * pagetable rooted in swapper_pg_dir, but not in PAE
	 * mode, so this will end up clobbering the mappings
	 * for the lower 24Mbytes of the address space,
	 * without affecting the kernel address space.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		set_pgd(&base[i],
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

	/* Make sure kernel address space is empty so that a pagetable
	   will be allocated for it. */
	memset(&base[USER_PTRS_PER_PGD], 0,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
	paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
#endif
}

void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE).  The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init (void)
{
	unsigned long vaddr, end;
	pgd_t *pgd_base = swapper_pg_dir;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;