📄 init_32.c
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}

int nx_enabled = 0;

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else if (!strcmp(str, "off")) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	} else
		return -EINVAL;

	return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	pte_update_defer(&init_mm, vaddr, pte);
	__flush_tlb_all();
out:
	return ret;
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}
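/*
 * Illustrative sketch: set_nx() above only enables NX when CPUID leaf
 * 0x80000001 reports the feature in EDX bit 20 and "noexec=off" was not
 * given on the command line, and set_kernel_exec() returns the
 * *previous* executability so callers can restore it. nx_toggle_demo()
 * below is a hypothetical helper, kept under #if 0 so it is never
 * compiled.
 */
#if 0
static void __init nx_toggle_demo(unsigned long vaddr)
{
	int was_exec;

	/* Make the page at vaddr non-executable, remembering the old state. */
	was_exec = set_kernel_exec(vaddr, 0);

	/* ... an instruction fetch from vaddr would now fault ... */

	/* Put the previous setting back. */
	set_kernel_exec(vaddr, was_exec);
}
#endif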
/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
			PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
			FIXADDR_START);
		BUG();
	}
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

#if 1 /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END                       > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START                     > VMALLOC_END);
	BUG_ON((unsigned long)high_memory        > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}
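/*
 * Worked example for the "<< (PAGE_SHIFT - 10)" idiom in the printks
 * above: with 4 kB pages PAGE_SHIFT is 12, so shifting a page count
 * left by 2 multiplies it by 4 and converts pages to kilobytes, e.g.
 * 25600 pages << 2 = 102400k, i.e. 100 MB.
 */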
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif

struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1)
		pmd_cache = kmem_cache_create("pmd",
					      PTRS_PER_PMD*sizeof(pmd_t),
					      PTRS_PER_PMD*sizeof(pmd_t),
					      SLAB_PANIC,
					      pmd_ctor);
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() <= 1)
#endif
	{
		change_page_attr(virt_to_page(start),
				 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
		printk("Write protecting the kernel text: %luk\n", size >> 10);
	}
#endif
	start += size;
	size = (unsigned long)__end_rodata - start;
	change_page_attr(virt_to_page(start),
			 size >> PAGE_SHIFT, PAGE_KERNEL_RO);
	printk("Write protecting the kernel read-only data: %luk\n",
	       size >> 10);

	/*
	 * change_page_attr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
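/*
 * Usage sketch: free_initmem() and free_initrd_mem() both funnel into
 * free_init_pages(), which assumes page-aligned kernel virtual
 * addresses, poisons each page with POISON_FREE_INITMEM, and hands it
 * back to the page allocator. release_demo_region() is a hypothetical
 * caller, kept under #if 0 so it is never compiled.
 */
#if 0
static void release_demo_region(unsigned long start, unsigned long end)
{
	/* start/end: hypothetical page-aligned kernel virtual bounds */
	free_init_pages("demo region", start, end);
}
#endif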