📄 init.c
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

static void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
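/*
 * Illustrative note (added; not part of the original file): set_pte_phys()
 * installs a single 4KB kernel mapping by walking the four-level tree
 * pgd -> pud -> pmd -> pte, allocating any missing intermediate table
 * through spp_getpage() on the way down. A hypothetical caller wiring one
 * physical page into kernel space read/write might look like:
 *
 *	set_pte_phys(vaddr, page_phys, PAGE_KERNEL);
 *
 * The in-tree consumer is __set_fixmap() below, which only converts a
 * fixmap index into its virtual address before delegating here.
 */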
/* NOTE: this is meant to be run only at boot */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

static __init void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __init void unmap_low_page(int i)
{
	struct temp_map *ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end) {
			for (; i < PTRS_PER_PUD; i++, pud++)
				set_pud(pud, __pud(0));
			break;
		}

		if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end) {
				for (; j < PTRS_PER_PMD; j++, pmd++)
					set_pmd(pmd, __pmd(0));
				break;
			}
			pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
		unmap_low_page(map);
	}
	__flush_tlb();
}

static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
}
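/*
 * Worked example (added; not part of the original file): on x86-64,
 * PUD_SHIFT = 39 and PMD_SHIFT = 21, so for end = 4GB (1UL << 32):
 *
 *	puds   = 1                      -> round_up(1 * 8, 4096)    = 4096
 *	pmds   = 2048                   -> round_up(2048 * 8, 4096) = 16384
 *	tables = 4096 + 16384 = 20480   -> 5 pages of early pagetable space
 *
 * find_e820_area() then searches for that many free bytes between 0x8000
 * and the start of the kernel text.
 */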
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pud_t *pud = alloc_low_page(&map, &pud_phys);
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
	early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
		     table_start<<PAGE_SHIFT, table_end<<PAGE_SHIFT);
}

void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}
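/*
 * Usage sketch (added; not part of the original file): early in boot,
 * setup_arch() is expected to establish the whole direct mapping with
 * something like
 *
 *	init_memory_mapping(0, end_pfn_map << PAGE_SHIFT);
 *
 * and once secondary CPUs are up, each CPU calls zap_low_mappings(cpu)
 * to drop the low identity mapping that was only needed to survive the
 * switch into long mode.
 */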