📄 init.c
字号:
#ifdef CONFIG_VIRTUAL_MEM_MAP
/*
 * Find the next pfn (relative to the node's first pfn) whose struct page
 * in the virtually mapped mem_map is actually backed by a mapped page.
 *
 * Starting at the vmem_map entry for pfn (node_start_pfn + i), walk the
 * kernel page tables; whenever a pgd/pud/pmd/pte level is absent, skip
 * forward by that level's span.  Stop at the first present pte or at the
 * end of the node's span, then convert the virtual address back into a
 * node-relative pfn index.
 */
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	/* don't scan past the last struct page of this node */
	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			/*
			 * Step pte-by-pte within the current pmd; once we
			 * cross a PMD boundary, fall back to the full
			 * page-table walk above.
			 */
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	/* round up so a partially covered struct page counts as valid */
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

/*
 * EFI memmap walk callback: make sure the portion of the virtual mem_map
 * covering physical range [start, end) is backed by real memory.
 *
 * For each page-table level that is missing on the covered virtual range,
 * a fresh page is taken from the bootmem allocator of the node owning
 * 'start' (paddr_to_nid).  Always returns 0 so the walk continues.
 */
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd,
				alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			pud_populate(&init_mm, pud,
				alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);
		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd,
				alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);
		if (pte_none(*pte))
			set_pte(pte,
				pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					PAGE_KERNEL));
	}
	return 0;
}

/* Arguments passed from memmap_init() to the virtual_memmap_init() callback. */
struct memmap_init_callback_data {
	struct page *start;	/* first struct page of the zone being initialized */
	struct page *end;	/* one past the last struct page of the zone */
	int nid;
	unsigned long zone;
};

/*
 * EFI memmap walk callback: initialize the struct pages of the virtual
 * mem_map that correspond to physical range [start, end), clipped to the
 * zone window given in *arg.  Always returns 0 so the walk continues.
 */
static int __meminit
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	/* clip to the zone the caller is initializing */
	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end) / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

/*
 * Arch hook for zone memmap initialization.  Without a virtual mem_map the
 * generic memmap_init_zone() covers the whole range directly; with one, only
 * the pieces that correspond to real memory (per the EFI memory map) exist,
 * so walk the EFI map and initialize each covered piece.
 */
void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

/*
 * Test whether a pfn has a valid (mapped) struct page by probing its
 * vmem_map entry with __get_user(), which faults safely on holes.  If the
 * struct page straddles a page boundary, the last byte is probed as well.
 * Returns nonzero when the struct page is fully accessible.
 */
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return	   (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

/*
 * EFI memmap walk callback: track the largest gap between consecutive
 * memory descriptors into *(u64 *)arg.  The static last_end carries state
 * across calls, so this is only usable for a single early-boot walk.
 */
int __init
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */

/*
 * EFI memmap walk callback: register [start, end) as an active pfn range
 * with the node that owns it (falling back to node 0 when the address maps
 * to no node).  Under CONFIG_KEXEC the crash kernel reservation is carved
 * out of the range first.
 */
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
	int nid = paddr_to_nid(__pa(start));

	if (nid < 0)
		nid = 0;
#ifdef CONFIG_KEXEC
	/* exclude the crash-kernel reservation from the active range */
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
			__pa(end) >> PAGE_SHIFT);
	return 0;
}

/*
 * EFI memmap walk callback: count the PageReserved pages in [start, end)
 * and accumulate the total into *(unsigned long *)arg.
 */
static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

/*
 * Widen the global min_low_pfn/max_low_pfn bounds to cover [start, end).
 * FLATMEM rounds to page granularity; otherwise ranges are rounded out to
 * granule boundaries (GRANULEROUNDDOWN/UP).
 */
int
find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys __initdata;

/* Parse the "nolwsys" boot parameter; always accepts the option. */
static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

/*
 * Arch-level memory initialization: release bootmem to the page allocator,
 * report the memory layout, patch the fsyscall table, and map the gate
 * page.  Called once from the generic init path.
 */
void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

	/* each page-table level must occupy exactly one page */
	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	/* register memory regions for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	/* hand all remaining bootmem pages over to the buddy allocator */
	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize = (unsigned long) _etext - (unsigned long) _stext;
	datasize = (unsigned long) _edata - (unsigned long) _etext;
	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();

#ifdef CONFIG_IA32_SUPPORT
	ia32_mem_init();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Hand one hot-added page to the buddy allocator and account for it in the
 * global page counters.
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

/*
 * Memory-hotplug arch hook: add the physical range [start, start+size) to
 * node 'nid', placing it in ZONE_NORMAL.  Returns the __add_pages() result
 * (0 on success), logging a message on failure.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(zone, start_pfn, nr_pages);

	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __FUNCTION__, ret);

	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Memory-hotremove arch hook: take the physical range [start, start+size)
 * offline, waiting up to 120 seconds for pages to drain.  Returns the
 * offline_pages() result (0 on success).  The backing mem_map is not
 * actually freed here.
 */
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	unsigned long timeout = 120 * HZ;
	int ret;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = start_pfn + (size >> PAGE_SHIFT);
	ret = offline_pages(start_pfn, end_pfn, timeout);
	if (ret)
		goto out;
	/* we can free mem_map at this point */
out:
	return ret;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -