sparse.c

The latest and most stable Linux memory management module source code

Language: C
	 * Some platforms allow an un-removable section because they will
	 * just gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
{
	return NULL;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
	if (usemap)
		return usemap;

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap) {
		check_usemap_section_nr(nid, usemap);
		return usemap;
	}

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __func__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_pages_node(NODE_DATA(nid),
		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * The mem_map uses a big page (2M on 64-bit x86) while the usemap
	 * is much smaller than one page (about 24 bytes), so allocating 2M
	 * (2M-aligned) and 24 bytes in turn makes each following 2M slip
	 * by one more 2M; on a big system the memory ends up with a lot
	 * of holes. Try to allocate the 2M pages contiguously instead.
	 *
	 * powerpc needs to call sparse_init_one_section() right after each
	 * sparse_early_mem_map_alloc(), so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the section being removed
		 * is in a logically offlined state, so all of its pages are
		 * isolated from the page allocator. If the memmap of the
		 * section being removed is placed on that same section, it
		 * must not be freed: the page allocator could otherwise
		 * hand it out again right before it is physically removed.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if the allocation came from hot-plug-add.
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. It is packed with the other usemaps
	 * on the section that holds the pgdat at boot time. Just keep it
	 * as is for now.
	 */
	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
 * Returns the number of sections whose mem_maps were properly
 * set. If this is <= 0, the passed-in map was not consumed and
 * must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed for this call: it does its own locking,
	 * plus it does a kmalloc.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
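
For orientation, here is a small standalone sketch (not part of sparse.c) of the pfn-to-section arithmetic that pfn_to_section_nr() and PAGES_PER_SECTION encode in the listing above. The constants are the common x86_64 values (SECTION_SIZE_BITS = 27, PAGE_SHIFT = 12); the 64-byte sizeof(struct page) is an assumption, since the real size depends on the kernel configuration. With these numbers it reproduces the "2M per section" mem_map figure that the comment in sparse_init() reasons about.

/*
 * Standalone userspace sketch of SPARSEMEM section arithmetic.
 * SECTION_SIZE_BITS and PAGE_SHIFT are the usual x86_64 values;
 * STRUCT_PAGE_SIZE is an assumed stand-in for sizeof(struct page).
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	27	/* 128 MB per section */
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)	/* 32768 */
#define STRUCT_PAGE_SIZE	64UL	/* assumed sizeof(struct page) */

/* Mirrors the kernel's pfn_to_section_nr(): drop the in-section bits. */
static unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}

/* Mirrors section_nr_to_pfn(): first page frame of a section. */
static unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

int main(void)
{
	unsigned long pfn = 0x12345;	/* an arbitrary page frame number */
	unsigned long sec = pfn_to_section_nr(pfn);

	printf("pfn %#lx -> section %lu (section starts at pfn %#lx)\n",
	       pfn, sec, section_nr_to_pfn(sec));

	/* The per-section mem_map that sparse_init()'s comment calls "2M". */
	printf("mem_map per section: %lu pages * %lu B = %lu MB\n",
	       PAGES_PER_SECTION, STRUCT_PAGE_SIZE,
	       (PAGES_PER_SECTION * STRUCT_PAGE_SIZE) >> 20);
	return 0;
}

Under these assumptions, each 128 MB section needs a 2 MB mem_map, which is why sparse_init() allocates all the small usemaps in one loop before the 2M mem_map allocations in a second loop: interleaving them would cause the alignment slippage its comment describes.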
