
📄 memory_hotplug.c

📁 Latest stable Linux memory management module source code (memory hotplug)
💻 C
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages. */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (!res)
		return -EEXIST;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat)
			return -ENOMEM;
		new_pgdat = 1;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node can't be created, CPUs on
		 * the node can't be hot-added. There is no rollback path now,
		 * so check with BUG_ON() to catch it reluctantly.
		 */
		BUG_ON(ret);
	}

	return ret;
error:
	/* roll back pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	int pageblocks_stride;

	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* Move forward by at least 1 * pageblock_nr_pages */
	pageblocks_stride = 1;

	/* If the entire pageblock is free, move to the end of the free page */
	if (pageblock_free(page))
		pageblocks_stride += page_order(page) - pageblock_order;

	return page + (pageblocks_stride * pageblock_nr_pages);
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	int type;
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		type = get_pageblock_migratetype(page);

		/*
		 * A pageblock containing MOVABLE or free pages is considered
		 * removable
		 */
		if (type != MIGRATE_MOVABLE && !pageblock_free(page))
			return 0;

		/*
		 * A pageblock starting with a PageReserved page is not
		 * considered removable.
		 */
		if (PageReserved(page))
			return 0;
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning by pfn is much easier than scanning the lru list.
 * Scan pfns from start to end and find an LRU page.
 */
int scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;

	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
{
	/* This should be improved!! */
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!page_count(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			list_add_tail(&page->lru, &source);
			move_pages--;
		} else {
			/* Because we don't hold the big zone->lock, we should
			   check this again here. */
			if (page_count(page))
				not_managed++;
#ifdef CONFIG_DEBUG_VM
			printk(KERN_INFO "removing from LRU failed"
					 " %lx/%d/%lx\n",
				pfn, page_count(page), page->flags);
#endif
		}
	}
	ret = -EBUSY;
	if (not_managed) {
		if (!list_empty(&source))
			putback_lru_pages(&source);
		goto out;
	}
	ret = 0;
	if (list_empty(&source))
		goto out;
	/* this function returns the number of failed pages */
	ret = migrate_pages(&source, hotremove_migrate_alloc, 0);

out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;

	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

int offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at the very least, alignment to a pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/* This makes hotplug much easier...and more readable.
	   We assume this for now. */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set the above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		return ret;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		flush_scheduled_work();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have pages on LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' lru pagevecs; this is asynchronous... */
	lru_add_drain_all();
	flush_scheduled_work();
	yield();
	/* drain pcp pages; this is synchronous. */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/* OK, all of our target is isolated.
	   We cannot do rollback at this point. */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pageblock flags and make the migratetype MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;
	num_physpages -= offlined_pages;
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
		start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* push the pages back to the free area */
	undo_isolate_page_range(start_pfn, end_pfn);

	return ret;
}

int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;

	start_pfn = PFN_DOWN(start);
	end_pfn = start_pfn + PFN_DOWN(size);
	return offline_pages(start_pfn, end_pfn, 120 * HZ);
}
#else
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);
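
add_memory() and remove_memory() are the two entry points exported above via EXPORT_SYMBOL_GPL(). The sketch below shows how a caller (for example a platform or ACPI memory driver) might use them. It is not part of memory_hotplug.c: the module name, the example node id, and the physical address/size values are made-up assumptions for illustration, and it presumes the declarations are visible through linux/memory_hotplug.h in this kernel version.

/*
 * Hypothetical usage sketch, NOT part of memory_hotplug.c.
 * Node id, start address, and size are example values; on real hardware the
 * range must correspond to actual hot-pluggable memory and be suitably
 * (section/pageblock) aligned.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/memory_hotplug.h>

static u64 example_start = 0x100000000ULL;	/* assumed physical base */
static u64 example_size  = 128ULL << 20;	/* assumed 128 MB region */

static int __init example_hotplug_init(void)
{
	int ret;

	/* Register the new range with the core VM on node 0. Note that the
	 * pages still have to be onlined afterwards (e.g. through the memory
	 * block's sysfs "state" file) before they become usable. */
	ret = add_memory(0, example_start, example_size);
	if (ret)
		printk(KERN_ERR "add_memory failed: %d\n", ret);
	return ret;
}

static void __exit example_hotplug_exit(void)
{
	/* Offline and remove the same range; fails with -EBUSY if pages in
	 * the range could not be migrated away and isolated. */
	if (remove_memory(example_start, example_size))
		printk(KERN_ERR "remove_memory failed\n");
}

module_init(example_hotplug_init);
module_exit(example_hotplug_exit);
MODULE_LICENSE("GPL");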
