
📄 snapshot.c

📁 Linux kernel source: hibernation (swsusp) memory snapshot code
💻 C
📖 Page 1 of 4
	if (!error)
		set_bit(bit, addr);
	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

/**
 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 *	returned.
 *
 *	It is required to run memory_bm_position_reset() before the first call to
 *	this function.
 */

static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	int bit;

	do {
		bb = bm->cur.block;
		do {
			bit = bm->cur.bit;
			bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
			if (bit < bm_block_bits(bb))
				goto Return_pfn;

			bb = bb->next;
			bm->cur.block = bb;
			bm->cur.bit = 0;
		} while (bb);
		zone_bm = bm->cur.zone_bm->next;
		if (zone_bm) {
			bm->cur.zone_bm = zone_bm;
			bm->cur.block = zone_bm->bm_blocks;
			bm->cur.bit = 0;
		}
	} while (zone_bm);
	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.bit = bit + 1;
	return bb->start_pfn + bit;
}

/**
 *	This structure represents a range of page frames the contents of which
 *	should not be saved during the suspend.
 */

struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/**
 *	register_nosave_region - register a range of page frames the contents
 *	of which should not be saved during the suspend (to be used in the early
 *	initialization code)
 */

void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* during init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else
		/* This allocation cannot fail */
		region = alloc_bootmem_low(sizeof(struct nosave_region));

	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
		start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}
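The kernel-doc above memory_bm_next_pfn() defines its iteration protocol: call memory_bm_position_reset() once, then call memory_bm_next_pfn() repeatedly until it returns BM_END_OF_MAP. As a minimal sketch of a caller walking every set PFN (illustrative only; example_walk_bitmap() is a hypothetical function, not part of snapshot.c):

/* Illustrative sketch, not part of snapshot.c. */
static void example_walk_bitmap(struct memory_bitmap *bm)
{
	unsigned long pfn;

	memory_bm_position_reset(bm);	/* required before the first call */
	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
	     pfn = memory_bm_next_pfn(bm))
		pr_debug("PM: bit set for pfn %#lx\n", pfn);
}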
void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 *	mark_nosave_pages - set bits corresponding to the page frames the
 *	contents of which should not be saved in a given bitmap.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
				region->start_pfn << PAGE_SHIFT,
				region->end_pfn << PAGE_SHIFT);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 *	create_basic_memory_bitmaps - create bitmaps needed for marking page
 *	frames that should not be saved and free page frames.  The pointers
 *	forbidden_pages_map and free_pages_map are only modified if everything
 *	goes well, because we don't want the bits to be used before both bitmaps
 *	are set up.
 */

int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);

 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);

 Free_first_object:
	kfree(bm1);

	return -ENOMEM;
}
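As its comment notes, create_basic_memory_bitmaps() publishes forbidden_pages_map and free_pages_map only after both bitmaps are fully set up, so callers can treat it as an all-or-nothing step and undo it with free_basic_memory_bitmaps() (defined below). A hypothetical usage sketch, assuming the usual prototypes are visible; prepare_snapshot_example() is made up and not part of snapshot.c:

/* Illustrative sketch, not part of snapshot.c. */
static int prepare_snapshot_example(void)
{
	int error;

	error = create_basic_memory_bitmaps();
	if (error)
		return error;	/* neither global pointer was modified */

	/*
	 * Both bitmaps exist now, so swsusp_set_page_free(),
	 * swsusp_page_is_forbidden() and friends are safe to use
	 * while the image is being built.
	 */

	free_basic_memory_bitmaps();	/* release both bitmaps again */
	return 0;
}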
/**
 *	free_basic_memory_bitmaps - free memory bitmaps allocated by
 *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 *	so that the bitmaps themselves are not referred to while they are being
 *	freed.
 */

void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	BUG_ON(!(forbidden_pages_map && free_pages_map));

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}

/**
 *	snapshot_additional_pages - estimate the number of additional pages
 *	be needed for setting up the suspend image data structures for given
 *	zone (usually the returned value is greater than the exact number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
	return 2 * res;
}
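To give the estimate in snapshot_additional_pages() a sense of scale (illustrative numbers only): assume 4 KiB pages and assume BM_BITS_PER_BLOCK is one page worth of bits, i.e. 32768 (the macro is defined earlier in the file, outside this page, so this value is an assumption). For a zone spanning 262144 page frames (1 GiB of RAM):

	res  = DIV_ROUND_UP(262144, 32768)                       = 8 bm_blocks
	res += DIV_ROUND_UP(8 * sizeof(struct bm_block), 4096)   = 8 + 1 = 9
	return 2 * res                                           = 18 pages

i.e. roughly 18 extra page frames of bitmap bookkeeping are budgeted for such a zone, with the rounding deliberately erring on the high side.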
#ifdef CONFIG_HIGHMEM
/**
 *	count_free_highmem_pages - compute the total number of free highmem
 *	pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_zone(zone)
		if (populated_zone(zone) && is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 *	saveable_highmem_page - Determine whether a highmem page should be
 *	included in the suspend image.
 *
 *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 *	and it isn't a part of a free chunk of pages.
 */

static struct page *saveable_highmem_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	return page;
}

/**
 *	count_highmem_pages - compute the total number of saveable highmem
 *	pages.
 */

unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
#endif /* CONFIG_HIGHMEM */

/**
 *	saveable_page - Determine whether a non-highmem page should be included
 *	in the suspend image.
 *
 *	We should save the page if it isn't Nosave, and is not in the range
 *	of pages statically defined as 'unsaveable', and it isn't a part of
 *	a free chunk of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	return page;
}

/**
 *	count_data_pages - compute the total number of saveable non-highmem
 *	pages.
 */

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				n++;
	}
	return n;
}

/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 *	safe_copy_page - check if the page we are going to copy is marked as
 *		present in the kernel page tables (this always is the case if
 *		CONFIG_DEBUG_PAGEALLOC is not set and in that case
 *		kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
			saveable_highmem_page(pfn) : saveable_page(pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page, KM_USER0);
		dst = kmap_atomic(d_page, KM_USER1);
		do_copy_page(dst, src);
		kunmap_atomic(src, KM_USER0);
		kunmap_atomic(dst, KM_USER1);
	} else {
		if (PageHighMem(d_page)) {
			/* Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
			memcpy(dst, buffer, PAGE_SIZE);
			kunmap_atomic(dst, KM_USER0);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
				pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
