
📄 snapshot.c

📁 Linux 2.6.19 kernel source code before patching
💻 C
📖 Page 1 of 4
	set_bit(bit, addr);
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	memory_bm_find_bit(bm, pfn, &addr, &bit);
	clear_bit(bit, addr);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	memory_bm_find_bit(bm, pfn, &addr, &bit);
	return test_bit(bit, addr);
}

/* Two auxiliary functions for memory_bm_next_pfn */

/* Find the first set bit in the given chunk, if there is one */

static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
{
	bit++;
	while (bit < BM_BITS_PER_CHUNK) {
		if (test_bit(bit, chunk_p))
			return bit;

		bit++;
	}
	return -1;
}

/* Find a chunk containing some bits set in given block of bits */

static inline int next_chunk_in_block(int n, struct bm_block *bb)
{
	n++;
	while (n < bb->size) {
		if (bb->data[n])
			return n;

		n++;
	}
	return -1;
}

/**
 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 *	returned.
 *
 *	It is required to run memory_bm_position_reset() before the first call to
 *	this function.
 */

static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	int chunk;
	int bit;

	do {
		bb = bm->cur.block;
		do {
			chunk = bm->cur.chunk;
			bit = bm->cur.bit;
			do {
				bit = next_bit_in_chunk(bit, bb->data + chunk);
				if (bit >= 0)
					goto Return_pfn;

				chunk = next_chunk_in_block(chunk, bb);
				bit = -1;
			} while (chunk >= 0);
			bb = bb->next;
			bm->cur.block = bb;
			memory_bm_reset_chunk(bm);
		} while (bb);
		zone_bm = bm->cur.zone_bm->next;
		if (zone_bm) {
			bm->cur.zone_bm = zone_bm;
			bm->cur.block = zone_bm->bm_blocks;
			memory_bm_reset_chunk(bm);
		}
	} while (zone_bm);
	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.chunk = chunk;
	bm->cur.bit = bit;
	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
}

/**
 *	This structure represents a range of page frames the contents of which
 *	should not be saved during the suspend.
 */

struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/**
 *	register_nosave_region - register a range of page frames the contents
 *	of which should not be saved during the suspend (to be used in the early
 *	initialization code)
 */

void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* during init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else
		/* This allocation cannot fail */
		region = alloc_bootmem_low(sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk("swsusp: Registered nosave memory region: %016lx - %016lx\n",
		start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 *	mark_nosave_pages - set bits corresponding to the page frames the
 *	contents of which should not be saved in a given bitmap.
 */

static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		printk("swsusp: Marking nosave pages: %016lx - %016lx\n",
				region->start_pfn << PAGE_SHIFT,
				region->end_pfn << PAGE_SHIFT);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			memory_bm_set_bit(bm, pfn);
	}
}

/**
 *	create_basic_memory_bitmaps - create bitmaps needed for marking page
 *	frames that should not be saved and free page frames.  The pointers
 *	forbidden_pages_map and free_pages_map are only modified if everything
 *	goes well, because we don't want the bits to be used before both bitmaps
 *	are set up.
 */

int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	printk("swsusp: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);

 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);

 Free_first_object:
	kfree(bm1);

	return -ENOMEM;
}

/**
 *	free_basic_memory_bitmaps - free memory bitmaps allocated by
 *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 *	so that the bitmaps themselves are not referred to while they are being
 *	freed.
 */

void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	BUG_ON(!(forbidden_pages_map && free_pages_map));

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	printk("swsusp: Basic memory bitmaps freed\n");
}

/**
 *	snapshot_additional_pages - estimate the number of additional pages
 *	be needed for setting up the suspend image data structures for given
 *	zone (usually the returned value is greater than the exact number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
	return 2 * res;
}

#ifdef CONFIG_HIGHMEM
/**
 *	count_free_highmem_pages - compute the total number of free highmem
 *	pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_zone(zone)
		if (populated_zone(zone) && is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 *	saveable_highmem_page - Determine whether a highmem page should be
 *	included in the suspend image.
 *
 *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 *	and it isn't a part of a free chunk of pages.
 */

static struct page *saveable_highmem_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	return page;
}

/**
 *	count_highmem_pages - compute the total number of saveable highmem
 *	pages.
 */

unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
static inline unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 *	saveable - Determine whether a non-highmem page should be included in
 *	the suspend image.
 *
 *	We should save the page if it isn't Nosave, and is not in the range
 *	of pages statically defined as 'unsaveable', and it isn't a part of
 *	a free chunk of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;

	return page;
}

/**
 *	count_data_pages - compute the total number of saveable non-highmem
 *	pages.
 */

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				n++;
	}
	return n;
}

/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
			saveable_highmem_page(pfn) : saveable_page(pfn);
}

static inline void
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page, KM_USER0);
		dst = kmap_atomic(d_page, KM_USER1);
		do_copy_page(dst, src);
		kunmap_atomic(src, KM_USER0);
		kunmap_atomic(dst, KM_USER1);
	} else {
		src = page_address(s_page);
		if (PageHighMem(d_page)) {
			/* Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			do_copy_page(buffer, src);
			dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
			memcpy(dst, buffer, PAGE_SIZE);
			kunmap_atomic(dst, KM_USER0);
		} else {
			dst = page_address(d_page);
			do_copy_page(dst, src);
		}
	}
}
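The doc comment on memory_bm_next_pfn() above requires memory_bm_position_reset() to be run before the first lookup. A minimal sketch of that reset-then-iterate pattern, assuming a hypothetical caller named walk_set_pfns() and a bitmap already populated via memory_bm_set_bit():

/* Illustrative sketch only: the caller name is made up; the pattern follows
 * the memory_bm_next_pfn() comment above. */
static void walk_set_pfns(struct memory_bitmap *bm)
{
	unsigned long pfn;

	memory_bm_position_reset(bm);	/* required before the first call */
	for (pfn = memory_bm_next_pfn(bm);
	     pfn != BM_END_OF_MAP;
	     pfn = memory_bm_next_pfn(bm))
		printk("swsusp: pfn %016lx is set\n", pfn);
}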
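__register_nosave_region() is meant to be called from early architecture initialization code, per its comment. A minimal sketch of such a caller, where the function name and the 640 KiB - 1 MiB physical range are purely illustrative assumptions, not taken from this file:

/* Illustrative only: exclude the legacy VGA/BIOS hole from the image. */
void __init example_mark_legacy_hole_nosave(void)
{
	__register_nosave_region(0x000a0000UL >> PAGE_SHIFT,
				 0x00100000UL >> PAGE_SHIFT,
				 0 /* too early for kmalloc(); use bootmem */);
}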
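As a rough worked example of the snapshot_additional_pages() estimate (the concrete numbers are assumptions for illustration, not values from this file): with 4 KiB pages, BM_BITS_PER_BLOCK is one page worth of bits (32768), so a zone spanning 1,048,576 page frames needs DIV_ROUND_UP(1048576, 32768) = 32 bitmap blocks; if sizeof(struct bm_block) is a few dozen bytes, those 32 block descriptors fit in one additional page, and the function returns 2 * (32 + 1) = 66 page frames for that zone, presumably doubled because two such bitmaps are kept.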
