snapshot.c
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

/**
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend. The unsafe pages have PageNosaveFree set
 * and we count them using unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree
 * so that swsusp_free() can release it.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

/**
 * free_image_page - free page represented by @addr, allocated with
 * get_image_page (page flags set by it must be cleared)
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
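/*
 * Editor's note: an illustrative sketch, not part of the original file
 * (the helper name is hypothetical).  It shows the intended pairing of
 * get_image_page() with free_list_of_pages(): pages obtained from
 * get_image_page() are pushed onto a singly linked chain through the
 * embedded 'next' pointer and later released in a single pass.
 */
static void __maybe_unused linked_page_chain_example(void)
{
	struct linked_page *list = NULL;
	int i;

	/* Build a small chain of zeroed image pages. */
	for (i = 0; i < 3; i++) {
		struct linked_page *lp = get_image_page(GFP_KERNEL, PG_ANY);

		if (!lp)
			break;
		lp->next = list;
		list = lp;
	}

	/* Release the whole chain at once, clearing the page flags. */
	free_list_of_pages(list, PG_UNSAFE_CLEAR);
}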
/**
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows whenever there is no room for a new object in the
 * current page.  The allocated objects cannot be freed individually;
 * they can only be freed all at once, by freeing the entire chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
{
	free_list_of_pages(ca->chain, clear_page_nosave);
	memset(ca, 0, sizeof(struct chain_allocator));
}
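/*
 * Editor's note: an illustrative sketch, not part of the original file
 * (the helper name is hypothetical).  Typical use of the chain
 * allocator: initialize it, carve small objects out of it, then free
 * everything in one call.  Because chain_init() sets used_space to
 * LINKED_PAGE_DATA_SIZE, the first chain_alloc() always grabs a fresh
 * page.
 */
static void __maybe_unused chain_allocator_example(void)
{
	struct chain_allocator ca;
	struct pbe *p;

	chain_init(&ca, GFP_KERNEL, PG_ANY);

	/* Allocate a PBE out of the chain; it cannot be freed on its own. */
	p = chain_alloc(&ca, sizeof(struct pbe));
	if (p)
		p->next = NULL;

	/* Releasing the chain frees every object allocated from it. */
	chain_free(&ca, PG_UNSAFE_CLEAR);
}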
/**
 * Data types related to memory bitmaps.
 *
 * A memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent the blocks of bit chunks in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * pfns that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bit chunks
 * of type unsigned long each).  It also contains the pfns that
 * correspond to the start and end of the represented memory area and
 * the number of bit chunks in the block.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_CHUNKS_PER_BLOCK	(PAGE_SIZE / sizeof(long))
#define BM_BITS_PER_CHUNK	(sizeof(long) << 3)
#define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)

struct bm_block {
	struct bm_block *next;		/* next element of the list */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
	unsigned int size;	/* number of bit chunks */
	unsigned long *data;	/* chunks of bits representing pages */
};

struct zone_bitmap {
	struct zone_bitmap *next;	/* next element of the list */
	unsigned long start_pfn;	/* minimal pfn in this zone */
	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
	struct bm_block *bm_blocks;	/* list of bitmap blocks */
	struct bm_block *cur_block;	/* recently used bitmap block */
};

/* struct bm_position is used for browsing memory bitmaps */
struct bm_position {
	struct zone_bitmap *zone_bm;
	struct bm_block *block;
	int chunk;
	int bit;
};

struct memory_bitmap {
	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};
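/*
 * Editor's note: a worked example of the constants above (added for
 * clarity), assuming a 4 KiB PAGE_SIZE and a 64-bit unsigned long:
 * BM_BITS_PER_CHUNK is 64, BM_CHUNKS_PER_BLOCK is 4096 / 8 = 512, and
 * BM_BITS_PER_BLOCK is 4096 * 8 = 32768.  One bm_block therefore
 * tracks 32768 page frames, i.e. 128 MiB of physical memory per single
 * page of bitmap data.
 */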
/* Functions that operate on memory bitmaps */

static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
{
	bm->cur.chunk = 0;
	bm->cur.bit = -1;
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;

	zone_bm = bm->zone_bm_list;
	bm->cur.zone_bm = zone_bm;
	bm->cur.block = zone_bm->bm_blocks;
	memory_bm_reset_chunk(bm);
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

/**
 * create_bm_block_list - create a list of block bitmap objects
 */
static inline struct bm_block *
create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
{
	struct bm_block *bblist = NULL;

	while (nr_blocks-- > 0) {
		struct bm_block *bb;

		bb = chain_alloc(ca, sizeof(struct bm_block));
		if (!bb)
			return NULL;

		bb->next = bblist;
		bblist = bb;
	}
	return bblist;
}

/**
 * create_zone_bm_list - create a list of zone bitmap objects
 */
static inline struct zone_bitmap *
create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
{
	struct zone_bitmap *zbmlist = NULL;

	while (nr_zones-- > 0) {
		struct zone_bitmap *zbm;

		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
		if (!zbm)
			return NULL;

		zbm->next = zbmlist;
		zbmlist = zbm;
	}
	return zbmlist;
}

/**
 * memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct zone *zone;
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	unsigned int nr;

	chain_init(&ca, gfp_mask, safe_needed);

	/* Compute the number of zones */
	nr = 0;
	for_each_zone(zone)
		if (populated_zone(zone))
			nr++;

	/* Allocate the list of zones bitmap objects */
	zone_bm = create_zone_bm_list(nr, &ca);
	bm->zone_bm_list = zone_bm;
	if (!zone_bm) {
		chain_free(&ca, PG_UNSAFE_CLEAR);
		return -ENOMEM;
	}

	/* Initialize the zone bitmap objects */
	for_each_zone(zone) {
		unsigned long pfn;

		if (!populated_zone(zone))
			continue;

		zone_bm->start_pfn = zone->zone_start_pfn;
		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;

		/* Allocate the list of bitmap block objects */
		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
		bb = create_bm_block_list(nr, &ca);
		zone_bm->bm_blocks = bb;
		zone_bm->cur_block = bb;
		if (!bb)
			goto Free;

		nr = zone->spanned_pages;
		pfn = zone->zone_start_pfn;
		/* Initialize the bitmap block objects */
		while (bb) {
			unsigned long *ptr;

			ptr = get_image_page(gfp_mask, safe_needed);
			bb->data = ptr;
			if (!ptr)
				goto Free;

			bb->start_pfn = pfn;
			if (nr >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				bb->size = BM_CHUNKS_PER_BLOCK;
				nr -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += nr;
				bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
			}
			bb->end_pfn = pfn;
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
	return 0;

 Free:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	return -ENOMEM;
}

/**
 * memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct zone_bitmap *zone_bm;

	/* Free the list of bit blocks for each zone_bitmap object */
	zone_bm = bm->zone_bm_list;
	while (zone_bm) {
		struct bm_block *bb;

		bb = zone_bm->bm_blocks;
		while (bb) {
			if (bb->data)
				free_image_page(bb->data, clear_nosave_free);
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	free_list_of_pages(bm->p_list, clear_nosave_free);
	bm->zone_bm_list = NULL;
}

/**
 * memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
 * to given pfn. The cur_zone_bm member of @bm and the cur_block member
 * of @bm->cur_zone_bm are updated.
 */
static void memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			       void **addr, unsigned int *bit_nr)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;

	/* Check if the pfn is from the current zone */
	zone_bm = bm->cur.zone_bm;
	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
		zone_bm = bm->zone_bm_list;
		/* We don't assume that the zones are sorted by pfns */
		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
			zone_bm = zone_bm->next;

			BUG_ON(!zone_bm);
		}
		bm->cur.zone_bm = zone_bm;
	}
	/* Check if the pfn corresponds to the current bitmap block */
	bb = zone_bm->cur_block;
	if (pfn < bb->start_pfn)
		bb = zone_bm->bm_blocks;

	while (pfn >= bb->end_pfn) {
		bb = bb->next;

		BUG_ON(!bb);
	}
	zone_bm->cur_block = bb;
	pfn -= bb->start_pfn;
	*bit_nr = pfn % BM_BITS_PER_CHUNK;
	*addr = bb->data + pfn / BM_BITS_PER_CHUNK;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	memory_bm_find_bit(bm, pfn, &addr, &bit);
	/* The listing was cut off here; the body completes as upstream does. */
	set_bit(bit, addr);
}
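/*
 * Editor's note: an illustrative sketch, not part of the original file
 * (the helper name and the @pfn parameter are hypothetical).  It shows
 * the life cycle of a memory bitmap: create it, mark a page frame, and
 * free it.  @pfn must lie inside a populated zone, otherwise
 * memory_bm_find_bit() hits its BUG_ON().
 */
static void __maybe_unused memory_bitmap_example(unsigned long pfn)
{
	struct memory_bitmap bm;

	if (memory_bm_create(&bm, GFP_KERNEL, PG_ANY))
		return;		/* -ENOMEM */

	/* Record that the frame @pfn should go into the image. */
	memory_bm_set_bit(&bm, pfn);

	/* Rewind the internal cursor before browsing the bitmap. */
	memory_bm_position_reset(&bm);

	memory_bm_free(&bm, PG_UNSAFE_CLEAR);
}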