summaryrefslogtreecommitdiffstats
path: root/kernel/power/snapshot.c
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-07-06 17:43:46 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-07-09 19:37:26 -0400
commitef96f639ea663474c4e1c57bd64e118ffbb92be4 (patch)
tree8019aaf33045712cabed16ae607ee33a553773d4 /kernel/power/snapshot.c
parentefd5a85242e996275ebf3df71013beabd723bda3 (diff)
PM / hibernate: Clean up comments in snapshot.c
Many comments in kernel/power/snapshot.c do not follow the general comment formatting rules. They look odd, some of them are outdated too, some are hard to parse and generally difficult to understand. Clean them up to make them easier to comprehend. No functional changes. Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'kernel/power/snapshot.c')
-rw-r--r--kernel/power/snapshot.c636
1 files changed, 330 insertions, 306 deletions
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 1fe0ddb6fd0d..bd927d9efeb7 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -67,7 +67,8 @@ void __init hibernate_image_size_init(void)
67 image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE; 67 image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
68} 68}
69 69
70/* List of PBEs needed for restoring the pages that were allocated before 70/*
71 * List of PBEs needed for restoring the pages that were allocated before
71 * the suspend and included in the suspend image, but have also been 72 * the suspend and included in the suspend image, but have also been
72 * allocated by the "resume" kernel, so their contents cannot be written 73 * allocated by the "resume" kernel, so their contents cannot be written
73 * directly to their "original" page frames. 74 * directly to their "original" page frames.
@@ -93,16 +94,6 @@ static struct linked_page *safe_pages_list;
93/* Pointer to an auxiliary buffer (1 page) */ 94/* Pointer to an auxiliary buffer (1 page) */
94static void *buffer; 95static void *buffer;
95 96
96/**
97 * @safe_needed - on resume, for storing the PBE list and the image,
98 * we can only use memory pages that do not conflict with the pages
99 * used before suspend. The unsafe pages have PageNosaveFree set
100 * and we count them using unsafe_pages.
101 *
102 * Each allocated image page is marked as PageNosave and PageNosaveFree
103 * so that swsusp_free() can release it.
104 */
105
106#define PG_ANY 0 97#define PG_ANY 0
107#define PG_SAFE 1 98#define PG_SAFE 1
108#define PG_UNSAFE_CLEAR 1 99#define PG_UNSAFE_CLEAR 1
@@ -110,6 +101,19 @@ static void *buffer;
110 101
111static unsigned int allocated_unsafe_pages; 102static unsigned int allocated_unsafe_pages;
112 103
104/**
105 * get_image_page - Allocate a page for a hibernation image.
106 * @gfp_mask: GFP mask for the allocation.
107 * @safe_needed: Get pages that were not used before hibernation (restore only)
108 *
109 * During image restoration, for storing the PBE list and the image data, we can
110 * only use memory pages that do not conflict with the pages used before
111 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
112 * using allocated_unsafe_pages.
113 *
114 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
115 * swsusp_free() can release it.
116 */
113static void *get_image_page(gfp_t gfp_mask, int safe_needed) 117static void *get_image_page(gfp_t gfp_mask, int safe_needed)
114{ 118{
115 void *res; 119 void *res;
@@ -167,10 +171,13 @@ static void recycle_safe_page(void *page_address)
167} 171}
168 172
169/** 173/**
170 * free_image_page - free page represented by @addr, allocated with 174 * free_image_page - Free a page allocated for hibernation image.
171 * get_image_page (page flags set by it must be cleared) 175 * @addr: Address of the page to free.
176 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
177 *
178 * The page to free should have been allocated by get_image_page() (page flags
179 * set by it are affected).
172 */ 180 */
173
174static inline void free_image_page(void *addr, int clear_nosave_free) 181static inline void free_image_page(void *addr, int clear_nosave_free)
175{ 182{
176 struct page *page; 183 struct page *page;
@@ -197,24 +204,22 @@ static inline void free_list_of_pages(struct linked_page *list,
197 } 204 }
198} 205}
199 206
200/** 207/*
201 * struct chain_allocator is used for allocating small objects out of 208 * struct chain_allocator is used for allocating small objects out of
202 * a linked list of pages called 'the chain'. 209 * a linked list of pages called 'the chain'.
203 * 210 *
204 * The chain grows each time when there is no room for a new object in 211 * The chain grows each time when there is no room for a new object in
205 * the current page. The allocated objects cannot be freed individually. 212 * the current page. The allocated objects cannot be freed individually.
206 * It is only possible to free them all at once, by freeing the entire 213 * It is only possible to free them all at once, by freeing the entire
207 * chain. 214 * chain.
208 * 215 *
209 * NOTE: The chain allocator may be inefficient if the allocated objects 216 * NOTE: The chain allocator may be inefficient if the allocated objects
210 * are not much smaller than PAGE_SIZE. 217 * are not much smaller than PAGE_SIZE.
211 */ 218 */
212
213struct chain_allocator { 219struct chain_allocator {
214 struct linked_page *chain; /* the chain */ 220 struct linked_page *chain; /* the chain */
215 unsigned int used_space; /* total size of objects allocated out 221 unsigned int used_space; /* total size of objects allocated out
216 * of the current page 222 of the current page */
217 */
218 gfp_t gfp_mask; /* mask for allocating pages */ 223 gfp_t gfp_mask; /* mask for allocating pages */
219 int safe_needed; /* if set, only "safe" pages are allocated */ 224 int safe_needed; /* if set, only "safe" pages are allocated */
220}; 225};
@@ -250,44 +255,44 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
250} 255}
251 256
252/** 257/**
253 * Data types related to memory bitmaps. 258 * Data types related to memory bitmaps.
254 * 259 *
255 * Memory bitmap is a structure consisting of many linked lists of 260 * Memory bitmap is a structure consisting of many linked lists of
256 * objects. The main list's elements are of type struct zone_bitmap 261 * objects. The main list's elements are of type struct zone_bitmap
257 * and each of them corresponds to one zone. For each zone bitmap 262 * and each of them corresponds to one zone. For each zone bitmap
258 * object there is a list of objects of type struct bm_block that 263 * object there is a list of objects of type struct bm_block that
259 * represent each block of bitmap in which information is stored. 264 * represent each block of bitmap in which information is stored.
260 * 265 *
261 * struct memory_bitmap contains a pointer to the main list of zone 266 * struct memory_bitmap contains a pointer to the main list of zone
262 * bitmap objects, a struct bm_position used for browsing the bitmap, 267 * bitmap objects, a struct bm_position used for browsing the bitmap,
263 * and a pointer to the list of pages used for allocating all of the 268 * and a pointer to the list of pages used for allocating all of the
264 * zone bitmap objects and bitmap block objects. 269 * zone bitmap objects and bitmap block objects.
265 * 270 *
266 * NOTE: It has to be possible to lay out the bitmap in memory 271 * NOTE: It has to be possible to lay out the bitmap in memory
267 * using only allocations of order 0. Additionally, the bitmap is 272 * using only allocations of order 0. Additionally, the bitmap is
268 * designed to work with arbitrary number of zones (this is over the 273 * designed to work with arbitrary number of zones (this is over the
269 * top for now, but let's avoid making unnecessary assumptions ;-). 274 * top for now, but let's avoid making unnecessary assumptions ;-).
270 * 275 *
271 * struct zone_bitmap contains a pointer to a list of bitmap block 276 * struct zone_bitmap contains a pointer to a list of bitmap block
272 * objects and a pointer to the bitmap block object that has been 277 * objects and a pointer to the bitmap block object that has been
273 * most recently used for setting bits. Additionally, it contains the 278 * most recently used for setting bits. Additionally, it contains the
274 * pfns that correspond to the start and end of the represented zone. 279 * PFNs that correspond to the start and end of the represented zone.
275 * 280 *
276 * struct bm_block contains a pointer to the memory page in which 281 * struct bm_block contains a pointer to the memory page in which
277 * information is stored (in the form of a block of bitmap) 282 * information is stored (in the form of a block of bitmap)
278 * It also contains the pfns that correspond to the start and end of 283 * It also contains the pfns that correspond to the start and end of
279 * the represented memory area. 284 * the represented memory area.
280 * 285 *
281 * The memory bitmap is organized as a radix tree to guarantee fast random 286 * The memory bitmap is organized as a radix tree to guarantee fast random
282 * access to the bits. There is one radix tree for each zone (as returned 287 * access to the bits. There is one radix tree for each zone (as returned
283 * from create_mem_extents). 288 * from create_mem_extents).
284 * 289 *
285 * One radix tree is represented by one struct mem_zone_bm_rtree. There are 290 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
286 * two linked lists for the nodes of the tree, one for the inner nodes and 291 * two linked lists for the nodes of the tree, one for the inner nodes and
287 * one for the leave nodes. The linked leave nodes are used for fast linear 292 * one for the leave nodes. The linked leave nodes are used for fast linear
288 * access of the memory bitmap. 293 * access of the memory bitmap.
289 * 294 *
290 * The struct rtree_node represents one node of the radix tree. 295 * The struct rtree_node represents one node of the radix tree.
291 */ 296 */
292 297
293#define BM_END_OF_MAP (~0UL) 298#define BM_END_OF_MAP (~0UL)
@@ -333,9 +338,8 @@ struct bm_position {
333struct memory_bitmap { 338struct memory_bitmap {
334 struct list_head zones; 339 struct list_head zones;
335 struct linked_page *p_list; /* list of pages used to store zone 340 struct linked_page *p_list; /* list of pages used to store zone
336 * bitmap objects and bitmap block 341 bitmap objects and bitmap block
337 * objects 342 objects */
338 */
339 struct bm_position cur; /* most recently used bit position */ 343 struct bm_position cur; /* most recently used bit position */
340}; 344};
341 345
@@ -349,12 +353,12 @@ struct memory_bitmap {
349#endif 353#endif
350#define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1) 354#define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
351 355
352/* 356/**
353 * alloc_rtree_node - Allocate a new node and add it to the radix tree. 357 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
354 * 358 *
355 * This function is used to allocate inner nodes as well as the 359 * This function is used to allocate inner nodes as well as the
356 * leave nodes of the radix tree. It also adds the node to the 360 * leave nodes of the radix tree. It also adds the node to the
357 * corresponding linked list passed in by the *list parameter. 361 * corresponding linked list passed in by the *list parameter.
358 */ 362 */
359static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed, 363static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
360 struct chain_allocator *ca, 364 struct chain_allocator *ca,
@@ -375,12 +379,12 @@ static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
375 return node; 379 return node;
376} 380}
377 381
378/* 382/**
379 * add_rtree_block - Add a new leave node to the radix tree 383 * add_rtree_block - Add a new leave node to the radix tree.
380 * 384 *
381 * The leave nodes need to be allocated in order to keep the leaves 385 * The leave nodes need to be allocated in order to keep the leaves
382 * linked list in order. This is guaranteed by the zone->blocks 386 * linked list in order. This is guaranteed by the zone->blocks
383 * counter. 387 * counter.
384 */ 388 */
385static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask, 389static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
386 int safe_needed, struct chain_allocator *ca) 390 int safe_needed, struct chain_allocator *ca)
@@ -445,12 +449,12 @@ static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
445static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone, 449static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
446 int clear_nosave_free); 450 int clear_nosave_free);
447 451
448/* 452/**
449 * create_zone_bm_rtree - create a radix tree for one zone 453 * create_zone_bm_rtree - Create a radix tree for one zone.
450 * 454 *
451 * Allocates the mem_zone_bm_rtree structure and initializes it. 455 * Allocates the mem_zone_bm_rtree structure and initializes it.
452 * This function also allocates and builds the radix tree for the 456 * This function also allocates and builds the radix tree for the
453 * zone. 457 * zone.
454 */ 458 */
455static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask, 459static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
456 int safe_needed, 460 int safe_needed,
@@ -483,12 +487,12 @@ static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
483 return zone; 487 return zone;
484} 488}
485 489
486/* 490/**
487 * free_zone_bm_rtree - Free the memory of the radix tree 491 * free_zone_bm_rtree - Free the memory of the radix tree.
488 * 492 *
489 * Free all node pages of the radix tree. The mem_zone_bm_rtree 493 * Free all node pages of the radix tree. The mem_zone_bm_rtree
490 * structure itself is not freed here nor are the rtree_node 494 * structure itself is not freed here nor are the rtree_node
491 * structs. 495 * structs.
492 */ 496 */
493static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone, 497static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
494 int clear_nosave_free) 498 int clear_nosave_free)
@@ -521,8 +525,8 @@ struct mem_extent {
521}; 525};
522 526
523/** 527/**
524 * free_mem_extents - free a list of memory extents 528 * free_mem_extents - Free a list of memory extents.
525 * @list - list of extents to empty 529 * @list: List of extents to free.
526 */ 530 */
527static void free_mem_extents(struct list_head *list) 531static void free_mem_extents(struct list_head *list)
528{ 532{
@@ -535,10 +539,11 @@ static void free_mem_extents(struct list_head *list)
535} 539}
536 540
537/** 541/**
538 * create_mem_extents - create a list of memory extents representing 542 * create_mem_extents - Create a list of memory extents.
539 * contiguous ranges of PFNs 543 * @list: List to put the extents into.
540 * @list - list to put the extents into 544 * @gfp_mask: Mask to use for memory allocations.
541 * @gfp_mask - mask to use for memory allocations 545 *
546 * The extents represent contiguous ranges of PFNs.
542 */ 547 */
543static int create_mem_extents(struct list_head *list, gfp_t gfp_mask) 548static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
544{ 549{
@@ -594,8 +599,8 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
594} 599}
595 600
596/** 601/**
597 * memory_bm_create - allocate memory for a memory bitmap 602 * memory_bm_create - Allocate memory for a memory bitmap.
598 */ 603 */
599static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, 604static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
600 int safe_needed) 605 int safe_needed)
601{ 606{
@@ -636,8 +641,9 @@ static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
636} 641}
637 642
638/** 643/**
639 * memory_bm_free - free memory occupied by the memory bitmap @bm 644 * memory_bm_free - Free memory occupied by the memory bitmap.
640 */ 645 * @bm: Memory bitmap.
646 */
641static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) 647static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
642{ 648{
643 struct mem_zone_bm_rtree *zone; 649 struct mem_zone_bm_rtree *zone;
@@ -651,14 +657,13 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
651} 657}
652 658
653/** 659/**
654 * memory_bm_find_bit - Find the bit for pfn in the memory 660 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
655 * bitmap
656 * 661 *
657 * Find the bit in the bitmap @bm that corresponds to given pfn. 662 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
658 * The cur.zone, cur.block and cur.node_pfn member of @bm are 663 * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
659 * updated. 664 *
660 * It walks the radix tree to find the page which contains the bit for 665 * Walk the radix tree to find the page containing the bit that represents @pfn
661 * pfn and returns the bit position in **addr and *bit_nr. 666 * and return the position of the bit in @addr and @bit_nr.
662 */ 667 */
663static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, 668static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
664 void **addr, unsigned int *bit_nr) 669 void **addr, unsigned int *bit_nr)
@@ -687,10 +692,9 @@ static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
687 692
688zone_found: 693zone_found:
689 /* 694 /*
690 * We have a zone. Now walk the radix tree to find the leave 695 * We have found the zone. Now walk the radix tree to find the leaf node
691 * node for our pfn. 696 * for our PFN.
692 */ 697 */
693
694 node = bm->cur.node; 698 node = bm->cur.node;
695 if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) 699 if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
696 goto node_found; 700 goto node_found;
@@ -783,14 +787,14 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
783} 787}
784 788
785/* 789/*
786 * rtree_next_node - Jumps to the next leave node 790 * rtree_next_node - Jump to the next leaf node.
787 * 791 *
788 * Sets the position to the beginning of the next node in the 792 * Set the position to the beginning of the next node in the
789 * memory bitmap. This is either the next node in the current 793 * memory bitmap. This is either the next node in the current
790 * zone's radix tree or the first node in the radix tree of the 794 * zone's radix tree or the first node in the radix tree of the
791 * next zone. 795 * next zone.
792 * 796 *
793 * Returns true if there is a next node, false otherwise. 797 * Return true if there is a next node, false otherwise.
794 */ 798 */
795static bool rtree_next_node(struct memory_bitmap *bm) 799static bool rtree_next_node(struct memory_bitmap *bm)
796{ 800{
@@ -819,14 +823,15 @@ static bool rtree_next_node(struct memory_bitmap *bm)
819} 823}
820 824
821/** 825/**
822 * memory_bm_rtree_next_pfn - Find the next set bit in the bitmap @bm 826 * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap.
827 * @bm: Memory bitmap.
823 * 828 *
824 * Starting from the last returned position this function searches 829 * Starting from the last returned position this function searches for the next
825 * for the next set bit in the memory bitmap and returns its 830 * set bit in @bm and returns the PFN represented by it. If no more bits are
826 * number. If no more bit is set BM_END_OF_MAP is returned. 831 * set, BM_END_OF_MAP is returned.
827 * 832 *
828 * It is required to run memory_bm_position_reset() before the 833 * It is required to run memory_bm_position_reset() before the first call to
829 * first call to this function. 834 * this function for the given memory bitmap.
830 */ 835 */
831static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) 836static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
832{ 837{
@@ -848,11 +853,10 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
848 return BM_END_OF_MAP; 853 return BM_END_OF_MAP;
849} 854}
850 855
851/** 856/*
852 * This structure represents a range of page frames the contents of which 857 * This structure represents a range of page frames the contents of which
853 * should not be saved during the suspend. 858 * should not be saved during hibernation.
854 */ 859 */
855
856struct nosave_region { 860struct nosave_region {
857 struct list_head list; 861 struct list_head list;
858 unsigned long start_pfn; 862 unsigned long start_pfn;
@@ -890,11 +894,11 @@ static void memory_bm_recycle(struct memory_bitmap *bm)
890} 894}
891 895
892/** 896/**
893 * register_nosave_region - register a range of page frames the contents 897 * register_nosave_region - Register a region of unsaveable memory.
894 * of which should not be saved during the suspend (to be used in the early 898 *
895 * initialization code) 899 * Register a range of page frames the contents of which should not be saved
900 * during hibernation (to be used in the early initialization code).
896 */ 901 */
897
898void __init __register_nosave_region(unsigned long start_pfn, 902void __init __register_nosave_region(unsigned long start_pfn,
899 unsigned long end_pfn, int use_kmalloc) 903 unsigned long end_pfn, int use_kmalloc)
900{ 904{
@@ -913,7 +917,7 @@ void __init __register_nosave_region(unsigned long start_pfn,
913 } 917 }
914 } 918 }
915 if (use_kmalloc) { 919 if (use_kmalloc) {
916 /* during init, this shouldn't fail */ 920 /* During init, this shouldn't fail */
917 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL); 921 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
918 BUG_ON(!region); 922 BUG_ON(!region);
919 } else 923 } else
@@ -979,10 +983,12 @@ static void swsusp_unset_page_forbidden(struct page *page)
979} 983}
980 984
981/** 985/**
982 * mark_nosave_pages - set bits corresponding to the page frames the 986 * mark_nosave_pages - Mark pages that should not be saved.
983 * contents of which should not be saved in a given bitmap. 987 * @bm: Memory bitmap.
988 *
989 * Set the bits in @bm that correspond to the page frames the contents of which
990 * should not be saved.
984 */ 991 */
985
986static void mark_nosave_pages(struct memory_bitmap *bm) 992static void mark_nosave_pages(struct memory_bitmap *bm)
987{ 993{
988 struct nosave_region *region; 994 struct nosave_region *region;
@@ -1012,13 +1018,13 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
1012} 1018}
1013 1019
1014/** 1020/**
1015 * create_basic_memory_bitmaps - create bitmaps needed for marking page 1021 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1016 * frames that should not be saved and free page frames. The pointers 1022 *
1017 * forbidden_pages_map and free_pages_map are only modified if everything 1023 * Create bitmaps needed for marking page frames that should not be saved and
1018 * goes well, because we don't want the bits to be used before both bitmaps 1024 * free page frames. The forbidden_pages_map and free_pages_map pointers are
1019 * are set up. 1025 * only modified if everything goes well, because we don't want the bits to be
1026 * touched before both bitmaps are set up.
1020 */ 1027 */
1021
1022int create_basic_memory_bitmaps(void) 1028int create_basic_memory_bitmaps(void)
1023{ 1029{
1024 struct memory_bitmap *bm1, *bm2; 1030 struct memory_bitmap *bm1, *bm2;
@@ -1063,12 +1069,12 @@ int create_basic_memory_bitmaps(void)
1063} 1069}
1064 1070
1065/** 1071/**
1066 * free_basic_memory_bitmaps - free memory bitmaps allocated by 1072 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1067 * create_basic_memory_bitmaps(). The auxiliary pointers are necessary 1073 *
1068 * so that the bitmaps themselves are not referred to while they are being 1074 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
1069 * freed. 1075 * auxiliary pointers are necessary so that the bitmaps themselves are not
1076 * referred to while they are being freed.
1070 */ 1077 */
1071
1072void free_basic_memory_bitmaps(void) 1078void free_basic_memory_bitmaps(void)
1073{ 1079{
1074 struct memory_bitmap *bm1, *bm2; 1080 struct memory_bitmap *bm1, *bm2;
@@ -1089,11 +1095,13 @@ void free_basic_memory_bitmaps(void)
1089} 1095}
1090 1096
1091/** 1097/**
1092 * snapshot_additional_pages - estimate the number of additional pages 1098 * snapshot_additional_pages - Estimate the number of extra pages needed.
1093 * be needed for setting up the suspend image data structures for given 1099 * @zone: Memory zone to carry out the computation for.
1094 * zone (usually the returned value is greater than the exact number) 1100 *
1101 * Estimate the number of additional pages needed for setting up a hibernation
1102 * image data structures for @zone (usually, the returned value is greater than
1103 * the exact number).
1095 */ 1104 */
1096
1097unsigned int snapshot_additional_pages(struct zone *zone) 1105unsigned int snapshot_additional_pages(struct zone *zone)
1098{ 1106{
1099 unsigned int rtree, nodes; 1107 unsigned int rtree, nodes;
@@ -1111,10 +1119,10 @@ unsigned int snapshot_additional_pages(struct zone *zone)
1111 1119
1112#ifdef CONFIG_HIGHMEM 1120#ifdef CONFIG_HIGHMEM
1113/** 1121/**
1114 * count_free_highmem_pages - compute the total number of free highmem 1122 * count_free_highmem_pages - Compute the total number of free highmem pages.
1115 * pages, system-wide. 1123 *
1124 * The returned number is system-wide.
1116 */ 1125 */
1117
1118static unsigned int count_free_highmem_pages(void) 1126static unsigned int count_free_highmem_pages(void)
1119{ 1127{
1120 struct zone *zone; 1128 struct zone *zone;
@@ -1128,11 +1136,12 @@ static unsigned int count_free_highmem_pages(void)
1128} 1136}
1129 1137
1130/** 1138/**
1131 * saveable_highmem_page - Determine whether a highmem page should be 1139 * saveable_highmem_page - Check if a highmem page is saveable.
1132 * included in the suspend image. 1140 *
1141 * Determine whether a highmem page should be included in a hibernation image.
1133 * 1142 *
1134 * We should save the page if it isn't Nosave or NosaveFree, or Reserved, 1143 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
1135 * and it isn't a part of a free chunk of pages. 1144 * and it isn't part of a free chunk of pages.
1136 */ 1145 */
1137static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn) 1146static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1138{ 1147{
@@ -1158,10 +1167,8 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1158} 1167}
1159 1168
1160/** 1169/**
1161 * count_highmem_pages - compute the total number of saveable highmem 1170 * count_highmem_pages - Compute the total number of saveable highmem pages.
1162 * pages.
1163 */ 1171 */
1164
1165static unsigned int count_highmem_pages(void) 1172static unsigned int count_highmem_pages(void)
1166{ 1173{
1167 struct zone *zone; 1174 struct zone *zone;
@@ -1189,12 +1196,14 @@ static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1189#endif /* CONFIG_HIGHMEM */ 1196#endif /* CONFIG_HIGHMEM */
1190 1197
1191/** 1198/**
1192 * saveable_page - Determine whether a non-highmem page should be included 1199 * saveable_page - Check if the given page is saveable.
1193 * in the suspend image. 1200 *
1201 * Determine whether a non-highmem page should be included in a hibernation
1202 * image.
1194 * 1203 *
1195 * We should save the page if it isn't Nosave, and is not in the range 1204 * We should save the page if it isn't Nosave, and is not in the range
1196 * of pages statically defined as 'unsaveable', and it isn't a part of 1205 * of pages statically defined as 'unsaveable', and it isn't part of
1197 * a free chunk of pages. 1206 * a free chunk of pages.
1198 */ 1207 */
1199static struct page *saveable_page(struct zone *zone, unsigned long pfn) 1208static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1200{ 1209{
@@ -1223,10 +1232,8 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1223} 1232}
1224 1233
1225/** 1234/**
1226 * count_data_pages - compute the total number of saveable non-highmem 1235 * count_data_pages - Compute the total number of saveable non-highmem pages.
1227 * pages.
1228 */ 1236 */
1229
1230static unsigned int count_data_pages(void) 1237static unsigned int count_data_pages(void)
1231{ 1238{
1232 struct zone *zone; 1239 struct zone *zone;
@@ -1246,7 +1253,8 @@ static unsigned int count_data_pages(void)
1246 return n; 1253 return n;
1247} 1254}
1248 1255
1249/* This is needed, because copy_page and memcpy are not usable for copying 1256/*
1257 * This is needed, because copy_page and memcpy are not usable for copying
1250 * task structs. 1258 * task structs.
1251 */ 1259 */
1252static inline void do_copy_page(long *dst, long *src) 1260static inline void do_copy_page(long *dst, long *src)
@@ -1257,12 +1265,12 @@ static inline void do_copy_page(long *dst, long *src)
1257 *dst++ = *src++; 1265 *dst++ = *src++;
1258} 1266}
1259 1267
1260
1261/** 1268/**
1262 * safe_copy_page - check if the page we are going to copy is marked as 1269 * safe_copy_page - Copy a page in a safe way.
1263 * present in the kernel page tables (this always is the case if 1270 *
1264 * CONFIG_DEBUG_PAGEALLOC is not set and in that case 1271 * Check if the page we are going to copy is marked as present in the kernel
1265 * kernel_page_present() always returns 'true'). 1272 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
1273 * and in that case kernel_page_present() always returns 'true').
1266 */ 1274 */
1267static void safe_copy_page(void *dst, struct page *s_page) 1275static void safe_copy_page(void *dst, struct page *s_page)
1268{ 1276{
@@ -1275,7 +1283,6 @@ static void safe_copy_page(void *dst, struct page *s_page)
1275 } 1283 }
1276} 1284}
1277 1285
1278
1279#ifdef CONFIG_HIGHMEM 1286#ifdef CONFIG_HIGHMEM
1280static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn) 1287static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1281{ 1288{
@@ -1298,7 +1305,8 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1298 kunmap_atomic(src); 1305 kunmap_atomic(src);
1299 } else { 1306 } else {
1300 if (PageHighMem(d_page)) { 1307 if (PageHighMem(d_page)) {
1301 /* Page pointed to by src may contain some kernel 1308 /*
1309 * The page pointed to by src may contain some kernel
1302 * data modified by kmap_atomic() 1310 * data modified by kmap_atomic()
1303 */ 1311 */
1304 safe_copy_page(buffer, s_page); 1312 safe_copy_page(buffer, s_page);
@@ -1370,12 +1378,11 @@ static struct memory_bitmap orig_bm;
1370static struct memory_bitmap copy_bm; 1378static struct memory_bitmap copy_bm;
1371 1379
1372/** 1380/**
1373 * swsusp_free - free pages allocated for the suspend. 1381 * swsusp_free - Free pages allocated for hibernation image.
1374 * 1382 *
1375 * Suspend pages are allocated before the atomic copy is made, so we 1383 * Image pages are allocated before snapshot creation, so they need to be
1376 * need to release them after the resume. 1384 * released after resume.
1377 */ 1385 */
1378
1379void swsusp_free(void) 1386void swsusp_free(void)
1380{ 1387{
1381 unsigned long fb_pfn, fr_pfn; 1388 unsigned long fb_pfn, fr_pfn;
@@ -1424,7 +1431,7 @@ out:
1424#define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN) 1431#define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
1425 1432
1426/** 1433/**
1427 * preallocate_image_pages - Allocate a number of pages for hibernation image 1434 * preallocate_image_pages - Allocate a number of pages for hibernation image.
1428 * @nr_pages: Number of page frames to allocate. 1435 * @nr_pages: Number of page frames to allocate.
1429 * @mask: GFP flags to use for the allocation. 1436 * @mask: GFP flags to use for the allocation.
1430 * 1437 *
@@ -1474,7 +1481,7 @@ static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1474} 1481}
1475 1482
1476/** 1483/**
1477 * __fraction - Compute (an approximation of) x * (multiplier / base) 1484 * __fraction - Compute (an approximation of) x * (multiplier / base).
1478 */ 1485 */
1479static unsigned long __fraction(u64 x, u64 multiplier, u64 base) 1486static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1480{ 1487{
@@ -1506,7 +1513,7 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1506#endif /* CONFIG_HIGHMEM */ 1513#endif /* CONFIG_HIGHMEM */
1507 1514
1508/** 1515/**
1509 * free_unnecessary_pages - Release preallocated pages not needed for the image 1516 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1510 */ 1517 */
1511static unsigned long free_unnecessary_pages(void) 1518static unsigned long free_unnecessary_pages(void)
1512{ 1519{
@@ -1560,7 +1567,7 @@ static unsigned long free_unnecessary_pages(void)
1560} 1567}
1561 1568
1562/** 1569/**
1563 * minimum_image_size - Estimate the minimum acceptable size of an image 1570 * minimum_image_size - Estimate the minimum acceptable size of an image.
1564 * @saveable: Number of saveable pages in the system. 1571 * @saveable: Number of saveable pages in the system.
1565 * 1572 *
1566 * We want to avoid attempting to free too much memory too hard, so estimate the 1573 * We want to avoid attempting to free too much memory too hard, so estimate the
@@ -1590,7 +1597,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
1590} 1597}
1591 1598
1592/** 1599/**
1593 * hibernate_preallocate_memory - Preallocate memory for hibernation image 1600 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1594 * 1601 *
1595 * To create a hibernation image it is necessary to make a copy of every page 1602 * To create a hibernation image it is necessary to make a copy of every page
1596 * frame in use. We also need a number of page frames to be free during 1603 * frame in use. We also need a number of page frames to be free during
@@ -1763,10 +1770,11 @@ int hibernate_preallocate_memory(void)
1763 1770
1764#ifdef CONFIG_HIGHMEM 1771#ifdef CONFIG_HIGHMEM
1765/** 1772/**
1766 * count_pages_for_highmem - compute the number of non-highmem pages 1773 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1767 * that will be necessary for creating copies of highmem pages. 1774 *
1768 */ 1775 * Compute the number of non-highmem pages that will be necessary for creating
1769 1776 * copies of highmem pages.
1777 */
1770static unsigned int count_pages_for_highmem(unsigned int nr_highmem) 1778static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1771{ 1779{
1772 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem; 1780 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
@@ -1783,10 +1791,8 @@ static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0;
1783#endif /* CONFIG_HIGHMEM */ 1791#endif /* CONFIG_HIGHMEM */
1784 1792
1785/** 1793/**
1786 * enough_free_mem - Make sure we have enough free memory for the 1794 * enough_free_mem - Check if there is enough free memory for the image.
1787 * snapshot image.
1788 */ 1795 */
1789
1790static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem) 1796static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1791{ 1797{
1792 struct zone *zone; 1798 struct zone *zone;
@@ -1805,10 +1811,11 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1805 1811
1806#ifdef CONFIG_HIGHMEM 1812#ifdef CONFIG_HIGHMEM
1807/** 1813/**
1808 * get_highmem_buffer - if there are some highmem pages in the suspend 1814 * get_highmem_buffer - Allocate a buffer for highmem pages.
1809 * image, we may need the buffer to copy them and/or load their data. 1815 *
1816 * If there are some highmem pages in the hibernation image, we may need a
1817 * buffer to copy them and/or load their data.
1810 */ 1818 */
1811
1812static inline int get_highmem_buffer(int safe_needed) 1819static inline int get_highmem_buffer(int safe_needed)
1813{ 1820{
1814 buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed); 1821 buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
@@ -1816,11 +1823,11 @@ static inline int get_highmem_buffer(int safe_needed)
1816} 1823}
1817 1824
1818/** 1825/**
1819 * alloc_highmem_image_pages - allocate some highmem pages for the image. 1826 * alloc_highmem_image_pages - Allocate some highmem pages for the image.
1820 * Try to allocate as many pages as needed, but if the number of free 1827 *
1821 * highmem pages is lesser than that, allocate them all. 1828 * Try to allocate as many pages as needed, but if the number of free highmem
1829 * pages is less than that, allocate them all.
1822 */ 1830 */
1823
1824static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm, 1831static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1825 unsigned int nr_highmem) 1832 unsigned int nr_highmem)
1826{ 1833{
@@ -1846,17 +1853,16 @@ static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1846#endif /* CONFIG_HIGHMEM */ 1853#endif /* CONFIG_HIGHMEM */
1847 1854
1848/** 1855/**
1849 * swsusp_alloc - allocate memory for the suspend image 1856 * swsusp_alloc - Allocate memory for hibernation image.
1850 * 1857 *
1851 * We first try to allocate as many highmem pages as there are 1858 * We first try to allocate as many highmem pages as there are
1852 * saveable highmem pages in the system. If that fails, we allocate 1859 * saveable highmem pages in the system. If that fails, we allocate
1853 * non-highmem pages for the copies of the remaining highmem ones. 1860 * non-highmem pages for the copies of the remaining highmem ones.
1854 * 1861 *
1855 * In this approach it is likely that the copies of highmem pages will 1862 * In this approach it is likely that the copies of highmem pages will
1856 * also be located in the high memory, because of the way in which 1863 * also be located in the high memory, because of the way in which
1857 * copy_data_pages() works. 1864 * copy_data_pages() works.
1858 */ 1865 */
1859
1860static int swsusp_alloc(struct memory_bitmap *orig_bm, 1866static int swsusp_alloc(struct memory_bitmap *orig_bm,
1861 struct memory_bitmap *copy_bm, 1867 struct memory_bitmap *copy_bm,
1862 unsigned int nr_pages, unsigned int nr_highmem) 1868 unsigned int nr_pages, unsigned int nr_highmem)
@@ -1909,7 +1915,8 @@ asmlinkage __visible int swsusp_save(void)
1909 return -ENOMEM; 1915 return -ENOMEM;
1910 } 1916 }
1911 1917
1912 /* During allocating of suspend pagedir, new cold pages may appear. 1918 /*
1919 * During allocating of suspend pagedir, new cold pages may appear.
1913 * Kill them. 1920 * Kill them.
1914 */ 1921 */
1915 drain_local_pages(NULL); 1922 drain_local_pages(NULL);
@@ -1972,10 +1979,13 @@ static int init_header(struct swsusp_info *info)
1972} 1979}
1973 1980
1974/** 1981/**
1975 * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm 1982 * pack_pfns - Prepare PFNs for saving.
1976 * are stored in the array @buf[] (1 page at a time) 1983 * @bm: Memory bitmap.
1984 * @buf: Memory buffer to store the PFNs in.
1985 *
1986 * PFNs corresponding to set bits in @bm are stored in the area of memory
1987 * pointed to by @buf (1 page at a time).
1977 */ 1988 */
1978
1979static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm) 1989static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1980{ 1990{
1981 int j; 1991 int j;
@@ -1990,22 +2000,21 @@ static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1990} 2000}
1991 2001
1992/** 2002/**
1993 * snapshot_read_next - used for reading the system memory snapshot. 2003 * snapshot_read_next - Get the address to read the next image page from.
2004 * @handle: Snapshot handle to be used for the reading.
1994 * 2005 *
1995 * On the first call to it @handle should point to a zeroed 2006 * On the first call, @handle should point to a zeroed snapshot_handle
1996 * snapshot_handle structure. The structure gets updated and a pointer 2007 * structure. The structure gets populated then and a pointer to it should be
1997 * to it should be passed to this function every next time. 2008 * passed to this function every next time.
1998 * 2009 *
1999 * On success the function returns a positive number. Then, the caller 2010 * On success, the function returns a positive number. Then, the caller
2000 * is allowed to read up to the returned number of bytes from the memory 2011 * is allowed to read up to the returned number of bytes from the memory
2001 * location computed by the data_of() macro. 2012 * location computed by the data_of() macro.
2002 * 2013 *
2003 * The function returns 0 to indicate the end of data stream condition, 2014 * The function returns 0 to indicate the end of the data stream condition,
2004 * and a negative number is returned on error. In such cases the 2015 * and negative numbers are returned on errors. If that happens, the structure
2005 * structure pointed to by @handle is not updated and should not be used 2016 * pointed to by @handle is not updated and should not be used any more.
2006 * any more.
2007 */ 2017 */
2008
2009int snapshot_read_next(struct snapshot_handle *handle) 2018int snapshot_read_next(struct snapshot_handle *handle)
2010{ 2019{
2011 if (handle->cur > nr_meta_pages + nr_copy_pages) 2020 if (handle->cur > nr_meta_pages + nr_copy_pages)
@@ -2034,7 +2043,8 @@ int snapshot_read_next(struct snapshot_handle *handle)
2034 2043
2035 page = pfn_to_page(memory_bm_next_pfn(&copy_bm)); 2044 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2036 if (PageHighMem(page)) { 2045 if (PageHighMem(page)) {
2037 /* Highmem pages are copied to the buffer, 2046 /*
2047 * Highmem pages are copied to the buffer,
2038 * because we can't return with a kmapped 2048 * because we can't return with a kmapped
2039 * highmem page (we may not be called again). 2049 * highmem page (we may not be called again).
2040 */ 2050 */
@@ -2066,11 +2076,11 @@ static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2066} 2076}
2067 2077
2068/** 2078/**
2069 * mark_unsafe_pages - mark the pages that cannot be used for storing 2079 * mark_unsafe_pages - Mark pages that were used before hibernation.
2070 * the image during resume, because they conflict with the pages that 2080 *
2071 * had been used before suspend 2081 * Mark the pages that cannot be used for storing the image during restoration,
2082 * because they conflict with the pages that had been used before hibernation.
2072 */ 2083 */
2073
2074static void mark_unsafe_pages(struct memory_bitmap *bm) 2084static void mark_unsafe_pages(struct memory_bitmap *bm)
2075{ 2085{
2076 unsigned long pfn; 2086 unsigned long pfn;
@@ -2104,9 +2114,8 @@ static int check_header(struct swsusp_info *info)
2104} 2114}
2105 2115
2106/** 2116/**
2107 * load header - check the image header and copy data from it 2117 * load header - Check the image header and copy the data from it.
2108 */ 2118 */
2109
2110static int load_header(struct swsusp_info *info) 2119static int load_header(struct swsusp_info *info)
2111{ 2120{
2112 int error; 2121 int error;
@@ -2121,8 +2130,12 @@ static int load_header(struct swsusp_info *info)
2121} 2130}
2122 2131
2123/** 2132/**
2124 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set 2133 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2125 * the corresponding bit in the memory bitmap @bm 2134 * @bm: Memory bitmap.
2135 * @buf: Area of memory containing the PFNs.
2136 *
2137 * For each element of the array pointed to by @buf (1 page at a time), set the
2138 * corresponding bit in @bm.
2126 */ 2139 */
2127static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) 2140static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2128{ 2141{
@@ -2145,7 +2158,8 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2145} 2158}
2146 2159
2147#ifdef CONFIG_HIGHMEM 2160#ifdef CONFIG_HIGHMEM
2148/* struct highmem_pbe is used for creating the list of highmem pages that 2161/*
2162 * struct highmem_pbe is used for creating the list of highmem pages that
2149 * should be restored atomically during the resume from disk, because the page 2163 * should be restored atomically during the resume from disk, because the page
2150 * frames they have occupied before the suspend are in use. 2164 * frames they have occupied before the suspend are in use.
2151 */ 2165 */
@@ -2155,7 +2169,8 @@ struct highmem_pbe {
2155 struct highmem_pbe *next; 2169 struct highmem_pbe *next;
2156}; 2170};
2157 2171
2158/* List of highmem PBEs needed for restoring the highmem pages that were 2172/*
2173 * List of highmem PBEs needed for restoring the highmem pages that were
2159 * allocated before the suspend and included in the suspend image, but have 2174 * allocated before the suspend and included in the suspend image, but have
2160 * also been allocated by the "resume" kernel, so their contents cannot be 2175 * also been allocated by the "resume" kernel, so their contents cannot be
2161 * written directly to their "original" page frames. 2176 * written directly to their "original" page frames.
@@ -2163,11 +2178,11 @@ struct highmem_pbe {
2163static struct highmem_pbe *highmem_pblist; 2178static struct highmem_pbe *highmem_pblist;
2164 2179
2165/** 2180/**
2166 * count_highmem_image_pages - compute the number of highmem pages in the 2181 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2167 * suspend image. The bits in the memory bitmap @bm that correspond to the 2182 * @bm: Memory bitmap.
2168 * image pages are assumed to be set. 2183 *
2184 * The bits in @bm that correspond to image pages are assumed to be set.
2169 */ 2185 */
2170
2171static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) 2186static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2172{ 2187{
2173 unsigned long pfn; 2188 unsigned long pfn;
@@ -2184,22 +2199,23 @@ static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2184 return cnt; 2199 return cnt;
2185} 2200}
2186 2201
2187/**
2188 * prepare_highmem_image - try to allocate as many highmem pages as
2189 * there are highmem image pages (@nr_highmem_p points to the variable
2190 * containing the number of highmem image pages). The pages that are
2191 * "safe" (ie. will not be overwritten when the suspend image is
2192 * restored) have the corresponding bits set in @bm (it must be
2193 * unitialized).
2194 *
2195 * NOTE: This function should not be called if there are no highmem
2196 * image pages.
2197 */
2198
2199static unsigned int safe_highmem_pages; 2202static unsigned int safe_highmem_pages;
2200 2203
2201static struct memory_bitmap *safe_highmem_bm; 2204static struct memory_bitmap *safe_highmem_bm;
2202 2205
2206/**
2207 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2208 * @bm: Pointer to an uninitialized memory bitmap structure.
2209 * @nr_highmem_p: Pointer to the number of highmem image pages.
2210 *
2211 * Try to allocate as many highmem pages as there are highmem image pages
2212 * (@nr_highmem_p points to the variable containing the number of highmem image
2213 * pages). The pages that are "safe" (ie. will not be overwritten when the
2214 * hibernation image is restored entirely) have the corresponding bits set in
2215 * @bm (it must be unitialized).
2216 *
2217 * NOTE: This function should not be called if there are no highmem image pages.
2218 */
2203static int prepare_highmem_image(struct memory_bitmap *bm, 2219static int prepare_highmem_image(struct memory_bitmap *bm,
2204 unsigned int *nr_highmem_p) 2220 unsigned int *nr_highmem_p)
2205{ 2221{
@@ -2236,25 +2252,26 @@ static int prepare_highmem_image(struct memory_bitmap *bm,
2236 return 0; 2252 return 0;
2237} 2253}
2238 2254
2255static struct page *last_highmem_page;
2256
2239/** 2257/**
2240 * get_highmem_page_buffer - for given highmem image page find the buffer 2258 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2241 * that suspend_write_next() should set for its caller to write to. 2259 *
2260 * For a given highmem image page get a buffer that suspend_write_next() should
2261 * return to its caller to write to.
2242 * 2262 *
2243 * If the page is to be saved to its "original" page frame or a copy of 2263 * If the page is to be saved to its "original" page frame or a copy of
2244 * the page is to be made in the highmem, @buffer is returned. Otherwise, 2264 * the page is to be made in the highmem, @buffer is returned. Otherwise,
2245 * the copy of the page is to be made in normal memory, so the address of 2265 * the copy of the page is to be made in normal memory, so the address of
2246 * the copy is returned. 2266 * the copy is returned.
2247 * 2267 *
2248 * If @buffer is returned, the caller of suspend_write_next() will write 2268 * If @buffer is returned, the caller of suspend_write_next() will write
2249 * the page's contents to @buffer, so they will have to be copied to the 2269 * the page's contents to @buffer, so they will have to be copied to the
2250 * right location on the next call to suspend_write_next() and it is done 2270 * right location on the next call to suspend_write_next() and it is done
2251 * with the help of copy_last_highmem_page(). For this purpose, if 2271 * with the help of copy_last_highmem_page(). For this purpose, if
2252 * @buffer is returned, @last_highmem page is set to the page to which 2272 * @buffer is returned, @last_highmem_page is set to the page to which
2253 * the data will have to be copied from @buffer. 2273 * the data will have to be copied from @buffer.
2254 */ 2274 */
2255
2256static struct page *last_highmem_page;
2257
2258static void *get_highmem_page_buffer(struct page *page, 2275static void *get_highmem_page_buffer(struct page *page,
2259 struct chain_allocator *ca) 2276 struct chain_allocator *ca)
2260{ 2277{
@@ -2262,13 +2279,15 @@ static void *get_highmem_page_buffer(struct page *page,
2262 void *kaddr; 2279 void *kaddr;
2263 2280
2264 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) { 2281 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2265 /* We have allocated the "original" page frame and we can 2282 /*
2283 * We have allocated the "original" page frame and we can
2266 * use it directly to store the loaded page. 2284 * use it directly to store the loaded page.
2267 */ 2285 */
2268 last_highmem_page = page; 2286 last_highmem_page = page;
2269 return buffer; 2287 return buffer;
2270 } 2288 }
2271 /* The "original" page frame has not been allocated and we have to 2289 /*
2290 * The "original" page frame has not been allocated and we have to
2272 * use a "safe" page frame to store the loaded page. 2291 * use a "safe" page frame to store the loaded page.
2273 */ 2292 */
2274 pbe = chain_alloc(ca, sizeof(struct highmem_pbe)); 2293 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
@@ -2298,11 +2317,12 @@ static void *get_highmem_page_buffer(struct page *page,
2298} 2317}
2299 2318
2300/** 2319/**
2301 * copy_last_highmem_page - copy the contents of a highmem image from 2320 * copy_last_highmem_page - Copy most the most recent highmem image page.
2302 * @buffer, where the caller of snapshot_write_next() has place them, 2321 *
2303 * to the right location represented by @last_highmem_page . 2322 * Copy the contents of a highmem image from @buffer, where the caller of
2323 * snapshot_write_next() has stored them, to the right location represented by
2324 * @last_highmem_page .
2304 */ 2325 */
2305
2306static void copy_last_highmem_page(void) 2326static void copy_last_highmem_page(void)
2307{ 2327{
2308 if (last_highmem_page) { 2328 if (last_highmem_page) {
@@ -2345,22 +2365,23 @@ static inline int last_highmem_page_copied(void) { return 1; }
2345static inline void free_highmem_data(void) {} 2365static inline void free_highmem_data(void) {}
2346#endif /* CONFIG_HIGHMEM */ 2366#endif /* CONFIG_HIGHMEM */
2347 2367
2368#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2369
2348/** 2370/**
2349 * prepare_image - use the memory bitmap @bm to mark the pages that will 2371 * prepare_image - Make room for loading hibernation image.
2350 * be overwritten in the process of restoring the system memory state 2372 * @new_bm: Unitialized memory bitmap structure.
2351 * from the suspend image ("unsafe" pages) and allocate memory for the 2373 * @bm: Memory bitmap with unsafe pages marked.
2352 * image. 2374 *
2375 * Use @bm to mark the pages that will be overwritten in the process of
2376 * restoring the system memory state from the suspend image ("unsafe" pages)
2377 * and allocate memory for the image.
2353 * 2378 *
2354 * The idea is to allocate a new memory bitmap first and then allocate 2379 * The idea is to allocate a new memory bitmap first and then allocate
2355 * as many pages as needed for the image data, but not to assign these 2380 * as many pages as needed for image data, but without specifying what those
2356 * pages to specific tasks initially. Instead, we just mark them as 2381 * pages will be used for just yet. Instead, we mark them all as allocated and
2357 * allocated and create a lists of "safe" pages that will be used 2382 * create a lists of "safe" pages to be used later. On systems with high
2358 * later. On systems with high memory a list of "safe" highmem pages is 2383 * memory a list of "safe" highmem pages is created too.
2359 * also created.
2360 */ 2384 */
2361
2362#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2363
2364static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) 2385static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2365{ 2386{
2366 unsigned int nr_pages, nr_highmem; 2387 unsigned int nr_pages, nr_highmem;
@@ -2385,7 +2406,8 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2385 if (error) 2406 if (error)
2386 goto Free; 2407 goto Free;
2387 } 2408 }
2388 /* Reserve some safe pages for potential later use. 2409 /*
2410 * Reserve some safe pages for potential later use.
2389 * 2411 *
2390 * NOTE: This way we make sure there will be enough safe pages for the 2412 * NOTE: This way we make sure there will be enough safe pages for the
2391 * chain_alloc() in get_buffer(). It is a bit wasteful, but 2413 * chain_alloc() in get_buffer(). It is a bit wasteful, but
@@ -2431,10 +2453,11 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2431} 2453}
2432 2454
2433/** 2455/**
2434 * get_buffer - compute the address that snapshot_write_next() should 2456 * get_buffer - Get the address to store the next image data page.
2435 * set for its caller to write to. 2457 *
2458 * Get the address that snapshot_write_next() should return to its caller to
2459 * write to.
2436 */ 2460 */
2437
2438static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) 2461static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2439{ 2462{
2440 struct pbe *pbe; 2463 struct pbe *pbe;
@@ -2449,12 +2472,14 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2449 return get_highmem_page_buffer(page, ca); 2472 return get_highmem_page_buffer(page, ca);
2450 2473
2451 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) 2474 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2452 /* We have allocated the "original" page frame and we can 2475 /*
2476 * We have allocated the "original" page frame and we can
2453 * use it directly to store the loaded page. 2477 * use it directly to store the loaded page.
2454 */ 2478 */
2455 return page_address(page); 2479 return page_address(page);
2456 2480
2457 /* The "original" page frame has not been allocated and we have to 2481 /*
2482 * The "original" page frame has not been allocated and we have to
2458 * use a "safe" page frame to store the loaded page. 2483 * use a "safe" page frame to store the loaded page.
2459 */ 2484 */
2460 pbe = chain_alloc(ca, sizeof(struct pbe)); 2485 pbe = chain_alloc(ca, sizeof(struct pbe));
@@ -2471,22 +2496,21 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2471} 2496}
2472 2497
2473/** 2498/**
2474 * snapshot_write_next - used for writing the system memory snapshot. 2499 * snapshot_write_next - Get the address to store the next image page.
2500 * @handle: Snapshot handle structure to guide the writing.
2475 * 2501 *
2476 * On the first call to it @handle should point to a zeroed 2502 * On the first call, @handle should point to a zeroed snapshot_handle
2477 * snapshot_handle structure. The structure gets updated and a pointer 2503 * structure. The structure gets populated then and a pointer to it should be
2478 * to it should be passed to this function every next time. 2504 * passed to this function every next time.
2479 * 2505 *
2480 * On success the function returns a positive number. Then, the caller 2506 * On success, the function returns a positive number. Then, the caller
2481 * is allowed to write up to the returned number of bytes to the memory 2507 * is allowed to write up to the returned number of bytes to the memory
2482 * location computed by the data_of() macro. 2508 * location computed by the data_of() macro.
2483 * 2509 *
2484 * The function returns 0 to indicate the "end of file" condition, 2510 * The function returns 0 to indicate the "end of file" condition. Negative
2485 * and a negative number is returned on error. In such cases the 2511 * numbers are returned on errors, in which cases the structure pointed to by
2486 * structure pointed to by @handle is not updated and should not be used 2512 * @handle is not updated and should not be used any more.
2487 * any more.
2488 */ 2513 */
2489
2490int snapshot_write_next(struct snapshot_handle *handle) 2514int snapshot_write_next(struct snapshot_handle *handle)
2491{ 2515{
2492 static struct chain_allocator ca; 2516 static struct chain_allocator ca;
@@ -2556,13 +2580,13 @@ int snapshot_write_next(struct snapshot_handle *handle)
2556} 2580}
2557 2581
2558/** 2582/**
2559 * snapshot_write_finalize - must be called after the last call to 2583 * snapshot_write_finalize - Complete the loading of a hibernation image.
2560 * snapshot_write_next() in case the last page in the image happens 2584 *
2561 * to be a highmem page and its contents should be stored in the 2585 * Must be called after the last call to snapshot_write_next() in case the last
2562 * highmem. Additionally, it releases the memory that will not be 2586 * page in the image happens to be a highmem page and its contents should be
2563 * used any more. 2587 * stored in highmem. Additionally, it recycles bitmap memory that's not
2588 * necessary any more.
2564 */ 2589 */
2565
2566void snapshot_write_finalize(struct snapshot_handle *handle) 2590void snapshot_write_finalize(struct snapshot_handle *handle)
2567{ 2591{
2568 copy_last_highmem_page(); 2592 copy_last_highmem_page();
@@ -2599,15 +2623,15 @@ static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2599} 2623}
2600 2624
2601/** 2625/**
2602 * restore_highmem - for each highmem page that was allocated before 2626 * restore_highmem - Put highmem image pages into their original locations.
2603 * the suspend and included in the suspend image, and also has been 2627 *
2604 * allocated by the "resume" kernel swap its current (ie. "before 2628 * For each highmem page that was in use before hibernation and is included in
2605 * resume") contents with the previous (ie. "before suspend") one. 2629 * the image, and also has been allocated by the "restore" kernel, swap its
2630 * current contents with the previous (ie. "before hibernation") ones.
2606 * 2631 *
2607 * If the resume eventually fails, we can call this function once 2632 * If the restore eventually fails, we can call this function once again and
2608 * again and restore the "before resume" highmem state. 2633 * restore the highmem state as seen by the restore kernel.
2609 */ 2634 */
2610
2611int restore_highmem(void) 2635int restore_highmem(void)
2612{ 2636{
2613 struct highmem_pbe *pbe = highmem_pblist; 2637 struct highmem_pbe *pbe = highmem_pblist;