author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2014-08-05 16:49:45 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2014-08-05 16:49:45 -0400
commit		ddbe8db147cd0954da93dc6986f4793bcabb3889 (patch)
tree		0a0d463863ca78b644471132eb3f27ec3405b16d /kernel
parent		b14c348e8dabcc7604683cfc44fad7396e8b639a (diff)
parent		0f7d83e85dbd5bb8032ebed7713edf59670fb074 (diff)
Merge branch 'pm-sleep'
* pm-sleep:
  PM / Hibernate: Touch Soft Lockup Watchdog in rtree_next_node
  PM / Hibernate: Remove the old memory-bitmap implementation
  PM / Hibernate: Iterate over set bits instead of PFNs in swsusp_free()
  PM / Hibernate: Implement position keeping in radix tree
  PM / Hibernate: Add memory_rtree_find_bit function
  PM / Hibernate: Create a Radix-Tree to store memory bitmap
  PM / sleep: fix kernel-doc warnings in drivers/base/power/main.c
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/power/snapshot.c	494
1 file changed, 367 insertions(+), 127 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 1ea328aafdc9..4fc5c32422b3 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -248,33 +248,61 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
  * information is stored (in the form of a block of bitmap)
  * It also contains the pfns that correspond to the start and end of
  * the represented memory area.
+ *
+ * The memory bitmap is organized as a radix tree to guarantee fast random
+ * access to the bits. There is one radix tree for each zone (as returned
+ * from create_mem_extents).
+ *
+ * One radix tree is represented by one struct mem_zone_bm_rtree. There are
+ * two linked lists for the nodes of the tree, one for the inner nodes and
+ * one for the leave nodes. The linked leave nodes are used for fast linear
+ * access of the memory bitmap.
+ *
+ * The struct rtree_node represents one node of the radix tree.
  */
 
 #define BM_END_OF_MAP	(~0UL)
 
 #define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
+#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
+#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
 
-struct bm_block {
-	struct list_head hook;	/* hook into a list of bitmap blocks */
-	unsigned long start_pfn;	/* pfn represented by the first bit */
-	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
-	unsigned long *data;	/* bitmap representing pages */
+/*
+ * struct rtree_node is a wrapper struct to link the nodes
+ * of the rtree together for easy linear iteration over
+ * bits and easy freeing
+ */
+struct rtree_node {
+	struct list_head list;
+	unsigned long *data;
 };
 
-static inline unsigned long bm_block_bits(struct bm_block *bb)
-{
-	return bb->end_pfn - bb->start_pfn;
-}
+/*
+ * struct mem_zone_bm_rtree represents a bitmap used for one
+ * populated memory zone.
+ */
+struct mem_zone_bm_rtree {
+	struct list_head list;		/* Link Zones together */
+	struct list_head nodes;		/* Radix Tree inner nodes */
+	struct list_head leaves;	/* Radix Tree leaves */
+	unsigned long start_pfn;	/* Zone start page frame */
+	unsigned long end_pfn;		/* Zone end page frame + 1 */
+	struct rtree_node *rtree;	/* Radix Tree Root */
+	int levels;			/* Number of Radix Tree Levels */
+	unsigned int blocks;		/* Number of Bitmap Blocks */
+};
 
 /* strcut bm_position is used for browsing memory bitmaps */
 
 struct bm_position {
-	struct bm_block *block;
-	int bit;
+	struct mem_zone_bm_rtree *zone;
+	struct rtree_node *node;
+	unsigned long node_pfn;
+	int node_bit;
 };
 
 struct memory_bitmap {
-	struct list_head blocks;	/* list of bitmap blocks */
+	struct list_head zones;
 	struct linked_page *p_list;	/* list of pages used to store zone
 					 * bitmap objects and bitmap block
 					 * objects
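Each rtree_node leaf above stores one page worth of bitmap, so BM_BLOCK_SHIFT and BM_BLOCK_MASK split a zone-relative pfn into a leaf-block number and a bit offset inside that leaf. A minimal user-space sketch of that split; the PAGE_SHIFT value of 12 (4 KiB pages) and the example pfns are assumptions for illustration, not values from the patch:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
#define BM_BLOCK_SHIFT	(PAGE_SHIFT + 3)	/* bits held by one bitmap page */
#define BM_BLOCK_MASK	((1UL << BM_BLOCK_SHIFT) - 1)

int main(void)
{
	unsigned long zone_start_pfn = 0x1000;	/* made-up zone start */
	unsigned long pfn = 0x1f2a3;		/* made-up pfn inside the zone */
	unsigned long rel = pfn - zone_start_pfn;

	unsigned long block = rel >> BM_BLOCK_SHIFT;	/* which leaf node */
	unsigned long bit = rel & BM_BLOCK_MASK;	/* bit inside its page */

	printf("pfn %#lx -> leaf block %lu, bit %lu\n", pfn, block, bit);
	return 0;
}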
@@ -284,38 +312,178 @@ struct memory_bitmap {
 
 /* Functions that operate on memory bitmaps */
 
-static void memory_bm_position_reset(struct memory_bitmap *bm)
+#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
+#if BITS_PER_LONG == 32
+#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
+#else
+#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
+#endif
+#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
+
+/*
+ * alloc_rtree_node - Allocate a new node and add it to the radix tree.
+ *
+ * This function is used to allocate inner nodes as well as the
+ * leave nodes of the radix tree. It also adds the node to the
+ * corresponding linked list passed in by the *list parameter.
+ */
+static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
+					   struct chain_allocator *ca,
+					   struct list_head *list)
 {
-	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
-	bm->cur.bit = 0;
-}
+	struct rtree_node *node;
 
-static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
+	node = chain_alloc(ca, sizeof(struct rtree_node));
+	if (!node)
+		return NULL;
 
-/**
- * create_bm_block_list - create a list of block bitmap objects
- * @pages - number of pages to track
- * @list - list to put the allocated blocks into
- * @ca - chain allocator to be used for allocating memory
+	node->data = get_image_page(gfp_mask, safe_needed);
+	if (!node->data)
+		return NULL;
+
+	list_add_tail(&node->list, list);
+
+	return node;
+}
+
+/*
+ * add_rtree_block - Add a new leave node to the radix tree
+ *
+ * The leave nodes need to be allocated in order to keep the leaves
+ * linked list in order. This is guaranteed by the zone->blocks
+ * counter.
  */
-static int create_bm_block_list(unsigned long pages,
-				struct list_head *list,
-				struct chain_allocator *ca)
+static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
+			   int safe_needed, struct chain_allocator *ca)
 {
-	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
+	struct rtree_node *node, *block, **dst;
+	unsigned int levels_needed, block_nr;
+	int i;
 
-	while (nr_blocks-- > 0) {
-		struct bm_block *bb;
+	block_nr = zone->blocks;
+	levels_needed = 0;
 
-		bb = chain_alloc(ca, sizeof(struct bm_block));
-		if (!bb)
+	/* How many levels do we need for this block nr? */
+	while (block_nr) {
+		levels_needed += 1;
+		block_nr >>= BM_RTREE_LEVEL_SHIFT;
+	}
+
+	/* Make sure the rtree has enough levels */
+	for (i = zone->levels; i < levels_needed; i++) {
+		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
+					&zone->nodes);
+		if (!node)
 			return -ENOMEM;
-		list_add(&bb->hook, list);
+
+		node->data[0] = (unsigned long)zone->rtree;
+		zone->rtree = node;
+		zone->levels += 1;
+	}
+
+	/* Allocate new block */
+	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
+	if (!block)
+		return -ENOMEM;
+
+	/* Now walk the rtree to insert the block */
+	node = zone->rtree;
+	dst = &zone->rtree;
+	block_nr = zone->blocks;
+	for (i = zone->levels; i > 0; i--) {
+		int index;
+
+		if (!node) {
+			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
+						&zone->nodes);
+			if (!node)
+				return -ENOMEM;
+			*dst = node;
+		}
+
+		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
+		index &= BM_RTREE_LEVEL_MASK;
+		dst = (struct rtree_node **)&((*dst)->data[index]);
+		node = *dst;
 	}
 
+	zone->blocks += 1;
+	*dst = block;
+
 	return 0;
 }
 
+static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
+			       int clear_nosave_free);
+
+/*
+ * create_zone_bm_rtree - create a radix tree for one zone
+ *
+ * Allocated the mem_zone_bm_rtree structure and initializes it.
+ * This function also allocated and builds the radix tree for the
+ * zone.
+ */
+static struct mem_zone_bm_rtree *
+create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
+		     struct chain_allocator *ca,
+		     unsigned long start, unsigned long end)
+{
+	struct mem_zone_bm_rtree *zone;
+	unsigned int i, nr_blocks;
+	unsigned long pages;
+
+	pages = end - start;
+	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
+	if (!zone)
+		return NULL;
+
+	INIT_LIST_HEAD(&zone->nodes);
+	INIT_LIST_HEAD(&zone->leaves);
+	zone->start_pfn = start;
+	zone->end_pfn = end;
+	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
+
+	for (i = 0; i < nr_blocks; i++) {
+		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
+			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
+			return NULL;
+		}
+	}
+
+	return zone;
+}
+
+/*
+ * free_zone_bm_rtree - Free the memory of the radix tree
+ *
+ * Free all node pages of the radix tree. The mem_zone_bm_rtree
+ * structure itself is not freed here nor are the rtree_node
+ * structs.
+ */
+static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
+			       int clear_nosave_free)
+{
+	struct rtree_node *node;
+
+	list_for_each_entry(node, &zone->nodes, list)
+		free_image_page(node->data, clear_nosave_free);
+
+	list_for_each_entry(node, &zone->leaves, list)
+		free_image_page(node->data, clear_nosave_free);
+}
+
+static void memory_bm_position_reset(struct memory_bitmap *bm)
+{
+	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
+				  list);
+	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
+				  struct rtree_node, list);
+	bm->cur.node_pfn = 0;
+	bm->cur.node_bit = 0;
+}
+
+static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
+
 struct mem_extent {
 	struct list_head hook;
 	unsigned long start;
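add_rtree_block() sizes the tree lazily: the number of levels needed for leaf block N is the number of BM_RTREE_LEVEL_SHIFT-wide digits of N, so block 0 sits directly in the root slot with zero levels. A sketch of that arithmetic, assuming 64-bit longs and 4 KiB pages so that BM_RTREE_LEVEL_SHIFT is 9 and each inner node holds 512 slots:

#include <stdio.h>

#define BM_RTREE_LEVEL_SHIFT	9	/* PAGE_SHIFT - 3 under the assumptions */

/* Same loop as in add_rtree_block(): one level per 9 index bits. */
static unsigned int levels_for(unsigned int block_nr)
{
	unsigned int levels = 0;

	while (block_nr) {
		levels += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}
	return levels;
}

int main(void)
{
	/* Block 0 needs no inner level, 1..511 need one, 512 needs two. */
	printf("%u %u %u %u\n", levels_for(0), levels_for(1),
	       levels_for(511), levels_for(512));	/* prints: 0 1 1 2 */
	return 0;
}

With one leaf covering 32768 pages (128 MiB) under these assumptions, a single inner level is enough for zones up to 64 GiB.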
@@ -407,40 +575,22 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 	int error;
 
 	chain_init(&ca, gfp_mask, safe_needed);
-	INIT_LIST_HEAD(&bm->blocks);
+	INIT_LIST_HEAD(&bm->zones);
 
 	error = create_mem_extents(&mem_extents, gfp_mask);
 	if (error)
 		return error;
 
 	list_for_each_entry(ext, &mem_extents, hook) {
-		struct bm_block *bb;
-		unsigned long pfn = ext->start;
-		unsigned long pages = ext->end - ext->start;
-
-		bb = list_entry(bm->blocks.prev, struct bm_block, hook);
+		struct mem_zone_bm_rtree *zone;
 
-		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
-		if (error)
+		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
+					    ext->start, ext->end);
+		if (!zone) {
+			error = -ENOMEM;
 			goto Error;
-
-		list_for_each_entry_continue(bb, &bm->blocks, hook) {
-			bb->data = get_image_page(gfp_mask, safe_needed);
-			if (!bb->data) {
-				error = -ENOMEM;
-				goto Error;
-			}
-
-			bb->start_pfn = pfn;
-			if (pages >= BM_BITS_PER_BLOCK) {
-				pfn += BM_BITS_PER_BLOCK;
-				pages -= BM_BITS_PER_BLOCK;
-			} else {
-				/* This is executed only once in the loop */
-				pfn += pages;
-			}
-			bb->end_pfn = pfn;
 		}
+		list_add_tail(&zone->list, &bm->zones);
 	}
 
 	bm->p_list = ca.chain;
@@ -460,51 +610,83 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
  */
 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 {
-	struct bm_block *bb;
+	struct mem_zone_bm_rtree *zone;
 
-	list_for_each_entry(bb, &bm->blocks, hook)
-		if (bb->data)
-			free_image_page(bb->data, clear_nosave_free);
+	list_for_each_entry(zone, &bm->zones, list)
+		free_zone_bm_rtree(zone, clear_nosave_free);
 
 	free_list_of_pages(bm->p_list, clear_nosave_free);
 
-	INIT_LIST_HEAD(&bm->blocks);
+	INIT_LIST_HEAD(&bm->zones);
 }
 
 /**
- * memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
- * to given pfn. The cur_zone_bm member of @bm and the cur_block member
- * of @bm->cur_zone_bm are updated.
+ * memory_bm_find_bit - Find the bit for pfn in the memory
+ * bitmap
+ *
+ * Find the bit in the bitmap @bm that corresponds to given pfn.
+ * The cur.zone, cur.block and cur.node_pfn member of @bm are
+ * updated.
+ * It walks the radix tree to find the page which contains the bit for
+ * pfn and returns the bit position in **addr and *bit_nr.
  */
 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 			      void **addr, unsigned int *bit_nr)
 {
-	struct bm_block *bb;
+	struct mem_zone_bm_rtree *curr, *zone;
+	struct rtree_node *node;
+	int i, block_nr;
 
+	zone = bm->cur.zone;
+
+	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
+		goto zone_found;
+
+	zone = NULL;
+
+	/* Find the right zone */
+	list_for_each_entry(curr, &bm->zones, list) {
+		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
+			zone = curr;
+			break;
+		}
+	}
+
+	if (!zone)
+		return -EFAULT;
+
+zone_found:
 	/*
-	 * Check if the pfn corresponds to the current bitmap block and find
-	 * the block where it fits if this is not the case.
+	 * We have a zone. Now walk the radix tree to find the leave
+	 * node for our pfn.
 	 */
-	bb = bm->cur.block;
-	if (pfn < bb->start_pfn)
-		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
-			if (pfn >= bb->start_pfn)
-				break;
 
-	if (pfn >= bb->end_pfn)
-		list_for_each_entry_continue(bb, &bm->blocks, hook)
-			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
-				break;
+	node = bm->cur.node;
+	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
+		goto node_found;
 
-	if (&bb->hook == &bm->blocks)
-		return -EFAULT;
+	node = zone->rtree;
+	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
+
+	for (i = zone->levels; i > 0; i--) {
+		int index;
+
+		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
+		index &= BM_RTREE_LEVEL_MASK;
+		BUG_ON(node->data[index] == 0);
+		node = (struct rtree_node *)node->data[index];
+	}
+
+node_found:
+	/* Update last position */
+	bm->cur.zone = zone;
+	bm->cur.node = node;
+	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
+
+	/* Set return values */
+	*addr = node->data;
+	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
 
-	/* The block has been found */
-	bm->cur.block = bb;
-	pfn -= bb->start_pfn;
-	bm->cur.bit = pfn + 1;
-	*bit_nr = pfn;
-	*addr = bb->data;
 	return 0;
 }
 
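The lookup walk above consumes block_nr from its most significant BM_RTREE_LEVEL_SHIFT-wide digit downward, one digit per level, mirroring the insert walk in add_rtree_block(). A standalone sketch of the per-level index extraction; the level shift of 9, the block number and the level count are assumed example values:

#include <stdio.h>

#define BM_RTREE_LEVEL_SHIFT	9
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

int main(void)
{
	unsigned long block_nr = 123456;	/* made-up leaf number */
	int i, levels = 2;			/* 123456 < 512 * 512, so 2 levels */

	/* Highest-order digit first, as in the walk above. */
	for (i = levels; i > 0; i--) {
		int index = (block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT))
			    & BM_RTREE_LEVEL_MASK;
		printf("level %d -> slot %d\n", i, index);	/* 241, then 64 */
	}
	return 0;
}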
@@ -528,6 +710,7 @@ static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 	if (!error)
 		set_bit(bit, addr);
+
 	return error;
 }
 
@@ -542,6 +725,14 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 	clear_bit(bit, addr);
 }
 
+static void memory_bm_clear_current(struct memory_bitmap *bm)
+{
+	int bit;
+
+	bit = max(bm->cur.node_bit - 1, 0);
+	clear_bit(bit, bm->cur.node->data);
+}
+
 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 {
 	void *addr;
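memory_bm_clear_current() leans on the cursor convention of the next_pfn lookup: after a bit is returned, node_bit already points one past it, so the "current" bit is node_bit - 1, and the max() keeps the index valid right after a position reset. A toy model of that convention over a single word:

#include <stdio.h>

int main(void)
{
	unsigned long data = 0xf0UL;	/* bits 4..7 set */
	int node_bit = 0;		/* cursor, as after a position reset */
	int bit;

	/* next_pfn equivalent: find a set bit, leave the cursor one past it */
	for (bit = node_bit; bit < 8; bit++)
		if ((data >> bit) & 1)
			break;
	node_bit = bit + 1;

	/* memory_bm_clear_current() equivalent: clear bit node_bit - 1 */
	bit = node_bit - 1 > 0 ? node_bit - 1 : 0;
	data &= ~(1UL << bit);

	printf("cleared bit %d, data is now %#lx\n", bit, data);	/* 4, 0xe0 */
	return 0;
}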
@@ -561,38 +752,70 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
 }
 
-/**
- * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
- * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
- * returned.
+/*
+ * rtree_next_node - Jumps to the next leave node
+ *
+ * Sets the position to the beginning of the next node in the
+ * memory bitmap. This is either the next node in the current
+ * zone's radix tree or the first node in the radix tree of the
+ * next zone.
  *
- * It is required to run memory_bm_position_reset() before the first call to
- * this function.
+ * Returns true if there is a next node, false otherwise.
  */
+static bool rtree_next_node(struct memory_bitmap *bm)
+{
+	bm->cur.node = list_entry(bm->cur.node->list.next,
+				  struct rtree_node, list);
+	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
+		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
+		bm->cur.node_bit = 0;
+		touch_softlockup_watchdog();
+		return true;
+	}
+
+	/* No more nodes, goto next zone */
+	bm->cur.zone = list_entry(bm->cur.zone->list.next,
+				  struct mem_zone_bm_rtree, list);
+	if (&bm->cur.zone->list != &bm->zones) {
+		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
+					  struct rtree_node, list);
+		bm->cur.node_pfn = 0;
+		bm->cur.node_bit = 0;
+		return true;
+	}
 
+	/* No more zones */
+	return false;
+}
+
+/**
+ * memory_bm_rtree_next_pfn - Find the next set bit in the bitmap @bm
+ *
+ * Starting from the last returned position this function searches
+ * for the next set bit in the memory bitmap and returns its
+ * number. If no more bit is set BM_END_OF_MAP is returned.
+ *
+ * It is required to run memory_bm_position_reset() before the
+ * first call to this function.
+ */
 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 {
-	struct bm_block *bb;
+	unsigned long bits, pfn, pages;
 	int bit;
 
-	bb = bm->cur.block;
 	do {
-		bit = bm->cur.bit;
-		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
-		if (bit < bm_block_bits(bb))
-			goto Return_pfn;
-
-		bb = list_entry(bb->hook.next, struct bm_block, hook);
-		bm->cur.block = bb;
-		bm->cur.bit = 0;
-	} while (&bb->hook != &bm->blocks);
+		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
+		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
+		bit = find_next_bit(bm->cur.node->data, bits,
+				    bm->cur.node_bit);
+		if (bit < bits) {
+			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
+			bm->cur.node_bit = bit + 1;
+			return pfn;
+		}
+	} while (rtree_next_node(bm));
 
-	memory_bm_position_reset(bm);
 	return BM_END_OF_MAP;
-
- Return_pfn:
-	bm->cur.bit = bit + 1;
-	return bb->start_pfn + bit;
 }
 
 /**
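memory_bm_next_pfn() scans the current leaf with find_next_bit() and only falls through to rtree_next_node() once the leaf is exhausted, so iteration cost tracks the number of leaves rather than the number of pfns. The toy iterator below mirrors that structure with 16-bit words standing in for page-sized leaves; the word contents are made-up example data:

#include <stdio.h>

#define BITS_PER_BLOCK	16
#define NBLOCKS		3

static const unsigned short blocks[NBLOCKS] = { 0x0000, 0x8001, 0x0004 };

int main(void)
{
	unsigned int node = 0, node_bit = 0;	/* the bm->cur position */

	for (;;) {
		int bit;

		/* find_next_bit() equivalent over the current block */
		for (bit = node_bit; bit < BITS_PER_BLOCK; bit++)
			if ((blocks[node] >> bit) & 1)
				break;
		if (bit < BITS_PER_BLOCK) {
			/* prints pfn 16, 31, 34 for the data above */
			printf("pfn %u\n", node * BITS_PER_BLOCK + bit);
			node_bit = bit + 1;	/* resume one past the hit */
			continue;
		}
		/* rtree_next_node() equivalent: next leaf, from bit 0 */
		if (++node == NBLOCKS)
			break;			/* BM_END_OF_MAP case */
		node_bit = 0;
	}
	return 0;
}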
@@ -816,12 +1039,17 @@ void free_basic_memory_bitmaps(void)
 
 unsigned int snapshot_additional_pages(struct zone *zone)
 {
-	unsigned int res;
+	unsigned int rtree, nodes;
+
+	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
+	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
+			      LINKED_PAGE_DATA_SIZE);
+	while (nodes > 1) {
+		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
+		rtree += nodes;
+	}
 
-	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-	res += DIV_ROUND_UP(res * sizeof(struct bm_block),
-			    LINKED_PAGE_DATA_SIZE);
-	return 2 * res;
+	return 2 * rtree;
 }
 
 #ifdef CONFIG_HIGHMEM
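The new snapshot_additional_pages() adds up one bitmap page per BM_BITS_PER_BLOCK zone pages, the chain-allocator overhead of the rtree_node structs, and one inner-node page per BM_ENTRIES_PER_LEVEL children on each level; the factor of two is unchanged from the old code. A worked instance under assumed values: 4 KiB pages, 64-bit longs, a 16-byte rtree_node, and a LINKED_PAGE_DATA_SIZE of 4088 bytes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BM_BITS_PER_BLOCK	32768UL	/* PAGE_SIZE * BITS_PER_BYTE, assumed */
#define BM_ENTRIES_PER_LEVEL	512UL	/* PAGE_SIZE / sizeof(long), assumed */
#define LINKED_PAGE_DATA_SIZE	4088UL	/* assumed PAGE_SIZE - sizeof(void *) */
#define RTREE_NODE_SIZE		16UL	/* assumed sizeof(struct rtree_node) */

int main(void)
{
	unsigned long spanned_pages = 1UL << 20;	/* a 4 GiB zone */
	unsigned long rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * RTREE_NODE_SIZE, LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	printf("extra pages: %lu\n", 2 * rtree);	/* prints: 68 */
	return 0;
}

With these assumed numbers a 4 GiB zone reserves 68 extra pages (272 KiB): 32 leaves, one chain page for the node structs and one inner node, doubled.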
@@ -1094,23 +1322,35 @@ static struct memory_bitmap copy_bm;
 
 void swsusp_free(void)
 {
-	struct zone *zone;
-	unsigned long pfn, max_zone_pfn;
+	unsigned long fb_pfn, fr_pfn;
 
-	for_each_populated_zone(zone) {
-		max_zone_pfn = zone_end_pfn(zone);
-		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
-
-				if (swsusp_page_is_forbidden(page) &&
-				    swsusp_page_is_free(page)) {
-					swsusp_unset_page_forbidden(page);
-					swsusp_unset_page_free(page);
-					__free_page(page);
-				}
-			}
+	memory_bm_position_reset(forbidden_pages_map);
+	memory_bm_position_reset(free_pages_map);
+
+loop:
+	fr_pfn = memory_bm_next_pfn(free_pages_map);
+	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+
+	/*
+	 * Find the next bit set in both bitmaps. This is guaranteed to
+	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
+	 */
+	do {
+		if (fb_pfn < fr_pfn)
+			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+		if (fr_pfn < fb_pfn)
+			fr_pfn = memory_bm_next_pfn(free_pages_map);
+	} while (fb_pfn != fr_pfn);
+
+	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
+		struct page *page = pfn_to_page(fr_pfn);
+
+		memory_bm_clear_current(forbidden_pages_map);
+		memory_bm_clear_current(free_pages_map);
+		__free_page(page);
+		goto loop;
 	}
+
 	nr_copy_pages = 0;
 	nr_meta_pages = 0;
 	restore_pblist = NULL;
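The rewritten swsusp_free() intersects two ascending pfn streams instead of probing every pfn in every zone, and BM_END_OF_MAP acts as a shared sentinel that both streams eventually return, so the catch-up loop must terminate. A self-contained sketch of the same scheme over two hypothetical, sentinel-terminated arrays:

#include <stdio.h>

#define END	(~0UL)	/* stands in for BM_END_OF_MAP */

/* next_from() plays the role of memory_bm_next_pfn() for an array. */
static unsigned long next_from(const unsigned long *list, unsigned int *pos)
{
	return list[(*pos)++];	/* ascending, END-terminated */
}

int main(void)
{
	/* made-up "forbidden" and "free" pfn streams */
	static const unsigned long fb[] = { 3, 5, 9, 12, END };
	static const unsigned long fr[] = { 1, 5, 8, 12, END };
	unsigned int fbi = 0, fri = 0;
	unsigned long fb_pfn = next_from(fb, &fbi);
	unsigned long fr_pfn = next_from(fr, &fri);

	for (;;) {
		/* catch-up loop, as in the patch */
		while (fb_pfn != fr_pfn) {
			if (fb_pfn < fr_pfn)
				fb_pfn = next_from(fb, &fbi);
			else
				fr_pfn = next_from(fr, &fri);
		}
		if (fr_pfn == END)
			break;
		printf("would free pfn %lu\n", fr_pfn);	/* 5, then 12 */
		fb_pfn = next_from(fb, &fbi);
		fr_pfn = next_from(fr, &fri);
	}
	return 0;
}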