path: root/kernel/power/snapshot.c
author     Rafael J. Wysocki <rjw@sisk.pl>  2007-05-06 17:50:43 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 15:12:59 -0400
commit     74dfd666de861c97d47bdbd892f6d21b801d0247 (patch)
tree       7200946212cf546f4e5fac31db3dc97dbb144300 /kernel/power/snapshot.c
parent     7be9823491ecbaf9700d7d3502cb4b4dd0ed868a (diff)
swsusp: do not use page flags
Make swsusp use memory bitmaps instead of page flags for marking 'nosave'
and free pages.  This allows us to 'recycle' two page flags that can be
used for other purposes.  Also, the memory needed to store the bitmaps is
allocated when necessary (ie. before the suspend) and freed after the
resume which is more reasonable.

The patch is designed to minimize the amount of changes and there are some
nice simplifications and optimizations possible on top of it.  I am going
to implement them separately in the future.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
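As a rough illustration of the bookkeeping this patch introduces, the standalone C sketch below records page-frame state in a separate bitmap indexed by pfn instead of in per-page flags. It is only a userspace illustration under assumed names and sizes (MAX_PFN, set_page_free(), page_is_free() are made up for the example); the patch itself keeps the state in struct memory_bitmap via the swsusp_* helpers visible in the diff that follows.

#include <stdio.h>
#include <limits.h>

#define MAX_PFN 1024                                    /* assumed number of page frames */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* One bit per page frame, kept outside the page structures themselves. */
static unsigned long free_map[MAX_PFN / BITS_PER_LONG + 1];

static void set_page_free(unsigned long pfn)
{
        free_map[pfn / BITS_PER_LONG] |= 1UL << (pfn % BITS_PER_LONG);
}

static void clear_page_free(unsigned long pfn)
{
        free_map[pfn / BITS_PER_LONG] &= ~(1UL << (pfn % BITS_PER_LONG));
}

static int page_is_free(unsigned long pfn)
{
        return (free_map[pfn / BITS_PER_LONG] >> (pfn % BITS_PER_LONG)) & 1;
}

int main(void)
{
        set_page_free(42);
        printf("pfn 42 free? %d\n", page_is_free(42));  /* prints 1 */
        clear_page_free(42);
        printf("pfn 42 free? %d\n", page_is_free(42));  /* prints 0 */
        return 0;
}

Because the bitmap is a separate allocation, it can be created just before the suspend and freed after the resume, which is what lets the patch give the two page flags back for other uses.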
Diffstat (limited to 'kernel/power/snapshot.c')
-rw-r--r--  kernel/power/snapshot.c  250
1 file changed, 234 insertions(+), 16 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 48fc7a35571b..f66e4411795b 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pm.h>
 #include <linux/device.h>
+#include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
 #include <linux/console.h>
@@ -34,6 +35,10 @@
 
 #include "power.h"
 
+static int swsusp_page_is_free(struct page *);
+static void swsusp_set_page_forbidden(struct page *);
+static void swsusp_unset_page_forbidden(struct page *);
+
 /* List of PBEs needed for restoring the pages that were allocated before
  * the suspend and included in the suspend image, but have also been
  * allocated by the "resume" kernel, so their contents cannot be written
@@ -224,11 +229,6 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
  * of type unsigned long each). It also contains the pfns that
  * correspond to the start and end of the represented memory area and
  * the number of bit chunks in the block.
- *
- * NOTE: Memory bitmaps are used for two types of operations only:
- * "set a bit" and "find the next bit set". Moreover, the searching
- * is always carried out after all of the "set a bit" operations
- * on given bitmap.
  */
 
 #define BM_END_OF_MAP   (~0UL)
@@ -443,15 +443,13 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 }
 
 /**
- * memory_bm_set_bit - set the bit in the bitmap @bm that corresponds
+ * memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
  * to given pfn. The cur_zone_bm member of @bm and the cur_block member
  * of @bm->cur_zone_bm are updated.
- *
- * If the bit cannot be set, the function returns -EINVAL .
  */
 
-static int
-memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
+static void memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
+                               void **addr, unsigned int *bit_nr)
 {
         struct zone_bitmap *zone_bm;
         struct bm_block *bb;
@@ -463,8 +461,8 @@ memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
         /* We don't assume that the zones are sorted by pfns */
         while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
                 zone_bm = zone_bm->next;
-                if (unlikely(!zone_bm))
-                        return -EINVAL;
+
+                BUG_ON(!zone_bm);
         }
         bm->cur.zone_bm = zone_bm;
 }
@@ -475,13 +473,40 @@ memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
 
         while (pfn >= bb->end_pfn) {
                 bb = bb->next;
-                if (unlikely(!bb))
-                        return -EINVAL;
+
+                BUG_ON(!bb);
         }
         zone_bm->cur_block = bb;
         pfn -= bb->start_pfn;
-        set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK);
-        return 0;
+        *bit_nr = pfn % BM_BITS_PER_CHUNK;
+        *addr = bb->data + pfn / BM_BITS_PER_CHUNK;
+}
+
+static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
+{
+        void *addr;
+        unsigned int bit;
+
+        memory_bm_find_bit(bm, pfn, &addr, &bit);
+        set_bit(bit, addr);
+}
+
+static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
+{
+        void *addr;
+        unsigned int bit;
+
+        memory_bm_find_bit(bm, pfn, &addr, &bit);
+        clear_bit(bit, addr);
+}
+
+static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
+{
+        void *addr;
+        unsigned int bit;
+
+        memory_bm_find_bit(bm, pfn, &addr, &bit);
+        return test_bit(bit, addr);
 }
 
 /* Two auxiliary functions for memory_bm_next_pfn */
@@ -564,6 +589,199 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 }
 
 /**
+ * This structure represents a range of page frames the contents of which
+ * should not be saved during the suspend.
+ */
+
+struct nosave_region {
+        struct list_head list;
+        unsigned long start_pfn;
+        unsigned long end_pfn;
+};
+
+static LIST_HEAD(nosave_regions);
+
+/**
+ * register_nosave_region - register a range of page frames the contents
+ * of which should not be saved during the suspend (to be used in the early
+ * initialization code)
+ */
+
+void __init
+register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
+{
+        struct nosave_region *region;
+
+        if (start_pfn >= end_pfn)
+                return;
+
+        if (!list_empty(&nosave_regions)) {
+                /* Try to extend the previous region (they should be sorted) */
+                region = list_entry(nosave_regions.prev,
+                                struct nosave_region, list);
+                if (region->end_pfn == start_pfn) {
+                        region->end_pfn = end_pfn;
+                        goto Report;
+                }
+        }
+        /* This allocation cannot fail */
+        region = alloc_bootmem_low(sizeof(struct nosave_region));
+        region->start_pfn = start_pfn;
+        region->end_pfn = end_pfn;
+        list_add_tail(&region->list, &nosave_regions);
+ Report:
+        printk("swsusp: Registered nosave memory region: %016lx - %016lx\n",
+                start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
+}
+
+/*
+ * Set bits in this map correspond to the page frames the contents of which
+ * should not be saved during the suspend.
+ */
+static struct memory_bitmap *forbidden_pages_map;
+
+/* Set bits in this map correspond to free page frames. */
+static struct memory_bitmap *free_pages_map;
+
+/*
+ * Each page frame allocated for creating the image is marked by setting the
+ * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
+ */
+
+void swsusp_set_page_free(struct page *page)
+{
+        if (free_pages_map)
+                memory_bm_set_bit(free_pages_map, page_to_pfn(page));
+}
+
+static int swsusp_page_is_free(struct page *page)
+{
+        return free_pages_map ?
+                memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
+}
+
+void swsusp_unset_page_free(struct page *page)
+{
+        if (free_pages_map)
+                memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
+}
+
+static void swsusp_set_page_forbidden(struct page *page)
+{
+        if (forbidden_pages_map)
+                memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
+}
+
+int swsusp_page_is_forbidden(struct page *page)
+{
+        return forbidden_pages_map ?
+                memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
+}
+
+static void swsusp_unset_page_forbidden(struct page *page)
+{
+        if (forbidden_pages_map)
+                memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
+}
+
+/**
+ * mark_nosave_pages - set bits corresponding to the page frames the
+ * contents of which should not be saved in a given bitmap.
+ */
+
+static void mark_nosave_pages(struct memory_bitmap *bm)
+{
+        struct nosave_region *region;
+
+        if (list_empty(&nosave_regions))
+                return;
+
+        list_for_each_entry(region, &nosave_regions, list) {
+                unsigned long pfn;
+
+                printk("swsusp: Marking nosave pages: %016lx - %016lx\n",
+                                region->start_pfn << PAGE_SHIFT,
+                                region->end_pfn << PAGE_SHIFT);
+
+                for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
+                        memory_bm_set_bit(bm, pfn);
+        }
+}
+
+/**
+ * create_basic_memory_bitmaps - create bitmaps needed for marking page
+ * frames that should not be saved and free page frames. The pointers
+ * forbidden_pages_map and free_pages_map are only modified if everything
+ * goes well, because we don't want the bits to be used before both bitmaps
+ * are set up.
+ */
+
+int create_basic_memory_bitmaps(void)
+{
+        struct memory_bitmap *bm1, *bm2;
+        int error = 0;
+
+        BUG_ON(forbidden_pages_map || free_pages_map);
+
+        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_ATOMIC);
+        if (!bm1)
+                return -ENOMEM;
+
+        error = memory_bm_create(bm1, GFP_ATOMIC | __GFP_COLD, PG_ANY);
+        if (error)
+                goto Free_first_object;
+
+        bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_ATOMIC);
+        if (!bm2)
+                goto Free_first_bitmap;
+
+        error = memory_bm_create(bm2, GFP_ATOMIC | __GFP_COLD, PG_ANY);
+        if (error)
+                goto Free_second_object;
+
+        forbidden_pages_map = bm1;
+        free_pages_map = bm2;
+        mark_nosave_pages(forbidden_pages_map);
+
+        printk("swsusp: Basic memory bitmaps created\n");
+
+        return 0;
+
+ Free_second_object:
+        kfree(bm2);
+ Free_first_bitmap:
+        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
+ Free_first_object:
+        kfree(bm1);
+        return -ENOMEM;
+}
+
+/**
+ * free_basic_memory_bitmaps - free memory bitmaps allocated by
+ * create_basic_memory_bitmaps(). The auxiliary pointers are necessary
+ * so that the bitmaps themselves are not referred to while they are being
+ * freed.
+ */
+
+void free_basic_memory_bitmaps(void)
+{
+        struct memory_bitmap *bm1, *bm2;
+
+        BUG_ON(!(forbidden_pages_map && free_pages_map));
+
+        bm1 = forbidden_pages_map;
+        bm2 = free_pages_map;
+        forbidden_pages_map = NULL;
+        free_pages_map = NULL;
+        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
+        kfree(bm1);
+        memory_bm_free(bm2, PG_UNSAFE_CLEAR);
+        kfree(bm2);
+
+        printk("swsusp: Basic memory bitmaps freed\n");
+}
+
+/**
  * snapshot_additional_pages - estimate the number of additional pages
  * be needed for setting up the suspend image data structures for given
  * zone (usually the returned value is greater than the exact number)