Diffstat (limited to 'kernel/power/snapshot.c')
-rw-r--r--	kernel/power/snapshot.c	298
1 file changed, 259 insertions(+), 39 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 704c25a3ffec..128da11f01c2 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pm.h>
 #include <linux/device.h>
+#include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
 #include <linux/console.h>
@@ -34,6 +35,10 @@
 
 #include "power.h"
 
+static int swsusp_page_is_free(struct page *);
+static void swsusp_set_page_forbidden(struct page *);
+static void swsusp_unset_page_forbidden(struct page *);
+
 /* List of PBEs needed for restoring the pages that were allocated before
  * the suspend and included in the suspend image, but have also been
  * allocated by the "resume" kernel, so their contents cannot be written
@@ -67,15 +72,15 @@ static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 
 	res = (void *)get_zeroed_page(gfp_mask);
 	if (safe_needed)
-		while (res && PageNosaveFree(virt_to_page(res))) {
+		while (res && swsusp_page_is_free(virt_to_page(res))) {
 			/* The page is unsafe, mark it for swsusp_free() */
-			SetPageNosave(virt_to_page(res));
+			swsusp_set_page_forbidden(virt_to_page(res));
 			allocated_unsafe_pages++;
 			res = (void *)get_zeroed_page(gfp_mask);
 		}
 	if (res) {
-		SetPageNosave(virt_to_page(res));
-		SetPageNosaveFree(virt_to_page(res));
+		swsusp_set_page_forbidden(virt_to_page(res));
+		swsusp_set_page_free(virt_to_page(res));
 	}
 	return res;
 }
@@ -91,8 +96,8 @@ static struct page *alloc_image_page(gfp_t gfp_mask)
 
 	page = alloc_page(gfp_mask);
 	if (page) {
-		SetPageNosave(page);
-		SetPageNosaveFree(page);
+		swsusp_set_page_forbidden(page);
+		swsusp_set_page_free(page);
 	}
 	return page;
 }
@@ -110,9 +115,9 @@ static inline void free_image_page(void *addr, int clear_nosave_free)
 
 	page = virt_to_page(addr);
 
-	ClearPageNosave(page);
+	swsusp_unset_page_forbidden(page);
 	if (clear_nosave_free)
-		ClearPageNosaveFree(page);
+		swsusp_unset_page_free(page);
 
 	__free_page(page);
 }
@@ -224,11 +229,6 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
  * of type unsigned long each). It also contains the pfns that
  * correspond to the start and end of the represented memory area and
  * the number of bit chunks in the block.
- *
- * NOTE: Memory bitmaps are used for two types of operations only:
- *	"set a bit" and "find the next bit set". Moreover, the searching
- *	is always carried out after all of the "set a bit" operations
- *	on given bitmap.
  */
 
 #define BM_END_OF_MAP	(~0UL)
@@ -443,15 +443,13 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 }
 
 /**
- *	memory_bm_set_bit - set the bit in the bitmap @bm that corresponds
+ *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
  *	to given pfn. The cur_zone_bm member of @bm and the cur_block member
  *	of @bm->cur_zone_bm are updated.
- *
- *	If the bit cannot be set, the function returns -EINVAL .
  */
 
-static int
-memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
+static void memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
+				void **addr, unsigned int *bit_nr)
 {
 	struct zone_bitmap *zone_bm;
 	struct bm_block *bb;
@@ -463,8 +461,8 @@ memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
 		/* We don't assume that the zones are sorted by pfns */
 		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
 			zone_bm = zone_bm->next;
-			if (unlikely(!zone_bm))
-				return -EINVAL;
+
+			BUG_ON(!zone_bm);
 		}
 		bm->cur.zone_bm = zone_bm;
 	}
@@ -475,13 +473,40 @@ memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
 
 	while (pfn >= bb->end_pfn) {
 		bb = bb->next;
-		if (unlikely(!bb))
-			return -EINVAL;
+
+		BUG_ON(!bb);
 	}
 	zone_bm->cur_block = bb;
 	pfn -= bb->start_pfn;
-	set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK);
-	return 0;
+	*bit_nr = pfn % BM_BITS_PER_CHUNK;
+	*addr = bb->data + pfn / BM_BITS_PER_CHUNK;
+}
+
+static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
+{
+	void *addr;
+	unsigned int bit;
+
+	memory_bm_find_bit(bm, pfn, &addr, &bit);
+	set_bit(bit, addr);
+}
+
+static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
+{
+	void *addr;
+	unsigned int bit;
+
+	memory_bm_find_bit(bm, pfn, &addr, &bit);
+	clear_bit(bit, addr);
+}
+
+static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
+{
+	void *addr;
+	unsigned int bit;
+
+	memory_bm_find_bit(bm, pfn, &addr, &bit);
+	return test_bit(bit, addr);
 }
 
 /* Two auxiliary functions for memory_bm_next_pfn */
@@ -564,6 +589,199 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 }
 
 /**
+ *	This structure represents a range of page frames the contents of which
+ *	should not be saved during the suspend.
+ */
+
+struct nosave_region {
+	struct list_head list;
+	unsigned long start_pfn;
+	unsigned long end_pfn;
+};
+
+static LIST_HEAD(nosave_regions);
+
+/**
+ *	register_nosave_region - register a range of page frames the contents
+ *	of which should not be saved during the suspend (to be used in the early
+ *	initialization code)
+ */
+
+void __init
+register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
+{
+	struct nosave_region *region;
+
+	if (start_pfn >= end_pfn)
+		return;
+
+	if (!list_empty(&nosave_regions)) {
+		/* Try to extend the previous region (they should be sorted) */
+		region = list_entry(nosave_regions.prev,
+					struct nosave_region, list);
+		if (region->end_pfn == start_pfn) {
+			region->end_pfn = end_pfn;
+			goto Report;
+		}
+	}
+	/* This allocation cannot fail */
+	region = alloc_bootmem_low(sizeof(struct nosave_region));
+	region->start_pfn = start_pfn;
+	region->end_pfn = end_pfn;
+	list_add_tail(&region->list, &nosave_regions);
+ Report:
+	printk("swsusp: Registered nosave memory region: %016lx - %016lx\n",
+		start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
+}
+
+/*
+ * Set bits in this map correspond to the page frames the contents of which
+ * should not be saved during the suspend.
+ */
+static struct memory_bitmap *forbidden_pages_map;
+
+/* Set bits in this map correspond to free page frames. */
+static struct memory_bitmap *free_pages_map;
+
+/*
+ * Each page frame allocated for creating the image is marked by setting the
+ * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
+ */
+
+void swsusp_set_page_free(struct page *page)
+{
+	if (free_pages_map)
+		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
+}
+
+static int swsusp_page_is_free(struct page *page)
+{
+	return free_pages_map ?
+		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
+}
+
+void swsusp_unset_page_free(struct page *page)
+{
+	if (free_pages_map)
+		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
+}
+
+static void swsusp_set_page_forbidden(struct page *page)
+{
+	if (forbidden_pages_map)
+		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
+}
+
+int swsusp_page_is_forbidden(struct page *page)
+{
+	return forbidden_pages_map ?
+		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
+}
+
+static void swsusp_unset_page_forbidden(struct page *page)
+{
+	if (forbidden_pages_map)
+		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
+}
+
+/**
+ *	mark_nosave_pages - set bits corresponding to the page frames the
+ *	contents of which should not be saved in a given bitmap.
+ */
+
+static void mark_nosave_pages(struct memory_bitmap *bm)
+{
+	struct nosave_region *region;
+
+	if (list_empty(&nosave_regions))
+		return;
+
+	list_for_each_entry(region, &nosave_regions, list) {
+		unsigned long pfn;
+
+		printk("swsusp: Marking nosave pages: %016lx - %016lx\n",
+				region->start_pfn << PAGE_SHIFT,
+				region->end_pfn << PAGE_SHIFT);
+
+		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
+			memory_bm_set_bit(bm, pfn);
+	}
+}
+
+/**
+ *	create_basic_memory_bitmaps - create bitmaps needed for marking page
+ *	frames that should not be saved and free page frames. The pointers
+ *	forbidden_pages_map and free_pages_map are only modified if everything
+ *	goes well, because we don't want the bits to be used before both bitmaps
+ *	are set up.
+ */
+
+int create_basic_memory_bitmaps(void)
+{
+	struct memory_bitmap *bm1, *bm2;
+	int error = 0;
+
+	BUG_ON(forbidden_pages_map || free_pages_map);
+
+	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
+	if (!bm1)
+		return -ENOMEM;
+
+	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
+	if (error)
+		goto Free_first_object;
+
+	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
+	if (!bm2)
+		goto Free_first_bitmap;
+
+	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
+	if (error)
+		goto Free_second_object;
+
+	forbidden_pages_map = bm1;
+	free_pages_map = bm2;
+	mark_nosave_pages(forbidden_pages_map);
+
+	printk("swsusp: Basic memory bitmaps created\n");
+
+	return 0;
+
+ Free_second_object:
+	kfree(bm2);
+ Free_first_bitmap:
+	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
+ Free_first_object:
+	kfree(bm1);
+	return -ENOMEM;
+}
+
+/**
+ *	free_basic_memory_bitmaps - free memory bitmaps allocated by
+ *	create_basic_memory_bitmaps(). The auxiliary pointers are necessary
+ *	so that the bitmaps themselves are not referred to while they are being
+ *	freed.
+ */
+
+void free_basic_memory_bitmaps(void)
+{
+	struct memory_bitmap *bm1, *bm2;
+
+	BUG_ON(!(forbidden_pages_map && free_pages_map));
+
+	bm1 = forbidden_pages_map;
+	bm2 = free_pages_map;
+	forbidden_pages_map = NULL;
+	free_pages_map = NULL;
+	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
+	kfree(bm1);
+	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
+	kfree(bm2);
+
+	printk("swsusp: Basic memory bitmaps freed\n");
+}
+
+/**
  *	snapshot_additional_pages - estimate the number of additional pages
  *	be needed for setting up the suspend image data structures for given
  *	zone (usually the returned value is greater than the exact number)
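
The two groups of additions in the hunk above are meant to be used together: early architecture setup code registers its nosave ranges with register_nosave_region() while bootmem allocations are still possible, and the hibernation core later calls create_basic_memory_bitmaps(), which copies those ranges into forbidden_pages_map via mark_nosave_pages() and is paired with free_basic_memory_bitmaps(). The sketch below is illustrative only and not part of this patch: the caller names and the firmware address range are assumptions, and it presumes the new helpers are declared in <linux/suspend.h> as elsewhere in this series.

	/* Illustrative sketch, not part of this patch. */
	#include <linux/init.h>
	#include <linux/suspend.h>	/* assumed home of the declarations */
	#include <asm/page.h>

	#define EX_FW_START	0x000a0000UL	/* hypothetical firmware-owned range */
	#define EX_FW_END	0x00100000UL

	/* Early arch init: make sure the range is never written to the image. */
	void __init example_reserve_nosave(void)
	{
		register_nosave_region(EX_FW_START >> PAGE_SHIFT,
					EX_FW_END >> PAGE_SHIFT);
	}

	/* Hibernation core: bracket image handling with the bitmap lifetime. */
	int example_begin_hibernation(void)
	{
		int error = create_basic_memory_bitmaps();

		if (error)
			return error;	/* -ENOMEM, no bitmaps were installed */
		/* ... snapshot work uses the swsusp_page_* helpers here ... */
		free_basic_memory_bitmaps();
		return 0;
	}
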
@@ -615,7 +833,8 @@ static struct page *saveable_highmem_page(unsigned long pfn)
 
 	BUG_ON(!PageHighMem(page));
 
-	if (PageNosave(page) || PageReserved(page) || PageNosaveFree(page))
+	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
+	    PageReserved(page))
 		return NULL;
 
 	return page;
@@ -670,7 +889,7 @@ static struct page *saveable_page(unsigned long pfn)
 
 	BUG_ON(PageHighMem(page));
 
-	if (PageNosave(page) || PageNosaveFree(page))
+	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
 		return NULL;
 
 	if (PageReserved(page) && pfn_is_nosave(pfn))
@@ -810,9 +1029,10 @@ void swsusp_free(void)
 			if (pfn_valid(pfn)) {
 				struct page *page = pfn_to_page(pfn);
 
-				if (PageNosave(page) && PageNosaveFree(page)) {
-					ClearPageNosave(page);
-					ClearPageNosaveFree(page);
+				if (swsusp_page_is_forbidden(page) &&
+				    swsusp_page_is_free(page)) {
+					swsusp_unset_page_forbidden(page);
+					swsusp_unset_page_free(page);
 					__free_page(page);
 				}
 			}
@@ -1135,7 +1355,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (pfn_valid(pfn))
-				ClearPageNosaveFree(pfn_to_page(pfn));
+				swsusp_unset_page_free(pfn_to_page(pfn));
 	}
 
 	/* Mark pages that correspond to the "original" pfns as "unsafe" */
@@ -1144,7 +1364,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
 		pfn = memory_bm_next_pfn(bm);
 		if (likely(pfn != BM_END_OF_MAP)) {
 			if (likely(pfn_valid(pfn)))
-				SetPageNosaveFree(pfn_to_page(pfn));
+				swsusp_set_page_free(pfn_to_page(pfn));
 			else
 				return -EFAULT;
 		}
@@ -1310,14 +1530,14 @@ prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
 		struct page *page;
 
 		page = alloc_page(__GFP_HIGHMEM);
-		if (!PageNosaveFree(page)) {
+		if (!swsusp_page_is_free(page)) {
 			/* The page is "safe", set its bit the bitmap */
 			memory_bm_set_bit(bm, page_to_pfn(page));
 			safe_highmem_pages++;
 		}
 		/* Mark the page as allocated */
-		SetPageNosave(page);
-		SetPageNosaveFree(page);
+		swsusp_set_page_forbidden(page);
+		swsusp_set_page_free(page);
 	}
 	memory_bm_position_reset(bm);
 	safe_highmem_bm = bm;
@@ -1349,7 +1569,7 @@ get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
 	struct highmem_pbe *pbe;
 	void *kaddr;
 
-	if (PageNosave(page) && PageNosaveFree(page)) {
+	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
 		/* We have allocated the "original" page frame and we can
 		 * use it directly to store the loaded page.
 		 */
@@ -1511,14 +1731,14 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 			error = -ENOMEM;
 			goto Free;
 		}
-		if (!PageNosaveFree(virt_to_page(lp))) {
+		if (!swsusp_page_is_free(virt_to_page(lp))) {
 			/* The page is "safe", add it to the list */
 			lp->next = safe_pages_list;
 			safe_pages_list = lp;
 		}
 		/* Mark the page as allocated */
-		SetPageNosave(virt_to_page(lp));
-		SetPageNosaveFree(virt_to_page(lp));
+		swsusp_set_page_forbidden(virt_to_page(lp));
+		swsusp_set_page_free(virt_to_page(lp));
 		nr_pages--;
 	}
 	/* Free the reserved safe pages so that chain_alloc() can use them */
@@ -1547,7 +1767,7 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
 	if (PageHighMem(page))
 		return get_highmem_page_buffer(page, ca);
 
-	if (PageNosave(page) && PageNosaveFree(page))
+	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
 		/* We have allocated the "original" page frame and we can
 		 * use it directly to store the loaded page.
 		 */
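
Taken together, the call-site changes above follow one mechanical pattern: state that used to be encoded in the PageNosave() and PageNosaveFree() page flags is now kept in the two auxiliary bitmaps, and each flag macro maps one-for-one onto a new helper. Because every helper checks its bitmap pointer before touching it, the calls are also safe before create_basic_memory_bitmaps() has run. The snippet below is a hedged illustration of that mapping as it would look inside snapshot.c (the example function is hypothetical), not an additional hunk of the patch.

	/*
	 * Illustration of the substitution applied throughout this file;
	 * example_mark_image_page() is hypothetical and not part of the patch.
	 *
	 *	SetPageNosave(p)        ->  swsusp_set_page_forbidden(p)
	 *	ClearPageNosave(p)      ->  swsusp_unset_page_forbidden(p)
	 *	PageNosave(p)           ->  swsusp_page_is_forbidden(p)
	 *	SetPageNosaveFree(p)    ->  swsusp_set_page_free(p)
	 *	ClearPageNosaveFree(p)  ->  swsusp_unset_page_free(p)
	 *	PageNosaveFree(p)       ->  swsusp_page_is_free(p)
	 */
	static void example_mark_image_page(struct page *page)
	{
		/* same pairing as alloc_image_page(): forbidden + free together */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}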