Diffstat (limited to 'kernel/power/swsusp.c')
-rw-r--r--	kernel/power/swsusp.c	110
1 file changed, 36 insertions(+), 74 deletions(-)
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index f6abfdb0a02a..50667f4f3a2b 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -629,6 +629,11 @@ int swsusp_resume(void)
 	 * execution continues at place where swsusp_arch_suspend was called
 	 */
 	BUG_ON(!error);
+	/* The only reason why swsusp_arch_resume() can fail is memory being
+	 * very tight, so we have to free it as soon as we can to avoid
+	 * subsequent failures
+	 */
+	swsusp_free();
 	restore_processor_state();
 	restore_highmem();
 	touch_softlockup_watchdog();
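
Note: the new comment assumes swsusp_free() can locate every page the resume path allocated by looking at page flags alone. A minimal sketch of such a sweep, inferred from the flag protocol this patch establishes (not the verbatim kernel function):

	void swsusp_free(void)
	{
		struct zone *zone;
		unsigned long zone_pfn;

		/* Walk every valid pfn and release any page carrying both
		 * flags; the hunks below set exactly this pair on each page
		 * they hand out or relocate into.
		 */
		for_each_zone (zone)
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
				if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
					struct page *page;

					page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
					if (PageNosave(page) && PageNosaveFree(page)) {
						ClearPageNosave(page);
						ClearPageNosaveFree(page);
						free_page((unsigned long)page_address(page));
					}
				}
	}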
@@ -644,54 +649,28 @@ int swsusp_resume(void)
  *
  * We don't know which pages are usable until we allocate them.
  *
- * Allocated but unusable (ie eaten) memory pages are linked together
- * to create a list, so that we can free them easily
- *
- * We could have used a type other than (void *)
- * for this purpose, but ...
+ * Allocated but unusable (ie eaten) memory pages are marked so that
+ * swsusp_free() can release them
  */
-static void **eaten_memory = NULL;
 
-static inline void eat_page(void *page)
-{
-	void **c;
-
-	c = eaten_memory;
-	eaten_memory = page;
-	*eaten_memory = c;
-}
-
-unsigned long get_usable_page(gfp_t gfp_mask)
+unsigned long get_safe_page(gfp_t gfp_mask)
 {
 	unsigned long m;
 
-	m = get_zeroed_page(gfp_mask);
-	while (!PageNosaveFree(virt_to_page(m))) {
-		eat_page((void *)m);
+	do {
 		m = get_zeroed_page(gfp_mask);
-		if (!m)
-			break;
+		if (m && PageNosaveFree(virt_to_page(m)))
+			/* This is for swsusp_free() */
+			SetPageNosave(virt_to_page(m));
+	} while (m && PageNosaveFree(virt_to_page(m)));
+	if (m) {
+		/* This is for swsusp_free() */
+		SetPageNosave(virt_to_page(m));
+		SetPageNosaveFree(virt_to_page(m));
 	}
 	return m;
 }
 
-void free_eaten_memory(void)
-{
-	unsigned long m;
-	void **c;
-	int i = 0;
-
-	c = eaten_memory;
-	while (c) {
-		m = (unsigned long)c;
-		c = *c;
-		free_page(m);
-		i++;
-	}
-	eaten_memory = NULL;
-	pr_debug("swsusp: %d unused pages freed\n", i);
-}
-
 /**
  * check_pagedir - We ensure here that pages that the PBEs point to
  * won't collide with pages where we're going to restore from the loaded
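
Note: the deleted eat_page()/free_eaten_memory() pair is the "(void *) trick" the old comment alludes to: each unusable page stores the previous list head in its own first word, so the list costs no extra memory. A standalone userspace sketch of that idiom, with hypothetical names, for contrast with the flag-based replacement above:

	#include <stdlib.h>

	static void **eaten_list;	/* head of the intrusive list */

	static void eat_block(void *block)
	{
		*(void **)block = eaten_list;	/* block's first word -> old head */
		eaten_list = block;		/* block becomes the new head */
	}

	static void free_eaten_blocks(void)
	{
		while (eaten_list) {
			void *next = *eaten_list;

			free(eaten_list);
			eaten_list = next;
		}
	}

The patch trades this list for two page flags, which is what lets a single swsusp_free() pass reclaim pages allocated at any point along the resume path.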
@@ -709,7 +688,7 @@ static int check_pagedir(struct pbe *pblist)
 		p->address = 0UL;
 
 	for_each_pbe (p, pblist) {
-		p->address = get_usable_page(GFP_ATOMIC);
+		p->address = get_safe_page(GFP_ATOMIC);
 		if (!p->address)
 			return -ENOMEM;
 	}
@@ -728,7 +707,7 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
 	unsigned long zone_pfn;
 	struct pbe *pbpage, *tail, *p;
 	void *m;
-	int rel = 0, error = 0;
+	int rel = 0;
 
 	if (!pblist) /* a sanity check */
 		return NULL;
@@ -736,41 +715,37 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
 	pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n",
 			swsusp_info.pagedir_pages);
 
-	/* Set page flags */
+	/* Clear page flags */
 
 	for_each_zone (zone) {
 		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-			SetPageNosaveFree(pfn_to_page(zone_pfn +
+			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
+				ClearPageNosaveFree(pfn_to_page(zone_pfn +
 					zone->zone_start_pfn));
 	}
 
-	/* Clear orig addresses */
+	/* Mark orig addresses */
 
 	for_each_pbe (p, pblist)
-		ClearPageNosaveFree(virt_to_page(p->orig_address));
+		SetPageNosaveFree(virt_to_page(p->orig_address));
 
 	tail = pblist + PB_PAGE_SKIP;
 
 	/* Relocate colliding pages */
 
 	for_each_pb_page (pbpage, pblist) {
-		if (!PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
-			m = (void *)get_usable_page(GFP_ATOMIC | __GFP_COLD);
-			if (!m) {
-				error = -ENOMEM;
-				break;
-			}
+		if (PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
+			m = (void *)get_safe_page(GFP_ATOMIC | __GFP_COLD);
+			if (!m)
+				return NULL;
 			memcpy(m, (void *)pbpage, PAGE_SIZE);
 			if (pbpage == pblist)
 				pblist = (struct pbe *)m;
 			else
 				tail->next = (struct pbe *)m;
-
-			eat_page((void *)pbpage);
 			pbpage = (struct pbe *)m;
 
 			/* We have to link the PBEs again */
-
 			for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++)
 				if (p->next) /* needed to save the end */
 					p->next = p + 1;
@@ -780,15 +755,13 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
 		tail = pbpage + PB_PAGE_SKIP;
 	}
 
-	if (error) {
-		printk("\nswsusp: Out of memory\n\n");
-		free_pagedir(pblist);
-		free_eaten_memory();
-		pblist = NULL;
-		/* Is this even worth handling? It should never ever happen, and we
-		   have just lost user's state, anyway... */
-	} else
-		printk("swsusp: Relocated %d pages\n", rel);
+	/* This is for swsusp_free() */
+	for_each_pb_page (pbpage, pblist) {
+		SetPageNosave(virt_to_page(pbpage));
+		SetPageNosaveFree(virt_to_page(pbpage));
+	}
+
+	printk("swsusp: Relocated %d pages\n", rel);
 
 	return pblist;
 }
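
Note: read together with get_safe_page(), the rewritten relocation pass inverts the meaning of PageNosaveFree: it now marks pages that a PBE will restore into. A condensed sketch of the resulting protocol (for_each_valid_page() and relocate_to() are hypothetical shorthands, not kernel APIs; the real loops are in the hunks above):

	/* 1. Start clean: no pfn is marked as a restore target. */
	for_each_valid_page (page)
		ClearPageNosaveFree(page);

	/* 2. Mark every page a PBE restores into, so that
	 *    get_safe_page() never hands one of them out.
	 */
	for_each_pbe (p, pblist)
		SetPageNosaveFree(virt_to_page(p->orig_address));

	/* 3. Copy any pagedir page sitting on a marked pfn to a safe
	 *    page; setting both flags on the copy lets swsusp_free()
	 *    reclaim it later.
	 */
	for_each_pb_page (pbpage, pblist)
		if (PageNosaveFree(virt_to_page(pbpage)))
			relocate_to((void *)get_safe_page(GFP_ATOMIC));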
@@ -1006,9 +979,7 @@ static int read_pagedir(struct pbe *pblist)
 			break;
 	}
 
-	if (error)
-		free_pagedir(pblist);
-	else
+	if (!error)
 		BUG_ON(i != swsusp_info.pagedir_pages);
 
 	return error;
@@ -1051,15 +1022,6 @@ static int read_suspend_image(void)
 	if (!error)
 		error = data_read(pagedir_nosave);
 
-	if (error) { /* We fail cleanly */
-		free_eaten_memory();
-		for_each_pbe (p, pagedir_nosave)
-			if (p->address) {
-				free_page(p->address);
-				p->address = 0UL;
-			}
-		free_pagedir(pagedir_nosave);
-	}
 	return error;
 }
 
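Note: the two error paths above could be dropped because partial allocations no longer need per-site cleanup; every page obtained through get_safe_page() is already flagged for swsusp_free(). A plausible caller-side pattern under that assumption (illustrative only; the surrounding resume code is not part of this diff):

	int error;

	error = read_suspend_image();
	if (error)
		/* one sweep releases everything marked Nosave + NosaveFree */
		swsusp_free();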