aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/power/swsusp.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/power/swsusp.c')
-rw-r--r--kernel/power/swsusp.c569
1 files changed, 45 insertions, 524 deletions
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 2d5c45676442..12db1d2ad61f 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -1,11 +1,10 @@
1/* 1/*
2 * linux/kernel/power/swsusp.c 2 * linux/kernel/power/swsusp.c
3 * 3 *
4 * This file is to realize architecture-independent 4 * This file provides code to write suspend image to swap and read it back.
5 * machine suspend feature using pretty near only high-level routines
6 * 5 *
7 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu> 6 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
8 * Copyright (C) 1998,2001-2004 Pavel Machek <pavel@suse.cz> 7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
9 * 8 *
10 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
11 * 10 *
@@ -47,11 +46,7 @@
47#include <linux/utsname.h> 46#include <linux/utsname.h>
48#include <linux/version.h> 47#include <linux/version.h>
49#include <linux/delay.h> 48#include <linux/delay.h>
50#include <linux/reboot.h>
51#include <linux/bitops.h> 49#include <linux/bitops.h>
52#include <linux/vt_kern.h>
53#include <linux/kbd_kern.h>
54#include <linux/keyboard.h>
55#include <linux/spinlock.h> 50#include <linux/spinlock.h>
56#include <linux/genhd.h> 51#include <linux/genhd.h>
57#include <linux/kernel.h> 52#include <linux/kernel.h>
@@ -63,10 +58,8 @@
63#include <linux/swapops.h> 58#include <linux/swapops.h>
64#include <linux/bootmem.h> 59#include <linux/bootmem.h>
65#include <linux/syscalls.h> 60#include <linux/syscalls.h>
66#include <linux/console.h>
67#include <linux/highmem.h> 61#include <linux/highmem.h>
68#include <linux/bio.h> 62#include <linux/bio.h>
69#include <linux/mount.h>
70 63
71#include <asm/uaccess.h> 64#include <asm/uaccess.h>
72#include <asm/mmu_context.h> 65#include <asm/mmu_context.h>
@@ -84,16 +77,10 @@
84#define MAXKEY 32 77#define MAXKEY 32
85#define MAXIV 32 78#define MAXIV 32
86 79
87/* References to section boundaries */
88extern const void __nosave_begin, __nosave_end;
89
90/* Variables to be preserved over suspend */
91static int nr_copy_pages_check;
92
93extern char resume_file[]; 80extern char resume_file[];
94 81
95/* Local variables that should not be affected by save */ 82/* Local variables that should not be affected by save */
96static unsigned int nr_copy_pages __nosavedata = 0; 83unsigned int nr_copy_pages __nosavedata = 0;
97 84
98/* Suspend pagedir is allocated before final copy, therefore it 85/* Suspend pagedir is allocated before final copy, therefore it
99 must be freed after resume 86 must be freed after resume
@@ -109,7 +96,7 @@ static unsigned int nr_copy_pages __nosavedata = 0;
109 MMU hardware. 96 MMU hardware.
110 */ 97 */
111suspend_pagedir_t *pagedir_nosave __nosavedata = NULL; 98suspend_pagedir_t *pagedir_nosave __nosavedata = NULL;
112static suspend_pagedir_t *pagedir_save; 99suspend_pagedir_t *pagedir_save;
113 100
114#define SWSUSP_SIG "S1SUSPEND" 101#define SWSUSP_SIG "S1SUSPEND"
115 102
@@ -124,12 +111,6 @@ static struct swsusp_header {
124static struct swsusp_info swsusp_info; 111static struct swsusp_info swsusp_info;
125 112
126/* 113/*
127 * XXX: We try to keep some more pages free so that I/O operations succeed
128 * without paging. Might this be more?
129 */
130#define PAGES_FOR_IO 512
131
132/*
133 * Saving part... 114 * Saving part...
134 */ 115 */
135 116
@@ -552,346 +533,6 @@ static int write_suspend_image(void)
552 goto Done; 533 goto Done;
553} 534}
554 535
555
556#ifdef CONFIG_HIGHMEM
557struct highmem_page {
558 char *data;
559 struct page *page;
560 struct highmem_page *next;
561};
562
563static struct highmem_page *highmem_copy;
564
565static int save_highmem_zone(struct zone *zone)
566{
567 unsigned long zone_pfn;
568 mark_free_pages(zone);
569 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
570 struct page *page;
571 struct highmem_page *save;
572 void *kaddr;
573 unsigned long pfn = zone_pfn + zone->zone_start_pfn;
574
575 if (!(pfn%1000))
576 printk(".");
577 if (!pfn_valid(pfn))
578 continue;
579 page = pfn_to_page(pfn);
580 /*
581 * This condition results from rvmalloc() sans vmalloc_32()
582 * and architectural memory reservations. This should be
583 * corrected eventually when the cases giving rise to this
584 * are better understood.
585 */
586 if (PageReserved(page)) {
587 printk("highmem reserved page?!\n");
588 continue;
589 }
590 BUG_ON(PageNosave(page));
591 if (PageNosaveFree(page))
592 continue;
593 save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
594 if (!save)
595 return -ENOMEM;
596 save->next = highmem_copy;
597 save->page = page;
598 save->data = (void *) get_zeroed_page(GFP_ATOMIC);
599 if (!save->data) {
600 kfree(save);
601 return -ENOMEM;
602 }
603 kaddr = kmap_atomic(page, KM_USER0);
604 memcpy(save->data, kaddr, PAGE_SIZE);
605 kunmap_atomic(kaddr, KM_USER0);
606 highmem_copy = save;
607 }
608 return 0;
609}
610#endif /* CONFIG_HIGHMEM */
611
612
613static int save_highmem(void)
614{
615#ifdef CONFIG_HIGHMEM
616 struct zone *zone;
617 int res = 0;
618
619 pr_debug("swsusp: Saving Highmem\n");
620 for_each_zone (zone) {
621 if (is_highmem(zone))
622 res = save_highmem_zone(zone);
623 if (res)
624 return res;
625 }
626#endif
627 return 0;
628}
629
630static int restore_highmem(void)
631{
632#ifdef CONFIG_HIGHMEM
633 printk("swsusp: Restoring Highmem\n");
634 while (highmem_copy) {
635 struct highmem_page *save = highmem_copy;
636 void *kaddr;
637 highmem_copy = save->next;
638
639 kaddr = kmap_atomic(save->page, KM_USER0);
640 memcpy(kaddr, save->data, PAGE_SIZE);
641 kunmap_atomic(kaddr, KM_USER0);
642 free_page((long) save->data);
643 kfree(save);
644 }
645#endif
646 return 0;
647}
648
649
650static int pfn_is_nosave(unsigned long pfn)
651{
652 unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
653 unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
654 return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
655}
656
657/**
658 * saveable - Determine whether a page should be cloned or not.
659 * @pfn: The page
660 *
661 * We save a page if it's Reserved, and not in the range of pages
662 * statically defined as 'unsaveable', or if it isn't reserved, and
663 * isn't part of a free chunk of pages.
664 */
665
666static int saveable(struct zone * zone, unsigned long * zone_pfn)
667{
668 unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
669 struct page * page;
670
671 if (!pfn_valid(pfn))
672 return 0;
673
674 page = pfn_to_page(pfn);
675 BUG_ON(PageReserved(page) && PageNosave(page));
676 if (PageNosave(page))
677 return 0;
678 if (PageReserved(page) && pfn_is_nosave(pfn)) {
679 pr_debug("[nosave pfn 0x%lx]", pfn);
680 return 0;
681 }
682 if (PageNosaveFree(page))
683 return 0;
684
685 return 1;
686}
687
688static void count_data_pages(void)
689{
690 struct zone *zone;
691 unsigned long zone_pfn;
692
693 nr_copy_pages = 0;
694
695 for_each_zone (zone) {
696 if (is_highmem(zone))
697 continue;
698 mark_free_pages(zone);
699 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
700 nr_copy_pages += saveable(zone, &zone_pfn);
701 }
702}
703
704
705static void copy_data_pages(void)
706{
707 struct zone *zone;
708 unsigned long zone_pfn;
709 struct pbe * pbe = pagedir_nosave;
710
711 pr_debug("copy_data_pages(): pages to copy: %d\n", nr_copy_pages);
712 for_each_zone (zone) {
713 if (is_highmem(zone))
714 continue;
715 mark_free_pages(zone);
716 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
717 if (saveable(zone, &zone_pfn)) {
718 struct page * page;
719 page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
720 BUG_ON(!pbe);
721 pbe->orig_address = (long) page_address(page);
722 /* copy_page is not usable for copying task structs. */
723 memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
724 pbe = pbe->next;
725 }
726 }
727 }
728 BUG_ON(pbe);
729}
730
731
732/**
733 * calc_nr - Determine the number of pages needed for a pbe list.
734 */
735
736static int calc_nr(int nr_copy)
737{
738 return nr_copy + (nr_copy+PBES_PER_PAGE-2)/(PBES_PER_PAGE-1);
739}
740
741/**
742 * free_pagedir - free pages allocated with alloc_pagedir()
743 */
744
745static inline void free_pagedir(struct pbe *pblist)
746{
747 struct pbe *pbe;
748
749 while (pblist) {
750 pbe = (pblist + PB_PAGE_SKIP)->next;
751 free_page((unsigned long)pblist);
752 pblist = pbe;
753 }
754}
755
756/**
757 * fill_pb_page - Create a list of PBEs on a given memory page
758 */
759
760static inline void fill_pb_page(struct pbe *pbpage)
761{
762 struct pbe *p;
763
764 p = pbpage;
765 pbpage += PB_PAGE_SKIP;
766 do
767 p->next = p + 1;
768 while (++p < pbpage);
769}
770
771/**
772 * create_pbe_list - Create a list of PBEs on top of a given chain
773 * of memory pages allocated with alloc_pagedir()
774 */
775
776static void create_pbe_list(struct pbe *pblist, unsigned nr_pages)
777{
778 struct pbe *pbpage, *p;
779 unsigned num = PBES_PER_PAGE;
780
781 for_each_pb_page (pbpage, pblist) {
782 if (num >= nr_pages)
783 break;
784
785 fill_pb_page(pbpage);
786 num += PBES_PER_PAGE;
787 }
788 if (pbpage) {
789 for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
790 p->next = p + 1;
791 p->next = NULL;
792 }
793 pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
794}
795
796/**
797 * alloc_pagedir - Allocate the page directory.
798 *
799 * First, determine exactly how many pages we need and
800 * allocate them.
801 *
802 * We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
803 * struct pbe elements (pbes) and the last element in the page points
804 * to the next page.
805 *
806 * On each page we set up a list of struct_pbe elements.
807 */
808
809static struct pbe * alloc_pagedir(unsigned nr_pages)
810{
811 unsigned num;
812 struct pbe *pblist, *pbe;
813
814 if (!nr_pages)
815 return NULL;
816
817 pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
818 pblist = (struct pbe *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
819 for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
820 pbe = pbe->next, num += PBES_PER_PAGE) {
821 pbe += PB_PAGE_SKIP;
822 pbe->next = (struct pbe *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
823 }
824 if (!pbe) { /* get_zeroed_page() failed */
825 free_pagedir(pblist);
826 pblist = NULL;
827 }
828 return pblist;
829}
830
831/**
832 * free_image_pages - Free pages allocated for snapshot
833 */
834
835static void free_image_pages(void)
836{
837 struct pbe * p;
838
839 for_each_pbe (p, pagedir_save) {
840 if (p->address) {
841 ClearPageNosave(virt_to_page(p->address));
842 free_page(p->address);
843 p->address = 0;
844 }
845 }
846}
847
848/**
849 * alloc_image_pages - Allocate pages for the snapshot.
850 */
851
852static int alloc_image_pages(void)
853{
854 struct pbe * p;
855
856 for_each_pbe (p, pagedir_save) {
857 p->address = get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
858 if (!p->address)
859 return -ENOMEM;
860 SetPageNosave(virt_to_page(p->address));
861 }
862 return 0;
863}
864
865/* Free pages we allocated for suspend. Suspend pages are allocated
866 * before atomic copy, so we need to free them after resume.
867 */
868void swsusp_free(void)
869{
870 BUG_ON(PageNosave(virt_to_page(pagedir_save)));
871 BUG_ON(PageNosaveFree(virt_to_page(pagedir_save)));
872 free_image_pages();
873 free_pagedir(pagedir_save);
874}
875
876
877/**
878 * enough_free_mem - Make sure we have enough free memory to snapshot.
879 *
880 * Returns TRUE or FALSE after checking the number of available
881 * free pages.
882 */
883
884static int enough_free_mem(void)
885{
886 if (nr_free_pages() < (nr_copy_pages + PAGES_FOR_IO)) {
887 pr_debug("swsusp: Not enough free pages: Have %d\n",
888 nr_free_pages());
889 return 0;
890 }
891 return 1;
892}
893
894
895/** 536/**
896 * enough_swap - Make sure we have enough swap to save the image. 537 * enough_swap - Make sure we have enough swap to save the image.
897 * 538 *
@@ -902,87 +543,14 @@ static int enough_free_mem(void)
902 * We should only consider resume_device. 543 * We should only consider resume_device.
903 */ 544 */
904 545
905static int enough_swap(void) 546int enough_swap(unsigned nr_pages)
906{ 547{
907 struct sysinfo i; 548 struct sysinfo i;
908 549
909 si_swapinfo(&i); 550 si_swapinfo(&i);
910 if (i.freeswap < (nr_copy_pages + PAGES_FOR_IO)) { 551 pr_debug("swsusp: available swap: %lu pages\n", i.freeswap);
911 pr_debug("swsusp: Not enough swap. Need %ld\n",i.freeswap); 552 return i.freeswap > (nr_pages + PAGES_FOR_IO +
912 return 0; 553 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
913 }
914 return 1;
915}
916
917static int swsusp_alloc(void)
918{
919 int error;
920
921 pagedir_nosave = NULL;
922 nr_copy_pages = calc_nr(nr_copy_pages);
923 nr_copy_pages_check = nr_copy_pages;
924
925 pr_debug("suspend: (pages needed: %d + %d free: %d)\n",
926 nr_copy_pages, PAGES_FOR_IO, nr_free_pages());
927
928 if (!enough_free_mem())
929 return -ENOMEM;
930
931 if (!enough_swap())
932 return -ENOSPC;
933
934 if (MAX_PBES < nr_copy_pages / PBES_PER_PAGE +
935 !!(nr_copy_pages % PBES_PER_PAGE))
936 return -ENOSPC;
937
938 if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) {
939 printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
940 return -ENOMEM;
941 }
942 create_pbe_list(pagedir_save, nr_copy_pages);
943 pagedir_nosave = pagedir_save;
944 if ((error = alloc_image_pages())) {
945 printk(KERN_ERR "suspend: Allocating image pages failed.\n");
946 swsusp_free();
947 return error;
948 }
949
950 return 0;
951}
952
953static int suspend_prepare_image(void)
954{
955 int error;
956
957 pr_debug("swsusp: critical section: \n");
958 if (save_highmem()) {
959 printk(KERN_CRIT "Suspend machine: Not enough free pages for highmem\n");
960 restore_highmem();
961 return -ENOMEM;
962 }
963
964 drain_local_pages();
965 count_data_pages();
966 printk("swsusp: Need to copy %u pages\n", nr_copy_pages);
967
968 error = swsusp_alloc();
969 if (error)
970 return error;
971
972 /* During allocating of suspend pagedir, new cold pages may appear.
973 * Kill them.
974 */
975 drain_local_pages();
976 copy_data_pages();
977
978 /*
979 * End of critical section. From now on, we can write to memory,
980 * but we should not touch disk. This specially means we must _not_
981 * touch swap space! Except we must write out our image of course.
982 */
983
984 printk("swsusp: critical section/: done (%d pages copied)\n", nr_copy_pages );
985 return 0;
986} 554}
987 555
988 556
@@ -994,7 +562,7 @@ static int suspend_prepare_image(void)
994int swsusp_write(void) 562int swsusp_write(void)
995{ 563{
996 int error; 564 int error;
997 device_resume(); 565
998 lock_swapdevices(); 566 lock_swapdevices();
999 error = write_suspend_image(); 567 error = write_suspend_image();
1000 /* This will unlock ignored swap devices since writing is finished */ 568 /* This will unlock ignored swap devices since writing is finished */
@@ -1004,14 +572,6 @@ int swsusp_write(void)
1004} 572}
1005 573
1006 574
1007extern asmlinkage int swsusp_arch_suspend(void);
1008extern asmlinkage int swsusp_arch_resume(void);
1009
1010
1011asmlinkage int swsusp_save(void)
1012{
1013 return suspend_prepare_image();
1014}
1015 575
1016int swsusp_suspend(void) 576int swsusp_suspend(void)
1017{ 577{
@@ -1043,7 +603,6 @@ int swsusp_suspend(void)
1043 printk(KERN_ERR "Error %d suspending\n", error); 603 printk(KERN_ERR "Error %d suspending\n", error);
1044 /* Restore control flow magically appears here */ 604 /* Restore control flow magically appears here */
1045 restore_processor_state(); 605 restore_processor_state();
1046 BUG_ON (nr_copy_pages_check != nr_copy_pages);
1047 restore_highmem(); 606 restore_highmem();
1048 device_power_up(); 607 device_power_up();
1049 local_irq_enable(); 608 local_irq_enable();
@@ -1063,6 +622,11 @@ int swsusp_resume(void)
1063 * execution continues at place where swsusp_arch_suspend was called 622 * execution continues at place where swsusp_arch_suspend was called
1064 */ 623 */
1065 BUG_ON(!error); 624 BUG_ON(!error);
625 /* The only reason why swsusp_arch_resume() can fail is memory being
626 * very tight, so we have to free it as soon as we can to avoid
627 * subsequent failures
628 */
629 swsusp_free();
1066 restore_processor_state(); 630 restore_processor_state();
1067 restore_highmem(); 631 restore_highmem();
1068 touch_softlockup_watchdog(); 632 touch_softlockup_watchdog();
@@ -1078,54 +642,28 @@ int swsusp_resume(void)
1078 * 642 *
1079 * We don't know which pages are usable until we allocate them. 643 * We don't know which pages are usable until we allocate them.
1080 * 644 *
1081 * Allocated but unusable (ie eaten) memory pages are linked together 645 * Allocated but unusable (ie eaten) memory pages are marked so that
1082 * to create a list, so that we can free them easily 646 * swsusp_free() can release them
1083 *
1084 * We could have used a type other than (void *)
1085 * for this purpose, but ...
1086 */ 647 */
1087static void **eaten_memory = NULL;
1088
1089static inline void eat_page(void *page)
1090{
1091 void **c;
1092
1093 c = eaten_memory;
1094 eaten_memory = page;
1095 *eaten_memory = c;
1096}
1097 648
1098unsigned long get_usable_page(unsigned gfp_mask) 649unsigned long get_safe_page(gfp_t gfp_mask)
1099{ 650{
1100 unsigned long m; 651 unsigned long m;
1101 652
1102 m = get_zeroed_page(gfp_mask); 653 do {
1103 while (!PageNosaveFree(virt_to_page(m))) {
1104 eat_page((void *)m);
1105 m = get_zeroed_page(gfp_mask); 654 m = get_zeroed_page(gfp_mask);
1106 if (!m) 655 if (m && PageNosaveFree(virt_to_page(m)))
1107 break; 656 /* This is for swsusp_free() */
657 SetPageNosave(virt_to_page(m));
658 } while (m && PageNosaveFree(virt_to_page(m)));
659 if (m) {
660 /* This is for swsusp_free() */
661 SetPageNosave(virt_to_page(m));
662 SetPageNosaveFree(virt_to_page(m));
1108 } 663 }
1109 return m; 664 return m;
1110} 665}
1111 666
1112void free_eaten_memory(void)
1113{
1114 unsigned long m;
1115 void **c;
1116 int i = 0;
1117
1118 c = eaten_memory;
1119 while (c) {
1120 m = (unsigned long)c;
1121 c = *c;
1122 free_page(m);
1123 i++;
1124 }
1125 eaten_memory = NULL;
1126 pr_debug("swsusp: %d unused pages freed\n", i);
1127}
1128
1129/** 667/**
1130 * check_pagedir - We ensure here that pages that the PBEs point to 668 * check_pagedir - We ensure here that pages that the PBEs point to
1131 * won't collide with pages where we're going to restore from the loaded 669 * won't collide with pages where we're going to restore from the loaded
@@ -1143,7 +681,7 @@ static int check_pagedir(struct pbe *pblist)
1143 p->address = 0UL; 681 p->address = 0UL;
1144 682
1145 for_each_pbe (p, pblist) { 683 for_each_pbe (p, pblist) {
1146 p->address = get_usable_page(GFP_ATOMIC); 684 p->address = get_safe_page(GFP_ATOMIC);
1147 if (!p->address) 685 if (!p->address)
1148 return -ENOMEM; 686 return -ENOMEM;
1149 } 687 }
@@ -1162,7 +700,7 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
1162 unsigned long zone_pfn; 700 unsigned long zone_pfn;
1163 struct pbe *pbpage, *tail, *p; 701 struct pbe *pbpage, *tail, *p;
1164 void *m; 702 void *m;
1165 int rel = 0, error = 0; 703 int rel = 0;
1166 704
1167 if (!pblist) /* a sanity check */ 705 if (!pblist) /* a sanity check */
1168 return NULL; 706 return NULL;
@@ -1170,41 +708,37 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
1170 pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n", 708 pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n",
1171 swsusp_info.pagedir_pages); 709 swsusp_info.pagedir_pages);
1172 710
1173 /* Set page flags */ 711 /* Clear page flags */
1174 712
1175 for_each_zone (zone) { 713 for_each_zone (zone) {
1176 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) 714 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
1177 SetPageNosaveFree(pfn_to_page(zone_pfn + 715 if (pfn_valid(zone_pfn + zone->zone_start_pfn))
716 ClearPageNosaveFree(pfn_to_page(zone_pfn +
1178 zone->zone_start_pfn)); 717 zone->zone_start_pfn));
1179 } 718 }
1180 719
1181 /* Clear orig addresses */ 720 /* Mark orig addresses */
1182 721
1183 for_each_pbe (p, pblist) 722 for_each_pbe (p, pblist)
1184 ClearPageNosaveFree(virt_to_page(p->orig_address)); 723 SetPageNosaveFree(virt_to_page(p->orig_address));
1185 724
1186 tail = pblist + PB_PAGE_SKIP; 725 tail = pblist + PB_PAGE_SKIP;
1187 726
1188 /* Relocate colliding pages */ 727 /* Relocate colliding pages */
1189 728
1190 for_each_pb_page (pbpage, pblist) { 729 for_each_pb_page (pbpage, pblist) {
1191 if (!PageNosaveFree(virt_to_page((unsigned long)pbpage))) { 730 if (PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
1192 m = (void *)get_usable_page(GFP_ATOMIC | __GFP_COLD); 731 m = (void *)get_safe_page(GFP_ATOMIC | __GFP_COLD);
1193 if (!m) { 732 if (!m)
1194 error = -ENOMEM; 733 return NULL;
1195 break;
1196 }
1197 memcpy(m, (void *)pbpage, PAGE_SIZE); 734 memcpy(m, (void *)pbpage, PAGE_SIZE);
1198 if (pbpage == pblist) 735 if (pbpage == pblist)
1199 pblist = (struct pbe *)m; 736 pblist = (struct pbe *)m;
1200 else 737 else
1201 tail->next = (struct pbe *)m; 738 tail->next = (struct pbe *)m;
1202
1203 eat_page((void *)pbpage);
1204 pbpage = (struct pbe *)m; 739 pbpage = (struct pbe *)m;
1205 740
1206 /* We have to link the PBEs again */ 741 /* We have to link the PBEs again */
1207
1208 for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++) 742 for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++)
1209 if (p->next) /* needed to save the end */ 743 if (p->next) /* needed to save the end */
1210 p->next = p + 1; 744 p->next = p + 1;
@@ -1214,15 +748,13 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
1214 tail = pbpage + PB_PAGE_SKIP; 748 tail = pbpage + PB_PAGE_SKIP;
1215 } 749 }
1216 750
1217 if (error) { 751 /* This is for swsusp_free() */
1218 printk("\nswsusp: Out of memory\n\n"); 752 for_each_pb_page (pbpage, pblist) {
1219 free_pagedir(pblist); 753 SetPageNosave(virt_to_page(pbpage));
1220 free_eaten_memory(); 754 SetPageNosaveFree(virt_to_page(pbpage));
1221 pblist = NULL; 755 }
1222 /* Is this even worth handling? It should never ever happen, and we 756
1223 have just lost user's state, anyway... */ 757 printk("swsusp: Relocated %d pages\n", rel);
1224 } else
1225 printk("swsusp: Relocated %d pages\n", rel);
1226 758
1227 return pblist; 759 return pblist;
1228} 760}
@@ -1440,9 +972,7 @@ static int read_pagedir(struct pbe *pblist)
1440 break; 972 break;
1441 } 973 }
1442 974
1443 if (error) 975 if (!error)
1444 free_pagedir(pblist);
1445 else
1446 BUG_ON(i != swsusp_info.pagedir_pages); 976 BUG_ON(i != swsusp_info.pagedir_pages);
1447 977
1448 return error; 978 return error;
@@ -1485,15 +1015,6 @@ static int read_suspend_image(void)
1485 if (!error) 1015 if (!error)
1486 error = data_read(pagedir_nosave); 1016 error = data_read(pagedir_nosave);
1487 1017
1488 if (error) { /* We fail cleanly */
1489 free_eaten_memory();
1490 for_each_pbe (p, pagedir_nosave)
1491 if (p->address) {
1492 free_page(p->address);
1493 p->address = 0UL;
1494 }
1495 free_pagedir(pagedir_nosave);
1496 }
1497 return error; 1018 return error;
1498} 1019}
1499 1020