 arch/x86_64/kernel/suspend.c |  84
 include/linux/suspend.h      |   3
 kernel/power/disk.c          |  14
 kernel/power/power.h         |   2
 kernel/power/snapshot.c      |   2
 kernel/power/swsusp.c        | 110
 6 files changed, 65 insertions(+), 150 deletions(-)
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index 02516823f514..fd2bef780882 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -147,57 +147,7 @@ extern int restore_image(void);
 
 pgd_t *temp_level4_pgt;
 
-static void **pages;
-
-static inline void *__add_page(void)
-{
-	void **c;
-
-	c = (void **)get_usable_page(GFP_ATOMIC);
-	if (c) {
-		*c = pages;
-		pages = c;
-	}
-	return c;
-}
-
-static inline void *__next_page(void)
-{
-	void **c;
-
-	c = pages;
-	if (c) {
-		pages = *c;
-		*c = NULL;
-	}
-	return c;
-}
-
-/*
- * Try to allocate as many usable pages as needed and daisy chain them.
- * If one allocation fails, free the pages allocated so far
- */
-static int alloc_usable_pages(unsigned long n)
-{
-	void *p;
-
-	pages = NULL;
-	do
-		if (!__add_page())
-			break;
-	while (--n);
-	if (n) {
-		p = __next_page();
-		while (p) {
-			free_page((unsigned long)p);
-			p = __next_page();
-		}
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
 {
 	long i, j;
 
@@ -211,7 +161,9 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
 		if (paddr >= end)
 			break;
 
-		pmd = (pmd_t *)__next_page();
+		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+		if (!pmd)
+			return -ENOMEM;
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
 			unsigned long pe;
@@ -223,13 +175,17 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
 			set_pmd(pmd, __pmd(pe));
 		}
 	}
+	return 0;
 }
 
-static void set_up_temporary_mappings(void)
+static int set_up_temporary_mappings(void)
 {
 	unsigned long start, end, next;
+	int error;
 
-	temp_level4_pgt = (pgd_t *)__next_page();
+	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!temp_level4_pgt)
+		return -ENOMEM;
 
 	/* It is safe to reuse the original kernel mapping */
 	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
@@ -240,29 +196,27 @@ static void set_up_temporary_mappings(void)
 	end = (unsigned long)pfn_to_kaddr(end_pfn);
 
 	for (; start < end; start = next) {
-		pud_t *pud = (pud_t *)__next_page();
+		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+		if (!pud)
+			return -ENOMEM;
 		next = start + PGDIR_SIZE;
 		if (next > end)
 			next = end;
-		res_phys_pud_init(pud, __pa(start), __pa(next));
+		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
+			return error;
 		set_pgd(temp_level4_pgt + pgd_index(start),
 			mk_kernel_pgd(__pa(pud)));
 	}
+	return 0;
 }
 
 int swsusp_arch_resume(void)
 {
-	unsigned long n;
+	int error;
 
-	n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
-	n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
-	pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
-	if (alloc_usable_pages(n)) {
-		free_eaten_memory();
-		return -ENOMEM;
-	}
 	/* We have got enough memory and from now on we cannot recover */
-	set_up_temporary_mappings();
+	if ((error = set_up_temporary_mappings()))
+		return error;
 	restore_image();
 	return 0;
 }
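
The block deleted above is an intrusive free list: every spare page stores, in its own first word, a pointer to the next spare page, so the pool needs no bookkeeping memory of its own. A minimal user-space sketch of the same daisy-chain technique, assuming 4 KiB pages from posix_memalign() (pool, pool_add and pool_take are illustrative names, not kernel symbols):

#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>

#define PAGE_SIZE 4096

/* head of the chain; each spare page links to the next in its first word */
static void **pool;

static void *pool_add(void)
{
	void **page;

	if (posix_memalign((void **)&page, PAGE_SIZE, PAGE_SIZE))
		return NULL;
	*page = pool;		/* store the old head inside the new page */
	pool = page;
	return page;
}

static void *pool_take(void)
{
	void **page = pool;

	if (page) {
		pool = *page;	/* the next pointer lives in the page itself */
		*page = NULL;
	}
	return page;
}

int main(void)
{
	void *p;

	/* preallocate three pages, then drain and free the chain */
	for (int i = 0; i < 3; i++)
		if (!pool_add())
			return 1;
	while ((p = pool_take()))
		free(p);
	return 0;
}
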
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 380915e9563d..a61c04f804b2 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -71,8 +71,7 @@ void restore_processor_state(void);
 struct saved_context;
 void __save_processor_state(struct saved_context *ctxt);
 void __restore_processor_state(struct saved_context *ctxt);
-extern unsigned long get_usable_page(gfp_t gfp_mask);
-extern void free_eaten_memory(void);
+unsigned long get_safe_page(gfp_t gfp_mask);
 
 /*
  * XXX: We try to keep some more pages free so that I/O operations succeed
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 761956e813f5..44ef5e799df0 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -30,7 +30,6 @@ extern int swsusp_check(void);
 extern int swsusp_read(void);
 extern void swsusp_close(void);
 extern int swsusp_resume(void);
-extern int swsusp_free(void);
 
 
 static int noresume = 0;
@@ -252,14 +251,17 @@ static int software_resume(void)
 
 	pr_debug("PM: Reading swsusp image.\n");
 
-	if ((error = swsusp_read()))
-		goto Cleanup;
+	if ((error = swsusp_read())) {
+		swsusp_free();
+		goto Thaw;
+	}
 
 	pr_debug("PM: Preparing devices for restore.\n");
 
 	if ((error = device_suspend(PMSG_FREEZE))) {
 		printk("Some devices failed to suspend\n");
-		goto Free;
+		swsusp_free();
+		goto Thaw;
 	}
 
 	mb();
@@ -268,9 +270,7 @@ static int software_resume(void)
 	swsusp_resume();
 	pr_debug("PM: Restore failed, recovering.n");
 	device_resume();
- Free:
-	swsusp_free();
- Cleanup:
+ Thaw:
 	unprepare_processes();
 Done:
 	/* For success case, the suspend path will release the lock */
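
The disk.c rework frees the image at each failure site and collapses the Free and Cleanup labels into a single Thaw target. A self-contained sketch of that single-label unwind idiom, with stub functions standing in for the swsusp calls (none of these names are real kernel APIs):

#include <stdio.h>

/* stubs standing in for swsusp_read(), device_suspend(), swsusp_free()
 * and unprepare_processes(); all invented for this sketch */
static int read_image(void)      { return -1; /* simulate a failed read */ }
static int suspend_devices(void) { return 0; }
static void free_image(void)     { puts("image freed"); }
static void thaw_tasks(void)     { puts("tasks thawed"); }
static int do_restore(void)      { return 0; }

static int resume_sketch(void)
{
	int error;

	if ((error = read_image())) {
		free_image();	/* drop the partial image right away */
		goto Thaw;
	}
	if ((error = suspend_devices())) {
		free_image();
		goto Thaw;
	}
	return do_restore();
 Thaw:
	thaw_tasks();		/* one recovery path for every failure */
	return error;
}

int main(void)
{
	return resume_sketch() ? 1 : 0;
}
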
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 28afcb090149..d4fd96a135ab 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -66,7 +66,7 @@ extern asmlinkage int swsusp_arch_suspend(void);
 extern asmlinkage int swsusp_arch_resume(void);
 
 extern int restore_highmem(void);
-extern void free_pagedir(struct pbe *pblist);
 extern struct pbe * alloc_pagedir(unsigned nr_pages);
 extern void create_pbe_list(struct pbe *pblist, unsigned nr_pages);
+extern void swsusp_free(void);
 extern int enough_swap(unsigned nr_pages);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 03916cf3ff02..84e686bdb40b 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -240,7 +240,7 @@ static void copy_data_pages(struct pbe *pblist)
  * free_pagedir - free pages allocated with alloc_pagedir()
  */
 
-void free_pagedir(struct pbe *pblist)
+static void free_pagedir(struct pbe *pblist)
 {
 	struct pbe *pbe;
 
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index f6abfdb0a02a..50667f4f3a2b 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -629,6 +629,11 @@ int swsusp_resume(void)
 	 * execution continues at place where swsusp_arch_suspend was called
 	 */
 	BUG_ON(!error);
+	/* The only reason why swsusp_arch_resume() can fail is memory being
+	 * very tight, so we have to free it as soon as we can to avoid
+	 * subsequent failures
+	 */
+	swsusp_free();
 	restore_processor_state();
 	restore_highmem();
 	touch_softlockup_watchdog();
@@ -644,54 +649,28 @@ int swsusp_resume(void)
  *
  * We don't know which pages are usable until we allocate them.
  *
- * Allocated but unusable (ie eaten) memory pages are linked together
- * to create a list, so that we can free them easily
- *
- * We could have used a type other than (void *)
- * for this purpose, but ...
+ * Allocated but unusable (ie eaten) memory pages are marked so that
+ * swsusp_free() can release them
  */
-static void **eaten_memory = NULL;
 
-static inline void eat_page(void *page)
-{
-	void **c;
-
-	c = eaten_memory;
-	eaten_memory = page;
-	*eaten_memory = c;
-}
-
-unsigned long get_usable_page(gfp_t gfp_mask)
+unsigned long get_safe_page(gfp_t gfp_mask)
 {
 	unsigned long m;
 
-	m = get_zeroed_page(gfp_mask);
-	while (!PageNosaveFree(virt_to_page(m))) {
-		eat_page((void *)m);
-		m = get_zeroed_page(gfp_mask);
-		if (!m)
-			break;
+	do {
+		m = get_zeroed_page(gfp_mask);
+		if (m && PageNosaveFree(virt_to_page(m)))
+			/* This is for swsusp_free() */
+			SetPageNosave(virt_to_page(m));
+	} while (m && PageNosaveFree(virt_to_page(m)));
+	if (m) {
+		/* This is for swsusp_free() */
+		SetPageNosave(virt_to_page(m));
+		SetPageNosaveFree(virt_to_page(m));
 	}
 	return m;
 }
 
-void free_eaten_memory(void)
-{
-	unsigned long m;
-	void **c;
-	int i = 0;
-
-	c = eaten_memory;
-	while (c) {
-		m = (unsigned long)c;
-		c = *c;
-		free_page(m);
-		i++;
-	}
-	eaten_memory = NULL;
-	pr_debug("swsusp: %d unused pages freed\n", i);
-}
-
 /**
  * check_pagedir - We ensure here that pages that the PBEs point to
  * won't collide with pages where we're going to restore from the loaded
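
The new get_safe_page() loops until get_zeroed_page() hands back a page outside the to-be-restored set; pages that land inside it are merely flagged so the later swsusp_free() sweep reclaims them. A toy user-space model of that allocate-until-safe loop, using plain arrays in place of page flags (arena, nosave and nosave_free are invented for illustration):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE  4096
#define POOL_PAGES 16

/* arena plays physical memory; the two flag arrays stand in for the
 * PageNosave / PageNosaveFree bits */
static char arena[POOL_PAGES][PAGE_SIZE];
static int next_page;
static unsigned char nosave[POOL_PAGES];	/* reserved for the later sweep */
static unsigned char nosave_free[POOL_PAGES];	/* unusable: image data lands here */

static int get_zeroed(void)
{
	if (next_page >= POOL_PAGES)
		return -1;	/* out of memory */
	memset(arena[next_page], 0, PAGE_SIZE);
	return next_page++;
}

/* keep allocating until a page falls outside the unusable set; flag
 * every page touched so one sweep can reclaim the rejects later */
static int get_safe(void)
{
	int m;

	do {
		m = get_zeroed();
		if (m >= 0 && nosave_free[m])
			nosave[m] = 1;
	} while (m >= 0 && nosave_free[m]);
	if (m >= 0) {
		nosave[m] = 1;
		nosave_free[m] = 1;	/* mark the winner for the sweep too */
	}
	return m;
}

int main(void)
{
	nosave_free[0] = nosave_free[1] = 1;	/* pretend pages 0-1 collide */
	printf("safe page: %d\n", get_safe());	/* prints 2 */
	return 0;
}
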
@@ -709,7 +688,7 @@ static int check_pagedir(struct pbe *pblist)
 		p->address = 0UL;
 
 	for_each_pbe (p, pblist) {
-		p->address = get_usable_page(GFP_ATOMIC);
+		p->address = get_safe_page(GFP_ATOMIC);
 		if (!p->address)
 			return -ENOMEM;
 	}
@@ -728,7 +707,7 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
 	unsigned long zone_pfn;
 	struct pbe *pbpage, *tail, *p;
 	void *m;
-	int rel = 0, error = 0;
+	int rel = 0;
 
 	if (!pblist) /* a sanity check */
 		return NULL;
@@ -736,41 +715,37 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
 	pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n",
 			swsusp_info.pagedir_pages);
 
-	/* Set page flags */
+	/* Clear page flags */
 
 	for_each_zone (zone) {
 		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-			SetPageNosaveFree(pfn_to_page(zone_pfn +
-					zone->zone_start_pfn));
+			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
+				ClearPageNosaveFree(pfn_to_page(zone_pfn +
+					zone->zone_start_pfn));
 	}
 
-	/* Clear orig addresses */
+	/* Mark orig addresses */
 
 	for_each_pbe (p, pblist)
-		ClearPageNosaveFree(virt_to_page(p->orig_address));
+		SetPageNosaveFree(virt_to_page(p->orig_address));
 
 	tail = pblist + PB_PAGE_SKIP;
 
 	/* Relocate colliding pages */
 
 	for_each_pb_page (pbpage, pblist) {
-		if (!PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
-			m = (void *)get_usable_page(GFP_ATOMIC | __GFP_COLD);
-			if (!m) {
-				error = -ENOMEM;
-				break;
-			}
+		if (PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
+			m = (void *)get_safe_page(GFP_ATOMIC | __GFP_COLD);
+			if (!m)
+				return NULL;
 			memcpy(m, (void *)pbpage, PAGE_SIZE);
 			if (pbpage == pblist)
 				pblist = (struct pbe *)m;
 			else
 				tail->next = (struct pbe *)m;
-
-			eat_page((void *)pbpage);
 			pbpage = (struct pbe *)m;
 
 			/* We have to link the PBEs again */
-
 			for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++)
 				if (p->next) /* needed to save the end */
 					p->next = p + 1;
@@ -780,15 +755,13 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
 			tail = pbpage + PB_PAGE_SKIP;
 	}
 
-	if (error) {
-		printk("\nswsusp: Out of memory\n\n");
-		free_pagedir(pblist);
-		free_eaten_memory();
-		pblist = NULL;
-	/* Is this even worth handling? It should never ever happen, and we
-	   have just lost user's state, anyway... */
-	} else
-		printk("swsusp: Relocated %d pages\n", rel);
+	/* This is for swsusp_free() */
+	for_each_pb_page (pbpage, pblist) {
+		SetPageNosave(virt_to_page(pbpage));
+		SetPageNosaveFree(virt_to_page(pbpage));
+	}
+
+	printk("swsusp: Relocated %d pages\n", rel);
 
 	return pblist;
 }
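
The relocation loop above moves a colliding pagedir page by copying it wholesale to a safe page, re-pointing the list head or the previous tail at the copy, and rebuilding the intra-page next pointers. A compact sketch of that copy-and-splice step on an ordinary block of linked entries (entry and PER_PAGE are illustrative; the kernel marks the original page for a later sweep rather than freeing it):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PER_PAGE 4

struct entry {
	int id;
	struct entry *next;
};

int main(void)
{
	struct entry *page = calloc(PER_PAGE, sizeof(*page));
	struct entry *copy = calloc(PER_PAGE, sizeof(*copy));

	if (!page || !copy)
		return 1;
	/* link the original entries; the last one terminates the list */
	for (int i = 0; i < PER_PAGE; i++) {
		page[i].id = i;
		page[i].next = (i < PER_PAGE - 1) ? &page[i + 1] : NULL;
	}
	/* relocate: copy the whole page, then re-link within the copy,
	 * preserving NULL as the end marker just like the PBE loop */
	memcpy(copy, page, PER_PAGE * sizeof(*page));
	for (int i = 0; i < PER_PAGE - 1; i++)
		if (copy[i].next)
			copy[i].next = &copy[i + 1];
	for (struct entry *e = copy; e; e = e->next)
		printf("entry %d\n", e->id);
	free(page);
	free(copy);
	return 0;
}
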
@@ -1006,9 +979,7 @@ static int read_pagedir(struct pbe *pblist)
 			break;
 	}
 
-	if (error)
-		free_pagedir(pblist);
-	else
+	if (!error)
 		BUG_ON(i != swsusp_info.pagedir_pages);
 
 	return error;
@@ -1051,15 +1022,6 @@ static int read_suspend_image(void)
 	if (!error)
 		error = data_read(pagedir_nosave);
 
-	if (error) { /* We fail cleanly */
-		free_eaten_memory();
-		for_each_pbe (p, pagedir_nosave)
-			if (p->address) {
-				free_page(p->address);
-				p->address = 0UL;
-			}
-		free_pagedir(pagedir_nosave);
-	}
 	return error;
 }
 