Diffstat (limited to 'kernel/power')

-rw-r--r--   kernel/power/power.h    |  3
-rw-r--r--   kernel/power/snapshot.c | 62
-rw-r--r--   kernel/power/swsusp.c   | 57

3 files changed, 52 insertions, 70 deletions
diff --git a/kernel/power/power.h b/kernel/power/power.h
index d4fd96a135ab..c98923e13e75 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -66,7 +66,8 @@ extern asmlinkage int swsusp_arch_suspend(void);
 extern asmlinkage int swsusp_arch_resume(void);
 
 extern int restore_highmem(void);
-extern struct pbe * alloc_pagedir(unsigned nr_pages);
+extern struct pbe *alloc_pagedir(unsigned nr_pages, gfp_t gfp_mask, int safe_needed);
 extern void create_pbe_list(struct pbe *pblist, unsigned nr_pages);
 extern void swsusp_free(void);
+extern int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed);
 extern int enough_swap(unsigned nr_pages);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 723f5179883e..96cc3e21e97d 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -269,9 +269,30 @@ void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
         pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
 }
 
-static void *alloc_image_page(void)
+/**
+ * @safe_needed - on resume, for storing the PBE list and the image,
+ * we can only use memory pages that do not conflict with the pages
+ * which had been used before suspend.
+ *
+ * The unsafe pages are marked with the PG_nosave_free flag
+ *
+ * Allocated but unusable (ie eaten) memory pages should be marked
+ * so that swsusp_free() can release them
+ */
+
+static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
 {
-        void *res = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
+        void *res;
+
+        if (safe_needed)
+                do {
+                        res = (void *)get_zeroed_page(gfp_mask);
+                        if (res && PageNosaveFree(virt_to_page(res)))
+                                /* This is for swsusp_free() */
+                                SetPageNosave(virt_to_page(res));
+                } while (res && PageNosaveFree(virt_to_page(res)));
+        else
+                res = (void *)get_zeroed_page(gfp_mask);
         if (res) {
                 SetPageNosave(virt_to_page(res));
                 SetPageNosaveFree(virt_to_page(res));
@@ -279,6 +300,11 @@ static void *alloc_image_page(void)
         return res;
 }
 
+unsigned long get_safe_page(gfp_t gfp_mask)
+{
+        return (unsigned long)alloc_image_page(gfp_mask, 1);
+}
+
 /**
  * alloc_pagedir - Allocate the page directory.
  *
@@ -292,7 +318,7 @@ static void *alloc_image_page(void)
  * On each page we set up a list of struct_pbe elements.
  */
 
-struct pbe *alloc_pagedir(unsigned int nr_pages)
+struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed)
 {
         unsigned int num;
         struct pbe *pblist, *pbe;
@@ -301,12 +327,12 @@ struct pbe *alloc_pagedir(unsigned int nr_pages)
                 return NULL;
 
         pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
-        pblist = alloc_image_page();
+        pblist = alloc_image_page(gfp_mask, safe_needed);
         /* FIXME: rewrite this ugly loop */
         for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
                         pbe = pbe->next, num += PBES_PER_PAGE) {
                 pbe += PB_PAGE_SKIP;
-                pbe->next = alloc_image_page();
+                pbe->next = alloc_image_page(gfp_mask, safe_needed);
         }
         if (!pbe) { /* get_zeroed_page() failed */
                 free_pagedir(pblist);
@@ -354,24 +380,32 @@ static int enough_free_mem(unsigned int nr_pages)
                 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
 }
 
+int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
+{
+        struct pbe *p;
+
+        for_each_pbe (p, pblist) {
+                p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
+                if (!p->address)
+                        return -ENOMEM;
+        }
+        return 0;
+}
 
 static struct pbe *swsusp_alloc(unsigned int nr_pages)
 {
-        struct pbe *pblist, *p;
+        struct pbe *pblist;
 
-        if (!(pblist = alloc_pagedir(nr_pages))) {
+        if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
                 printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
                 return NULL;
         }
         create_pbe_list(pblist, nr_pages);
 
-        for_each_pbe (p, pblist) {
-                p->address = (unsigned long)alloc_image_page();
-                if (!p->address) {
-                        printk(KERN_ERR "suspend: Allocating image pages failed.\n");
-                        swsusp_free();
-                        return NULL;
-                }
-        }
+        if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
+                printk(KERN_ERR "suspend: Allocating image pages failed.\n");
+                swsusp_free();
+                return NULL;
+        }
 
         return pblist;
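The heart of the snapshot.c change is the safe_needed branch of alloc_image_page(): on resume, a freshly allocated page may land on a frame the image itself needs (marked PG_nosave_free); such a page is kept but tagged PG_nosave so that swsusp_free() can release it later, and the loop simply allocates again until it gets a non-conflicting page. Below is a minimal userspace sketch of that retry-and-remember pattern, not kernel code: the frame pool, the unsafe[] and eaten[] arrays, and the helper names are invented stand-ins for the page allocator and the page flags.

/*
 * Minimal userspace model of the safe_needed retry loop (illustration
 * only; the pool, flags and helper names are invented, not kernel APIs).
 */
#include <stdio.h>

#define NR_FRAMES 8

static int unsafe[NR_FRAMES] = { 1, 1, 0, 1, 0, 0, 0, 0 }; /* ~ PG_nosave_free */
static int eaten[NR_FRAMES];  /* ~ PG_nosave: allocated but unusable          */
static int used[NR_FRAMES];   /* frame already handed out by the allocator    */

/* Stand-in for get_zeroed_page(): returns the next free frame, or -1. */
static int get_zeroed_frame(void)
{
        for (int i = 0; i < NR_FRAMES; i++)
                if (!used[i]) {
                        used[i] = 1;
                        return i;
                }
        return -1;
}

/* Models alloc_image_page(gfp_mask, 1): retry until a safe frame turns up. */
static int alloc_safe_frame(void)
{
        int f;

        do {
                f = get_zeroed_frame();
                if (f >= 0 && unsafe[f])
                        eaten[f] = 1;   /* remember it for later cleanup */
        } while (f >= 0 && unsafe[f]);
        return f;
}

int main(void)
{
        int f = alloc_safe_frame();

        printf("safe frame: %d\n", f);
        for (int i = 0; i < NR_FRAMES; i++)
                if (eaten[i])
                        printf("frame %d was eaten; cleanup must free it\n", i);
        return 0;
}

As in the patch, conflicting pages are deliberately not freed inside the loop, presumably because releasing them immediately could hand the same frame straight back to the next allocation attempt; they stay allocated until the cleanup pass (swsusp_free() in the kernel) runs.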
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index e1ab28b9b217..a456ffe7a3c8 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -629,59 +629,6 @@ int swsusp_resume(void)
 }
 
 /**
- * On resume, for storing the PBE list and the image,
- * we can only use memory pages that do not conflict with the pages
- * which had been used before suspend.
- *
- * We don't know which pages are usable until we allocate them.
- *
- * Allocated but unusable (ie eaten) memory pages are marked so that
- * swsusp_free() can release them
- */
-
-unsigned long get_safe_page(gfp_t gfp_mask)
-{
-        unsigned long m;
-
-        do {
-                m = get_zeroed_page(gfp_mask);
-                if (m && PageNosaveFree(virt_to_page(m)))
-                        /* This is for swsusp_free() */
-                        SetPageNosave(virt_to_page(m));
-        } while (m && PageNosaveFree(virt_to_page(m)));
-        if (m) {
-                /* This is for swsusp_free() */
-                SetPageNosave(virt_to_page(m));
-                SetPageNosaveFree(virt_to_page(m));
-        }
-        return m;
-}
-
-/**
- * check_pagedir - We ensure here that pages that the PBEs point to
- * won't collide with pages where we're going to restore from the loaded
- * pages later
- */
-
-static int check_pagedir(struct pbe *pblist)
-{
-        struct pbe *p;
-
-        /* This is necessary, so that we can free allocated pages
-         * in case of failure
-         */
-        for_each_pbe (p, pblist)
-                p->address = 0UL;
-
-        for_each_pbe (p, pblist) {
-                p->address = get_safe_page(GFP_ATOMIC);
-                if (!p->address)
-                        return -ENOMEM;
-        }
-        return 0;
-}
-
-/**
  * swsusp_pagedir_relocate - It is possible, that some memory pages
  * occupied by the list of PBEs collide with pages where we're going to
  * restore from the loaded pages later. We relocate them here.
@@ -990,7 +937,7 @@ static int read_suspend_image(void)
         int error = 0;
         struct pbe *p;
 
-        if (!(p = alloc_pagedir(nr_copy_pages)))
+        if (!(p = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 0)))
                 return -ENOMEM;
 
         if ((error = read_pagedir(p)))
@@ -1003,7 +950,7 @@ static int read_suspend_image(void)
 
         /* Allocate memory for the image and read the data from swap */
 
-        error = check_pagedir(pagedir_nosave);
+        error = alloc_data_pages(pagedir_nosave, GFP_ATOMIC, 1);
 
         if (!error)
                 error = data_read(pagedir_nosave);
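Taken together, the patch leaves a single allocator with two kinds of callers: the suspend path (swsusp_alloc) passes safe_needed = 0 because any free page will do, while the resume path in read_suspend_image() passes safe_needed = 1 for the image data and 0 for the pagedir, presumably because the pagedir is still relocated afterwards by swsusp_pagedir_relocate(). The fragment below is only a condensed model of that call flow; the stub bodies, flag values and struct pbe layout are illustrative stand-ins, not the kernel's definitions.

/*
 * Condensed model of the two call sites after this patch; the stubs,
 * flag values and struct pbe layout here are illustrative stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_t;
#define GFP_ATOMIC  0x20u     /* placeholder value */
#define __GFP_COLD 0x100u     /* placeholder value */

struct pbe { unsigned long address; struct pbe *next; };

/* Stub: the real function allocates one page per PBES_PER_PAGE entries. */
static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t mask, int safe_needed)
{
        printf("alloc_pagedir:    nr_pages=%u gfp=%#x safe_needed=%d\n",
               nr_pages, mask, safe_needed);
        return calloc(1, sizeof(struct pbe));
}

/* Stub: the real function fills p->address for every PBE in the list. */
static int alloc_data_pages(struct pbe *pblist, gfp_t mask, int safe_needed)
{
        (void)pblist;
        printf("alloc_data_pages: gfp=%#x safe_needed=%d\n", mask, safe_needed);
        return 0;
}

int main(void)
{
        /* Suspend path (swsusp_alloc): any free page is acceptable. */
        struct pbe *s = alloc_pagedir(1024, GFP_ATOMIC | __GFP_COLD, 0);
        alloc_data_pages(s, GFP_ATOMIC | __GFP_COLD, 0);

        /* Resume path (read_suspend_image): data pages must avoid the
         * frames being restored into, hence safe_needed=1; the pagedir
         * is relocated later by swsusp_pagedir_relocate(), so it uses 0. */
        struct pbe *r = alloc_pagedir(1024, GFP_ATOMIC, 0);
        alloc_data_pages(r, GFP_ATOMIC, 1);

        free(s);
        free(r);
        return 0;
}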