Diffstat (limited to 'kernel/power/swsusp.c')
-rw-r--r--  kernel/power/swsusp.c | 210
1 file changed, 73 insertions(+), 137 deletions(-)
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index e1ab28b9b217..c05f46e7348f 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -73,6 +73,14 @@
 
 #include "power.h"
 
+#ifdef CONFIG_HIGHMEM
+int save_highmem(void);
+int restore_highmem(void);
+#else
+static int save_highmem(void) { return 0; }
+static int restore_highmem(void) { return 0; }
+#endif
+
 #define CIPHER "aes"
 #define MAXKEY 32
 #define MAXIV 32
@@ -500,6 +508,26 @@ static int write_pagedir(void)
 }
 
 /**
+ * enough_swap - Make sure we have enough swap to save the image.
+ *
+ * Returns TRUE or FALSE after checking the total amount of swap
+ * space avaiable.
+ *
+ * FIXME: si_swapinfo(&i) returns all swap devices information.
+ * We should only consider resume_device.
+ */
+
+static int enough_swap(unsigned int nr_pages)
+{
+	struct sysinfo i;
+
+	si_swapinfo(&i);
+	pr_debug("swsusp: available swap: %lu pages\n", i.freeswap);
+	return i.freeswap > (nr_pages + PAGES_FOR_IO +
+		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
+}
+
+/**
  * write_suspend_image - Write entire image and metadata.
  *
  */
@@ -507,6 +535,11 @@ static int write_suspend_image(void)
 {
 	int error;
 
+	if (!enough_swap(nr_copy_pages)) {
+		printk(KERN_ERR "swsusp: Not enough free swap\n");
+		return -ENOSPC;
+	}
+
 	init_header();
 	if ((error = data_write()))
 		goto FreeData;
@@ -526,27 +559,6 @@ static int write_suspend_image(void)
 	goto Done;
 }
 
-/**
- * enough_swap - Make sure we have enough swap to save the image.
- *
- * Returns TRUE or FALSE after checking the total amount of swap
- * space avaiable.
- *
- * FIXME: si_swapinfo(&i) returns all swap devices information.
- * We should only consider resume_device.
- */
-
-int enough_swap(unsigned int nr_pages)
-{
-	struct sysinfo i;
-
-	si_swapinfo(&i);
-	pr_debug("swsusp: available swap: %lu pages\n", i.freeswap);
-	return i.freeswap > (nr_pages + PAGES_FOR_IO +
-		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
-}
-
-
 /* It is important _NOT_ to umount filesystems at this point. We want
  * them synced (in case something goes wrong) but we DO not want to mark
  * filesystem clean: it is not. (And it does not matter, if we resume
@@ -556,12 +568,15 @@ int swsusp_write(void)
 {
 	int error;
 
+	if ((error = swsusp_swap_check())) {
+		printk(KERN_ERR "swsusp: cannot find swap device, try swapon -a.\n");
+		return error;
+	}
 	lock_swapdevices();
 	error = write_suspend_image();
 	/* This will unlock ignored swap devices since writing is finished */
 	lock_swapdevices();
 	return error;
-
 }
 
 
@@ -569,6 +584,7 @@ int swsusp_write(void)
 int swsusp_suspend(void)
 {
 	int error;
+
 	if ((error = arch_prepare_suspend()))
 		return error;
 	local_irq_disable();
@@ -580,15 +596,12 @@ int swsusp_suspend(void)
 	 */
 	if ((error = device_power_down(PMSG_FREEZE))) {
 		printk(KERN_ERR "Some devices failed to power down, aborting suspend\n");
-		local_irq_enable();
-		return error;
+		goto Enable_irqs;
 	}
 
-	if ((error = swsusp_swap_check())) {
-		printk(KERN_ERR "swsusp: cannot find swap device, try swapon -a.\n");
-		device_power_up();
-		local_irq_enable();
-		return error;
+	if ((error = save_highmem())) {
+		printk(KERN_ERR "swsusp: Not enough free pages for highmem\n");
+		goto Restore_highmem;
 	}
 
 	save_processor_state();
@@ -596,8 +609,10 @@ int swsusp_suspend(void)
 		printk(KERN_ERR "Error %d suspending\n", error);
 	/* Restore control flow magically appears here */
 	restore_processor_state();
+Restore_highmem:
 	restore_highmem();
 	device_power_up();
+Enable_irqs:
 	local_irq_enable();
 	return error;
 }
@@ -629,127 +644,43 @@ int swsusp_resume(void)
 }
 
 /**
- * On resume, for storing the PBE list and the image,
- * we can only use memory pages that do not conflict with the pages
- * which had been used before suspend.
- *
- * We don't know which pages are usable until we allocate them.
- *
- * Allocated but unusable (ie eaten) memory pages are marked so that
- * swsusp_free() can release them
- */
-
-unsigned long get_safe_page(gfp_t gfp_mask)
-{
-	unsigned long m;
-
-	do {
-		m = get_zeroed_page(gfp_mask);
-		if (m && PageNosaveFree(virt_to_page(m)))
-			/* This is for swsusp_free() */
-			SetPageNosave(virt_to_page(m));
-	} while (m && PageNosaveFree(virt_to_page(m)));
-	if (m) {
-		/* This is for swsusp_free() */
-		SetPageNosave(virt_to_page(m));
-		SetPageNosaveFree(virt_to_page(m));
-	}
-	return m;
-}
-
-/**
- * check_pagedir - We ensure here that pages that the PBEs point to
- * won't collide with pages where we're going to restore from the loaded
- * pages later
- */
-
-static int check_pagedir(struct pbe *pblist)
-{
-	struct pbe *p;
-
-	/* This is necessary, so that we can free allocated pages
-	 * in case of failure
-	 */
-	for_each_pbe (p, pblist)
-		p->address = 0UL;
-
-	for_each_pbe (p, pblist) {
-		p->address = get_safe_page(GFP_ATOMIC);
-		if (!p->address)
-			return -ENOMEM;
-	}
-	return 0;
-}
-
-/**
- * swsusp_pagedir_relocate - It is possible, that some memory pages
- * occupied by the list of PBEs collide with pages where we're going to
- * restore from the loaded pages later. We relocate them here.
+ * mark_unsafe_pages - mark the pages that cannot be used for storing
+ * the image during resume, because they conflict with the pages that
+ * had been used before suspend
  */
 
-static struct pbe *swsusp_pagedir_relocate(struct pbe *pblist)
+static void mark_unsafe_pages(struct pbe *pblist)
 {
 	struct zone *zone;
 	unsigned long zone_pfn;
-	struct pbe *pbpage, *tail, *p;
-	void *m;
-	int rel = 0;
+	struct pbe *p;
 
 	if (!pblist) /* a sanity check */
-		return NULL;
-
-	pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n",
-			swsusp_info.pagedir_pages);
+		return;
 
 	/* Clear page flags */
-
 	for_each_zone (zone) {
 		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
 			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
 				ClearPageNosaveFree(pfn_to_page(zone_pfn +
					zone->zone_start_pfn));
 	}
 
 	/* Mark orig addresses */
-
 	for_each_pbe (p, pblist)
 		SetPageNosaveFree(virt_to_page(p->orig_address));
 
-	tail = pblist + PB_PAGE_SKIP;
-
-	/* Relocate colliding pages */
-
-	for_each_pb_page (pbpage, pblist) {
-		if (PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
-			m = (void *)get_safe_page(GFP_ATOMIC | __GFP_COLD);
-			if (!m)
-				return NULL;
-			memcpy(m, (void *)pbpage, PAGE_SIZE);
-			if (pbpage == pblist)
-				pblist = (struct pbe *)m;
-			else
-				tail->next = (struct pbe *)m;
-			pbpage = (struct pbe *)m;
-
-			/* We have to link the PBEs again */
-			for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++)
-				if (p->next) /* needed to save the end */
-					p->next = p + 1;
-
-			rel++;
-		}
-		tail = pbpage + PB_PAGE_SKIP;
-	}
+}
 
-	/* This is for swsusp_free() */
-	for_each_pb_page (pbpage, pblist) {
-		SetPageNosave(virt_to_page(pbpage));
-		SetPageNosaveFree(virt_to_page(pbpage));
+static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
+{
+	/* We assume both lists contain the same number of elements */
+	while (src) {
+		dst->orig_address = src->orig_address;
+		dst->swap_address = src->swap_address;
+		dst = dst->next;
+		src = src->next;
 	}
-
-	printk("swsusp: Relocated %d pages\n", rel);
-
-	return pblist;
 }
 
 /*
@@ -888,7 +819,7 @@ static int check_sig(void)
 	 * Reset swap signature now.
 	 */
 	error = bio_write_page(0, &swsusp_header);
-      } else {
+	} else {
 		return -EINVAL;
 	}
 	if (!error)
@@ -990,20 +921,25 @@ static int read_suspend_image(void)
 	int error = 0;
 	struct pbe *p;
 
-	if (!(p = alloc_pagedir(nr_copy_pages)))
+	if (!(p = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 0)))
 		return -ENOMEM;
 
 	if ((error = read_pagedir(p)))
 		return error;
-
 	create_pbe_list(p, nr_copy_pages);
-
-	if (!(pagedir_nosave = swsusp_pagedir_relocate(p)))
+	mark_unsafe_pages(p);
+	pagedir_nosave = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
+	if (pagedir_nosave) {
+		create_pbe_list(pagedir_nosave, nr_copy_pages);
+		copy_page_backup_list(pagedir_nosave, p);
+	}
+	free_pagedir(p);
+	if (!pagedir_nosave)
 		return -ENOMEM;
 
 	/* Allocate memory for the image and read the data from swap */
 
-	error = check_pagedir(pagedir_nosave);
+	error = alloc_data_pages(pagedir_nosave, GFP_ATOMIC, 1);
 
 	if (!error)
 		error = data_read(pagedir_nosave);