Diffstat (limited to 'kernel/power/swsusp.c')
-rw-r--r-- | kernel/power/swsusp.c | 576
1 file changed, 45 insertions, 531 deletions
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 016504ccfccf..12db1d2ad61f 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -1,11 +1,10 @@
 /*
  * linux/kernel/power/swsusp.c
  *
- * This file is to realize architecture-independent
- * machine suspend feature using pretty near only high-level routines
+ * This file provides code to write suspend image to swap and read it back.
  *
  * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
- * Copyright (C) 1998,2001-2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
  *
  * This file is released under the GPLv2.
  *
@@ -47,11 +46,7 @@
 #include <linux/utsname.h>
 #include <linux/version.h>
 #include <linux/delay.h>
-#include <linux/reboot.h>
 #include <linux/bitops.h>
-#include <linux/vt_kern.h>
-#include <linux/kbd_kern.h>
-#include <linux/keyboard.h>
 #include <linux/spinlock.h>
 #include <linux/genhd.h>
 #include <linux/kernel.h>
@@ -63,10 +58,8 @@
 #include <linux/swapops.h>
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
-#include <linux/console.h>
 #include <linux/highmem.h>
 #include <linux/bio.h>
-#include <linux/mount.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -84,16 +77,10 @@
 #define MAXKEY 32
 #define MAXIV 32
 
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
-/* Variables to be preserved over suspend */
-static int nr_copy_pages_check;
-
 extern char resume_file[];
 
 /* Local variables that should not be affected by save */
-static unsigned int nr_copy_pages __nosavedata = 0;
+unsigned int nr_copy_pages __nosavedata = 0;
 
 /* Suspend pagedir is allocated before final copy, therefore it
    must be freed after resume
@@ -109,7 +96,7 @@ static unsigned int nr_copy_pages __nosavedata = 0;
    MMU hardware.
  */
 suspend_pagedir_t *pagedir_nosave __nosavedata = NULL;
-static suspend_pagedir_t *pagedir_save;
+suspend_pagedir_t *pagedir_save;
 
 #define SWSUSP_SIG "S1SUSPEND"
 
@@ -124,12 +111,6 @@ static struct swsusp_header {
 static struct swsusp_info swsusp_info;
 
 /*
- * XXX: We try to keep some more pages free so that I/O operations succeed
- * without paging. Might this be more?
- */
-#define PAGES_FOR_IO 512
-
-/*
  * Saving part...
  */
 
@@ -552,353 +533,6 @@ static int write_suspend_image(void)
         goto Done;
 }
 
-
-#ifdef CONFIG_HIGHMEM
-struct highmem_page {
-        char *data;
-        struct page *page;
-        struct highmem_page *next;
-};
-
-static struct highmem_page *highmem_copy;
-
-static int save_highmem_zone(struct zone *zone)
-{
-        unsigned long zone_pfn;
-        mark_free_pages(zone);
-        for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
-                struct page *page;
-                struct highmem_page *save;
-                void *kaddr;
-                unsigned long pfn = zone_pfn + zone->zone_start_pfn;
-
-                if (!(pfn%1000))
-                        printk(".");
-                if (!pfn_valid(pfn))
-                        continue;
-                page = pfn_to_page(pfn);
-                /*
-                 * PageReserved results from rvmalloc() sans vmalloc_32()
-                 * and architectural memory reservations.
-                 *
-                 * rvmalloc should not cause this, because all implementations
-                 * appear to always be using vmalloc_32 on architectures with
-                 * highmem. This is a good thing, because we would like to save
-                 * rvmalloc pages.
-                 *
-                 * It appears to be triggered by pages which do not point to
-                 * valid memory (see arch/i386/mm/init.c:one_highpage_init(),
-                 * which sets PageReserved if the page does not point to valid
-                 * RAM.
-                 *
-                 * XXX: must remove usage of PageReserved!
-                 */
-                if (PageReserved(page))
-                        continue;
-                BUG_ON(PageNosave(page));
-                if (PageNosaveFree(page))
-                        continue;
-                save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
-                if (!save)
-                        return -ENOMEM;
-                save->next = highmem_copy;
-                save->page = page;
-                save->data = (void *) get_zeroed_page(GFP_ATOMIC);
-                if (!save->data) {
-                        kfree(save);
-                        return -ENOMEM;
-                }
-                kaddr = kmap_atomic(page, KM_USER0);
-                memcpy(save->data, kaddr, PAGE_SIZE);
-                kunmap_atomic(kaddr, KM_USER0);
-                highmem_copy = save;
-        }
-        return 0;
-}
-#endif /* CONFIG_HIGHMEM */
-
-
-static int save_highmem(void)
-{
-#ifdef CONFIG_HIGHMEM
-        struct zone *zone;
-        int res = 0;
-
-        pr_debug("swsusp: Saving Highmem\n");
-        for_each_zone (zone) {
-                if (is_highmem(zone))
-                        res = save_highmem_zone(zone);
-                if (res)
-                        return res;
-        }
-#endif
-        return 0;
-}
-
-static int restore_highmem(void)
-{
-#ifdef CONFIG_HIGHMEM
-        printk("swsusp: Restoring Highmem\n");
-        while (highmem_copy) {
-                struct highmem_page *save = highmem_copy;
-                void *kaddr;
-                highmem_copy = save->next;
-
-                kaddr = kmap_atomic(save->page, KM_USER0);
-                memcpy(kaddr, save->data, PAGE_SIZE);
-                kunmap_atomic(kaddr, KM_USER0);
-                free_page((long) save->data);
-                kfree(save);
-        }
-#endif
-        return 0;
-}
-
-
-static int pfn_is_nosave(unsigned long pfn)
-{
-        unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
-        unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
-        return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
-}
-
-/**
- * saveable - Determine whether a page should be cloned or not.
- * @pfn: The page
- *
- * We save a page if it's Reserved, and not in the range of pages
- * statically defined as 'unsaveable', or if it isn't reserved, and
- * isn't part of a free chunk of pages.
- */
-
-static int saveable(struct zone * zone, unsigned long * zone_pfn)
-{
-        unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
-        struct page * page;
-
-        if (!pfn_valid(pfn))
-                return 0;
-
-        page = pfn_to_page(pfn);
-        if (PageNosave(page))
-                return 0;
-        if (pfn_is_nosave(pfn)) {
-                pr_debug("[nosave pfn 0x%lx]", pfn);
-                return 0;
-        }
-        if (PageNosaveFree(page))
-                return 0;
-
-        return 1;
-}
-
-static void count_data_pages(void)
-{
-        struct zone *zone;
-        unsigned long zone_pfn;
-
-        nr_copy_pages = 0;
-
-        for_each_zone (zone) {
-                if (is_highmem(zone))
-                        continue;
-                mark_free_pages(zone);
-                for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-                        nr_copy_pages += saveable(zone, &zone_pfn);
-        }
-}
-
-
-static void copy_data_pages(void)
-{
-        struct zone *zone;
-        unsigned long zone_pfn;
-        struct pbe * pbe = pagedir_nosave;
-
-        pr_debug("copy_data_pages(): pages to copy: %d\n", nr_copy_pages);
-        for_each_zone (zone) {
-                if (is_highmem(zone))
-                        continue;
-                mark_free_pages(zone);
-                for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
-                        if (saveable(zone, &zone_pfn)) {
-                                struct page * page;
-                                page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
-                                BUG_ON(!pbe);
-                                pbe->orig_address = (long) page_address(page);
-                                /* copy_page is not usable for copying task structs. */
-                                memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
-                                pbe = pbe->next;
-                        }
-                }
-        }
-        BUG_ON(pbe);
-}
-
-
-/**
- * calc_nr - Determine the number of pages needed for a pbe list.
- */
-
-static int calc_nr(int nr_copy)
-{
-        return nr_copy + (nr_copy+PBES_PER_PAGE-2)/(PBES_PER_PAGE-1);
-}
-
-/**
- * free_pagedir - free pages allocated with alloc_pagedir()
- */
-
-static inline void free_pagedir(struct pbe *pblist)
-{
-        struct pbe *pbe;
-
-        while (pblist) {
-                pbe = (pblist + PB_PAGE_SKIP)->next;
-                free_page((unsigned long)pblist);
-                pblist = pbe;
-        }
-}
-
-/**
- * fill_pb_page - Create a list of PBEs on a given memory page
- */
-
-static inline void fill_pb_page(struct pbe *pbpage)
-{
-        struct pbe *p;
-
-        p = pbpage;
-        pbpage += PB_PAGE_SKIP;
-        do
-                p->next = p + 1;
-        while (++p < pbpage);
-}
-
-/**
- * create_pbe_list - Create a list of PBEs on top of a given chain
- * of memory pages allocated with alloc_pagedir()
- */
-
-static void create_pbe_list(struct pbe *pblist, unsigned nr_pages)
-{
-        struct pbe *pbpage, *p;
-        unsigned num = PBES_PER_PAGE;
-
-        for_each_pb_page (pbpage, pblist) {
-                if (num >= nr_pages)
-                        break;
-
-                fill_pb_page(pbpage);
-                num += PBES_PER_PAGE;
-        }
-        if (pbpage) {
-                for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
-                        p->next = p + 1;
-                p->next = NULL;
-        }
-        pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
-}
-
-/**
- * alloc_pagedir - Allocate the page directory.
- *
- * First, determine exactly how many pages we need and
- * allocate them.
- *
- * We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
- * struct pbe elements (pbes) and the last element in the page points
- * to the next page.
- *
- * On each page we set up a list of struct_pbe elements.
- */
-
-static struct pbe * alloc_pagedir(unsigned nr_pages)
-{
-        unsigned num;
-        struct pbe *pblist, *pbe;
-
-        if (!nr_pages)
-                return NULL;
-
-        pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
-        pblist = (struct pbe *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
-        for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
-                        pbe = pbe->next, num += PBES_PER_PAGE) {
-                pbe += PB_PAGE_SKIP;
-                pbe->next = (struct pbe *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
-        }
-        if (!pbe) { /* get_zeroed_page() failed */
-                free_pagedir(pblist);
-                pblist = NULL;
-        }
-        return pblist;
-}
-
-/**
- * free_image_pages - Free pages allocated for snapshot
- */
-
-static void free_image_pages(void)
-{
-        struct pbe * p;
-
-        for_each_pbe (p, pagedir_save) {
-                if (p->address) {
-                        ClearPageNosave(virt_to_page(p->address));
-                        free_page(p->address);
-                        p->address = 0;
-                }
-        }
-}
-
-/**
- * alloc_image_pages - Allocate pages for the snapshot.
- */
-
-static int alloc_image_pages(void)
-{
-        struct pbe * p;
-
-        for_each_pbe (p, pagedir_save) {
-                p->address = get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
-                if (!p->address)
-                        return -ENOMEM;
-                SetPageNosave(virt_to_page(p->address));
-        }
-        return 0;
-}
-
-/* Free pages we allocated for suspend. Suspend pages are alocated
- * before atomic copy, so we need to free them after resume.
- */
-void swsusp_free(void)
-{
-        BUG_ON(PageNosave(virt_to_page(pagedir_save)));
-        BUG_ON(PageNosaveFree(virt_to_page(pagedir_save)));
-        free_image_pages();
-        free_pagedir(pagedir_save);
-}
-
-
-/**
- * enough_free_mem - Make sure we enough free memory to snapshot.
- *
- * Returns TRUE or FALSE after checking the number of available
- * free pages.
- */
-
-static int enough_free_mem(void)
-{
-        if (nr_free_pages() < (nr_copy_pages + PAGES_FOR_IO)) {
-                pr_debug("swsusp: Not enough free pages: Have %d\n",
-                         nr_free_pages());
-                return 0;
-        }
-        return 1;
-}
-
-
 /**
  * enough_swap - Make sure we have enough swap to save the image.
  *
@@ -909,87 +543,14 @@ static int enough_free_mem(void)
  * We should only consider resume_device.
  */
 
-static int enough_swap(void)
+int enough_swap(unsigned nr_pages)
 {
         struct sysinfo i;
 
         si_swapinfo(&i);
-        if (i.freeswap < (nr_copy_pages + PAGES_FOR_IO)) {
-                pr_debug("swsusp: Not enough swap. Need %ld\n",i.freeswap);
-                return 0;
-        }
-        return 1;
-}
-
-static int swsusp_alloc(void)
-{
-        int error;
-
-        pagedir_nosave = NULL;
-        nr_copy_pages = calc_nr(nr_copy_pages);
-        nr_copy_pages_check = nr_copy_pages;
-
-        pr_debug("suspend: (pages needed: %d + %d free: %d)\n",
-                 nr_copy_pages, PAGES_FOR_IO, nr_free_pages());
-
-        if (!enough_free_mem())
-                return -ENOMEM;
-
-        if (!enough_swap())
-                return -ENOSPC;
-
-        if (MAX_PBES < nr_copy_pages / PBES_PER_PAGE +
-            !!(nr_copy_pages % PBES_PER_PAGE))
-                return -ENOSPC;
-
-        if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) {
-                printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
-                return -ENOMEM;
-        }
-        create_pbe_list(pagedir_save, nr_copy_pages);
-        pagedir_nosave = pagedir_save;
-        if ((error = alloc_image_pages())) {
-                printk(KERN_ERR "suspend: Allocating image pages failed.\n");
-                swsusp_free();
-                return error;
-        }
-
-        return 0;
-}
-
-static int suspend_prepare_image(void)
-{
-        int error;
-
-        pr_debug("swsusp: critical section: \n");
-        if (save_highmem()) {
-                printk(KERN_CRIT "Suspend machine: Not enough free pages for highmem\n");
-                restore_highmem();
-                return -ENOMEM;
-        }
-
-        drain_local_pages();
-        count_data_pages();
-        printk("swsusp: Need to copy %u pages\n", nr_copy_pages);
-
-        error = swsusp_alloc();
-        if (error)
-                return error;
-
-        /* During allocating of suspend pagedir, new cold pages may appear.
-         * Kill them.
-         */
-        drain_local_pages();
-        copy_data_pages();
-
-        /*
-         * End of critical section. From now on, we can write to memory,
-         * but we should not touch disk. This specially means we must _not_
-         * touch swap space! Except we must write out our image of course.
-         */
-
-        printk("swsusp: critical section/: done (%d pages copied)\n", nr_copy_pages );
-        return 0;
+        pr_debug("swsusp: available swap: %lu pages\n", i.freeswap);
+        return i.freeswap > (nr_pages + PAGES_FOR_IO +
+                (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
 }
 
 
@@ -1001,7 +562,7 @@ static int suspend_prepare_image(void)
 int swsusp_write(void)
 {
         int error;
-        device_resume();
+
         lock_swapdevices();
         error = write_suspend_image();
         /* This will unlock ignored swap devices since writing is finished */
@@ -1011,14 +572,6 @@ int swsusp_write(void)
 }
 
 
-extern asmlinkage int swsusp_arch_suspend(void);
-extern asmlinkage int swsusp_arch_resume(void);
-
-
-asmlinkage int swsusp_save(void)
-{
-        return suspend_prepare_image();
-}
 
 int swsusp_suspend(void)
 {
@@ -1050,7 +603,6 @@ int swsusp_suspend(void)
                 printk(KERN_ERR "Error %d suspending\n", error);
         /* Restore control flow magically appears here */
         restore_processor_state();
-        BUG_ON (nr_copy_pages_check != nr_copy_pages);
         restore_highmem();
         device_power_up();
         local_irq_enable();
@@ -1070,6 +622,11 @@ int swsusp_resume(void)
          * execution continues at place where swsusp_arch_suspend was called
          */
         BUG_ON(!error);
+        /* The only reason why swsusp_arch_resume() can fail is memory being
+         * very tight, so we have to free it as soon as we can to avoid
+         * subsequent failures
+         */
+        swsusp_free();
         restore_processor_state();
         restore_highmem();
         touch_softlockup_watchdog();
@@ -1085,54 +642,28 @@ int swsusp_resume(void)
  *
  * We don't know which pages are usable until we allocate them.
  *
- * Allocated but unusable (ie eaten) memory pages are linked together
- * to create a list, so that we can free them easily
- *
- * We could have used a type other than (void *)
- * for this purpose, but ...
+ * Allocated but unusable (ie eaten) memory pages are marked so that
+ * swsusp_free() can release them
  */
-static void **eaten_memory = NULL;
-
-static inline void eat_page(void *page)
-{
-        void **c;
-
-        c = eaten_memory;
-        eaten_memory = page;
-        *eaten_memory = c;
-}
 
-unsigned long get_usable_page(gfp_t gfp_mask)
+unsigned long get_safe_page(gfp_t gfp_mask)
 {
         unsigned long m;
 
-        m = get_zeroed_page(gfp_mask);
-        while (!PageNosaveFree(virt_to_page(m))) {
-                eat_page((void *)m);
+        do {
                 m = get_zeroed_page(gfp_mask);
-                if (!m)
-                        break;
+                if (m && PageNosaveFree(virt_to_page(m)))
+                        /* This is for swsusp_free() */
+                        SetPageNosave(virt_to_page(m));
+        } while (m && PageNosaveFree(virt_to_page(m)));
+        if (m) {
+                /* This is for swsusp_free() */
+                SetPageNosave(virt_to_page(m));
+                SetPageNosaveFree(virt_to_page(m));
         }
         return m;
 }
 
-void free_eaten_memory(void)
-{
-        unsigned long m;
-        void **c;
-        int i = 0;
-
-        c = eaten_memory;
-        while (c) {
-                m = (unsigned long)c;
-                c = *c;
-                free_page(m);
-                i++;
-        }
-        eaten_memory = NULL;
-        pr_debug("swsusp: %d unused pages freed\n", i);
-}
-
 /**
  * check_pagedir - We ensure here that pages that the PBEs point to
  * won't collide with pages where we're going to restore from the loaded
@@ -1150,7 +681,7 @@ static int check_pagedir(struct pbe *pblist)
                 p->address = 0UL;
 
         for_each_pbe (p, pblist) {
-                p->address = get_usable_page(GFP_ATOMIC);
+                p->address = get_safe_page(GFP_ATOMIC);
                 if (!p->address)
                         return -ENOMEM;
         }
@@ -1169,7 +700,7 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
         unsigned long zone_pfn;
         struct pbe *pbpage, *tail, *p;
         void *m;
-        int rel = 0, error = 0;
+        int rel = 0;
 
         if (!pblist) /* a sanity check */
                 return NULL;
@@ -1177,41 +708,37 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
         pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n",
                         swsusp_info.pagedir_pages);
 
-        /* Set page flags */
+        /* Clear page flags */
 
         for_each_zone (zone) {
                 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-                        SetPageNosaveFree(pfn_to_page(zone_pfn +
+                        if (pfn_valid(zone_pfn + zone->zone_start_pfn))
+                                ClearPageNosaveFree(pfn_to_page(zone_pfn +
                                         zone->zone_start_pfn));
         }
 
-        /* Clear orig addresses */
+        /* Mark orig addresses */
 
         for_each_pbe (p, pblist)
-                ClearPageNosaveFree(virt_to_page(p->orig_address));
+                SetPageNosaveFree(virt_to_page(p->orig_address));
 
         tail = pblist + PB_PAGE_SKIP;
 
         /* Relocate colliding pages */
 
         for_each_pb_page (pbpage, pblist) {
-                if (!PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
-                        m = (void *)get_usable_page(GFP_ATOMIC | __GFP_COLD);
-                        if (!m) {
-                                error = -ENOMEM;
-                                break;
-                        }
+                if (PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
+                        m = (void *)get_safe_page(GFP_ATOMIC | __GFP_COLD);
+                        if (!m)
+                                return NULL;
                         memcpy(m, (void *)pbpage, PAGE_SIZE);
                         if (pbpage == pblist)
                                 pblist = (struct pbe *)m;
                         else
                                 tail->next = (struct pbe *)m;
-
-                        eat_page((void *)pbpage);
                         pbpage = (struct pbe *)m;
 
                         /* We have to link the PBEs again */
-
                         for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++)
                                 if (p->next) /* needed to save the end */
                                         p->next = p + 1;
@@ -1221,15 +748,13 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
                 tail = pbpage + PB_PAGE_SKIP;
         }
 
-        if (error) {
-                printk("\nswsusp: Out of memory\n\n");
-                free_pagedir(pblist);
-                free_eaten_memory();
-                pblist = NULL;
-                /* Is this even worth handling? It should never ever happen, and we
-                   have just lost user's state, anyway... */
-        } else
-                printk("swsusp: Relocated %d pages\n", rel);
+        /* This is for swsusp_free() */
+        for_each_pb_page (pbpage, pblist) {
+                SetPageNosave(virt_to_page(pbpage));
+                SetPageNosaveFree(virt_to_page(pbpage));
+        }
+
+        printk("swsusp: Relocated %d pages\n", rel);
 
         return pblist;
 }
@@ -1447,9 +972,7 @@ static int read_pagedir(struct pbe *pblist)
                         break;
         }
 
-        if (error)
-                free_pagedir(pblist);
-        else
+        if (!error)
                 BUG_ON(i != swsusp_info.pagedir_pages);
 
         return error;
@@ -1492,15 +1015,6 @@ static int read_suspend_image(void)
         if (!error)
                 error = data_read(pagedir_nosave);
 
-        if (error) { /* We fail cleanly */
-                free_eaten_memory();
-                for_each_pbe (p, pagedir_nosave)
-                        if (p->address) {
-                                free_page(p->address);
-                                p->address = 0UL;
-                        }
-                free_pagedir(pagedir_nosave);
-        }
         return error;
 }
 