path: root/kernel/power/swsusp.c
author     Rafael J. Wysocki <rjw@sisk.pl>          2005-10-30 17:59:56 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-10-30 20:37:14 -0500
commit     25761b6eb7b33823bcfff6bfe2a015badcd76fb8 (patch)
tree       a25841a3f4c4cf087ce75c0907c00966f19d339a /kernel/power/swsusp.c
parent     351619baf9878731b4272fa10dda0f84f5582241 (diff)
[PATCH] swsusp: move snapshot functionality to separate file
The following patch moves the functionality of swsusp related to creating and handling the snapshot of memory to a separate file, snapshot.c. This should enable us to untangle the code in the future and eventually to implement some parts of swsusp.c in user space. The patch does not change the code.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Pavel Machek <pavel@suse.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/power/swsusp.c')
-rw-r--r--  kernel/power/swsusp.c  440
1 file changed, 4 insertions(+), 436 deletions(-)
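The move leaves swsusp.c depending on a handful of symbols that now live outside the file: the diff below drops the static qualifier from nr_copy_pages, pagedir_save and enough_swap(), and deletes functions such as swsusp_save(), swsusp_free() and restore_highmem(), the last of which is still called by the code that remains. The following is a minimal sketch of the cross-file declarations this implies, as swsusp.c would see them; it is illustrative only, since the shared header that actually carries them (presumably kernel/power/power.h) and the new snapshot.c are not part of this excerpt.

/* Sketch only: the cross-file interface implied by this patch; the
 * real declarations live in a shared header that is not shown here. */
extern unsigned int nr_copy_pages;              /* made non-static below */
extern suspend_pagedir_t *pagedir_nosave;       /* already non-static    */
extern suspend_pagedir_t *pagedir_save;         /* made non-static below */

int enough_swap(void);                          /* made non-static below */
asmlinkage int swsusp_save(void);               /* moves out of swsusp.c */
void swsusp_free(void);                         /* moves out of swsusp.c */
int restore_highmem(void);                      /* moves out, still called here */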
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index ae46506e213..fc50b5d2dd2 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -5,7 +5,7 @@
  * machine suspend feature using pretty near only high-level routines
  *
  * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
- * Copyright (C) 1998,2001-2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
  *
  * This file is released under the GPLv2.
  *
@@ -84,16 +84,10 @@
 #define MAXKEY 32
 #define MAXIV 32
 
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
-/* Variables to be preserved over suspend */
-static int nr_copy_pages_check;
-
 extern char resume_file[];
 
 /* Local variables that should not be affected by save */
-static unsigned int nr_copy_pages __nosavedata = 0;
+unsigned int nr_copy_pages __nosavedata = 0;
 
 /* Suspend pagedir is allocated before final copy, therefore it
    must be freed after resume
@@ -109,7 +103,7 @@ static unsigned int nr_copy_pages __nosavedata = 0;
    MMU hardware.
  */
 suspend_pagedir_t *pagedir_nosave __nosavedata = NULL;
-static suspend_pagedir_t *pagedir_save;
+suspend_pagedir_t *pagedir_save;
 
 #define SWSUSP_SIG "S1SUSPEND"
 
@@ -124,12 +118,6 @@ static struct swsusp_header {
 static struct swsusp_info swsusp_info;
 
 /*
- * XXX: We try to keep some more pages free so that I/O operations succeed
- * without paging. Might this be more?
- */
-#define PAGES_FOR_IO 512
-
-/*
  * Saving part...
  */
 
@@ -552,335 +540,6 @@ static int write_suspend_image(void)
         goto Done;
 }
 
-
-#ifdef CONFIG_HIGHMEM
-struct highmem_page {
-        char *data;
-        struct page *page;
-        struct highmem_page *next;
-};
-
-static struct highmem_page *highmem_copy;
-
-static int save_highmem_zone(struct zone *zone)
-{
-        unsigned long zone_pfn;
-        mark_free_pages(zone);
-        for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
-                struct page *page;
-                struct highmem_page *save;
-                void *kaddr;
-                unsigned long pfn = zone_pfn + zone->zone_start_pfn;
-
-                if (!(pfn%1000))
-                        printk(".");
-                if (!pfn_valid(pfn))
-                        continue;
-                page = pfn_to_page(pfn);
-                /*
-                 * PageReserved results from rvmalloc() sans vmalloc_32()
-                 * and architectural memory reservations.
-                 *
-                 * rvmalloc should not cause this, because all implementations
-                 * appear to always be using vmalloc_32 on architectures with
-                 * highmem. This is a good thing, because we would like to save
-                 * rvmalloc pages.
-                 *
-                 * It appears to be triggered by pages which do not point to
-                 * valid memory (see arch/i386/mm/init.c:one_highpage_init(),
-                 * which sets PageReserved if the page does not point to valid
-                 * RAM.
-                 *
-                 * XXX: must remove usage of PageReserved!
-                 */
-                if (PageReserved(page))
-                        continue;
-                BUG_ON(PageNosave(page));
-                if (PageNosaveFree(page))
-                        continue;
-                save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
-                if (!save)
-                        return -ENOMEM;
-                save->next = highmem_copy;
-                save->page = page;
-                save->data = (void *) get_zeroed_page(GFP_ATOMIC);
-                if (!save->data) {
-                        kfree(save);
-                        return -ENOMEM;
-                }
-                kaddr = kmap_atomic(page, KM_USER0);
-                memcpy(save->data, kaddr, PAGE_SIZE);
-                kunmap_atomic(kaddr, KM_USER0);
-                highmem_copy = save;
-        }
-        return 0;
-}
-#endif /* CONFIG_HIGHMEM */
-
-
-static int save_highmem(void)
-{
-#ifdef CONFIG_HIGHMEM
-        struct zone *zone;
-        int res = 0;
-
-        pr_debug("swsusp: Saving Highmem\n");
-        for_each_zone (zone) {
-                if (is_highmem(zone))
-                        res = save_highmem_zone(zone);
-                if (res)
-                        return res;
-        }
-#endif
-        return 0;
-}
-
-static int restore_highmem(void)
-{
-#ifdef CONFIG_HIGHMEM
-        printk("swsusp: Restoring Highmem\n");
-        while (highmem_copy) {
-                struct highmem_page *save = highmem_copy;
-                void *kaddr;
-                highmem_copy = save->next;
-
-                kaddr = kmap_atomic(save->page, KM_USER0);
-                memcpy(kaddr, save->data, PAGE_SIZE);
-                kunmap_atomic(kaddr, KM_USER0);
-                free_page((long) save->data);
-                kfree(save);
-        }
-#endif
-        return 0;
-}
-
-
-static int pfn_is_nosave(unsigned long pfn)
-{
-        unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
-        unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
-        return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
-}
-
-/**
- * saveable - Determine whether a page should be cloned or not.
- * @pfn: The page
- *
- * We save a page if it's Reserved, and not in the range of pages
- * statically defined as 'unsaveable', or if it isn't reserved, and
- * isn't part of a free chunk of pages.
- */
-
-static int saveable(struct zone * zone, unsigned long * zone_pfn)
-{
-        unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
-        struct page * page;
-
-        if (!pfn_valid(pfn))
-                return 0;
-
-        page = pfn_to_page(pfn);
-        if (PageNosave(page))
-                return 0;
-        if (pfn_is_nosave(pfn)) {
-                pr_debug("[nosave pfn 0x%lx]", pfn);
-                return 0;
-        }
-        if (PageNosaveFree(page))
-                return 0;
-
-        return 1;
-}
-
-static void count_data_pages(void)
-{
-        struct zone *zone;
-        unsigned long zone_pfn;
-
-        nr_copy_pages = 0;
-
-        for_each_zone (zone) {
-                if (is_highmem(zone))
-                        continue;
-                mark_free_pages(zone);
-                for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-                        nr_copy_pages += saveable(zone, &zone_pfn);
-        }
-}
-
-static void copy_data_pages(void)
-{
-        struct zone *zone;
-        unsigned long zone_pfn;
-        struct pbe *pbe = pagedir_nosave, *p;
-
-        pr_debug("copy_data_pages(): pages to copy: %d\n", nr_copy_pages);
-        for_each_zone (zone) {
-                if (is_highmem(zone))
-                        continue;
-                mark_free_pages(zone);
-                /* This is necessary for swsusp_free() */
-                for_each_pb_page (p, pagedir_nosave)
-                        SetPageNosaveFree(virt_to_page(p));
-                for_each_pbe(p, pagedir_nosave)
-                        SetPageNosaveFree(virt_to_page(p->address));
-                for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
-                        if (saveable(zone, &zone_pfn)) {
-                                struct page * page;
-                                page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
-                                BUG_ON(!pbe);
-                                pbe->orig_address = (unsigned long)page_address(page);
-                                /* copy_page is not usable for copying task structs. */
-                                memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
-                                pbe = pbe->next;
-                        }
-                }
-        }
-        BUG_ON(pbe);
-}
-
-
-/**
- * free_pagedir - free pages allocated with alloc_pagedir()
- */
-
-static inline void free_pagedir(struct pbe *pblist)
-{
-        struct pbe *pbe;
-
-        while (pblist) {
-                pbe = (pblist + PB_PAGE_SKIP)->next;
-                ClearPageNosave(virt_to_page(pblist));
-                ClearPageNosaveFree(virt_to_page(pblist));
-                free_page((unsigned long)pblist);
-                pblist = pbe;
-        }
-}
-
-/**
- * fill_pb_page - Create a list of PBEs on a given memory page
- */
-
-static inline void fill_pb_page(struct pbe *pbpage)
-{
-        struct pbe *p;
-
-        p = pbpage;
-        pbpage += PB_PAGE_SKIP;
-        do
-                p->next = p + 1;
-        while (++p < pbpage);
-}
-
-/**
- * create_pbe_list - Create a list of PBEs on top of a given chain
- * of memory pages allocated with alloc_pagedir()
- */
-
-static void create_pbe_list(struct pbe *pblist, unsigned nr_pages)
-{
-        struct pbe *pbpage, *p;
-        unsigned num = PBES_PER_PAGE;
-
-        for_each_pb_page (pbpage, pblist) {
-                if (num >= nr_pages)
-                        break;
-
-                fill_pb_page(pbpage);
-                num += PBES_PER_PAGE;
-        }
-        if (pbpage) {
-                for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
-                        p->next = p + 1;
-                p->next = NULL;
-        }
-        pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
-}
-
-static void *alloc_image_page(void)
-{
-        void *res = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
-        if (res) {
-                SetPageNosave(virt_to_page(res));
-                SetPageNosaveFree(virt_to_page(res));
-        }
-        return res;
-}
-
-/**
- * alloc_pagedir - Allocate the page directory.
- *
- * First, determine exactly how many pages we need and
- * allocate them.
- *
- * We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
- * struct pbe elements (pbes) and the last element in the page points
- * to the next page.
- *
- * On each page we set up a list of struct_pbe elements.
- */
-
-static struct pbe * alloc_pagedir(unsigned nr_pages)
-{
-        unsigned num;
-        struct pbe *pblist, *pbe;
-
-        if (!nr_pages)
-                return NULL;
-
-        pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
-        pblist = (struct pbe *)alloc_image_page();
-        for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
-                        pbe = pbe->next, num += PBES_PER_PAGE) {
-                pbe += PB_PAGE_SKIP;
-                pbe->next = (struct pbe *)alloc_image_page();
-        }
-        if (!pbe) { /* get_zeroed_page() failed */
-                free_pagedir(pblist);
-                pblist = NULL;
-        }
-        return pblist;
-}
-
-/**
- * Free pages we allocated for suspend. Suspend pages are alocated
- * before atomic copy, so we need to free them after resume.
- */
-
-void swsusp_free(void)
-{
-        struct zone *zone;
-        unsigned long zone_pfn;
-
-        for_each_zone(zone) {
-                for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-                        if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
-                                struct page * page;
-                                page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
-                                if (PageNosave(page) && PageNosaveFree(page)) {
-                                        ClearPageNosave(page);
-                                        ClearPageNosaveFree(page);
-                                        free_page((long) page_address(page));
-                                }
-                        }
-        }
-}
-
-/**
- * enough_free_mem - Make sure we enough free memory to snapshot.
- *
- * Returns TRUE or FALSE after checking the number of available
- * free pages.
- */
-
-static int enough_free_mem(void)
-{
-        pr_debug("swsusp: available memory: %u pages\n", nr_free_pages());
-        return nr_free_pages() > (nr_copy_pages + PAGES_FOR_IO +
-                nr_copy_pages/PBES_PER_PAGE + !!(nr_copy_pages%PBES_PER_PAGE));
-}
-
-
 /**
  * enough_swap - Make sure we have enough swap to save the image.
  *
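As an aside (not part of the patch): the expression nr_copy_pages/PBES_PER_PAGE + !!(nr_copy_pages%PBES_PER_PAGE), used both by the enough_free_mem() removed above and by enough_swap() in the hunk that follows, is a ceiling division counting the extra pages needed to hold the page backup entries (PBEs) that describe the image. A minimal sketch with hypothetical numbers:

/* Illustrative helper, not part of this patch: pages needed to hold
 * the PBEs describing nr_copy_pages image pages. */
static unsigned int pages_for_pbes(unsigned int nr_copy_pages,
                                   unsigned int pbes_per_page)
{
        /* e.g. 1000 image pages at 128 PBEs per page (hypothetical
         * values): 7 full pages plus one partial page -> 8 pages */
        return nr_copy_pages / pbes_per_page +
                !!(nr_copy_pages % pbes_per_page);
}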
@@ -891,7 +550,7 @@ static int enough_free_mem(void)
  * We should only consider resume_device.
  */
 
-static int enough_swap(void)
+int enough_swap(void)
 {
         struct sysinfo i;
 
@@ -901,88 +560,6 @@ static int enough_swap(void)
                 nr_copy_pages/PBES_PER_PAGE + !!(nr_copy_pages%PBES_PER_PAGE));
 }
 
-static int swsusp_alloc(void)
-{
-        struct pbe * p;
-
-        pagedir_nosave = NULL;
-
-        if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) {
-                printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
-                return -ENOMEM;
-        }
-        create_pbe_list(pagedir_save, nr_copy_pages);
-        pagedir_nosave = pagedir_save;
-
-        for_each_pbe (p, pagedir_save) {
-                p->address = (unsigned long)alloc_image_page();
-                if (!p->address) {
-                        printk(KERN_ERR "suspend: Allocating image pages failed.\n");
-                        swsusp_free();
-                        return -ENOMEM;
-                }
-        }
-
-        return 0;
-}
-
-static int suspend_prepare_image(void)
-{
-        int error;
-
-        pr_debug("swsusp: critical section: \n");
-        if (save_highmem()) {
-                printk(KERN_CRIT "swsusp: Not enough free pages for highmem\n");
-                restore_highmem();
-                return -ENOMEM;
-        }
-
-        drain_local_pages();
-        count_data_pages();
-        printk("swsusp: Need to copy %u pages\n", nr_copy_pages);
-        nr_copy_pages_check = nr_copy_pages;
-
-        pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
-                 nr_copy_pages,
-                 nr_copy_pages/PBES_PER_PAGE + !!(nr_copy_pages%PBES_PER_PAGE),
-                 PAGES_FOR_IO, nr_free_pages());
-
-        if (!enough_free_mem()) {
-                printk(KERN_ERR "swsusp: Not enough free memory\n");
-                return -ENOMEM;
-        }
-
-        if (MAX_PBES < nr_copy_pages / PBES_PER_PAGE +
-                        !!(nr_copy_pages % PBES_PER_PAGE)) {
-                printk(KERN_ERR "swsusp: Too many image pages\n");
-                return -ENOSPC;
-        }
-
-        if (!enough_swap()) {
-                printk(KERN_ERR "swsusp: Not enough free swap\n");
-                return -ENOSPC;
-        }
-
-        error = swsusp_alloc();
-        if (error)
-                return error;
-
-        /* During allocating of suspend pagedir, new cold pages may appear.
-         * Kill them.
-         */
-        drain_local_pages();
-        copy_data_pages();
-
-        /*
-         * End of critical section. From now on, we can write to memory,
-         * but we should not touch disk. This specially means we must _not_
-         * touch swap space! Except we must write out our image of course.
-         */
-
-        printk("swsusp: critical section/: done (%d pages copied)\n", nr_copy_pages );
-        return 0;
-}
-
 
 /* It is important _NOT_ to umount filesystems at this point. We want
  * them synced (in case something goes wrong) but we DO not want to mark
@@ -1002,14 +579,6 @@ int swsusp_write(void)
 }
 
 
-extern asmlinkage int swsusp_arch_suspend(void);
-extern asmlinkage int swsusp_arch_resume(void);
-
-
-asmlinkage int swsusp_save(void)
-{
-        return suspend_prepare_image();
-}
 
 int swsusp_suspend(void)
 {
@@ -1041,7 +610,6 @@ int swsusp_suspend(void)
                 printk(KERN_ERR "Error %d suspending\n", error);
         /* Restore control flow magically appears here */
         restore_processor_state();
-        BUG_ON (nr_copy_pages_check != nr_copy_pages);
         restore_highmem();
         device_power_up();
         local_irq_enable();