author     Hugh Dickins <hughd@google.com>                  2010-09-09 19:38:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-09-09 21:57:25 -0400
commit     910321ea817a202ff70fac666e37e2c8e2f88823 (patch)
tree       aaead29e7797986e2b804746b565bb5d05117c54 /mm
parent     ac8456d6f9a3011c824176bd6084d39e5f70a382 (diff)
swap: revert special hibernation allocation
Please revert 2.6.36-rc commit d2997b1042ec150616c1963b5e5e919ffd0b0ebf
"hibernation: freeze swap at hibernation". It complicated matters by
adding a second swap allocation path, just for hibernation; without in any
way fixing the issue that it was intended to address - page reclaim after
fixing the hibernation image might free swap from a page already imaged as
swapcache, letting its swap be reallocated to store a different page of
the image: resulting in data corruption if the imaged page were freed as
clean then swapped back in. Pages freed to si->swap_map were still in
danger of being reallocated by the alternative allocation path.
I guess it inadvertently fixed slow SSD swap allocation for hibernation,
as reported by Nigel Cunningham: by missing out the discards that occur on
the usual swap allocation path; but that was unintentional, and needs a
separate fix.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Ondrej Zary <linux@rainbow-software.org>
Cc: Andrea Gelmini <andrea.gelmini@gmail.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nigel Cunningham <nigel@tuxonice.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
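
[Editorial note] For context on how the restored single allocation path is consumed: the companion changes in kernel/power/swap.c (not shown below, since this diffstat is limited to 'mm') go back to calling get_swap_page_of_type() from the hibernation image writer and recording each allocated offset so it can be released once the image is written. The following is a minimal sketch of that caller, loosely modelled on alloc_swapdev_block() in kernel/power/swap.c of this era; the helper names (swsusp_extents_insert(), swapdev_block(), free_all_swap_pages()) are assumptions drawn from that file, not part of this patch.

sector_t alloc_swapdev_block(int swap)
{
        unsigned long offset;

        /* Allocate one slot on the swap device chosen for the image. */
        offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                /*
                 * Track it so free_all_swap_pages() can release it later;
                 * if tracking fails, give the slot straight back.
                 */
                if (swsusp_extents_insert(offset))
                        swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
        return 0;
}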
Diffstat (limited to 'mm')
 mm/swapfile.c | 94 ++++++++++--------------------------------------
 1 file changed, 22 insertions(+), 72 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1f3f9c59a73a..f08d165871b3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -453,8 +451,6 @@ swp_entry_t get_swap_page(void)
         spin_lock(&swap_lock);
         if (nr_swap_pages <= 0)
                 goto noswap;
-        if (swap_for_hibernation)
-                goto noswap;
         nr_swap_pages--;
 
         for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +483,28 @@ noswap:
         return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now susupend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+        struct swap_info_struct *si;
+        pgoff_t offset;
+
+        spin_lock(&swap_lock);
+        si = swap_info[type];
+        if (si && (si->flags & SWP_WRITEOK)) {
+                nr_swap_pages--;
+                /* This is called for allocating swap entry, not cache */
+                offset = scan_swap_map(si, 1);
+                if (offset) {
+                        spin_unlock(&swap_lock);
+                        return swp_entry(type, offset);
+                }
+                nr_swap_pages++;
+        }
+        spin_unlock(&swap_lock);
+        return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
         struct swap_info_struct *p;
@@ -746,74 +764,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-        int i;
-
-        spin_lock(&swap_lock);
-
-        printk(KERN_INFO "PM: Freeze Swap\n");
-        swap_for_hibernation = true;
-        for (i = 0; i < MAX_SWAPFILES; i++)
-                hibernation_offset[i] = 1;
-        spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-        spin_lock(&swap_lock);
-        if (swap_for_hibernation) {
-                printk(KERN_INFO "PM: Thaw Swap\n");
-                swap_for_hibernation = false;
-        }
-        spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-        pgoff_t off;
-        swp_entry_t val = {0};
-        struct swap_info_struct *si;
-
-        spin_lock(&swap_lock);
-
-        si = swap_info[type];
-        if (!si || !(si->flags & SWP_WRITEOK))
-                goto done;
-
-        for (off = hibernation_offset[type]; off < si->max; ++off) {
-                if (!si->swap_map[off])
-                        break;
-        }
-        if (off < si->max) {
-                val = swp_entry(type, off);
-                hibernation_offset[type] = off + 1;
-        }
-done:
-        spin_unlock(&swap_lock);
-        return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-        /* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *
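
[Editorial note] The comment deleted above ("Used swaps are recorded into RB-tree. Please see kernel/power/swap.c") points at the bookkeeping that survives the revert: the hibernation code remembers every offset it obtains from get_swap_page_of_type() in a tree of extents, so all of them can be released again when the image is complete. A rough sketch of that kind of extent tracking follows; the names (hib_extent, hib_extents_insert) are hypothetical and only loosely modelled on kernel/power/swap.c.

#include <linux/rbtree.h>
#include <linux/slab.h>

struct hib_extent {
        struct rb_node node;
        unsigned long start;    /* first swap offset in the run */
        unsigned long end;      /* last swap offset in the run */
};

static struct rb_root hib_extents = RB_ROOT;

/*
 * Remember one allocated swap offset, merging it into a neighbouring run
 * where possible; returns -EINVAL if the offset is already recorded.
 */
static int hib_extents_insert(unsigned long offset)
{
        struct rb_node **new = &hib_extents.rb_node;
        struct rb_node *parent = NULL;
        struct hib_extent *ext;

        while (*new) {
                parent = *new;
                ext = rb_entry(parent, struct hib_extent, node);
                if (offset < ext->start) {
                        if (offset == ext->start - 1) {
                                ext->start--;   /* extend run downwards */
                                return 0;
                        }
                        new = &parent->rb_left;
                } else if (offset > ext->end) {
                        if (offset == ext->end + 1) {
                                ext->end++;     /* extend run upwards */
                                return 0;
                        }
                        new = &parent->rb_right;
                } else {
                        return -EINVAL;         /* already tracked */
                }
        }

        ext = kzalloc(sizeof(*ext), GFP_KERNEL);
        if (!ext)
                return -ENOMEM;
        ext->start = offset;
        ext->end = offset;
        rb_link_node(&ext->node, parent, new);
        rb_insert_color(&ext->node, &hib_extents);
        return 0;
}

Once the image has been written (or writing fails), the tree is walked and each tracked offset is handed back with swap_free(), which keeps these hibernation allocations visible to the ordinary swap_map accounting that the removed get_swap_for_hibernation() path bypassed.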