author     Hugh Dickins <hughd@google.com>  2010-09-09 19:38:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-09-09 21:57:25 -0400
commit     910321ea817a202ff70fac666e37e2c8e2f88823 (patch)
tree       aaead29e7797986e2b804746b565bb5d05117c54
parent     ac8456d6f9a3011c824176bd6084d39e5f70a382 (diff)
swap: revert special hibernation allocation
Please revert 2.6.36-rc commit d2997b1042ec150616c1963b5e5e919ffd0b0ebf
"hibernation: freeze swap at hibernation".  It complicated matters by
adding a second swap allocation path, just for hibernation, without in
any way fixing the issue it was intended to address: page reclaim after
fixing the hibernation image might free swap from a page already imaged
as swapcache, letting its swap be reallocated to store a different page
of the image, resulting in data corruption if the imaged page were freed
as clean and then swapped back in.  Pages freed to si->swap_map were
still in danger of being reallocated by the alternative allocation path.

I guess it inadvertently fixed slow SSD swap allocation for hibernation,
as reported by Nigel Cunningham, by missing out the discards that occur
on the usual swap allocation path; but that was unintentional, and needs
a separate fix.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Ondrej Zary <linux@rainbow-software.org>
Cc: Andrea Gelmini <andrea.gelmini@gmail.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nigel Cunningham <nigel@tuxonice.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
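The corruption scenario described above is easiest to see in miniature. The sketch below is a minimal user-space model (not kernel code; every name in it is hypothetical) of why a second allocator that scans swap_map[] for free slots is hazardous: once page reclaim frees the slot of an already-imaged swapcache page, the hibernation-only allocator from the reverted patch could hand that same slot out for image data, and the imaged page would later read back the wrong contents.

/*
 * Minimal user-space model of the reallocation hazard described in the
 * commit message.  All names are hypothetical; this is an illustration,
 * not kernel code.
 */
#include <stdio.h>

#define SLOTS 8
static unsigned char swap_map[SLOTS];           /* 0 = free, >0 = in use */

/* hand out an entry whose swap_map[] count is zero, as both the regular
 * allocator and the reverted hibernation-only allocator effectively did */
static int alloc_slot(void)
{
        for (int i = 1; i < SLOTS; i++) {
                if (!swap_map[i]) {
                        swap_map[i] = 1;
                        return i;
                }
        }
        return 0;
}

/* models the normal swap_free() path returning a slot to swap_map[] */
static void free_slot(int slot)
{
        swap_map[slot] = 0;
}

int main(void)
{
        int imaged = alloc_slot();      /* swapcache page captured in the image */
        printf("imaged page holds swap slot %d\n", imaged);

        free_slot(imaged);              /* page reclaim frees it after the image is fixed */

        int image_slot = alloc_slot();  /* hibernation-only allocator reuses the slot */
        printf("hibernation image data written to slot %d\n", image_slot);

        if (image_slot == imaged)
                printf("slot reused: the imaged page would read back corrupted data\n");
        return 0;
}

The revert itself only removes the duplicate allocator; the sketch is just to make the reallocation hazard concrete.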
-rw-r--r--  include/linux/swap.h      |  8
-rw-r--r--  kernel/power/hibernate.c  |  1
-rw-r--r--  kernel/power/snapshot.c   |  1
-rw-r--r--  kernel/power/swap.c       |  6
-rw-r--r--  mm/swapfile.c             | 94
5 files changed, 26 insertions(+), 84 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2fee51a11b7..bf4eb62506d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -315,6 +315,7 @@ extern long nr_swap_pages;
 extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +332,6 @@ extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
 /* linux/mm/thrash.c */
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c77963938bc..8dc31e02ae1 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
                 goto Close;
 
         suspend_console();
-        hibernation_freeze_swap();
         saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
         error = dpm_suspend_start(PMSG_FREEZE);
         if (error)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5e7edfb05e6..f6cd6faf84f 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
         buffer = NULL;
         alloc_normal = 0;
         alloc_highmem = 0;
-        hibernation_thaw_swap();
 }
 
 /* Helper functions used for the shrinking of memory. */
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5d0059eed3e..e6a5bdf61a3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
 {
         unsigned long offset;
 
-        offset = swp_offset(get_swap_for_hibernation(swap));
+        offset = swp_offset(get_swap_page_of_type(swap));
         if (offset) {
                 if (swsusp_extents_insert(offset))
-                        swap_free_for_hibernation(swp_entry(swap, offset));
+                        swap_free(swp_entry(swap, offset));
                 else
                         return swapdev_block(swap, offset);
         }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
                 ext = container_of(node, struct swsusp_extent, node);
                 rb_erase(node, &swsusp_extents);
                 for (offset = ext->start; offset <= ext->end; offset++)
-                        swap_free_for_hibernation(swp_entry(swap, offset));
+                        swap_free(swp_entry(swap, offset));
 
                 kfree(ext);
         }
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1f3f9c59a73..f08d165871b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -453,8 +451,6 @@ swp_entry_t get_swap_page(void)
         spin_lock(&swap_lock);
         if (nr_swap_pages <= 0)
                 goto noswap;
-        if (swap_for_hibernation)
-                goto noswap;
         nr_swap_pages--;
 
         for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +483,28 @@ noswap:
         return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now susupend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+        struct swap_info_struct *si;
+        pgoff_t offset;
+
+        spin_lock(&swap_lock);
+        si = swap_info[type];
+        if (si && (si->flags & SWP_WRITEOK)) {
+                nr_swap_pages--;
+                /* This is called for allocating swap entry, not cache */
+                offset = scan_swap_map(si, 1);
+                if (offset) {
+                        spin_unlock(&swap_lock);
+                        return swp_entry(type, offset);
+                }
+                nr_swap_pages++;
+        }
+        spin_unlock(&swap_lock);
+        return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
         struct swap_info_struct *p;
@@ -746,74 +764,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-        int i;
-
-        spin_lock(&swap_lock);
-
-        printk(KERN_INFO "PM: Freeze Swap\n");
-        swap_for_hibernation = true;
-        for (i = 0; i < MAX_SWAPFILES; i++)
-                hibernation_offset[i] = 1;
-        spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-        spin_lock(&swap_lock);
-        if (swap_for_hibernation) {
-                printk(KERN_INFO "PM: Thaw Swap\n");
-                swap_for_hibernation = false;
-        }
-        spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-        pgoff_t off;
-        swp_entry_t val = {0};
-        struct swap_info_struct *si;
-
-        spin_lock(&swap_lock);
-
-        si = swap_info[type];
-        if (!si || !(si->flags & SWP_WRITEOK))
-                goto done;
-
-        for (off = hibernation_offset[type]; off < si->max; ++off) {
-                if (!si->swap_map[off])
-                        break;
-        }
-        if (off < si->max) {
-                val = swp_entry(type, off);
-                hibernation_offset[type] = off + 1;
-        }
-done:
-        spin_unlock(&swap_lock);
-        return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-        /* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *