author    Hugh Dickins <hughd@google.com>    2010-09-09 19:38:09 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2010-09-09 21:57:25 -0400
commit    b73d7fcecd93dc15eaa3c45c8c587b613f6673c4 (patch)
tree      ebf5a5e10fb246237807317a81f6f40c3eebcd9c /mm
parent    910321ea817a202ff70fac666e37e2c8e2f88823 (diff)
swap: prevent reuse during hibernation
Move the hibernation check from scan_swap_map() into try_to_free_swap():
to catch not only the common case when hibernation's allocation itself
triggers swap reuse, but also the less likely case when concurrent page
reclaim (shrink_page_list) might happen to call try_to_free_swap() on a
page.  Hibernation already clears __GFP_IO from the gfp_allowed_mask, to
stop reclaim from going to swap: check that to prevent swap reuse too.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Ondrej Zary <linux@rainbow-software.org>
Cc: Andrea Gelmini <andrea.gelmini@gmail.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nigel Cunningham <nigel@tuxonice.net>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
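For background, hibernation's restriction of gfp_allowed_mask works roughly as
sketched below. The helper names (hibernate_restrict_gfp, hibernate_restore_gfp)
and the saved_gfp_mask variable are invented for this illustration; the actual
save/restrict/restore is done by the power-management core around snapshot and
restore.

/* Illustrative fragment only - these helper names are made up for this
 * sketch; the real restriction lives in the PM core, not mm/swapfile.c. */
#include <linux/gfp.h>

static gfp_t saved_gfp_mask;

static void hibernate_restrict_gfp(void)
{
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);	/* no swap I/O from here on */
}

static void hibernate_restore_gfp(void)
{
	gfp_allowed_mask = saved_gfp_mask;	/* image written: I/O allowed again */
}

Any allocation or reclaim path can then test gfp_allowed_mask to discover that
swap I/O is currently forbidden, which is exactly the test this patch adds to
try_to_free_swap().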
Diffstat (limited to 'mm')
-rw-r--r--    mm/swapfile.c    24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f08d165871b3..ed5151079f59 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -318,10 +318,8 @@ checks:
 	if (offset > si->highest_bit)
 		scan_base = offset = si->lowest_bit;
 
-	/* reuse swap entry of cache-only swap if not hibernation. */
-	if (vm_swap_full()
-	    && usage == SWAP_HAS_CACHE
-	    && si->swap_map[offset] == SWAP_HAS_CACHE) {
+	/* reuse swap entry of cache-only swap if not busy. */
+	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 		int swap_was_freed;
 		spin_unlock(&swap_lock);
 		swap_was_freed = __try_to_reclaim_swap(si, offset);
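To see why moving the check works, trace the allocation-side path:
scan_swap_map() above calls __try_to_reclaim_swap(), which looks the offset up
in the swap cache and, if it can trylock the page, hands it to
try_to_free_swap(), where the relocated __GFP_IO test (second hunk, below) now
lives. A condensed sketch of that helper, simplified from the mm/swapfile.c of
this era and not a verbatim copy:

/* Condensed sketch of 2.6.36-era __try_to_reclaim_swap(); simplified,
 * not verbatim kernel source. */
#include <linux/pagemap.h>
#include <linux/swap.h>

static int __try_to_reclaim_swap(struct swap_info_struct *si,
				 unsigned long offset)
{
	swp_entry_t entry = swp_entry(si->type, offset);
	struct page *page;
	int ret = 0;

	page = find_get_page(&swapper_space, entry.val);
	if (!page)
		return 0;
	/* trylock, not lock_page(): this can run inside page reclaim */
	if (trylock_page(page)) {
		ret = try_to_free_swap(page);	/* the new hibernation check fires here */
		unlock_page(page);
	}
	page_cache_release(page);
	return ret;
}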
@@ -688,6 +686,24 @@ int try_to_free_swap(struct page *page)
 	if (page_swapcount(page))
 		return 0;
 
+	/*
+	 * Once hibernation has begun to create its image of memory,
+	 * there's a danger that one of the calls to try_to_free_swap()
+	 * - most probably a call from __try_to_reclaim_swap() while
+	 * hibernation is allocating its own swap pages for the image,
+	 * but conceivably even a call from memory reclaim - will free
+	 * the swap from a page which has already been recorded in the
+	 * image as a clean swapcache page, and then reuse its swap for
+	 * another page of the image.  On waking from hibernation, the
+	 * original page might be freed under memory pressure, then
+	 * later read back in from swap, now with the wrong data.
+	 *
+	 * Hibernation clears bits from gfp_allowed_mask to prevent
+	 * memory reclaim from writing to disk, so check that here.
+	 */
+	if (!(gfp_allowed_mask & __GFP_IO))
+		return 0;
+
 	delete_from_swap_cache(page);
 	SetPageDirty(page);
 	return 1;
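The net effect of the guard can be modeled in a few lines of plain C. This is a
toy model for illustration only - try_to_free_swap_model and the mask value are
made up; the real code is the kernel function patched above. Once __GFP_IO is
masked off, no swap entry can be freed, so entries already recorded in the
hibernation image are never handed out to another page:

#include <stdio.h>

#define __GFP_IO	0x40u		/* illustrative value, not the kernel's */

static unsigned int gfp_allowed_mask = ~0u;

/* Toy model of the patched try_to_free_swap(): returns 1 if the swap
 * entry may be freed (and thus reused), 0 if it must be kept. */
static int try_to_free_swap_model(int swapcount)
{
	if (swapcount)				/* entry still referenced */
		return 0;
	if (!(gfp_allowed_mask & __GFP_IO))	/* hibernation in progress */
		return 0;			/* keep entry: the image may need it */
	return 1;
}

int main(void)
{
	printf("normal:      freeable=%d\n", try_to_free_swap_model(0));	/* 1 */
	gfp_allowed_mask &= ~__GFP_IO;		/* hibernation begins */
	printf("hibernating: freeable=%d\n", try_to_free_swap_model(0));	/* 0 */
	return 0;
}

After resume, gfp_allowed_mask is restored and try_to_free_swap() behaves
normally again; the only cost of the guard is that some reclaimable swap
entries stay allocated for the duration of the snapshot.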