author	Huang Ying <ying.huang@intel.com>	2017-02-22 18:45:46 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-22 19:41:30 -0500
commit	ba81f83842549871cbd7226fc11530dc464500bb (patch)
tree	1722a6591e970b93efbbf51c39b6dd09f7832d6a
parent	039939a65059852242c823ece685579370bc574f (diff)
mm/swap: skip readahead only when swap slot cache is enabled
During swapoff, a swap entry may have swap_map[] == SWAP_HAS_CACHE (for example, when it has just been allocated).  If __read_swap_cache_async() returns NULL for such an entry, swapoff will abort.  So when the swap slot cache is disabled (that is, during swapoff), wait in this race condition for the page to be added to the swap cache instead.  This is not a problem for the swap slot cache itself, because the cache is drained after swap_slot_cache_enabled is cleared.

[ying.huang@intel.com: fix memory leak in __read_swap_cache_async()]
Link: http://lkml.kernel.org/r/874lzt6znd.fsf@yhuang-dev.intel.com
Link: http://lkml.kernel.org/r/5e2c5f6abe8e6eb0797408897b1bba80938e9b9d.1484082593.git.tim.c.chen@linux.intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Aaron Lu <aaron.lu@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
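To make the control flow concrete, below is a small userspace sketch (a toy model, not kernel code: read_swap_cache_async_sketch(), swap_count, and page_in_swap_cache are stand-ins for the real kernel state and helpers; only swap_slot_cache_enabled mirrors an identifier touched by this patch).  It shows the decision the patch introduces: an apparently unused slot short-circuits readahead only while the slot cache is enabled, while a concurrent swapoff, which disables the cache first, keeps retrying until the page appears in the swap cache instead of returning NULL.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel state involved in this race. */
static bool swap_slot_cache_enabled = true;   /* cleared early in swapoff   */
static int  swap_count = 0;                   /* 0 models an "unused" slot  */
static bool page_in_swap_cache = false;       /* set by the racing task     */

/* Simplified shape of the retry loop in __read_swap_cache_async(). */
static const char *read_swap_cache_async_sketch(void)
{
	for (;;) {
		if (page_in_swap_cache)
			return "found page in swap cache";

		/*
		 * Only treat an unused slot as "nothing to read" while the
		 * slot cache is enabled; during swapoff the entry may still
		 * be SWAP_HAS_CACHE-only, so retry rather than return NULL
		 * and abort swapoff.
		 */
		if (!swap_count && swap_slot_cache_enabled)
			return "skip readahead: unused slot";

		/*
		 * In the kernel this is where a page is allocated and added
		 * to the swap cache, or where we wait for the other task;
		 * modeled here as the racing task publishing the page.
		 */
		page_in_swap_cache = true;
	}
}

int main(void)
{
	swap_slot_cache_enabled = false;	/* as during swapoff */
	puts(read_swap_cache_async_sketch());
	return 0;
}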
 include/linux/swap_slots.h |  2 ++
 mm/swap_slots.c            |  2 +-
 mm/swap_state.c            | 13 ++++++++++---
 3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h
index ba5623b27c60..6ef92d17633d 100644
--- a/include/linux/swap_slots.h
+++ b/include/linux/swap_slots.h
@@ -25,4 +25,6 @@ void reenable_swap_slots_cache_unlock(void);
 int enable_swap_slots_cache(void);
 int free_swap_slot(swp_entry_t entry);
 
+extern bool swap_slot_cache_enabled;
+
 #endif /* _LINUX_SWAP_SLOTS_H */
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index ebf4f1cbac04..9b5bc86f96ad 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -36,7 +36,7 @@
 
 static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
 static bool swap_slot_cache_active;
-static bool swap_slot_cache_enabled;
+bool swap_slot_cache_enabled;
 static bool swap_slot_cache_initialized;
 DEFINE_MUTEX(swap_slots_cache_mutex);
 /* Serialize swap slots cache enable/disable operations */
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e1f07cafecaa..473b71e052a8 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -324,9 +324,16 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		if (found_page)
 			break;
 
-		/* Just skip read ahead for unused swap slot */
-		if (!__swp_swapcount(entry))
-			return NULL;
+		/*
+		 * Just skip read ahead for unused swap slot.
+		 * During swap_off when swap_slot_cache is disabled,
+		 * we have to handle the race between putting
+		 * swap entry in swap cache and marking swap slot
+		 * as SWAP_HAS_CACHE.  That's done in later part of code or
+		 * else swap_off will be aborted if we return NULL.
+		 */
+		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
+			break;
 
 		/*
 		 * Get a new page to read into from swap.