aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>2009-06-16 18:32:54 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-06-16 22:47:42 -0400
commitc9e444103b5e7a5a3519f9913f59767f92e33baf (patch)
tree6b14020dc9271b5ef22ba34daf5494ef6572abb9 /mm
parent355cfa73ddff2fb8fa14e93bd94a057cc022512e (diff)
mm: reuse unused swap entry if necessary
Presently we can tell that a swap entry is used only as SwapCache via swap_map, without looking up the swap cache. Then, we have a chance to reuse swap-cache-only swap entries in get_swap_pages(). This patch tries to free swap-cache-only swap entries if swap is not enough. Note: We hit the following path when the swap_cluster code cannot find a free cluster. Then, vm_swap_full() is not the only condition that allows the kernel to reclaim unused swap. Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Balbir Singh <balbir@in.ibm.com> Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Li Zefan <lizf@cn.fujitsu.com> Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com> Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp> Tested-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/swapfile.c47
1 file changed, 47 insertions, 0 deletions
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0d7296971ad9..28faa01cf578 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -79,6 +79,32 @@ static inline unsigned short encode_swapmap(int count, bool has_cache)
79 return ret; 79 return ret;
80} 80}
81 81
82/* returns 1 if swap entry is freed */
83static int
84__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
85{
86 int type = si - swap_info;
87 swp_entry_t entry = swp_entry(type, offset);
88 struct page *page;
89 int ret = 0;
90
91 page = find_get_page(&swapper_space, entry.val);
92 if (!page)
93 return 0;
94 /*
95 * This function is called from scan_swap_map() and it's called
96 * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
97 * We have to use trylock for avoiding deadlock. This is a special
98 * case and you should use try_to_free_swap() with explicit lock_page()
99 * in usual operations.
100 */
101 if (trylock_page(page)) {
102 ret = try_to_free_swap(page);
103 unlock_page(page);
104 }
105 page_cache_release(page);
106 return ret;
107}
82 108
83/* 109/*
84 * We need this because the bdev->unplug_fn can sleep and we cannot 110 * We need this because the bdev->unplug_fn can sleep and we cannot
@@ -301,6 +327,19 @@ checks:
301 goto no_page; 327 goto no_page;
302 if (offset > si->highest_bit) 328 if (offset > si->highest_bit)
303 scan_base = offset = si->lowest_bit; 329 scan_base = offset = si->lowest_bit;
330
331 /* reuse swap entry of cache-only swap if not busy. */
332 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
333 int swap_was_freed;
334 spin_unlock(&swap_lock);
335 swap_was_freed = __try_to_reclaim_swap(si, offset);
336 spin_lock(&swap_lock);
337 /* entry was freed successfully, try to use this again */
338 if (swap_was_freed)
339 goto checks;
340 goto scan; /* check next one */
341 }
342
304 if (si->swap_map[offset]) 343 if (si->swap_map[offset])
305 goto scan; 344 goto scan;
306 345
@@ -382,6 +421,10 @@ scan:
382 spin_lock(&swap_lock); 421 spin_lock(&swap_lock);
383 goto checks; 422 goto checks;
384 } 423 }
424 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
425 spin_lock(&swap_lock);
426 goto checks;
427 }
385 if (unlikely(--latency_ration < 0)) { 428 if (unlikely(--latency_ration < 0)) {
386 cond_resched(); 429 cond_resched();
387 latency_ration = LATENCY_LIMIT; 430 latency_ration = LATENCY_LIMIT;
@@ -393,6 +436,10 @@ scan:
393 spin_lock(&swap_lock); 436 spin_lock(&swap_lock);
394 goto checks; 437 goto checks;
395 } 438 }
439 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
440 spin_lock(&swap_lock);
441 goto checks;
442 }
396 if (unlikely(--latency_ration < 0)) { 443 if (unlikely(--latency_ration < 0)) {
397 cond_resched(); 444 cond_resched();
398 latency_ration = LATENCY_LIMIT; 445 latency_ration = LATENCY_LIMIT;