Diffstat (limited to 'mm')
-rw-r--r--  mm/swapfile.c  47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0d7296971ad9..28faa01cf578 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -79,6 +79,32 @@ static inline unsigned short encode_swapmap(int count, bool has_cache)
 	return ret;
 }
 
+/* returns 1 if swap entry is freed */
+static int
+__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
+{
+	int type = si - swap_info;
+	swp_entry_t entry = swp_entry(type, offset);
+	struct page *page;
+	int ret = 0;
+
+	page = find_get_page(&swapper_space, entry.val);
+	if (!page)
+		return 0;
+	/*
+	 * This function is called from scan_swap_map() and it's called
+	 * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
+	 * We have to use trylock for avoiding deadlock. This is a special
+	 * case and you should use try_to_free_swap() with explicit lock_page()
+	 * in usual operations.
+	 */
+	if (trylock_page(page)) {
+		ret = try_to_free_swap(page);
+		unlock_page(page);
+	}
+	page_cache_release(page);
+	return ret;
+}
 
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
@@ -301,6 +327,19 @@ checks:
 		goto no_page;
 	if (offset > si->highest_bit)
 		scan_base = offset = si->lowest_bit;
+
+	/* reuse swap entry of cache-only swap if not busy. */
+	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+		int swap_was_freed;
+		spin_unlock(&swap_lock);
+		swap_was_freed = __try_to_reclaim_swap(si, offset);
+		spin_lock(&swap_lock);
+		/* entry was freed successfully, try to use this again */
+		if (swap_was_freed)
+			goto checks;
+		goto scan; /* check next one */
+	}
+
 	if (si->swap_map[offset])
 		goto scan;
 
@@ -382,6 +421,10 @@ scan:
 			spin_lock(&swap_lock);
 			goto checks;
 		}
+		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+			spin_lock(&swap_lock);
+			goto checks;
+		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
@@ -393,6 +436,10 @@ scan:
 			spin_lock(&swap_lock);
 			goto checks;
 		}
+		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+			spin_lock(&swap_lock);
+			goto checks;
+		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
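
The comment in __try_to_reclaim_swap() explains the key locking decision: scan_swap_map() already holds swap_lock, so blocking on the page lock could deadlock against the usual path, which locks the page before touching swap state. The scan_swap_map() hunks therefore drop swap_lock, attempt the reclaim with trylock_page(), retake swap_lock, and either retry the slot or move on. The sketch below is only a userspace analogue of that pattern, not kernel code; the mutex names and the try_to_reclaim() helper are hypothetical.

/*
 * Hypothetical userspace analogue of the trylock pattern above;
 * lock names and try_to_reclaim() are illustrative only.
 * Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t swap_lock = PTHREAD_MUTEX_INITIALIZER; /* held by the scanner */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER; /* normally taken first */

/* Returns 1 if the entry could be reclaimed, 0 if the page was busy. */
static int try_to_reclaim(void)
{
	int ret = 0;

	/*
	 * trylock, not lock: the usual path takes page_lock before
	 * swap_lock, so blocking here while the scanner owns swap_lock
	 * would risk an ABBA deadlock.  A busy page is simply skipped.
	 */
	if (pthread_mutex_trylock(&page_lock) == 0) {
		ret = 1;	/* stand-in for a successful try_to_free_swap() */
		pthread_mutex_unlock(&page_lock);
	}
	return ret;
}

int main(void)
{
	pthread_mutex_lock(&swap_lock);

	/* Mirror the scan_swap_map() hunk: drop swap_lock around the
	 * reclaim attempt, retake it, then retry the slot on success. */
	pthread_mutex_unlock(&swap_lock);
	int freed = try_to_reclaim();
	pthread_mutex_lock(&swap_lock);

	printf(freed ? "entry freed, rescan this offset\n"
		     : "page busy, check the next offset\n");

	pthread_mutex_unlock(&swap_lock);
	return 0;
}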