author     Hugh Dickins <hugh@veritas.com>                 2009-01-06 17:39:36 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-06 18:59:03 -0500
commit     a2c43eed8334e878702fca713b212ae2a11d84b9 (patch)
tree       4eb3f9b9153df5e7a638b15a6f7c9aa924066a94
parent     7b1fe59793e61f826bef053107b57b23954833bb (diff)
mm: try_to_free_swap replaces remove_exclusive_swap_page
remove_exclusive_swap_page(): its problem is in living up to its name.
It doesn't matter if someone else has a reference to the page (raised
page_count); it doesn't matter if the page is mapped into userspace
(raised page_mapcount - though that hints it may be worth keeping the
swap): all that matters is that there be no more references to the swap
(and no writeback in progress).
swapoff (try_to_unuse) has been removing pages from swapcache for years,
with no concern for page count or page mapcount, and we used to have a
comment in lookup_swap_cache() recognizing that: if you go for a page of
swapcache, you'll get the right page, but it could have been removed from
swapcache by the time you get page lock.
So, give up asking for exclusivity: get rid of
remove_exclusive_swap_page(), and remove_exclusive_swap_page_ref() and
remove_exclusive_swap_page_count() which were spawned for the recent LRU
work: replace them by the simpler try_to_free_swap() which just checks
page_swapcount().
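For reference, the whole replacement reduces to the shape below; this is condensed
from the mm/swapfile.c hunk later in this patch (comments added here for
illustration), not new code:

int try_to_free_swap(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!PageSwapCache(page))	/* no swap to free */
		return 0;
	if (PageWriteback(page))	/* swap write still in flight */
		return 0;
	if (page_swapcount(page))	/* swap entry still referenced */
		return 0;

	delete_from_swap_cache(page);
	SetPageDirty(page);	/* data now lives only in the page */
	return 1;
}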
Similarly, remove the page_count limitation from free_swap_and_cache(),
but assume that it's worth holding on to the swap if page is mapped and
swap nowhere near full. Add a vm_swap_full() test in free_swap_cache()?
It would be consistent, but I think we probably have enough for now.
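The matching test in free_swap_and_cache(), again condensed from the
mm/swapfile.c hunk below, swaps the old page_count check for page_mapped():

	if (PageSwapCache(page) && !PageWriteback(page) &&
			(!page_mapped(page) || vm_swap_full())) {
		delete_from_swap_cache(page);
		SetPageDirty(page);
	}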
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/swap.h | 10
-rw-r--r--  mm/memory.c          |  2
-rw-r--r--  mm/page_io.c         |  2
-rw-r--r--  mm/swap.c            |  3
-rw-r--r--  mm/swap_state.c      |  8
-rw-r--r--  mm/swapfile.c        | 70
-rw-r--r--  mm/vmscan.c          |  2
7 files changed, 22 insertions(+), 75 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 366556c5b148..c3ecd478840e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -305,8 +305,7 @@ extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
 extern sector_t swapdev_block(int, pgoff_t);
 extern struct swap_info_struct *get_swap_info_struct(unsigned);
 extern int reuse_swap_page(struct page *);
-extern int remove_exclusive_swap_page(struct page *);
-extern int remove_exclusive_swap_page_ref(struct page *);
+extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
 /* linux/mm/thrash.c */
@@ -388,12 +387,7 @@ static inline void delete_from_swap_cache(struct page *page)
 
 #define reuse_swap_page(page) (page_mapcount(page) == 1)
 
-static inline int remove_exclusive_swap_page(struct page *p)
-{
-	return 0;
-}
-
-static inline int remove_exclusive_swap_page_ref(struct page *page)
+static inline int try_to_free_swap(struct page *page)
 {
 	return 0;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 8f471edcb985..1a83fe5339a9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2403,7 +2403,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	swap_free(entry);
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
-		remove_exclusive_swap_page(page);
+		try_to_free_swap(page);
 	unlock_page(page);
 
 	if (write_access) {
diff --git a/mm/page_io.c b/mm/page_io.c
index d277a80efa71..dc6ce0afbded 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -98,7 +98,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 	struct bio *bio;
 	int ret = 0, rw = WRITE;
 
-	if (remove_exclusive_swap_page(page)) {
+	if (try_to_free_swap(page)) {
 		unlock_page(page);
 		goto out;
 	}
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -454,8 +454,7 @@ void pagevec_swap_free(struct pagevec *pvec)
 		struct page *page = pvec->pages[i];
 
 		if (PageSwapCache(page) && trylock_page(page)) {
-			if (PageSwapCache(page))
-				remove_exclusive_swap_page_ref(page);
+			try_to_free_swap(page);
 			unlock_page(page);
 		}
 	}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e793fdea275d..bcb472769299 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -195,14 +195,14 @@ void delete_from_swap_cache(struct page *page)
  * If we are the only user, then try to free up the swap cache.
  *
  * Its ok to check for PageSwapCache without the page lock
  * here because we are going to recheck again inside
- * exclusive_swap_page() _with_ the lock.
+ * try_to_free_swap() _with_ the lock.
  * - Marcelo
  */
 static inline void free_swap_cache(struct page *page)
 {
-	if (PageSwapCache(page) && trylock_page(page)) {
-		remove_exclusive_swap_page(page);
+	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
+		try_to_free_swap(page);
 		unlock_page(page);
 	}
 }
diff --git a/mm/swapfile.c b/mm/swapfile.c
index bfd4ee59cb88..f43601827607 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -348,68 +348,23 @@ int reuse_swap_page(struct page *page)
 }
 
 /*
- * Work out if there are any other processes sharing this
- * swap cache page. Free it if you can. Return success.
+ * If swap is getting full, or if there are no more mappings of this page,
+ * then try_to_free_swap is called to free its swap space.
  */
-static int remove_exclusive_swap_page_count(struct page *page, int count)
+int try_to_free_swap(struct page *page)
 {
-	int retval;
-	struct swap_info_struct * p;
-	swp_entry_t entry;
-
 	VM_BUG_ON(!PageLocked(page));
 
 	if (!PageSwapCache(page))
 		return 0;
 	if (PageWriteback(page))
 		return 0;
-	if (page_count(page) != count) /* us + cache + ptes */
-		return 0;
-
-	entry.val = page_private(page);
-	p = swap_info_get(entry);
-	if (!p)
+	if (page_swapcount(page))
 		return 0;
 
-	/* Is the only swap cache user the cache itself? */
-	retval = 0;
-	if (p->swap_map[swp_offset(entry)] == 1) {
-		/* Recheck the page count with the swapcache lock held.. */
-		spin_lock_irq(&swapper_space.tree_lock);
-		if ((page_count(page) == count) && !PageWriteback(page)) {
-			__delete_from_swap_cache(page);
-			SetPageDirty(page);
-			retval = 1;
-		}
-		spin_unlock_irq(&swapper_space.tree_lock);
-	}
-	spin_unlock(&swap_lock);
-
-	if (retval) {
-		swap_free(entry);
-		page_cache_release(page);
-	}
-
-	return retval;
-}
-
-/*
- * Most of the time the page should have two references: one for the
- * process and one for the swap cache.
- */
-int remove_exclusive_swap_page(struct page *page)
-{
-	return remove_exclusive_swap_page_count(page, 2);
-}
-
-/*
- * The pageout code holds an extra reference to the page.  That raises
- * the reference count to test for to 2 for a page that is only in the
- * swap cache plus 1 for each process that maps the page.
- */
-int remove_exclusive_swap_page_ref(struct page *page)
-{
-	return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page));
+	delete_from_swap_cache(page);
+	SetPageDirty(page);
+	return 1;
 }
 
 /*
@@ -436,13 +391,12 @@ void free_swap_and_cache(swp_entry_t entry)
 		spin_unlock(&swap_lock);
 	}
 	if (page) {
-		int one_user;
-
-		one_user = (page_count(page) == 2);
-		/* Only cache user (+us), or swap space full? Free it! */
-		/* Also recheck PageSwapCache after page is locked (above) */
+		/*
+		 * Not mapped elsewhere, or swap space full? Free it!
+		 * Also recheck PageSwapCache now page is locked (above).
+		 */
 		if (PageSwapCache(page) && !PageWriteback(page) &&
-				(one_user || vm_swap_full())) {
+				(!page_mapped(page) || vm_swap_full())) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d196f46c8808..c8601dd36603 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -759,7 +759,7 @@ cull_mlocked:
 activate_locked:
 	/* Not a candidate for swapping, so reclaim swap space. */
 	if (PageSwapCache(page) && vm_swap_full())
-		remove_exclusive_swap_page_ref(page);
+		try_to_free_swap(page);
 	VM_BUG_ON(PageActive(page));
 	SetPageActive(page);
 	pgactivate++;