author		Hugh Dickins <hugh@veritas.com>				2009-01-06 17:39:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>		2009-01-06 18:59:03 -0500
commit		63d6c5ad7fc27455ce5cb4706884671fb7e0df08 (patch)
tree		82cf04209a4825ab4daf2a94977fc44550d2c9b7 /mm/vmscan.c
parent		68bdc8d64742ccc5e340c5d122ebbab3f0cf2a74 (diff)
mm: remove try_to_munlock from vmscan
An unfortunate feature of the Unevictable LRU work was that reclaiming an
anonymous page involved an extra scan through the anon_vma: to check that
the page is evictable before allocating swap, because the swap could not
be freed reliably soon afterwards.
Now that try_to_free_swap() has replaced remove_exclusive_swap_page(), that's
no longer an issue: remove the try_to_munlock() call from shrink_page_list(),
leaving it to try_to_unmap() to discover whether the page is one to be culled
to the unevictable list - in which case try_to_free_swap() is then called.
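For orientation, a condensed sketch of how the anonymous-page path of
shrink_page_list() reads after this change (paraphrased, with unrelated cases
elided - not a compilable excerpt; the try_to_unmap() call shown is the
existing one in vmscan.c, unchanged by this patch): swap is allocated without
any prior anon_vma scan, and if try_to_unmap() later reports an mlocked vma
(SWAP_MLOCK), the cull_mlocked path hands the just-allocated swap back via
try_to_free_swap().

	if (PageAnon(page) && !PageSwapCache(page)) {
		if (!(sc->gfp_mask & __GFP_IO))
			goto keep_locked;
		/* no pre-scan of the anon_vma any more: just allocate swap */
		if (!add_to_swap(page, GFP_ATOMIC))
			goto activate_locked;
		may_enter_fs = 1;
	}
	...
	switch (try_to_unmap(page, 0)) {
	case SWAP_MLOCK:
		/* an mlocked vma was found while unmapping */
		goto cull_mlocked;
	...
	}
	...
cull_mlocked:
	/* give back the swap entry allocated above, if any */
	if (PageSwapCache(page))
		try_to_free_swap(page);
	unlock_page(page);
	putback_lru_page(page);
	continue;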
Update unevictable-lru.txt to remove its comments on the try_to_munlock() call
in shrink_page_list(), and to shorten some lines that exceeded 80 columns.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	11
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c8601dd36603..74f875733e2b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -625,15 +625,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (PageAnon(page) && !PageSwapCache(page)) {
 			if (!(sc->gfp_mask & __GFP_IO))
 				goto keep_locked;
-			switch (try_to_munlock(page)) {
-			case SWAP_FAIL:		/* shouldn't happen */
-			case SWAP_AGAIN:
-				goto keep_locked;
-			case SWAP_MLOCK:
-				goto cull_mlocked;
-			case SWAP_SUCCESS:
-				; /* fall thru'; add to swap cache */
-			}
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
 			may_enter_fs = 1;
@@ -752,6 +743,8 @@ free_it:
 		continue;
 
 cull_mlocked:
+		if (PageSwapCache(page))
+			try_to_free_swap(page);
 		unlock_page(page);
 		putback_lru_page(page);
 		continue;