diff options
author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2009-12-14 20:59:45 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-15 11:53:20 -0500 |
commit | caed0f486e582eeeb6e3546417fd758230fe4ad9 (patch) | |
tree | 203d4724dcfaae3ad4b349e1971bb3efc0da7db2 /mm/rmap.c | |
parent | 23ce932a5e3ec3b9f06e92c8797d834d43abfb0f (diff) |
mm: simplify try_to_unmap_one()
SWAP_MLOCK means "We marked the page as PG_MLOCK, please move it to
unevictable-lru". So, the following code is easily confusing.
if (vma->vm_flags & VM_LOCKED) {
ret = SWAP_MLOCK;
goto out_unmap;
}
Plus, if the VMA doesn't have VM_LOCKED, we don't need to check
whether we need to call mlock_vma_page().
Also, add some commentary to try_to_unmap_one().
Acked-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 35 |
1 files changed, 22 insertions, 13 deletions
@@ -789,10 +789,9 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
789 | * skipped over this mm) then we should reactivate it. | 789 | * skipped over this mm) then we should reactivate it. |
790 | */ | 790 | */ |
791 | if (!(flags & TTU_IGNORE_MLOCK)) { | 791 | if (!(flags & TTU_IGNORE_MLOCK)) { |
792 | if (vma->vm_flags & VM_LOCKED) { | 792 | if (vma->vm_flags & VM_LOCKED) |
793 | ret = SWAP_MLOCK; | 793 | goto out_mlock; |
794 | goto out_unmap; | 794 | |
795 | } | ||
796 | if (TTU_ACTION(flags) == TTU_MUNLOCK) | 795 | if (TTU_ACTION(flags) == TTU_MUNLOCK) |
797 | goto out_unmap; | 796 | goto out_unmap; |
798 | } | 797 | } |
@@ -865,18 +864,28 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
865 | 864 | ||
866 | out_unmap: | 865 | out_unmap: |
867 | pte_unmap_unlock(pte, ptl); | 866 | pte_unmap_unlock(pte, ptl); |
867 | out: | ||
868 | return ret; | ||
868 | 869 | ||
869 | if (ret == SWAP_MLOCK) { | 870 | out_mlock: |
870 | ret = SWAP_AGAIN; | 871 | pte_unmap_unlock(pte, ptl); |
871 | if (down_read_trylock(&vma->vm_mm->mmap_sem)) { | 872 | |
872 | if (vma->vm_flags & VM_LOCKED) { | 873 | |
873 | mlock_vma_page(page); | 874 | /* |
874 | ret = SWAP_MLOCK; | 875 | * We need mmap_sem locking, Otherwise VM_LOCKED check makes |
875 | } | 876 | * unstable result and race. Plus, We can't wait here because |
876 | up_read(&vma->vm_mm->mmap_sem); | 877 | * we now hold anon_vma->lock or mapping->i_mmap_lock. |
878 | * if trylock failed, the page remain in evictable lru and later | ||
879 | * vmscan could retry to move the page to unevictable lru if the | ||
880 | * page is actually mlocked. | ||
881 | */ | ||
882 | if (down_read_trylock(&vma->vm_mm->mmap_sem)) { | ||
883 | if (vma->vm_flags & VM_LOCKED) { | ||
884 | mlock_vma_page(page); | ||
885 | ret = SWAP_MLOCK; | ||
877 | } | 886 | } |
887 | up_read(&vma->vm_mm->mmap_sem); | ||
878 | } | 888 | } |
879 | out: | ||
880 | return ret; | 889 | return ret; |
881 | } | 890 | } |
882 | 891 | ||