author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-01-21 18:49:52 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 19:19:45 -0500
commit	e8351ac9bfa7f4412d5d196b6742309473ca506d (patch)
tree	a7110ca9274ac25e72f3fac02cc7ba5e934e995b /mm/ksm.c
parent	52629506420ce32997f1fba0a1ab2f1aaa9a4f79 (diff)
mm/rmap: use rmap_walk() in try_to_munlock()
Now that we have infrastructure in rmap_walk() to handle the differences
between the variants of the rmap traversing functions, just use it in
try_to_munlock().

This patch changes the following things:

1. Remove some variants of the rmap traversing functions
   (try_to_unmap_ksm, try_to_unmap_anon, try_to_unmap_file).
2. Mechanically convert try_to_munlock() to use rmap_walk()
   (a sketch of the resulting code follows the sign-offs below).
3. Copy and paste the comments.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
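For context, a minimal sketch of the shape try_to_munlock() takes in
mm/rmap.c after this conversion: a struct rmap_walk_control bundles the
per-VMA callback and the TTU_MUNLOCK flag, and rmap_walk() drives the
traversal. The control fields shown follow the rmap_walk() API introduced
by the parent commit; treat the body as illustrative rather than the
verbatim committed code.

int try_to_munlock(struct page *page)
{
	int ret;
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,		/* run on each vma mapping the page */
		.arg = (void *)TTU_MUNLOCK,		/* flags forwarded to rmap_one */
		.done = page_not_mapped,		/* stop once nothing maps the page */
		.anon_lock = page_lock_anon_vma_read,	/* how to take the anon_vma lock */
	};

	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	ret = rmap_walk(page, &rwc);
	return ret;
}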
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	50
1 file changed, 0 insertions(+), 50 deletions(-)
@@ -1946,56 +1946,6 @@ out:
 	return referenced;
 }
 
-int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
-{
-	struct stable_node *stable_node;
-	struct rmap_item *rmap_item;
-	int ret = SWAP_AGAIN;
-	int search_new_forks = 0;
-
-	VM_BUG_ON(!PageKsm(page));
-	VM_BUG_ON(!PageLocked(page));
-
-	stable_node = page_stable_node(page);
-	if (!stable_node)
-		return SWAP_FAIL;
-again:
-	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
-		struct anon_vma *anon_vma = rmap_item->anon_vma;
-		struct anon_vma_chain *vmac;
-		struct vm_area_struct *vma;
-
-		anon_vma_lock_read(anon_vma);
-		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
-					       0, ULONG_MAX) {
-			vma = vmac->vma;
-			if (rmap_item->address < vma->vm_start ||
-			    rmap_item->address >= vma->vm_end)
-				continue;
-			/*
-			 * Initially we examine only the vma which covers this
-			 * rmap_item; but later, if there is still work to do,
-			 * we examine covering vmas in other mms: in case they
-			 * were forked from the original since ksmd passed.
-			 */
-			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
-				continue;
-
-			ret = try_to_unmap_one(page, vma,
-					rmap_item->address, (void *)flags);
-			if (ret != SWAP_AGAIN || !page_mapped(page)) {
-				anon_vma_unlock_read(anon_vma);
-				goto out;
-			}
-		}
-		anon_vma_unlock_read(anon_vma);
-	}
-	if (!search_new_forks++)
-		goto again;
-out:
-	return ret;
-}
-
 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct stable_node *stable_node;
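The "infrastructure" the commit message refers to is the dispatch in
rmap_walk(), which selects the KSM, anon or file traversal for the page,
so callers such as try_to_munlock() no longer need per-type variants like
the try_to_unmap_ksm() removed above. A sketch of that dispatcher, as
added earlier in this series (see mm/rmap.c for the authoritative
version):

int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rwc);	/* stable-tree walk, as above */
	else if (PageAnon(page))
		return rmap_walk_anon(page, rwc);	/* anon_vma interval tree */
	else
		return rmap_walk_file(page, rwc);	/* i_mmap interval tree */
}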