Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	135
1 file changed, 21 insertions(+), 114 deletions(-)
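This squashed diff removes KSM's two special-purpose reverse-map walkers, page_referenced_ksm() and try_to_unmap_ksm(), and makes rmap_walk_ksm() the single entry point for walking all mappings of a KSM page. Instead of hard-coding calls to page_referenced_one() or try_to_unmap_one(), the walker now dispatches through a caller-supplied struct rmap_walk_control. As a reading aid, here is a minimal sketch of that structure, reduced to the members this diff actually dereferences; the canonical definition lives in include/linux/rmap.h and carries further hooks (e.g. anon_lock) not shown here:

/*
 * Callback bundle consumed by rmap_walk_ksm() below (sketch only).
 */
struct rmap_walk_control {
	void *arg;		/* private data handed back to each callback */
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			unsigned long addr, void *arg);
				/* per-mapping work; SWAP_AGAIN = keep walking */
	int (*done)(struct page *page);	/* nonzero = walk may stop early */
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
				/* skip this vma without visiting it */
};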
@@ -1891,21 +1891,24 @@ struct page *ksm_might_need_to_copy(struct page *page,
 	return new_page;
 }
 
-int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
-			unsigned long *vm_flags)
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct stable_node *stable_node;
 	struct rmap_item *rmap_item;
-	unsigned int mapcount = page_mapcount(page);
-	int referenced = 0;
+	int ret = SWAP_AGAIN;
 	int search_new_forks = 0;
 
-	VM_BUG_ON(!PageKsm(page));
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageKsm(page), page);
+
+	/*
+	 * Rely on the page lock to protect against concurrent modifications
+	 * to that page's node of the stable tree.
+	 */
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	stable_node = page_stable_node(page);
 	if (!stable_node)
-		return 0;
+		return ret;
 again:
 	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
 		struct anon_vma *anon_vma = rmap_item->anon_vma;
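The first hunk also changes the return convention: the walker no longer accumulates a referenced count, it returns the SWAP_* code of the last callback (SWAP_AGAIN when the walk ran to completion), and any accounting lives in the caller's arg. A sketch of a rmap_one callback written in that style follows; the page_referenced_arg layout mirrors what mm/rmap.c of this era uses, but treat the names as illustrative rather than a verbatim quote:

/* Illustrative rmap_one callback: accumulate into the caller's arg
 * instead of into locals of the (now deleted) KSM-specific walker. */
struct page_referenced_arg {
	int mapcount;			/* mappings not yet visited */
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;	/* NULL = no cgroup filtering */
};

static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			       unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;

	/* (young-bit test of the pte/pmd elided) */
	pra->referenced++;
	pra->vm_flags |= vma->vm_flags;

	if (!--pra->mapcount)
		return SWAP_SUCCESS;	/* all mappings seen: stop the walk */
	return SWAP_AGAIN;		/* keep walking */
}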
@@ -1928,113 +1931,16 @@ again:
 			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
 				continue;
 
-			if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
+			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 				continue;
 
-			referenced += page_referenced_one(page, vma,
-				rmap_item->address, &mapcount, vm_flags);
-			if (!search_new_forks || !mapcount)
-				break;
-		}
-		anon_vma_unlock_read(anon_vma);
-		if (!mapcount)
-			goto out;
-	}
-	if (!search_new_forks++)
-		goto again;
-out:
-	return referenced;
-}
-
-int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
-{
-	struct stable_node *stable_node;
-	struct rmap_item *rmap_item;
-	int ret = SWAP_AGAIN;
-	int search_new_forks = 0;
-
-	VM_BUG_ON(!PageKsm(page));
-	VM_BUG_ON(!PageLocked(page));
-
-	stable_node = page_stable_node(page);
-	if (!stable_node)
-		return SWAP_FAIL;
-again:
-	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
-		struct anon_vma *anon_vma = rmap_item->anon_vma;
-		struct anon_vma_chain *vmac;
-		struct vm_area_struct *vma;
-
-		anon_vma_lock_read(anon_vma);
-		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
-					       0, ULONG_MAX) {
-			vma = vmac->vma;
-			if (rmap_item->address < vma->vm_start ||
-			    rmap_item->address >= vma->vm_end)
-				continue;
-			/*
-			 * Initially we examine only the vma which covers this
-			 * rmap_item; but later, if there is still work to do,
-			 * we examine covering vmas in other mms: in case they
-			 * were forked from the original since ksmd passed.
-			 */
-			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
-				continue;
-
-			ret = try_to_unmap_one(page, vma,
-					rmap_item->address, flags);
-			if (ret != SWAP_AGAIN || !page_mapped(page)) {
-				anon_vma_unlock_read(anon_vma);
-				goto out;
-			}
-		}
-		anon_vma_unlock_read(anon_vma);
-	}
-	if (!search_new_forks++)
-		goto again;
-out:
-	return ret;
-}
-
-#ifdef CONFIG_MIGRATION
-int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
-{
-	struct stable_node *stable_node;
-	struct rmap_item *rmap_item;
-	int ret = SWAP_AGAIN;
-	int search_new_forks = 0;
-
-	VM_BUG_ON(!PageKsm(page));
-	VM_BUG_ON(!PageLocked(page));
-
-	stable_node = page_stable_node(page);
-	if (!stable_node)
-		return ret;
-again:
-	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
-		struct anon_vma *anon_vma = rmap_item->anon_vma;
-		struct anon_vma_chain *vmac;
-		struct vm_area_struct *vma;
-
-		anon_vma_lock_read(anon_vma);
-		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
-					       0, ULONG_MAX) {
-			vma = vmac->vma;
-			if (rmap_item->address < vma->vm_start ||
-			    rmap_item->address >= vma->vm_end)
-				continue;
-			/*
-			 * Initially we examine only the vma which covers this
-			 * rmap_item; but later, if there is still work to do,
-			 * we examine covering vmas in other mms: in case they
-			 * were forked from the original since ksmd passed.
-			 */
-			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
-				continue;
-
-			ret = rmap_one(page, vma, rmap_item->address, arg);
+			ret = rwc->rmap_one(page, vma,
+					rmap_item->address, rwc->arg);
 			if (ret != SWAP_AGAIN) {
 				anon_vma_unlock_read(anon_vma);
 				goto out;
 			}
+			if (rwc->done && rwc->done(page)) {
+				anon_vma_unlock_read(anon_vma);
+				goto out;
+			}
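The logic the deleted walkers carried inline survives as callbacks: page_referenced_ksm()'s memcg test becomes an invalid_vma hook, and try_to_unmap_ksm()'s "!page_mapped(page)" early exit becomes a done hook. A hedged sketch of both, wired up the way a caller such as try_to_unmap() in mm/rmap.c would (from memory of this era's tree, not a verbatim quote):

/* invalid_vma: the old "if (memcg && !mm_match_cgroup(...)) continue;" */
static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;

	return pra->memcg && !mm_match_cgroup(vma->vm_mm, pra->memcg);
}

/* done: the old "|| !page_mapped(page)" termination test */
static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}

/* caller side, e.g. try_to_unmap() (sketch): */
struct rmap_walk_control rwc = {
	.rmap_one = try_to_unmap_one,
	.arg = (void *)flags,		/* enum ttu_flags smuggled via arg */
	.done = page_not_mapped,
};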
@@ -2047,17 +1953,18 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_MIGRATION
 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 	struct stable_node *stable_node;
 
-	VM_BUG_ON(!PageLocked(oldpage));
-	VM_BUG_ON(!PageLocked(newpage));
-	VM_BUG_ON(newpage->mapping != oldpage->mapping);
+	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
+	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
+	VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
 
 	stable_node = page_stable_node(newpage);
 	if (stable_node) {
-		VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+		VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
 		stable_node->kpfn = page_to_pfn(newpage);
 		/*
 		 * newpage->mapping was set in advance; now we need smp_wmb()
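The assertion churn in ksm_migrate_page() replaces VM_BUG_ON() with VM_BUG_ON_PAGE(), which dumps the offending struct page before panicking, so a crash log shows the page's flags, mapping and refcounts rather than just a file and line. Roughly, from include/linux/mmdebug.h of this era (quoted from memory, so treat as a sketch):

#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON_PAGE(cond, page)					\
	do {								\
		if (unlikely(cond)) {					\
			dump_page(page);	/* page state first */	\
			BUG();						\
		}							\
	} while (0)
#else
#define VM_BUG_ON_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
#endif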
@@ -2438,4 +2345,4 @@ out_free:
 out:
 	return err;
 }
-module_init(ksm_init)
+subsys_initcall(ksm_init);
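The final hunk promotes KSM's setup from module_init() to subsys_initcall(), adding the customary trailing semicolon in passing. For built-in code, module_init() expands to device_initcall(), so ksm_init() now runs two initcall levels earlier in boot. The level table from include/linux/init.h (abridged):

/* include/linux/init.h (abridged): a lower id runs earlier in boot */
#define core_initcall(fn)	__define_initcall(fn, 1)
#define postcore_initcall(fn)	__define_initcall(fn, 2)
#define arch_initcall(fn)	__define_initcall(fn, 3)
#define subsys_initcall(fn)	__define_initcall(fn, 4)  /* ksm_init now here */
#define fs_initcall(fn)		__define_initcall(fn, 5)
#define device_initcall(fn)	__define_initcall(fn, 6)  /* module_init when built in */
#define late_initcall(fn)	__define_initcall(fn, 7)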