author		Minchan Kim <minchan@kernel.org>	2017-05-03 17:54:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-03 18:52:10 -0400
commit		1df631ae19819cff343d316eda42eca32d3de7fc
tree		f643ec82c7c5aff378f11453ff2660a27b0cfdf4
parent		666e5a406c3ed562e7b3ceff8b631b6067bdaead
mm: make rmap_walk() return void
There is no user of the return value from rmap_walk() and friends, so
this patch makes them void-returning functions.
Link: http://lkml.kernel.org/r/1489555493-14659-9-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/ksm.h	 5
-rw-r--r--	include/linux/rmap.h	 4
-rw-r--r--	mm/ksm.c		16
-rw-r--r--	mm/rmap.c		32
4 files changed, 23 insertions, 34 deletions
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index e1cfda4bee58..78b44a024eaa 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -61,7 +61,7 @@ static inline void set_page_stable_node(struct page *page,
 struct page *ksm_might_need_to_copy(struct page *page,
 			struct vm_area_struct *vma, unsigned long address);
 
-int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
 
 #else  /* !CONFIG_KSM */
@@ -94,10 +94,9 @@ static inline int page_referenced_ksm(struct page *page,
 	return 0;
 }
 
-static inline int rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct page *page,
 			struct rmap_walk_control *rwc)
 {
-	return 0;
 }
 
 static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 6028c38d3cac..1d7d457ca0dc 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -264,8 +264,8 @@ struct rmap_walk_control {
 	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
 
-int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
 
 #else	/* !CONFIG_MMU */
 
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1933,11 +1933,10 @@ struct page *ksm_might_need_to_copy(struct page *page,
 	return new_page;
 }
 
-int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct stable_node *stable_node;
 	struct rmap_item *rmap_item;
-	int ret = SWAP_AGAIN;
 	int search_new_forks = 0;
 
 	VM_BUG_ON_PAGE(!PageKsm(page), page);
@@ -1950,7 +1949,7 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 
 	stable_node = page_stable_node(page);
 	if (!stable_node)
-		return ret;
+		return;
 again:
 	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
 		struct anon_vma *anon_vma = rmap_item->anon_vma;
@@ -1978,23 +1977,20 @@ again:
 			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 				continue;
 
-			ret = rwc->rmap_one(page, vma,
-					rmap_item->address, rwc->arg);
-			if (ret != SWAP_AGAIN) {
+			if (SWAP_AGAIN != rwc->rmap_one(page, vma,
+					rmap_item->address, rwc->arg)) {
 				anon_vma_unlock_read(anon_vma);
-				goto out;
+				return;
 			}
 			if (rwc->done && rwc->done(page)) {
 				anon_vma_unlock_read(anon_vma);
-				goto out;
+				return;
 			}
 		}
 		anon_vma_unlock_read(anon_vma);
 	}
 	if (!search_new_forks++)
 		goto again;
-out:
-	return ret;
 }
 
 #ifdef CONFIG_MIGRATION
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1607,13 +1607,12 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
  */
-static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
+static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		bool locked)
 {
 	struct anon_vma *anon_vma;
 	pgoff_t pgoff_start, pgoff_end;
 	struct anon_vma_chain *avc;
-	int ret = SWAP_AGAIN;
 
 	if (locked) {
 		anon_vma = page_anon_vma(page);
@@ -1623,7 +1622,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		anon_vma = rmap_walk_anon_lock(page, rwc);
 	}
 	if (!anon_vma)
-		return ret;
+		return;
 
 	pgoff_start = page_to_pgoff(page);
 	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
@@ -1637,8 +1636,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 			continue;
 
-		ret = rwc->rmap_one(page, vma, address, rwc->arg);
-		if (ret != SWAP_AGAIN)
+		if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg))
 			break;
 		if (rwc->done && rwc->done(page))
 			break;
@@ -1646,7 +1644,6 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 
 	if (!locked)
 		anon_vma_unlock_read(anon_vma);
-	return ret;
 }
 
 /*
@@ -1662,13 +1659,12 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
  */
-static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
+static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 		bool locked)
 {
 	struct address_space *mapping = page_mapping(page);
 	pgoff_t pgoff_start, pgoff_end;
 	struct vm_area_struct *vma;
-	int ret = SWAP_AGAIN;
 
 	/*
 	 * The page lock not only makes sure that page->mapping cannot
@@ -1679,7 +1675,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	if (!mapping)
-		return ret;
+		return;
 
 	pgoff_start = page_to_pgoff(page);
 	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
@@ -1694,8 +1690,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 			continue;
 
-		ret = rwc->rmap_one(page, vma, address, rwc->arg);
-		if (ret != SWAP_AGAIN)
+		if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg))
 			goto done;
 		if (rwc->done && rwc->done(page))
 			goto done;
@@ -1704,28 +1699,27 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 done:
 	if (!locked)
 		i_mmap_unlock_read(mapping);
-	return ret;
 }
 
-int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 {
 	if (unlikely(PageKsm(page)))
-		return rmap_walk_ksm(page, rwc);
+		rmap_walk_ksm(page, rwc);
 	else if (PageAnon(page))
-		return rmap_walk_anon(page, rwc, false);
+		rmap_walk_anon(page, rwc, false);
 	else
-		return rmap_walk_file(page, rwc, false);
+		rmap_walk_file(page, rwc, false);
 }
 
 /* Like rmap_walk, but caller holds relevant rmap lock */
-int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
 {
 	/* no ksm support for now */
 	VM_BUG_ON_PAGE(PageKsm(page), page);
 	if (PageAnon(page))
-		return rmap_walk_anon(page, rwc, true);
+		rmap_walk_anon(page, rwc, true);
 	else
-		return rmap_walk_file(page, rwc, true);
+		rmap_walk_file(page, rwc, true);
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
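
To illustrate the reasoning in the commit message, here is a minimal sketch, not part of the patch, of how a caller typically drives rmap_walk() after this change: results travel back through rwc.arg and early termination is requested through the rmap_one()/done() callbacks, so the walker's return value carried no information. The count_mappings() and count_one_mapping() helpers are hypothetical, invented only for this illustration; struct rmap_walk_control and SWAP_AGAIN are taken from include/linux/rmap.h as it stands at this commit.

/* Illustrative sketch only; not part of this patch. */
#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical per-mapping callback: count each mapping, keep walking. */
static int count_one_mapping(struct page *page, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	int *count = arg;

	(*count)++;
	return SWAP_AGAIN;	/* any other value would stop the walk early */
}

/* Hypothetical caller: the result comes back via rwc.arg, not a return value. */
static int count_mappings(struct page *page)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.arg = &count,
		.rmap_one = count_one_mapping,
	};

	rmap_walk(page, &rwc);	/* void after this patch; nothing to check */
	return count;
}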