 include/linux/ksm.h  |  7
 include/linux/rmap.h |  9
 mm/ksm.c             |  6
 mm/migrate.c         |  7
 mm/rmap.c            | 19
 5 files changed, 27 insertions(+), 21 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 45c9b6a17bcb..0eef8cb0baf7 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -76,8 +76,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 int page_referenced_ksm(struct page *page,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
-int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg);
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
 
 #else /* !CONFIG_KSM */
@@ -120,8 +119,8 @@ static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 	return 0;
 }
 
-static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+static inline int rmap_walk_ksm(struct page *page,
+		struct rmap_walk_control *rwc)
 {
 	return 0;
 }
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 6dacb93a6d94..6a456ce6de20 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -235,11 +235,16 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page);
 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
+struct rmap_walk_control {
+	void *arg;
+	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+					unsigned long addr, void *arg);
+};
+
 /*
  * Called by migrate.c to remove migration ptes, but might be used more later.
  */
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg);
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
 
 #else /* !CONFIG_MMU */
 
diff --git a/mm/ksm.c b/mm/ksm.c
index 175fff79dc95..c3035fee8080 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1997,8 +1997,7 @@ out:
 }
 
 #ifdef CONFIG_MIGRATION
-int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct stable_node *stable_node;
 	struct rmap_item *rmap_item;
@@ -2033,7 +2032,8 @@ again:
 			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
 				continue;
 
-			ret = rmap_one(page, vma, rmap_item->address, arg);
+			ret = rwc->rmap_one(page, vma,
+				rmap_item->address, rwc->arg);
 			if (ret != SWAP_AGAIN) {
 				anon_vma_unlock_read(anon_vma);
 				goto out;
diff --git a/mm/migrate.c b/mm/migrate.c
index 9194375b2307..11d89dc0574c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -199,7 +199,12 @@ out:
  */
 static void remove_migration_ptes(struct page *old, struct page *new)
 {
-	rmap_walk(new, remove_migration_pte, old);
+	struct rmap_walk_control rwc = {
+		.rmap_one = remove_migration_pte,
+		.arg = old,
+	};
+
+	rmap_walk(new, &rwc);
 }
 
 /*
diff --git a/mm/rmap.c b/mm/rmap.c
index 5a79bf585e27..f8f10ad5d359 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1706,8 +1706,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page)
  * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
  * Called by migrate.c to remove migration ptes, but might be used more later.
  */
-static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1721,7 +1720,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		ret = rmap_one(page, vma, address, arg);
+		ret = rwc->rmap_one(page, vma, address, rwc->arg);
 		if (ret != SWAP_AGAIN)
 			break;
 	}
@@ -1729,8 +1728,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	return ret;
 }
 
-static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff = page->index << compound_order(page);
@@ -1742,7 +1740,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		ret = rmap_one(page, vma, address, arg);
+		ret = rwc->rmap_one(page, vma, address, rwc->arg);
 		if (ret != SWAP_AGAIN)
 			break;
 	}
@@ -1755,17 +1753,16 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	return ret;
 }
 
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 {
 	VM_BUG_ON(!PageLocked(page));
 
 	if (unlikely(PageKsm(page)))
-		return rmap_walk_ksm(page, rmap_one, arg);
+		return rmap_walk_ksm(page, rwc);
 	else if (PageAnon(page))
-		return rmap_walk_anon(page, rmap_one, arg);
+		return rmap_walk_anon(page, rwc);
 	else
-		return rmap_walk_file(page, rmap_one, arg);
+		return rmap_walk_file(page, rwc);
 }
 #endif /* CONFIG_MIGRATION */
 
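
A minimal usage sketch of the new interface (not part of the patch): after this change, every rmap_walk() caller packs its per-mapping callback and private data into a struct rmap_walk_control, exactly as the remove_migration_ptes() hunk above does. The names demo_rmap_one and demo_walk below are hypothetical and only illustrate the calling convention; returning SWAP_AGAIN keeps the walk going, any other value stops it, and the page must be locked when rmap_walk() is called.

static int demo_rmap_one(struct page *page, struct vm_area_struct *vma,
			 unsigned long addr, void *arg)
{
	/* Examine one mapping of @page at @addr in @vma; @arg is rwc->arg. */
	return SWAP_AGAIN;	/* any other return value stops the walk */
}

static void demo_walk(struct page *page, void *private)
{
	struct rmap_walk_control rwc = {
		.rmap_one = demo_rmap_one,
		.arg = private,
	};

	/* rmap_walk() asserts PageLocked(page) before walking. */
	rmap_walk(page, &rwc);
}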