 include/linux/rmap.h | 15 +++++++++++++++
 mm/ksm.c             |  7 +++++++
 mm/rmap.c            | 37 +++++++++++++++++++++++++++++++++++++--------
 3 files changed, 51 insertions(+), 8 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 6a456ce6de20..616aa4d05f0a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -235,10 +235,25 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page);
 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
+/*
+ * rmap_walk_control: To control rmap traversal for specific needs
+ *
+ * arg: passed to rmap_one() and invalid_vma()
+ * rmap_one: executed on each vma where page is mapped
+ * done: for checking traversal termination condition
+ * file_nonlinear: for handling file nonlinear mapping
+ * anon_lock: for getting the anon_vma lock in an optimized way rather
+ *            than the default
+ * invalid_vma: for skipping vmas we are not interested in
+ */
 struct rmap_walk_control {
 	void *arg;
 	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
 					unsigned long addr, void *arg);
+	int (*done)(struct page *page);
+	int (*file_nonlinear)(struct page *, struct address_space *,
+					struct vm_area_struct *vma);
+	struct anon_vma *(*anon_lock)(struct page *page);
+	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
 
 /*
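
The new fields turn the walker into a small callback protocol. As a minimal caller-side sketch, the helpers below are hypothetical and not part of this patch; only struct rmap_walk_control, the rmap_one() signature, and the SWAP_AGAIN convention come from the code above:

/* Hypothetical helpers, for illustration only. */
static bool skip_readonly_vma(struct vm_area_struct *vma, void *arg)
{
	/* invalid_vma: non-zero means skip this vma. */
	return !(vma->vm_flags & VM_WRITE);
}

static int stop_when_unmapped(struct page *page)
{
	/* done: non-zero ends the walk; here, once nothing maps the page. */
	return !page_mapped(page);
}

static int touch_one_vma(struct page *page, struct vm_area_struct *vma,
			 unsigned long addr, void *arg)
{
	/* rmap_one: per-vma work goes here; SWAP_AGAIN means "keep walking". */
	return SWAP_AGAIN;
}

static struct rmap_walk_control example_rwc = {
	.rmap_one	= touch_one_vma,
	.done		= stop_when_unmapped,
	.invalid_vma	= skip_readonly_vma,
};
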
diff --git a/mm/ksm.c b/mm/ksm.c
index c3035fee8080..91b8cb35f7cc 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2032,12 +2032,19 @@ again:
 			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
 				continue;
 
+			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+				continue;
+
 			ret = rwc->rmap_one(page, vma,
 					rmap_item->address, rwc->arg);
 			if (ret != SWAP_AGAIN) {
 				anon_vma_unlock_read(anon_vma);
 				goto out;
 			}
+			if (rwc->done && rwc->done(page)) {
+				anon_vma_unlock_read(anon_vma);
+				goto out;
+			}
 		}
 		anon_vma_unlock_read(anon_vma);
 	}
diff --git a/mm/rmap.c b/mm/rmap.c
index f8f10ad5d359..97bf8f0396f8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1684,10 +1684,14 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_MIGRATION
-static struct anon_vma *rmap_walk_anon_lock(struct page *page)
+static struct anon_vma *rmap_walk_anon_lock(struct page *page,
+					struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
 
+	if (rwc->anon_lock)
+		return rwc->anon_lock(page);
+
 	/*
 	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
 	 * because that depends on page_mapped(); but not all its usages
@@ -1713,16 +1717,22 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
-	anon_vma = rmap_walk_anon_lock(page);
+	anon_vma = rmap_walk_anon_lock(page, rwc);
 	if (!anon_vma)
 		return ret;
 
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
+
+		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+			continue;
+
 		ret = rwc->rmap_one(page, vma, address, rwc->arg);
 		if (ret != SWAP_AGAIN)
 			break;
+		if (rwc->done && rwc->done(page))
+			break;
 	}
 	anon_vma_unlock_read(anon_vma);
 	return ret;
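
The anon_lock hook above lets a caller replace the default locking in rmap_walk_anon_lock(). A hedged sketch of what such an override might look like, assuming the caller has already pinned the anon_vma (e.g. via page_get_anon_vma()) so a plain read lock is safe; this helper is illustrative, not from this patch:

/* Hypothetical anon_lock override, for illustration only. */
static struct anon_vma *locked_anon_vma(struct page *page)
{
	/*
	 * Assumes the caller already holds a reference keeping the
	 * anon_vma alive, so the careful page_mapped() checks in the
	 * default path are unnecessary.
	 */
	struct anon_vma *anon_vma = page_anon_vma(page);

	if (anon_vma)
		anon_vma_lock_read(anon_vma);
	return anon_vma;
}
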
@@ -1740,15 +1750,26 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
+
+		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+			continue;
+
 		ret = rwc->rmap_one(page, vma, address, rwc->arg);
 		if (ret != SWAP_AGAIN)
-			break;
+			goto done;
+		if (rwc->done && rwc->done(page))
+			goto done;
 	}
-	/*
-	 * No nonlinear handling: being always shared, nonlinear vmas
-	 * never contain migration ptes. Decide what to do about this
-	 * limitation to linear when we need rmap_walk() on nonlinear.
-	 */
+
+	if (!rwc->file_nonlinear)
+		goto done;
+
+	if (list_empty(&mapping->i_mmap_nonlinear))
+		goto done;
+
+	ret = rwc->file_nonlinear(page, mapping, vma);
+
+done:
 	mutex_unlock(&mapping->i_mmap_mutex);
 	return ret;
 }
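
Taken together, a walk can now be driven end to end with per-walk state passed through arg. A minimal sketch, assuming the rmap_walk(page, rwc) entry point from the surrounding series dispatches to the anon/ksm/file walkers above; the counting helpers are hypothetical. file_nonlinear is left NULL here, so nonlinear file mappings are skipped via the goto done path added above:

/* Hypothetical end-to-end usage, for illustration only. */
static int count_one_mapping(struct page *page, struct vm_area_struct *vma,
			     unsigned long addr, void *arg)
{
	int *count = arg;	/* per-walk state arrives via rwc->arg */

	(*count)++;
	return SWAP_AGAIN;	/* keep walking */
}

static int count_page_mappings(struct page *page)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.arg		= &count,
		.rmap_one	= count_one_mapping,
	};

	rmap_walk(page, &rwc);	/* assumed entry point from this series */
	return count;
}
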