author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-01-21 18:49:50 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 19:19:45 -0500
commit	52629506420ce32997f1fba0a1ab2f1aaa9a4f79 (patch)
tree	1dd4e4fbe8f192a0f940adbf466b5cce07e9b6a2
parent	0dd1c7bbce8d1d142bb25aefaa50262dfd77cb78 (diff)
mm/rmap: use rmap_walk() in try_to_unmap()
rmap_walk() now provides the infrastructure to handle the differences between the variants of the rmap traversing functions, so try_to_unmap() can simply use it. This patch changes two things:

1. Build rmap_walk() even when !CONFIG_MIGRATION.
2. Mechanically convert try_to_unmap() to use rmap_walk().

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/rmap.h	5
-rw-r--r--	mm/ksm.c	4
-rw-r--r--	mm/rmap.c	48
3 files changed, 39 insertions(+), 18 deletions(-)
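For context, the commit message describes a conversion to the generic rmap_walk() callback interface: the caller fills a struct rmap_walk_control with a per-mapping callback, an opaque argument, and a completion check, then hands the page to rmap_walk(). Below is a minimal, hypothetical sketch of that calling pattern using only the fields this patch exercises; the example_* helpers are illustrative and not part of the patch, and the authoritative definitions live in include/linux/rmap.h.

```c
#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical per-mapping callback; arg carries the caller's flags cast to void *. */
static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	/* Do the per-pte work here; SWAP_AGAIN tells the walk to continue. */
	return SWAP_AGAIN;
}

/* A non-zero return stops the walk early, here once the page is fully unmapped. */
static int example_done(struct page *page)
{
	return !page_mapped(page);
}

static int example_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = example_rmap_one,
		.arg = (void *)flags,
		.done = example_done,
	};

	/* rmap_walk() picks the ksm, anon, or file traversal for the page. */
	return rmap_walk(page, &rwc);
}
```

The patch below wires try_to_unmap_one() and page_not_mapped() into exactly these slots, and additionally sets the file_nonlinear, anon_lock, and (for migration) invalid_vma callbacks.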
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 616aa4d05f0a..2462458708cd 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -190,7 +190,7 @@ int page_referenced_one(struct page *, struct vm_area_struct *,
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
 int try_to_unmap_one(struct page *, struct vm_area_struct *,
-			unsigned long address, enum ttu_flags flags);
+			unsigned long address, void *arg);
 
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
@@ -256,9 +256,6 @@ struct rmap_walk_control {
 	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
 
-/*
- * Called by migrate.c to remove migration ptes, but might be used more later.
- */
 int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
 
 #else	/* !CONFIG_MMU */
diff --git a/mm/ksm.c b/mm/ksm.c
index 91b8cb35f7cc..6b4baa97f4c0 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1982,7 +1982,7 @@ again:
 				continue;
 
 			ret = try_to_unmap_one(page, vma,
-					rmap_item->address, flags);
+					rmap_item->address, (void *)flags);
 			if (ret != SWAP_AGAIN || !page_mapped(page)) {
 				anon_vma_unlock_read(anon_vma);
 				goto out;
@@ -1996,7 +1996,6 @@ out:
 	return ret;
 }
 
-#ifdef CONFIG_MIGRATION
 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct stable_node *stable_node;
@@ -2054,6 +2053,7 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_MIGRATION
 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 	struct stable_node *stable_node;
diff --git a/mm/rmap.c b/mm/rmap.c
index 97bf8f0396f8..b3263cb32361 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1179,15 +1179,18 @@ out:
 /*
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
+ *
+ * @arg: enum ttu_flags will be passed to this argument
  */
 int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-		     unsigned long address, enum ttu_flags flags)
+		     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte;
 	pte_t pteval;
 	spinlock_t *ptl;
 	int ret = SWAP_AGAIN;
+	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
@@ -1513,6 +1516,11 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma)
 	return false;
 }
 
+static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
+{
+	return is_vma_temporary_stack(vma);
+}
+
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1558,7 +1566,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			continue;
 
 		address = vma_address(page, vma);
-		ret = try_to_unmap_one(page, vma, address, flags);
+		ret = try_to_unmap_one(page, vma, address, (void *)flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
 	}
@@ -1592,7 +1600,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		ret = try_to_unmap_one(page, vma, address, flags);
+		ret = try_to_unmap_one(page, vma, address, (void *)flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
 	}
@@ -1614,6 +1622,11 @@ out:
 	return ret;
 }
 
+static int page_not_mapped(struct page *page)
+{
+	return !page_mapped(page);
+};
+
 /**
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
@@ -1631,16 +1644,29 @@ out:
 int try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	int ret;
+	struct rmap_walk_control rwc = {
+		.rmap_one = try_to_unmap_one,
+		.arg = (void *)flags,
+		.done = page_not_mapped,
+		.file_nonlinear = try_to_unmap_nonlinear,
+		.anon_lock = page_lock_anon_vma_read,
+	};
 
-	BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
 
-	if (unlikely(PageKsm(page)))
-		ret = try_to_unmap_ksm(page, flags);
-	else if (PageAnon(page))
-		ret = try_to_unmap_anon(page, flags);
-	else
-		ret = try_to_unmap_file(page, flags);
+	/*
+	 * During exec, a temporary VMA is setup and later moved.
+	 * The VMA is moved under the anon_vma lock but not the
+	 * page tables leading to a race where migration cannot
+	 * find the migration ptes. Rather than increasing the
+	 * locking requirements of exec(), migration skips
+	 * temporary VMAs until after exec() completes.
+	 */
+	if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page))
+		rwc.invalid_vma = invalid_migration_vma;
+
+	ret = rmap_walk(page, &rwc);
+
 	if (ret != SWAP_MLOCK && !page_mapped(page))
 		ret = SWAP_SUCCESS;
 	return ret;
@@ -1683,7 +1709,6 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 	anon_vma_free(anon_vma);
 }
 
-#ifdef CONFIG_MIGRATION
 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 					struct rmap_walk_control *rwc)
 {
@@ -1785,7 +1810,6 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 	else
 		return rmap_walk_file(page, rwc);
 }
-#endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*