author		Joonsoo Kim <iamjoonsoo.kim@lge.com>		2014-01-21 18:49:50 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 19:19:45 -0500
commit		52629506420ce32997f1fba0a1ab2f1aaa9a4f79
tree		1dd4e4fbe8f192a0f940adbf466b5cce07e9b6a2 /mm/rmap.c
parent		0dd1c7bbce8d1d142bb25aefaa50262dfd77cb78
mm/rmap: use rmap_walk() in try_to_unmap()
Now that rmap_walk() provides the infrastructure to handle the differences
between the variants of the rmap traversal functions, use it in
try_to_unmap().

This patch makes the following changes:

1. enable rmap_walk() even when !CONFIG_MIGRATION;
2. mechanically convert try_to_unmap() to use rmap_walk().

(A sketch of the rmap_walk_control callbacks wired up here follows the
diffstat below.)
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	48
1 file changed, 36 insertions(+), 12 deletions(-)
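
For reference, the walk is driven by a small control structure whose callbacks this patch fills in. The sketch below is reconstructed from the assignments visible in the diff (try_to_unmap_one, page_not_mapped, invalid_migration_vma, page_lock_anon_vma_read); the member order and the anon_lock/file_nonlinear prototypes are approximations, and the authoritative definition lives in include/linux/rmap.h.

/*
 * Approximate shape of struct rmap_walk_control as used by this patch;
 * see include/linux/rmap.h for the real definition.
 */
struct rmap_walk_control {
	void *arg;	/* opaque value handed to rmap_one/invalid_vma */
	/* called for each VMA that still maps the page */
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			unsigned long addr, void *arg);
	/* lets the walk stop early, e.g. once the page is unmapped */
	int (*done)(struct page *page);
	/* handler for nonlinear file mappings (prototype approximate) */
	int (*file_nonlinear)(struct page *page,
			      struct address_space *mapping,
			      struct vm_area_struct *vma);
	/* how to take the anon_vma lock for anonymous pages */
	struct anon_vma *(*anon_lock)(struct page *page);
	/* VMAs for which this returns true are skipped */
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
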
@@ -1179,15 +1179,18 @@ out:
 /*
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
+ *
+ * @arg: enum ttu_flags will be passed to this argument
  */
 int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-		     unsigned long address, enum ttu_flags flags)
+		     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte;
 	pte_t pteval;
 	spinlock_t *ptl;
 	int ret = SWAP_AGAIN;
+	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
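
The only interface change in this hunk is that the ttu_flags value now arrives through the generic void *arg and is cast back on entry. The snippet below is a standalone illustration of that round trip, not kernel code: the enum name and TTU_MIGRATION mirror the patch, the values are made up, and the extra cast through unsigned long is only there to keep an ordinary userspace compiler quiet about pointer/integer size differences.

#include <assert.h>

/* illustrative subset of the kernel's ttu_flags; values here are made up */
enum ttu_flags { TTU_UNMAP = 1, TTU_MIGRATION = 2 };

/* callee side: same shape as try_to_unmap_one() after this patch */
static int rmap_one_cb(void *arg)
{
	enum ttu_flags flags = (enum ttu_flags)(unsigned long)arg;

	return (flags & TTU_MIGRATION) ? 1 : 0;
}

int main(void)
{
	enum ttu_flags flags = TTU_MIGRATION;

	/* caller side: flags squeezed into the opaque pointer, as in
	 * try_to_unmap_anon()/try_to_unmap_file() below */
	assert(rmap_one_cb((void *)(unsigned long)flags) == 1);
	return 0;
}
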
@@ -1513,6 +1516,11 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma)
 	return false;
 }
 
+static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
+{
+	return is_vma_temporary_stack(vma);
+}
+
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1558,7 +1566,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			continue;
 
 		address = vma_address(page, vma);
-		ret = try_to_unmap_one(page, vma, address, flags);
+		ret = try_to_unmap_one(page, vma, address, (void *)flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
 	}
@@ -1592,7 +1600,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		ret = try_to_unmap_one(page, vma, address, flags);
+		ret = try_to_unmap_one(page, vma, address, (void *)flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
 	}
@@ -1614,6 +1622,11 @@ out:
 	return ret;
 }
 
+static int page_not_mapped(struct page *page)
+{
+	return !page_mapped(page);
+};
+
 /**
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
@@ -1631,16 +1644,29 @@ out:
 int try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	int ret;
+	struct rmap_walk_control rwc = {
+		.rmap_one = try_to_unmap_one,
+		.arg = (void *)flags,
+		.done = page_not_mapped,
+		.file_nonlinear = try_to_unmap_nonlinear,
+		.anon_lock = page_lock_anon_vma_read,
+	};
 
-	BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
 
-	if (unlikely(PageKsm(page)))
-		ret = try_to_unmap_ksm(page, flags);
-	else if (PageAnon(page))
-		ret = try_to_unmap_anon(page, flags);
-	else
-		ret = try_to_unmap_file(page, flags);
+	/*
+	 * During exec, a temporary VMA is setup and later moved.
+	 * The VMA is moved under the anon_vma lock but not the
+	 * page tables leading to a race where migration cannot
+	 * find the migration ptes. Rather than increasing the
+	 * locking requirements of exec(), migration skips
+	 * temporary VMAs until after exec() completes.
+	 */
+	if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page))
+		rwc.invalid_vma = invalid_migration_vma;
+
+	ret = rmap_walk(page, &rwc);
+
 	if (ret != SWAP_MLOCK && !page_mapped(page))
 		ret = SWAP_SUCCESS;
 	return ret;
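
The if/else ladder removed above does not go away; the same PageKsm()/PageAnon() dispatch now lives inside rmap_walk(), roughly as sketched below. rmap_walk_file() is visible in the final hunk of this diff; the names of the KSM and anon walkers come from the rest of the rmap_walk() infrastructure and should be checked against mm/rmap.c.

/* Approximate shape of rmap_walk() once this series is applied. */
int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rwc);	/* KSM-merged pages */
	else if (PageAnon(page))
		return rmap_walk_anon(page, rwc);	/* anonymous pages */
	else
		return rmap_walk_file(page, rwc);	/* file-backed pages */
}
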
@@ -1683,7 +1709,6 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 	anon_vma_free(anon_vma);
 }
 
-#ifdef CONFIG_MIGRATION
 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 					struct rmap_walk_control *rwc)
 {
@@ -1785,7 +1810,6 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 	else
 		return rmap_walk_file(page, rwc);
 }
-#endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*