| author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2016-03-17 17:20:04 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-17 18:09:34 -0400 |
| commit | 2a52bcbcc688eecead2953143f7ef695b8e44575 (patch) | |
| tree | b2e372875693775e97fc4e9373c5a630d2a1dcc0 /mm/rmap.c | |
| parent | b97731992d00f09456726bfc5ab6641c07773038 (diff) | |
rmap: extend try_to_unmap() to be usable by split_huge_page()
Add support for two ttu_flags:
- TTU_SPLIT_HUGE_PMD splits the PMD, if one is present, before trying to
unmap the page;
- TTU_RMAP_LOCKED indicates that the caller already holds the relevant
rmap lock; a usage sketch follows this list.
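For context, a minimal sketch of the kind of caller these flags are aimed at: the freeze step of split_huge_page(), which already holds the anon_vma lock. The function name and the exact flag combination below are illustrative assumptions, not part of this patch.

```c
/*
 * Sketch only: a split_huge_page()-style user of the new flags.
 * The caller already holds the anon_vma lock, so it passes
 * TTU_RMAP_LOCKED, and it needs the PMD mapping split before
 * pte-level unmap, so it passes TTU_SPLIT_HUGE_PMD.
 */
static void freeze_thp_sketch(struct page *head)
{
	enum ttu_flags ttu = TTU_MIGRATION | TTU_IGNORE_MLOCK |
			     TTU_IGNORE_ACCESS | TTU_RMAP_LOCKED |
			     TTU_SPLIT_HUGE_PMD;
	int i;

	/* Unmap each small page of the compound page in turn. */
	for (i = 0; i < HPAGE_PMD_NR; i++)
		if (try_to_unmap(head + i, ttu) != SWAP_SUCCESS)
			break;	/* caller would remap and fail the split */
}
```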
Also, change rwc->done to check !page_mapcount() instead of
!page_mapped(): try_to_unmap() works at the pte level, so we really care
whether this small page is still mapped, not whether the compound page
it belongs to is.
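To make that distinction concrete, here is a hypothetical helper (not in this patch or the kernel tree) spelling out what the new ->done test relies on:

```c
/*
 * Illustrative only: once try_to_unmap_one() has cleared the ptes of
 * one 4k subpage of a THP, that subpage's own mapcount is zero and the
 * rmap walk for it can stop -- even though sibling subpages (or the
 * compound PMD mapping) may still be mapped.
 */
static bool subpage_fully_unmapped(struct page *subpage)
{
	/*
	 * page_mapped(subpage) would test the whole compound page;
	 * page_mapcount(subpage) tests this 4k page alone.
	 */
	return !page_mapcount(subpage);
}
```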
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
| -rw-r--r-- | mm/rmap.c | 24 |

1 file changed, 16 insertions(+), 8 deletions(-)
@@ -1431,6 +1431,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
 		goto out;
 
+	if (flags & TTU_SPLIT_HUGE_PMD)
+		split_huge_pmd_address(vma, address);
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
@@ -1576,10 +1578,10 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
 	return is_vma_temporary_stack(vma);
 }
 
-static int page_not_mapped(struct page *page)
+static int page_mapcount_is_zero(struct page *page)
 {
-	return !page_mapped(page);
-};
+	return !page_mapcount(page);
+}
 
 /**
  * try_to_unmap - try to remove all page table mappings to a page
@@ -1606,12 +1608,10 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
 		.arg = &rp,
-		.done = page_not_mapped,
+		.done = page_mapcount_is_zero,
 		.anon_lock = page_lock_anon_vma_read,
 	};
 
-	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
-
 	/*
 	 * During exec, a temporary VMA is setup and later moved.
 	 * The VMA is moved under the anon_vma lock but not the
@@ -1623,9 +1623,12 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
 		rwc.invalid_vma = invalid_migration_vma;
 
-	ret = rmap_walk(page, &rwc);
+	if (flags & TTU_RMAP_LOCKED)
+		ret = rmap_walk_locked(page, &rwc);
+	else
+		ret = rmap_walk(page, &rwc);
 
-	if (ret != SWAP_MLOCK && !page_mapped(page)) {
+	if (ret != SWAP_MLOCK && !page_mapcount(page)) {
 		ret = SWAP_SUCCESS;
 		if (rp.lazyfreed && !PageDirty(page))
 			ret = SWAP_LZFREE;
@@ -1633,6 +1636,11 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	return ret;
 }
 
+static int page_not_mapped(struct page *page)
+{
+	return !page_mapped(page);
+};
+
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked