author     Hugh Dickins <hugh.dickins@tiscali.co.uk>            2009-12-14 20:59:31 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>       2009-12-15 11:53:20 -0500
commit     e9995ef978a7d5296fe04a9a2c5ca6e66d8bb4e5 (patch)
tree       df4324273856e06b8277b7e4a0fa9289eb8e6385 /mm/ksm.c
parent     407f9c8b0889ced1dbe2f9157e4e60c61329d5c9 (diff)
ksm: rmap_walk to remove_migration_ptes
A side-effect of making ksm pages swappable is that they have to be placed
on the LRUs, which then exposes them to isolate_lru_page() and hence to
page migration.
Add rmap_walk() for remove_migration_ptes() to use: rmap_walk_anon() and
rmap_walk_file() in rmap.c, but rmap_walk_ksm() in ksm.c. Perhaps some
consolidation with existing code is possible, but don't attempt that yet
(try_to_unmap needs to handle nonlinears, but migration pte removal does
not).
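The mm/ksm.c half of that is the rmap_walk_ksm() added in the diff below; the
dispatcher itself lives in mm/rmap.c and is outside this diffstat, but as a
rough sketch of the intended shape (reconstructed from the description above,
not quoted from the patch) it amounts to:

/*
 * Sketch only: mm/rmap.c is not part of the mm/ksm.c-limited diff shown
 * below, so treat the exact signature and the unlikely() hint here as
 * assumptions rather than the patch's literal text.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
                struct vm_area_struct *, unsigned long, void *), void *arg)
{
        VM_BUG_ON(!PageLocked(page));

        if (unlikely(PageKsm(page)))
                return rmap_walk_ksm(page, rmap_one, arg);   /* mm/ksm.c */
        else if (PageAnon(page))
                return rmap_walk_anon(page, rmap_one, arg);  /* mm/rmap.c */
        else
                return rmap_walk_file(page, rmap_one, arg);  /* mm/rmap.c */
}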
rmap_walk() is sadly less general than it appears: rmap_walk_anon(), like
remove_anon_migration_ptes() which it replaces, avoids calling
page_lock_anon_vma(), because that includes a page_mapped() test which
fails when all migration ptes are in place.  That was valid when NUMA page
migration was introduced (holding mmap_sem provided the missing guarantee
that anon_vma's slab had not already been destroyed), but I believe it is
no longer valid in the memory hotremove case added since.
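For reference, the page_mapped() test in question sits near the top of
page_lock_anon_vma(); a sketch of that helper as it stood around 2.6.32
(reconstructed here for illustration, not part of this patch) shows the
failure mode: once every pte has been replaced by a migration entry,
page_mapped() is false and no anon_vma lock is taken.

/*
 * Illustration only, reconstructed from the era's mm/rmap.c: the point
 * is the page_mapped() check, which bails out (returning NULL, lock not
 * taken) once all ptes have become migration entries.
 */
struct anon_vma *page_lock_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long) page->mapping;
        if (!(anon_mapping & PAGE_MAPPING_ANON))
                goto out;
        if (!page_mapped(page))         /* fails with only migration ptes */
                goto out;

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);
        return anon_vma;
out:
        rcu_read_unlock();
        return NULL;
}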
For now do the same as before, and consider the best way to fix that
unlikely race later on. When fixed, we can probably use rmap_walk() on
hwpoisoned ksm pages too: for now, they remain among hwpoison's various
exceptions (its PageKsm test comes before the page is locked, but its
page_lock_anon_vma fails safely if an anon gets upgraded).
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Chris Wright <chrisw@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--   mm/ksm.c   65
1 file changed, 65 insertions, 0 deletions
@@ -1656,6 +1656,71 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_MIGRATION
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+		  struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+	struct stable_node *stable_node;
+	struct hlist_node *hlist;
+	struct rmap_item *rmap_item;
+	int ret = SWAP_AGAIN;
+	int search_new_forks = 0;
+
+	VM_BUG_ON(!PageKsm(page));
+	VM_BUG_ON(!PageLocked(page));
+
+	stable_node = page_stable_node(page);
+	if (!stable_node)
+		return ret;
+again:
+	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+		struct anon_vma *anon_vma = rmap_item->anon_vma;
+		struct vm_area_struct *vma;
+
+		spin_lock(&anon_vma->lock);
+		list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+			if (rmap_item->address < vma->vm_start ||
+			    rmap_item->address >= vma->vm_end)
+				continue;
+			/*
+			 * Initially we examine only the vma which covers this
+			 * rmap_item; but later, if there is still work to do,
+			 * we examine covering vmas in other mms: in case they
+			 * were forked from the original since ksmd passed.
+			 */
+			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+				continue;
+
+			ret = rmap_one(page, vma, rmap_item->address, arg);
+			if (ret != SWAP_AGAIN) {
+				spin_unlock(&anon_vma->lock);
+				goto out;
+			}
+		}
+		spin_unlock(&anon_vma->lock);
+	}
+	if (!search_new_forks++)
+		goto again;
+out:
+	return ret;
+}
+
+void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+	struct stable_node *stable_node;
+
+	VM_BUG_ON(!PageLocked(oldpage));
+	VM_BUG_ON(!PageLocked(newpage));
+	VM_BUG_ON(newpage->mapping != oldpage->mapping);
+
+	stable_node = page_stable_node(newpage);
+	if (stable_node) {
+		VM_BUG_ON(stable_node->page != oldpage);
+		stable_node->page = newpage;
+	}
+}
+#endif /* CONFIG_MIGRATION */
+
 #ifdef CONFIG_SYSFS
 /*
  * This all compiles without CONFIG_SYSFS, but is a waste of space.
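For context, the caller side lives in mm/migrate.c and is outside this
mm/ksm.c-limited diffstat; with the generic walker available, migration's
pte removal is expected to reduce to roughly the following (an illustrative
reconstruction, with remove_migration_pte() as the per-pte callback):

/*
 * Illustrative sketch, not quoted from this diff: the migration code
 * hands its pte-fixing callback to rmap_walk(), which in turn calls
 * rmap_walk_ksm() above for PageKsm pages.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
        rmap_walk(new, remove_migration_pte, old);
}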