author    | Joonsoo Kim <iamjoonsoo.kim@lge.com>           | 2014-01-21 18:49:49 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-21 19:19:45 -0500
commit    | 0dd1c7bbce8d1d142bb25aefaa50262dfd77cb78 (patch)
tree      | d45df52098c0a8364a3e614096bb3ee184aeb068 /mm/rmap.c
parent    | 051ac83adf69eea4f57a97356e4282e395a5fa6d (diff)
mm/rmap: extend rmap_walk_xxx() to cope with different cases
There is a lot of common code in the rmap traversal functions, but also a
few caller-specific differences. By assigning the proper function pointers
in each rmap_walk_control, we can handle these differences correctly.

The differences we need to handle are:

1. the lock function used in the anon mapping case
2. nonlinear handling in the file mapping case
3. precheck conditions:
   checking memcg in page_referenced(),
   checking VM_SHARED in page_mkclean(),
   checking temporary vmas in try_to_unmap()
4. exit condition:
   checking page_mapped() in try_to_unmap()

So, in this patch, I introduce 4 function pointers to handle the above
differences.
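
For reference, here is a sketch of what the extended rmap_walk_control could
look like. The hook signatures are inferred from how the walkers in the diff
below invoke them; the exact declaration in include/linux/rmap.h may differ,
so treat this as an illustration rather than the patch itself.

/*
 * Sketch only: signatures inferred from the calls rwc->anon_lock(page),
 * rwc->invalid_vma(vma, rwc->arg), rwc->done(page) and
 * rwc->file_nonlinear(page, mapping, vma) in the walkers below.
 */
struct rmap_walk_control {
	void *arg;
	/* called for each candidate vma; SWAP_AGAIN means keep walking */
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			unsigned long addr, void *arg);
	/* exit condition: stop the walk when this returns true */
	int (*done)(struct page *page);
	/* nonlinear handling for the file mapping case */
	int (*file_nonlinear)(struct page *page, struct address_space *mapping,
			      struct vm_area_struct *vma);
	/* lock function for the anon mapping case */
	struct anon_vma *(*anon_lock)(struct page *page);
	/* precheck: skip a vma when this returns true */
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};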
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 37
1 file changed, 29 insertions(+), 8 deletions(-)
@@ -1684,10 +1684,14 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_MIGRATION
-static struct anon_vma *rmap_walk_anon_lock(struct page *page)
+static struct anon_vma *rmap_walk_anon_lock(struct page *page,
+					struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
 
+	if (rwc->anon_lock)
+		return rwc->anon_lock(page);
+
 	/*
 	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
 	 * because that depends on page_mapped(); but not all its usages
@@ -1713,16 +1717,22 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
-	anon_vma = rmap_walk_anon_lock(page);
+	anon_vma = rmap_walk_anon_lock(page, rwc);
 	if (!anon_vma)
 		return ret;
 
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
+
+		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+			continue;
+
 		ret = rwc->rmap_one(page, vma, address, rwc->arg);
 		if (ret != SWAP_AGAIN)
 			break;
+		if (rwc->done && rwc->done(page))
+			break;
 	}
 	anon_vma_unlock_read(anon_vma);
 	return ret;
@@ -1740,15 +1750,26 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
+
+		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+			continue;
+
 		ret = rwc->rmap_one(page, vma, address, rwc->arg);
 		if (ret != SWAP_AGAIN)
-			break;
+			goto done;
+		if (rwc->done && rwc->done(page))
+			goto done;
 	}
-	/*
-	 * No nonlinear handling: being always shared, nonlinear vmas
-	 * never contain migration ptes. Decide what to do about this
-	 * limitation to linear when we need rmap_walk() on nonlinear.
-	 */
+
+	if (!rwc->file_nonlinear)
+		goto done;
+
+	if (list_empty(&mapping->i_mmap_nonlinear))
+		goto done;
+
+	ret = rwc->file_nonlinear(page, mapping, vma);
+
+done:
 	mutex_unlock(&mapping->i_mmap_mutex);
 	return ret;
 }
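
To illustrate how a caller would plug into these hooks, here is a hypothetical
walk set up loosely along the lines of the try_to_unmap() case described in the
changelog. The helper names (example_unmap_one, page_not_mapped_done,
skip_invalid_vma, example_walk) are made up for this sketch and are not part of
the patch; rmap_walk() itself takes the rmap_walk_control argument as of the
parent commit.

/* Hypothetical illustration only; names below are not from this patch. */
static int example_unmap_one(struct page *page, struct vm_area_struct *vma,
			     unsigned long addr, void *arg)
{
	/* per-vma work goes here; SWAP_AGAIN tells the walker to continue */
	return SWAP_AGAIN;
}

static int page_not_mapped_done(struct page *page)
{
	/* exit condition: stop walking once nothing maps the page anymore */
	return !page_mapped(page);
}

static bool skip_invalid_vma(struct vm_area_struct *vma, void *arg)
{
	/* precheck: return true to make the walker skip this vma */
	return false;
}

static void example_walk(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = example_unmap_one,
		.arg = NULL,
		.done = page_not_mapped_done,
		.invalid_vma = skip_invalid_vma,
	};

	rmap_walk(page, &rwc);
}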