aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJoonsoo Kim <iamjoonsoo.kim@lge.com>2014-01-21 18:49:55 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-21 19:19:46 -0500
commit9853a407b97d8d066b5a865173a4859a3e69fd8a (patch)
tree8ba05c4deb18caf24d346d65171be3672c86adf1
parent9f32624be943538983eb0f18b73a9052d1493c80 (diff)
mm/rmap: use rmap_walk() in page_mkclean()
Now we have an infrastructure in rmap_walk() to handle the differences among the variants of rmap traversing functions, so just use it in page_mkclean(). In this patch, I change the following things. 1. remove some variants of rmap traversing functions. cf> page_mkclean_file 2. mechanical change to use rmap_walk() in page_mkclean(). Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Rik van Riel <riel@redhat.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Hillf Danton <dhillf@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/rmap.c51
1 files changed, 26 insertions, 25 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 080413036406..962e2a1e13a0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -812,12 +812,13 @@ int page_referenced(struct page *page,
812} 812}
813 813
814static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, 814static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
815 unsigned long address) 815 unsigned long address, void *arg)
816{ 816{
817 struct mm_struct *mm = vma->vm_mm; 817 struct mm_struct *mm = vma->vm_mm;
818 pte_t *pte; 818 pte_t *pte;
819 spinlock_t *ptl; 819 spinlock_t *ptl;
820 int ret = 0; 820 int ret = 0;
821 int *cleaned = arg;
821 822
822 pte = page_check_address(page, mm, address, &ptl, 1); 823 pte = page_check_address(page, mm, address, &ptl, 1);
823 if (!pte) 824 if (!pte)
@@ -836,44 +837,44 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
836 837
837 pte_unmap_unlock(pte, ptl); 838 pte_unmap_unlock(pte, ptl);
838 839
839 if (ret) 840 if (ret) {
840 mmu_notifier_invalidate_page(mm, address); 841 mmu_notifier_invalidate_page(mm, address);
842 (*cleaned)++;
843 }
841out: 844out:
842 return ret; 845 return SWAP_AGAIN;
843} 846}
844 847
845static int page_mkclean_file(struct address_space *mapping, struct page *page) 848static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
846{ 849{
847 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 850 if (vma->vm_flags & VM_SHARED)
848 struct vm_area_struct *vma; 851 return 0;
849 int ret = 0;
850
851 BUG_ON(PageAnon(page));
852 852
853 mutex_lock(&mapping->i_mmap_mutex); 853 return 1;
854 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
855 if (vma->vm_flags & VM_SHARED) {
856 unsigned long address = vma_address(page, vma);
857 ret += page_mkclean_one(page, vma, address);
858 }
859 }
860 mutex_unlock(&mapping->i_mmap_mutex);
861 return ret;
862} 854}
863 855
864int page_mkclean(struct page *page) 856int page_mkclean(struct page *page)
865{ 857{
866 int ret = 0; 858 int cleaned = 0;
859 struct address_space *mapping;
860 struct rmap_walk_control rwc = {
861 .arg = (void *)&cleaned,
862 .rmap_one = page_mkclean_one,
863 .invalid_vma = invalid_mkclean_vma,
864 };
867 865
868 BUG_ON(!PageLocked(page)); 866 BUG_ON(!PageLocked(page));
869 867
870 if (page_mapped(page)) { 868 if (!page_mapped(page))
871 struct address_space *mapping = page_mapping(page); 869 return 0;
872 if (mapping)
873 ret = page_mkclean_file(mapping, page);
874 }
875 870
876 return ret; 871 mapping = page_mapping(page);
872 if (!mapping)
873 return 0;
874
875 rmap_walk(page, &rwc);
876
877 return cleaned;
877} 878}
878EXPORT_SYMBOL_GPL(page_mkclean); 879EXPORT_SYMBOL_GPL(page_mkclean);
879 880