 include/linux/rmap.h |  2 +-
 mm/filemap_xip.c     |  2 +-
 mm/fremap.c          |  2 +-
 mm/memory.c          |  4 ++--
 mm/rmap.c            |  8 +++-----
 5 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 3593b18a07dd..b35bc0e19cd9 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -69,7 +69,7 @@ void __anon_vma_link(struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
-void page_remove_rmap(struct page *, struct vm_area_struct *);
+void page_remove_rmap(struct page *);
 
 #ifdef CONFIG_DEBUG_VM
 void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index b5167dfb2f2d..0c04615651b7 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -193,7 +193,7 @@ retry:
 			/* Nuke the page table entry. */
 			flush_cache_page(vma, address, pte_pfn(*pte));
 			pteval = ptep_clear_flush_notify(vma, address, pte);
-			page_remove_rmap(page, vma);
+			page_remove_rmap(page);
 			dec_mm_counter(mm, file_rss);
 			BUG_ON(pte_dirty(pteval));
 			pte_unmap_unlock(pte, ptl);
diff --git a/mm/fremap.c b/mm/fremap.c
index 7d12ca70ef7b..62d5bbda921a 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -37,7 +37,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (page) {
 			if (pte_dirty(pte))
 				set_page_dirty(page);
-			page_remove_rmap(page, vma);
+			page_remove_rmap(page);
 			page_cache_release(page);
 			update_hiwater_rss(mm);
 			dec_mm_counter(mm, file_rss);
diff --git a/mm/memory.c b/mm/memory.c
index b273cc12b15d..0f9abbaf618c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -798,7 +798,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 					mark_page_accessed(page);
 				file_rss--;
 			}
-			page_remove_rmap(page, vma);
+			page_remove_rmap(page);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
 			tlb_remove_page(tlb, page);
@@ -2023,7 +2023,7 @@ gotten:
 			 * mapcount is visible. So transitively, TLBs to
 			 * old page will be flushed before it can be reused.
 			 */
-			page_remove_rmap(old_page, vma);
+			page_remove_rmap(old_page);
 		}
 
 		/* Free the old page.. */
diff --git a/mm/rmap.c b/mm/rmap.c
index 32098255082e..ac4af8cffbf9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -707,7 +707,6 @@ void page_add_file_rmap(struct page *page)
  */
 void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
-	BUG_ON(page_mapcount(page) == 0);
 	if (PageAnon(page))
 		__page_check_anon_rmap(page, vma, address);
 	atomic_inc(&page->_mapcount);
@@ -717,11 +716,10 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
- * @vma: the vm area in which the mapping is removed
  *
  * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
+void page_remove_rmap(struct page *page)
 {
 	if (atomic_add_negative(-1, &page->_mapcount)) {
 		/*
@@ -837,7 +835,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		dec_mm_counter(mm, file_rss);
 
 
-	page_remove_rmap(page, vma);
+	page_remove_rmap(page);
 	page_cache_release(page);
 
 out_unmap:
@@ -952,7 +950,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		if (pte_dirty(pteval))
 			set_page_dirty(page);
 
-		page_remove_rmap(page, vma);
+		page_remove_rmap(page);
 		page_cache_release(page);
 		dec_mm_counter(mm, file_rss);
 		(*mapcount)--;