about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c    16
-rw-r--r--  mm/rmap.c       1
-rw-r--r--  mm/swapfile.c   1
3 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a4597614f18d..77d9f840936b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -679,7 +679,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 						 &src_mm->mmlist);
 				spin_unlock(&mmlist_lock);
 			}
-			if (is_write_migration_entry(entry) &&
+			if (likely(!non_swap_entry(entry)))
+				rss[MM_SWAPENTS]++;
+			else if (is_write_migration_entry(entry) &&
 					is_cow_mapping(vm_flags)) {
 				/*
 				 * COW mappings require pages in both parent
@@ -974,9 +976,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		if (pte_file(ptent)) {
 			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
 				print_bad_pte(vma, addr, ptent, NULL);
-		} else if
-		  (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
-			print_bad_pte(vma, addr, ptent, NULL);
+		} else {
+			swp_entry_t entry = pte_to_swp_entry(ptent);
+
+			if (!non_swap_entry(entry))
+				rss[MM_SWAPENTS]--;
+			if (unlikely(!free_swap_and_cache(entry)))
+				print_bad_pte(vma, addr, ptent, NULL);
+		}
 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
@@ -2692,6 +2699,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
+	dec_mm_counter_fast(mm, MM_SWAPENTS);
 	pte = mk_pte(page, vma->vm_page_prot);
 	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
diff --git a/mm/rmap.c b/mm/rmap.c
index 73d0472884c2..5cb47111f79e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -840,6 +840,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			spin_unlock(&mmlist_lock);
 		}
 		dec_mm_counter(mm, MM_ANONPAGES);
+		inc_mm_counter(mm, MM_SWAPENTS);
 	} else if (PAGE_MIGRATION) {
 		/*
 		 * Store the pfn of the page in a special migration
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 893984946a2c..187a21f8b7bd 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -840,6 +840,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		goto out;
 	}
 
+	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,