about · summary · refs · log · tree · commit · diff · stats
path: root/mm/memory.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 16 ++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a4597614f18d..77d9f840936b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -679,7 +679,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 					      &src_mm->mmlist);
 				spin_unlock(&mmlist_lock);
 			}
-			if (is_write_migration_entry(entry) &&
+			if (likely(!non_swap_entry(entry)))
+				rss[MM_SWAPENTS]++;
+			else if (is_write_migration_entry(entry) &&
 					is_cow_mapping(vm_flags)) {
 				/*
 				 * COW mappings require pages in both parent
@@ -974,9 +976,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			if (pte_file(ptent)) {
 				if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
 					print_bad_pte(vma, addr, ptent, NULL);
-			} else if
-			  (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
-				print_bad_pte(vma, addr, ptent, NULL);
+			} else {
+				swp_entry_t entry = pte_to_swp_entry(ptent);
+
+				if (!non_swap_entry(entry))
+					rss[MM_SWAPENTS]--;
+				if (unlikely(!free_swap_and_cache(entry)))
+					print_bad_pte(vma, addr, ptent, NULL);
+			}
 			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 		} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
@@ -2692,6 +2699,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
+	dec_mm_counter_fast(mm, MM_SWAPENTS);
 	pte = mk_pte(page, vma->vm_page_prot);
 	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);