Diffstat (limited to 'mm/memory.c')
 -rw-r--r--  mm/memory.c  |  56
 1 file changed, 34 insertions(+), 22 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 72fb5f39bccc..c57678478801 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -121,6 +121,7 @@ static int __init init_zero_pfn(void)
 }
 core_initcall(init_zero_pfn);
 
+
 /*
  * If a p?d_bad entry is found while walking page tables, report
  * the error, before resetting entry to p?d_none.  Usually (but
@@ -376,12 +377,18 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 	return 0;
 }
 
-static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
+static inline void init_rss_vec(int *rss)
 {
-	if (file_rss)
-		add_mm_counter(mm, file_rss, file_rss);
-	if (anon_rss)
-		add_mm_counter(mm, anon_rss, anon_rss);
+	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
+}
+
+static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
+{
+	int i;
+
+	for (i = 0; i < NR_MM_COUNTERS; i++)
+		if (rss[i])
+			add_mm_counter(mm, i, rss[i]);
 }
 
 /*
@@ -632,7 +639,10 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (page) {
 		get_page(page);
 		page_dup_rmap(page);
-		rss[PageAnon(page)]++;
+		if (PageAnon(page))
+			rss[MM_ANONPAGES]++;
+		else
+			rss[MM_FILEPAGES]++;
 	}
 
 out_set_pte:
@@ -648,11 +658,12 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pte_t *src_pte, *dst_pte;
 	spinlock_t *src_ptl, *dst_ptl;
 	int progress = 0;
-	int rss[2];
+	int rss[NR_MM_COUNTERS];
 	swp_entry_t entry = (swp_entry_t){0};
 
 again:
-	rss[1] = rss[0] = 0;
+	init_rss_vec(rss);
+
 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
 	if (!dst_pte)
 		return -ENOMEM;
@@ -688,7 +699,7 @@ again:
 	arch_leave_lazy_mmu_mode();
 	spin_unlock(src_ptl);
 	pte_unmap_nested(orig_src_pte);
-	add_mm_rss(dst_mm, rss[0], rss[1]);
+	add_mm_rss_vec(dst_mm, rss);
 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
 	cond_resched();
 
@@ -816,8 +827,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	struct mm_struct *mm = tlb->mm;
 	pte_t *pte;
 	spinlock_t *ptl;
-	int file_rss = 0;
-	int anon_rss = 0;
+	int rss[NR_MM_COUNTERS];
+
+	init_rss_vec(rss);
 
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	arch_enter_lazy_mmu_mode();
@@ -863,14 +875,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				set_pte_at(mm, addr, pte,
 					   pgoff_to_pte(page->index));
 			if (PageAnon(page))
-				anon_rss--;
+				rss[MM_ANONPAGES]--;
 			else {
 				if (pte_dirty(ptent))
 					set_page_dirty(page);
 				if (pte_young(ptent) &&
 				    likely(!VM_SequentialReadHint(vma)))
 					mark_page_accessed(page);
-				file_rss--;
+				rss[MM_FILEPAGES]--;
 			}
 			page_remove_rmap(page);
 			if (unlikely(page_mapcount(page) < 0))
@@ -893,7 +905,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
-	add_mm_rss(mm, file_rss, anon_rss);
+	add_mm_rss_vec(mm, rss);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
 
@@ -1527,7 +1539,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 
 	/* Ok, finally just insert the thing.. */
 	get_page(page);
-	inc_mm_counter(mm, file_rss);
+	inc_mm_counter(mm, MM_FILEPAGES);
 	page_add_file_rmap(page);
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
@@ -2163,11 +2175,11 @@ gotten:
 	if (likely(pte_same(*page_table, orig_pte))) {
 		if (old_page) {
 			if (!PageAnon(old_page)) {
-				dec_mm_counter(mm, file_rss);
-				inc_mm_counter(mm, anon_rss);
+				dec_mm_counter(mm, MM_FILEPAGES);
+				inc_mm_counter(mm, MM_ANONPAGES);
 			}
 		} else
-			inc_mm_counter(mm, anon_rss);
+			inc_mm_counter(mm, MM_ANONPAGES);
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -2604,7 +2616,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * discarded at swap_free().
 	 */
 
-	inc_mm_counter(mm, anon_rss);
+	inc_mm_counter(mm, MM_ANONPAGES);
 	pte = mk_pte(page, vma->vm_page_prot);
 	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
@@ -2688,7 +2700,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte_none(*page_table))
 		goto release;
 
-	inc_mm_counter(mm, anon_rss);
+	inc_mm_counter(mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, address);
 setpte:
 	set_pte_at(mm, address, page_table, entry);
@@ -2842,10 +2854,10 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (flags & FAULT_FLAG_WRITE)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 	if (anon) {
-		inc_mm_counter(mm, anon_rss);
+		inc_mm_counter(mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address);
 	} else {
-		inc_mm_counter(mm, file_rss);
+		inc_mm_counter(mm, MM_FILEPAGES);
 		page_add_file_rmap(page);
 		if (flags & FAULT_FLAG_WRITE) {
 			dirty_page = page;
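
Taken together, the hunks above replace the two scalar counters (file_rss/anon_rss) with a per-call vector rss[NR_MM_COUNTERS] indexed by MM_FILEPAGES and MM_ANONPAGES: a caller zeroes the vector with init_rss_vec(), accumulates deltas while walking PTEs, and folds the result into the mm once with add_mm_rss_vec(). The sketch below is a minimal, self-contained userspace rendering of that pattern, not kernel code: only the enum indices and the two helpers mirror the diff, while struct mm, add_mm_counter() and the demo walk in main() are simplified stand-ins assumed for illustration.

/*
 * Minimal userspace sketch of the counter-vector pattern this diff
 * introduces.  init_rss_vec(), add_mm_rss_vec() and the
 * MM_FILEPAGES/MM_ANONPAGES indices follow the patch; struct mm,
 * add_mm_counter() and main() are simplified stand-ins.
 */
#include <stdio.h>
#include <string.h>

enum {
	MM_FILEPAGES,
	MM_ANONPAGES,
	NR_MM_COUNTERS
};

struct mm {
	long counters[NR_MM_COUNTERS];	/* stand-in for the mm's RSS counters */
};

static void add_mm_counter(struct mm *mm, int member, int value)
{
	mm->counters[member] += value;
}

/* Zero a local per-call counter vector. */
static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

/* Fold the local vector into the mm once, skipping untouched counters. */
static inline void add_mm_rss_vec(struct mm *mm, int *rss)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

int main(void)
{
	struct mm mm = { { 0 } };
	int rss[NR_MM_COUNTERS];
	int i;

	init_rss_vec(rss);

	/* Stand-in for a PTE walk: classify each "page" and batch locally. */
	for (i = 0; i < 8; i++) {
		int page_is_anon = (i % 2);	/* pretend PageAnon() result */

		if (page_is_anon)
			rss[MM_ANONPAGES]++;
		else
			rss[MM_FILEPAGES]++;
	}

	/* One flush per walk instead of one counter update per page. */
	add_mm_rss_vec(&mm, rss);

	printf("file pages: %ld, anon pages: %ld\n",
	       mm.counters[MM_FILEPAGES], mm.counters[MM_ANONPAGES]);
	return 0;
}

The point of batching this way is that the shared mm counters are touched at most NR_MM_COUNTERS times per PTE-range walk instead of once per page, and a new counter type only requires a new enum index rather than another scalar threaded through every caller.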