author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>  2010-03-05 16:41:39 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>      2010-03-06 14:26:23 -0500
commit     d559db086ff5be9bcc259e5aa50bf3d881eaf1d1 (patch)
tree       aa968c8a4093234e4623a34c0415bf9d8683671c
parent     19b629f581320999ddb9f6597051b79cdb53459c (diff)
mm: clean up mm_counter
Presently, the per-mm statistics counters are defined by macros in sched.h.

This patch modifies them to be
  - defined in mm.h as inline functions
  - backed by an array instead of macro-based name generation.

This is intended to reduce the size of a future patch that changes the
implementation of the per-mm counters.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
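---
For illustration only, not part of the patch: a minimal sketch of how a call
site changes. "mm" here stands for any struct mm_struct pointer; the
MM_ANONPAGES index and the rss_stat field are the ones this patch introduces.

        /* Before: the counter name is pasted into a struct field name. */
        inc_mm_counter(mm, anon_rss);      /* operates on mm->_anon_rss */

        /*
         * After: the counter name is an array index, so all counters can
         * also be walked in a loop (see add_mm_rss_vec() in mm/memory.c).
         */
        inc_mm_counter(mm, MM_ANONPAGES);  /* operates on mm->rss_stat.count[MM_ANONPAGES] */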
 fs/proc/task_mmu.c       |   4 +-
 include/linux/mm.h       | 104 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/mm_types.h |  33 +++++++++-----
 include/linux/sched.h    |  54 ----------------------
 kernel/fork.c            |   3 +-
 kernel/tsacct.c          |   1 +
 mm/filemap_xip.c         |   2 +-
 mm/fremap.c              |   2 +-
 mm/memory.c              |  56 ++++++++++++++---------
 mm/oom_kill.c            |   4 +-
 mm/rmap.c                |  10 ++---
 mm/swapfile.c            |   2 +-
 12 files changed, 174 insertions(+), 101 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f277c4a111cb..375581276011 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -65,11 +65,11 @@ unsigned long task_vsize(struct mm_struct *mm)
 int task_statm(struct mm_struct *mm, int *shared, int *text,
                int *data, int *resident)
 {
-        *shared = get_mm_counter(mm, file_rss);
+        *shared = get_mm_counter(mm, MM_FILEPAGES);
         *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                 >> PAGE_SHIFT;
         *data = mm->total_vm - mm->shared_vm;
-        *resident = *shared + get_mm_counter(mm, anon_rss);
+        *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
         return mm->total_vm;
 }
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 90957f14195c..2124cdb2d1d0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -870,6 +870,110 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
                           struct page **pages);
+/*
+ * per-process(per-mm_struct) statistics.
+ */
+#if USE_SPLIT_PTLOCKS
+/*
+ * The mm counters are not protected by its page_table_lock,
+ * so must be incremented atomically.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+        atomic_long_set(&mm->rss_stat.count[member], value);
+}
+
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+        return (unsigned long)atomic_long_read(&mm->rss_stat.count[member]);
+}
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+        atomic_long_add(value, &mm->rss_stat.count[member]);
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+        atomic_long_inc(&mm->rss_stat.count[member]);
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+        atomic_long_dec(&mm->rss_stat.count[member]);
+}
+
+#else /* !USE_SPLIT_PTLOCKS */
+/*
+ * The mm counters are protected by its page_table_lock,
+ * so can be incremented directly.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+        mm->rss_stat.count[member] = value;
+}
+
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+        return mm->rss_stat.count[member];
+}
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+        mm->rss_stat.count[member] += value;
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+        mm->rss_stat.count[member]++;
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+        mm->rss_stat.count[member]--;
+}
+
+#endif /* !USE_SPLIT_PTLOCKS */
+
+static inline unsigned long get_mm_rss(struct mm_struct *mm)
+{
+        return get_mm_counter(mm, MM_FILEPAGES) +
+                get_mm_counter(mm, MM_ANONPAGES);
+}
+
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+        return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+        return max(mm->hiwater_vm, mm->total_vm);
+}
+
+static inline void update_hiwater_rss(struct mm_struct *mm)
+{
+        unsigned long _rss = get_mm_rss(mm);
+
+        if ((mm)->hiwater_rss < _rss)
+                (mm)->hiwater_rss = _rss;
+}
+
+static inline void update_hiwater_vm(struct mm_struct *mm)
+{
+        if (mm->hiwater_vm < mm->total_vm)
+                mm->hiwater_vm = mm->total_vm;
+}
+
+static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
+                                         struct mm_struct *mm)
+{
+        unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+        if (*maxrss < hiwater_rss)
+                *maxrss = hiwater_rss;
+}
+
 
 /*
  * A callback you can register to apply pressure to ageable caches.
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 36f96271306c..e1ca64be6678 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -24,12 +24,6 @@ struct address_space;
 
 #define USE_SPLIT_PTLOCKS        (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 
-#if USE_SPLIT_PTLOCKS
-typedef atomic_long_t mm_counter_t;
-#else /* !USE_SPLIT_PTLOCKS */
-typedef unsigned long mm_counter_t;
-#endif /* !USE_SPLIT_PTLOCKS */
-
 /*
  * Each physical page in the system has a struct page associated with
  * it to keep track of whatever it is we are using the page for at the
@@ -201,6 +195,22 @@ struct core_state {
         struct completion startup;
 };
 
+enum {
+        MM_FILEPAGES,
+        MM_ANONPAGES,
+        NR_MM_COUNTERS
+};
+
+#if USE_SPLIT_PTLOCKS
+struct mm_rss_stat {
+        atomic_long_t count[NR_MM_COUNTERS];
+};
+#else /* !USE_SPLIT_PTLOCKS */
+struct mm_rss_stat {
+        unsigned long count[NR_MM_COUNTERS];
+};
+#endif /* !USE_SPLIT_PTLOCKS */
+
 struct mm_struct {
         struct vm_area_struct * mmap;        /* list of VMAs */
         struct rb_root mm_rb;
@@ -227,11 +237,6 @@ struct mm_struct {
                                          * by mmlist_lock
                                          */
 
-        /* Special counters, in some configurations protected by the
-         * page_table_lock, in other configurations by being atomic.
-         */
-        mm_counter_t _file_rss;
-        mm_counter_t _anon_rss;
 
         unsigned long hiwater_rss;        /* High-watermark of RSS usage */
         unsigned long hiwater_vm;         /* High-water virtual memory usage */
@@ -244,6 +249,12 @@ struct mm_struct {
 
         unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
+        /*
+         * Special counters, in some configurations protected by the
+         * page_table_lock, in other configurations by being atomic.
+         */
+        struct mm_rss_stat rss_stat;
+
         struct linux_binfmt *binfmt;
 
         cpumask_t cpu_vm_mask;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4b1753f7e48e..cbeafa49a53b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -396,60 +396,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 #endif
 
-#if USE_SPLIT_PTLOCKS
-/*
- * The mm counters are not protected by its page_table_lock,
- * so must be incremented atomically.
- */
-#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
-
-#else /* !USE_SPLIT_PTLOCKS */
-/*
- * The mm counters are protected by its page_table_lock,
- * so can be incremented directly.
- */
-#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
-#define get_mm_counter(mm, member) ((mm)->_##member)
-#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
-#define inc_mm_counter(mm, member) (mm)->_##member++
-#define dec_mm_counter(mm, member) (mm)->_##member--
-
-#endif /* !USE_SPLIT_PTLOCKS */
-
-#define get_mm_rss(mm)                                        \
-        (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
-#define update_hiwater_rss(mm)        do {                \
-        unsigned long _rss = get_mm_rss(mm);                \
-        if ((mm)->hiwater_rss < _rss)                        \
-                (mm)->hiwater_rss = _rss;                \
-} while (0)
-#define update_hiwater_vm(mm)        do {                \
-        if ((mm)->hiwater_vm < (mm)->total_vm)                \
-                (mm)->hiwater_vm = (mm)->total_vm;        \
-} while (0)
-
-static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
-{
-        return max(mm->hiwater_rss, get_mm_rss(mm));
-}
-
-static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
-                                         struct mm_struct *mm)
-{
-        unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
-
-        if (*maxrss < hiwater_rss)
-                *maxrss = hiwater_rss;
-}
-
-static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
-{
-        return max(mm->hiwater_vm, mm->total_vm);
-}
 
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
diff --git a/kernel/fork.c b/kernel/fork.c
index 17bbf093356d..7616bcf107b9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -455,8 +455,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
                 (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
         mm->core_state = NULL;
         mm->nr_ptes = 0;
-        set_mm_counter(mm, file_rss, 0);
-        set_mm_counter(mm, anon_rss, 0);
+        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
         spin_lock_init(&mm->page_table_lock);
         mm->free_area_cache = TASK_UNMAPPED_BASE;
         mm->cached_hole_size = ~0UL;
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 00d59d048edf..0a67e041edf8 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -21,6 +21,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/acct.h>
 #include <linux/jiffies.h>
+#include <linux/mm.h>
 
 /*
  * fill in basic accounting fields
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 1888b2d71bb8..78b94f0b6d5d 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -194,7 +194,7 @@ retry:
                 flush_cache_page(vma, address, pte_pfn(*pte));
                 pteval = ptep_clear_flush_notify(vma, address, pte);
                 page_remove_rmap(page);
-                dec_mm_counter(mm, file_rss);
+                dec_mm_counter(mm, MM_FILEPAGES);
                 BUG_ON(pte_dirty(pteval));
                 pte_unmap_unlock(pte, ptl);
                 page_cache_release(page);
diff --git a/mm/fremap.c b/mm/fremap.c
index b6ec85abbb39..46f5dacf90a2 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -40,7 +40,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                         page_remove_rmap(page);
                         page_cache_release(page);
                         update_hiwater_rss(mm);
-                        dec_mm_counter(mm, file_rss);
+                        dec_mm_counter(mm, MM_FILEPAGES);
                 }
         } else {
                 if (!pte_file(pte))
diff --git a/mm/memory.c b/mm/memory.c
index 72fb5f39bccc..c57678478801 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -121,6 +121,7 @@ static int __init init_zero_pfn(void)
 }
 core_initcall(init_zero_pfn);
 
+
 /*
  * If a p?d_bad entry is found while walking page tables, report
  * the error, before resetting entry to p?d_none. Usually (but
@@ -376,12 +377,18 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
         return 0;
 }
 
-static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
+static inline void init_rss_vec(int *rss)
 {
-        if (file_rss)
-                add_mm_counter(mm, file_rss, file_rss);
-        if (anon_rss)
-                add_mm_counter(mm, anon_rss, anon_rss);
+        memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
+}
+
+static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
+{
+        int i;
+
+        for (i = 0; i < NR_MM_COUNTERS; i++)
+                if (rss[i])
+                        add_mm_counter(mm, i, rss[i]);
 }
 
 /*
@@ -632,7 +639,10 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         if (page) {
                 get_page(page);
                 page_dup_rmap(page);
-                rss[PageAnon(page)]++;
+                if (PageAnon(page))
+                        rss[MM_ANONPAGES]++;
+                else
+                        rss[MM_FILEPAGES]++;
         }
 
 out_set_pte:
@@ -648,11 +658,12 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         pte_t *src_pte, *dst_pte;
         spinlock_t *src_ptl, *dst_ptl;
         int progress = 0;
-        int rss[2];
+        int rss[NR_MM_COUNTERS];
         swp_entry_t entry = (swp_entry_t){0};
 
 again:
-        rss[1] = rss[0] = 0;
+        init_rss_vec(rss);
+
         dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
         if (!dst_pte)
                 return -ENOMEM;
@@ -688,7 +699,7 @@ again:
         arch_leave_lazy_mmu_mode();
         spin_unlock(src_ptl);
         pte_unmap_nested(orig_src_pte);
-        add_mm_rss(dst_mm, rss[0], rss[1]);
+        add_mm_rss_vec(dst_mm, rss);
         pte_unmap_unlock(orig_dst_pte, dst_ptl);
         cond_resched();
 
@@ -816,8 +827,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
         struct mm_struct *mm = tlb->mm;
         pte_t *pte;
         spinlock_t *ptl;
-        int file_rss = 0;
-        int anon_rss = 0;
+        int rss[NR_MM_COUNTERS];
+
+        init_rss_vec(rss);
 
         pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
         arch_enter_lazy_mmu_mode();
@@ -863,14 +875,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                 set_pte_at(mm, addr, pte,
                                            pgoff_to_pte(page->index));
                         if (PageAnon(page))
-                                anon_rss--;
+                                rss[MM_ANONPAGES]--;
                         else {
                                 if (pte_dirty(ptent))
                                         set_page_dirty(page);
                                 if (pte_young(ptent) &&
                                     likely(!VM_SequentialReadHint(vma)))
                                         mark_page_accessed(page);
-                                file_rss--;
+                                rss[MM_FILEPAGES]--;
                         }
                         page_remove_rmap(page);
                         if (unlikely(page_mapcount(page) < 0))
@@ -893,7 +905,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
         } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
-        add_mm_rss(mm, file_rss, anon_rss);
+        add_mm_rss_vec(mm, rss);
         arch_leave_lazy_mmu_mode();
         pte_unmap_unlock(pte - 1, ptl);
 
@@ -1527,7 +1539,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 
         /* Ok, finally just insert the thing.. */
         get_page(page);
-        inc_mm_counter(mm, file_rss);
+        inc_mm_counter(mm, MM_FILEPAGES);
         page_add_file_rmap(page);
         set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
@@ -2163,11 +2175,11 @@ gotten:
         if (likely(pte_same(*page_table, orig_pte))) {
                 if (old_page) {
                         if (!PageAnon(old_page)) {
-                                dec_mm_counter(mm, file_rss);
-                                inc_mm_counter(mm, anon_rss);
+                                dec_mm_counter(mm, MM_FILEPAGES);
+                                inc_mm_counter(mm, MM_ANONPAGES);
                         }
                 } else
-                        inc_mm_counter(mm, anon_rss);
+                        inc_mm_counter(mm, MM_ANONPAGES);
                 flush_cache_page(vma, address, pte_pfn(orig_pte));
                 entry = mk_pte(new_page, vma->vm_page_prot);
                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -2604,7 +2616,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
          * discarded at swap_free().
          */
 
-        inc_mm_counter(mm, anon_rss);
+        inc_mm_counter(mm, MM_ANONPAGES);
         pte = mk_pte(page, vma->vm_page_prot);
         if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
@@ -2688,7 +2700,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
         if (!pte_none(*page_table))
                 goto release;
 
-        inc_mm_counter(mm, anon_rss);
+        inc_mm_counter(mm, MM_ANONPAGES);
         page_add_new_anon_rmap(page, vma, address);
 setpte:
         set_pte_at(mm, address, page_table, entry);
@@ -2842,10 +2854,10 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         if (flags & FAULT_FLAG_WRITE)
                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
         if (anon) {
-                inc_mm_counter(mm, anon_rss);
+                inc_mm_counter(mm, MM_ANONPAGES);
                 page_add_new_anon_rmap(page, vma, address);
         } else {
-                inc_mm_counter(mm, file_rss);
+                inc_mm_counter(mm, MM_FILEPAGES);
                 page_add_file_rmap(page);
                 if (flags & FAULT_FLAG_WRITE) {
                         dirty_page = page;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 237050478f28..35755a4156d6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -401,8 +401,8 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
401 "vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n", 401 "vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
402 task_pid_nr(p), p->comm, 402 task_pid_nr(p), p->comm,
403 K(p->mm->total_vm), 403 K(p->mm->total_vm),
404 K(get_mm_counter(p->mm, anon_rss)), 404 K(get_mm_counter(p->mm, MM_ANONPAGES)),
405 K(get_mm_counter(p->mm, file_rss))); 405 K(get_mm_counter(p->mm, MM_FILEPAGES)));
406 task_unlock(p); 406 task_unlock(p);
407 407
408 /* 408 /*
diff --git a/mm/rmap.c b/mm/rmap.c
index 278cd277bdec..73d0472884c2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -815,9 +815,9 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
         if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
                 if (PageAnon(page))
-                        dec_mm_counter(mm, anon_rss);
+                        dec_mm_counter(mm, MM_ANONPAGES);
                 else
-                        dec_mm_counter(mm, file_rss);
+                        dec_mm_counter(mm, MM_FILEPAGES);
                 set_pte_at(mm, address, pte,
                            swp_entry_to_pte(make_hwpoison_entry(page)));
         } else if (PageAnon(page)) {
@@ -839,7 +839,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         list_add(&mm->mmlist, &init_mm.mmlist);
                         spin_unlock(&mmlist_lock);
                 }
-                dec_mm_counter(mm, anon_rss);
+                dec_mm_counter(mm, MM_ANONPAGES);
         } else if (PAGE_MIGRATION) {
                 /*
                  * Store the pfn of the page in a special migration
@@ -857,7 +857,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                 entry = make_migration_entry(page, pte_write(pteval));
                 set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
         } else
-                dec_mm_counter(mm, file_rss);
+                dec_mm_counter(mm, MM_FILEPAGES);
 
         page_remove_rmap(page);
         page_cache_release(page);
@@ -996,7 +996,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 
                 page_remove_rmap(page);
                 page_cache_release(page);
-                dec_mm_counter(mm, file_rss);
+                dec_mm_counter(mm, MM_FILEPAGES);
                 (*mapcount)--;
         }
         pte_unmap_unlock(pte - 1, ptl);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6c0585b16418..893984946a2c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -840,7 +840,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                 goto out;
         }
 
-        inc_mm_counter(vma->vm_mm, anon_rss);
+        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
         get_page(page);
         set_pte_at(vma->vm_mm, addr, pte,
                    pte_mkold(mk_pte(page, vma->vm_page_prot)));