author     Jerome Marchand <jmarchan@redhat.com>          2016-01-14 18:19:26 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org> 2016-01-14 19:00:49 -0500
commit     eca56ff906bdd0239485e8b47154a6e73dd9a2f3
tree       2eca71e20820247d4e81243a920db3a138f558db   /mm/memory.c
parent     48131e03ca4ed71d73fbe55c311a258c6fa2a090
mm, shmem: add internal shmem resident memory accounting
Currently, looking at /proc/<pid>/status or statm, there is no way to
distinguish shmem pages from pages mapped to a regular file (shmem pages
are mapped to /dev/zero), even though their implications for actual memory
use are quite different. The internal accounting currently counts shmem
pages together with regular file pages.

As preparation for extending the userspace interfaces, this patch adds an
MM_SHMEMPAGES counter to mm_rss_stat to account for shmem pages separately
from MM_FILEPAGES. The next patch will expose it to userspace. This patch
does not change the exported values yet: MM_SHMEMPAGES is added to
MM_FILEPAGES at the places where MM_FILEPAGES was used before. The only
user-visible change after this patch is the OOM killer message, which now
reports "shmem-rss" separately from "file-rss".

[vbabka@suse.cz: forward-porting, tweak changelog]
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
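One of the places where the counters are summed back together is the RSS
total reported to userspace. That helper lives in include/linux/mm.h, outside
the mm/memory.c diff shown here, so the following is only a sketch of the
expected result after this patch, using the existing get_mm_counter() API:

/*
 * Sketch (assumption): get_mm_rss() now sums all three per-mm RSS
 * counters, so the total exported to userspace is unchanged even though
 * shmem pages are tracked separately from regular file pages.
 */
static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}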
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	30
1 file changed, 10 insertions(+), 20 deletions(-)
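The hunks below replace open-coded PageAnon() checks with mm_counter() and
mm_counter_file(). Those helpers are introduced outside mm/memory.c (this
diffstat is limited to that file), so the following is a sketch of their
expected shape, assuming shmem pages are told apart from regular file pages
via PageSwapBacked():

/*
 * Sketch of the new helpers (expected in include/linux/mm.h).
 * Assumption: shmem/tmpfs pages are swap-backed, regular file pages
 * are not, so PageSwapBacked() selects MM_SHMEMPAGES vs MM_FILEPAGES.
 */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

With these, each accounting site picks the right counter from the page
itself instead of branching on PageAnon() inline.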
diff --git a/mm/memory.c b/mm/memory.c
index c387430f06c3..f7026c035940 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -832,10 +832,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		} else if (is_migration_entry(entry)) {
 			page = migration_entry_to_page(entry);
 
-			if (PageAnon(page))
-				rss[MM_ANONPAGES]++;
-			else
-				rss[MM_FILEPAGES]++;
+			rss[mm_counter(page)]++;
 
 			if (is_write_migration_entry(entry) &&
 			    is_cow_mapping(vm_flags)) {
@@ -874,10 +871,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (page) {
 		get_page(page);
 		page_dup_rmap(page);
-		if (PageAnon(page))
-			rss[MM_ANONPAGES]++;
-		else
-			rss[MM_FILEPAGES]++;
+		rss[mm_counter(page)]++;
 	}
 
 out_set_pte:
@@ -1113,9 +1107,8 @@ again:
 			tlb_remove_tlb_entry(tlb, pte, addr);
 			if (unlikely(!page))
 				continue;
-			if (PageAnon(page))
-				rss[MM_ANONPAGES]--;
-			else {
+
+			if (!PageAnon(page)) {
 				if (pte_dirty(ptent)) {
 					force_flush = 1;
 					set_page_dirty(page);
@@ -1123,8 +1116,8 @@ again:
 				if (pte_young(ptent) &&
 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
 					mark_page_accessed(page);
-				rss[MM_FILEPAGES]--;
 			}
+			rss[mm_counter(page)]--;
 			page_remove_rmap(page);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
@@ -1146,11 +1139,7 @@ again:
 			struct page *page;
 
 			page = migration_entry_to_page(entry);
-
-			if (PageAnon(page))
-				rss[MM_ANONPAGES]--;
-			else
-				rss[MM_FILEPAGES]--;
+			rss[mm_counter(page)]--;
 		}
 		if (unlikely(!free_swap_and_cache(entry)))
 			print_bad_pte(vma, addr, ptent, NULL);
@@ -1460,7 +1449,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 
 	/* Ok, finally just insert the thing.. */
 	get_page(page);
-	inc_mm_counter_fast(mm, MM_FILEPAGES);
+	inc_mm_counter_fast(mm, mm_counter_file(page));
 	page_add_file_rmap(page);
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
@@ -2097,7 +2086,8 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (likely(pte_same(*page_table, orig_pte))) {
 		if (old_page) {
 			if (!PageAnon(old_page)) {
-				dec_mm_counter_fast(mm, MM_FILEPAGES);
+				dec_mm_counter_fast(mm,
+						mm_counter_file(old_page));
 				inc_mm_counter_fast(mm, MM_ANONPAGES);
 			}
 		} else {
@@ -2820,7 +2810,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address);
 	} else {
-		inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES);
+		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 		page_add_file_rmap(page);
 	}
 	set_pte_at(vma->vm_mm, address, pte, entry);