summary | refs | log | tree | commit | diff | stats
path: root/mm/rmap.c
diff options
context:
space:
mode:
author: Jerome Marchand <jmarchan@redhat.com> 2016-01-14 18:19:26 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org> 2016-01-14 19:00:49 -0500
commit: eca56ff906bdd0239485e8b47154a6e73dd9a2f3 (patch)
tree: 2eca71e20820247d4e81243a920db3a138f558db /mm/rmap.c
parent: 48131e03ca4ed71d73fbe55c311a258c6fa2a090 (diff)
mm, shmem: add internal shmem resident memory accounting
Currently looking at /proc/<pid>/status or statm, there is no way to distinguish shmem pages from pages mapped to a regular file (shmem pages are mapped to /dev/zero), even though their implication in actual memory use is quite different. The internal accounting currently counts shmem pages together with regular files. As a preparation to extend the userspace interfaces, this patch adds MM_SHMEMPAGES counter to mm_rss_stat to account for shmem pages separately from MM_FILEPAGES. The next patch will expose it to userspace - this patch doesn't change the exported values yet, by adding up MM_SHMEMPAGES to MM_FILEPAGES at places where MM_FILEPAGES was used before. The only user-visible change after this patch is the OOM killer message that separates the reported "shmem-rss" from "file-rss". [vbabka@suse.cz: forward-porting, tweak changelog] Signed-off-by: Jerome Marchand <jmarchan@redhat.com> Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Acked-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> Acked-by: Michal Hocko <mhocko@suse.com> Acked-by: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  12
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 3c3f1d21f075..622756c16ac8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1364,10 +1364,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1364 if (PageHuge(page)) { 1364 if (PageHuge(page)) {
1365 hugetlb_count_sub(1 << compound_order(page), mm); 1365 hugetlb_count_sub(1 << compound_order(page), mm);
1366 } else { 1366 } else {
1367 if (PageAnon(page)) 1367 dec_mm_counter(mm, mm_counter(page));
1368 dec_mm_counter(mm, MM_ANONPAGES);
1369 else
1370 dec_mm_counter(mm, MM_FILEPAGES);
1371 } 1368 }
1372 set_pte_at(mm, address, pte, 1369 set_pte_at(mm, address, pte,
1373 swp_entry_to_pte(make_hwpoison_entry(page))); 1370 swp_entry_to_pte(make_hwpoison_entry(page)));
@@ -1377,10 +1374,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1377 * interest anymore. Simply discard the pte, vmscan 1374 * interest anymore. Simply discard the pte, vmscan
1378 * will take care of the rest. 1375 * will take care of the rest.
1379 */ 1376 */
1380 if (PageAnon(page)) 1377 dec_mm_counter(mm, mm_counter(page));
1381 dec_mm_counter(mm, MM_ANONPAGES);
1382 else
1383 dec_mm_counter(mm, MM_FILEPAGES);
1384 } else if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION)) { 1378 } else if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION)) {
1385 swp_entry_t entry; 1379 swp_entry_t entry;
1386 pte_t swp_pte; 1380 pte_t swp_pte;
@@ -1420,7 +1414,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1420 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1414 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1421 set_pte_at(mm, address, pte, swp_pte); 1415 set_pte_at(mm, address, pte, swp_pte);
1422 } else 1416 } else
1423 dec_mm_counter(mm, MM_FILEPAGES); 1417 dec_mm_counter(mm, mm_counter_file(page));
1424 1418
1425 page_remove_rmap(page); 1419 page_remove_rmap(page);
1426 page_cache_release(page); 1420 page_cache_release(page);