author    Kirill A. Shutemov <kirill@shutemov.name>    2014-12-10 18:44:36 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-12-10 20:41:08 -0500
commit    c164e038eee805147e95789dddb88ae3b3aca11c (patch)
tree      6e3c5c99920142d46a9acb3d334e58c78d005467 /fs/proc
parent    2314b42db67be30b747122d65c6cd2c85da34538 (diff)
mm: fix huge zero page accounting in smaps report
Like the small zero page, the huge zero page should not be accounted in the smaps report as a normal page.

For small pages we rely on vm_normal_page() to filter out the zero page, but vm_normal_page() is not designed to handle pmds. The existing code only gets there via a hackish cast of pmd to pte in smaps_pte_range() -- and the pte and pmd formats are not necessarily compatible on every architecture.

Let's add a separate codepath to handle pmds. follow_trans_huge_pmd() will detect the huge zero page for us.

We need a pmd_dirty() helper to do this properly. The patch adds it to the THP-enabled architectures which don't yet have one.

[akpm@linux-foundation.org: use do_div to fix 32-bit build]

Signed-off-by: "Kirill A. Shutemov" <kirill@shutemov.name>
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Tested-by: Fengwei Yin <yfw.kernel@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
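For context, the effect of this change can be observed from userspace. The sketch below (not part of the patch) maps an anonymous region, hints THP with madvise(MADV_HUGEPAGE), touches it read-only so the kernel may back it with the huge zero page, and then prints the Rss, Pss and AnonHugePages fields for that VMA from /proc/self/smaps. On a kernel with this fix the read-only range should no longer inflate those counters. The 4 MiB size, the lowercase-hex smaps address format and the assumption that the huge zero page is enabled (/sys/kernel/mm/transparent_hugepage/use_zero_page) are illustrative assumptions, not part of the commit.

/* Illustrative only -- not part of the kernel patch. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (4UL << 20)		/* 4 MiB: room for a couple of huge pages */

int main(void)
{
	char line[256], start[32];
	int ours = 0;
	FILE *f;
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	madvise(p, LEN, MADV_HUGEPAGE);		/* ask for THP backing */

	/* Read-only touch: may be satisfied by the huge zero page pmd. */
	for (unsigned long off = 0; off < LEN; off += 4096)
		(void)*(volatile char *)(p + off);

	printf("mapping: %p-%p\n", (void *)p, (void *)(p + LEN));
	snprintf(start, sizeof(start), "%lx-", (unsigned long)p);

	/* Print only the counters this patch affects, for our VMA. */
	f = fopen("/proc/self/smaps", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if ((line[0] >= '0' && line[0] <= '9') ||
		    (line[0] >= 'a' && line[0] <= 'f'))	/* VMA header line */
			ours = !strncmp(line, start, strlen(start));
		else if (ours && (!strncmp(line, "Rss:", 4) ||
				  !strncmp(line, "Pss:", 4) ||
				  !strncmp(line, "AnonHugePages:", 14)))
			fputs(line, stdout);
	}
	fclose(f);
	munmap(p, LEN);
	return 0;
}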
Diffstat (limited to 'fs/proc')
-rw-r--r--	fs/proc/task_mmu.c	104
1 file changed, 68 insertions(+), 36 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f6734c6b66a6..246eae84b13b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -447,58 +447,91 @@ struct mem_size_stats {
 	u64 pss;
 };
 
+static void smaps_account(struct mem_size_stats *mss, struct page *page,
+		unsigned long size, bool young, bool dirty)
+{
+	int mapcount;
+
+	if (PageAnon(page))
+		mss->anonymous += size;
 
-static void smaps_pte_entry(pte_t ptent, unsigned long addr,
-		unsigned long ptent_size, struct mm_walk *walk)
+	mss->resident += size;
+	/* Accumulate the size in pages that have been accessed. */
+	if (young || PageReferenced(page))
+		mss->referenced += size;
+	mapcount = page_mapcount(page);
+	if (mapcount >= 2) {
+		u64 pss_delta;
+
+		if (dirty || PageDirty(page))
+			mss->shared_dirty += size;
+		else
+			mss->shared_clean += size;
+		pss_delta = (u64)size << PSS_SHIFT;
+		do_div(pss_delta, mapcount);
+		mss->pss += pss_delta;
+	} else {
+		if (dirty || PageDirty(page))
+			mss->private_dirty += size;
+		else
+			mss->private_clean += size;
+		mss->pss += (u64)size << PSS_SHIFT;
+	}
+}
+
+static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+		struct mm_walk *walk)
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = mss->vma;
 	pgoff_t pgoff = linear_page_index(vma, addr);
 	struct page *page = NULL;
-	int mapcount;
 
-	if (pte_present(ptent)) {
-		page = vm_normal_page(vma, addr, ptent);
-	} else if (is_swap_pte(ptent)) {
-		swp_entry_t swpent = pte_to_swp_entry(ptent);
+	if (pte_present(*pte)) {
+		page = vm_normal_page(vma, addr, *pte);
+	} else if (is_swap_pte(*pte)) {
+		swp_entry_t swpent = pte_to_swp_entry(*pte);
 
 		if (!non_swap_entry(swpent))
-			mss->swap += ptent_size;
+			mss->swap += PAGE_SIZE;
 		else if (is_migration_entry(swpent))
 			page = migration_entry_to_page(swpent);
-	} else if (pte_file(ptent)) {
-		if (pte_to_pgoff(ptent) != pgoff)
-			mss->nonlinear += ptent_size;
+	} else if (pte_file(*pte)) {
+		if (pte_to_pgoff(*pte) != pgoff)
+			mss->nonlinear += PAGE_SIZE;
 	}
 
 	if (!page)
 		return;
 
-	if (PageAnon(page))
-		mss->anonymous += ptent_size;
-
 	if (page->index != pgoff)
-		mss->nonlinear += ptent_size;
+		mss->nonlinear += PAGE_SIZE;
 
-	mss->resident += ptent_size;
-	/* Accumulate the size in pages that have been accessed. */
-	if (pte_young(ptent) || PageReferenced(page))
-		mss->referenced += ptent_size;
-	mapcount = page_mapcount(page);
-	if (mapcount >= 2) {
-		if (pte_dirty(ptent) || PageDirty(page))
-			mss->shared_dirty += ptent_size;
-		else
-			mss->shared_clean += ptent_size;
-		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
-	} else {
-		if (pte_dirty(ptent) || PageDirty(page))
-			mss->private_dirty += ptent_size;
-		else
-			mss->private_clean += ptent_size;
-		mss->pss += (ptent_size << PSS_SHIFT);
-	}
+	smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+		struct mm_walk *walk)
+{
+	struct mem_size_stats *mss = walk->private;
+	struct vm_area_struct *vma = mss->vma;
+	struct page *page;
+
+	/* FOLL_DUMP will return -EFAULT on huge zero page */
+	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
+	if (IS_ERR_OR_NULL(page))
+		return;
+	mss->anonymous_thp += HPAGE_PMD_SIZE;
+	smaps_account(mss, page, HPAGE_PMD_SIZE,
+			pmd_young(*pmd), pmd_dirty(*pmd));
 }
+#else
+static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+		struct mm_walk *walk)
+{
+}
+#endif
 
 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		struct mm_walk *walk)
@@ -509,9 +542,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	spinlock_t *ptl;
 
 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
-		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
+		smaps_pmd_entry(pmd, addr, walk);
 		spin_unlock(ptl);
-		mss->anonymous_thp += HPAGE_PMD_SIZE;
 		return 0;
 	}
 
@@ -524,7 +556,7 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	 */
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
-		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
+		smaps_pte_entry(pte, addr, walk);
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 	return 0;