aboutsummaryrefslogtreecommitdiffstats
path: root/fs/proc
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2016-01-15 19:52:13 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-01-15 20:56:32 -0500
commitafd9883f93b6d030682d7072852b50c5a1b17b63 (patch)
tree02c0629aedf4cd5537b5adc08586ab45a35bedd3 /fs/proc
parent685eaade56c66c806dbe8102f12e2926cf4ec870 (diff)
mm, proc: adjust PSS calculation
The goal of this patchset is to make refcounting on THP pages cheaper with simpler semantics and to allow the same THP compound page to be mapped with PMDs and PTEs. This is required to get a reasonable THP-pagecache implementation. With the new refcounting design it's much easier to protect against split_huge_page(): holding a simple reference on a page is enough. It makes the gup_fast() implementation simpler and doesn't require a special case in the futex code to handle tail THP pages. It should improve THP utilization across the system, since splitting a THP in one process doesn't necessarily lead to splitting the page in all other processes that have the page mapped. The patchset drastically lowers the complexity of the get_page()/put_page() codepaths. I encourage people to look at this code before and after to justify the time budget for reviewing this patchset. This patch (of 37): With the new refcounting, not all subpages of a compound page necessarily have the same mapcount. We need to take into account the mapcount of every sub-page. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Tested-by: Sasha Levin <sasha.levin@oracle.com> Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Acked-by: Jerome Marchand <jmarchan@redhat.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Rik van Riel <riel@redhat.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Steve Capper <steve.capper@linaro.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@suse.cz> Cc: Christoph Lameter <cl@linux.com> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--fs/proc/task_mmu.c47
1 file changed, 31 insertions, 16 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index a353b4c6e86e..b74e7dec37dd 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -466,9 +466,10 @@ struct mem_size_stats {
466}; 466};
467 467
468static void smaps_account(struct mem_size_stats *mss, struct page *page, 468static void smaps_account(struct mem_size_stats *mss, struct page *page,
469 unsigned long size, bool young, bool dirty) 469 bool compound, bool young, bool dirty)
470{ 470{
471 int mapcount; 471 int i, nr = compound ? HPAGE_PMD_NR : 1;
472 unsigned long size = nr * PAGE_SIZE;
472 473
473 if (PageAnon(page)) 474 if (PageAnon(page))
474 mss->anonymous += size; 475 mss->anonymous += size;
@@ -477,23 +478,37 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
477 /* Accumulate the size in pages that have been accessed. */ 478 /* Accumulate the size in pages that have been accessed. */
478 if (young || page_is_young(page) || PageReferenced(page)) 479 if (young || page_is_young(page) || PageReferenced(page))
479 mss->referenced += size; 480 mss->referenced += size;
480 mapcount = page_mapcount(page);
481 if (mapcount >= 2) {
482 u64 pss_delta;
483 481
484 if (dirty || PageDirty(page)) 482 /*
485 mss->shared_dirty += size; 483 * page_count(page) == 1 guarantees the page is mapped exactly once.
486 else 484 * If any subpage of the compound page mapped with PTE it would elevate
487 mss->shared_clean += size; 485 * page_count().
488 pss_delta = (u64)size << PSS_SHIFT; 486 */
489 do_div(pss_delta, mapcount); 487 if (page_count(page) == 1) {
490 mss->pss += pss_delta;
491 } else {
492 if (dirty || PageDirty(page)) 488 if (dirty || PageDirty(page))
493 mss->private_dirty += size; 489 mss->private_dirty += size;
494 else 490 else
495 mss->private_clean += size; 491 mss->private_clean += size;
496 mss->pss += (u64)size << PSS_SHIFT; 492 mss->pss += (u64)size << PSS_SHIFT;
493 return;
494 }
495
496 for (i = 0; i < nr; i++, page++) {
497 int mapcount = page_mapcount(page);
498
499 if (mapcount >= 2) {
500 if (dirty || PageDirty(page))
501 mss->shared_dirty += PAGE_SIZE;
502 else
503 mss->shared_clean += PAGE_SIZE;
504 mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
505 } else {
506 if (dirty || PageDirty(page))
507 mss->private_dirty += PAGE_SIZE;
508 else
509 mss->private_clean += PAGE_SIZE;
510 mss->pss += PAGE_SIZE << PSS_SHIFT;
511 }
497 } 512 }
498} 513}
499 514
@@ -554,7 +569,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
554 569
555 if (!page) 570 if (!page)
556 return; 571 return;
557 smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte)); 572
573 smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
558} 574}
559 575
560#ifdef CONFIG_TRANSPARENT_HUGEPAGE 576#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -570,8 +586,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
570 if (IS_ERR_OR_NULL(page)) 586 if (IS_ERR_OR_NULL(page))
571 return; 587 return;
572 mss->anonymous_thp += HPAGE_PMD_SIZE; 588 mss->anonymous_thp += HPAGE_PMD_SIZE;
573 smaps_account(mss, page, HPAGE_PMD_SIZE, 589 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
574 pmd_young(*pmd), pmd_dirty(*pmd));
575} 590}
576#else 591#else
577static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, 592static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,