Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--	fs/proc/task_mmu.c	108
1 file changed, 72 insertions(+), 36 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 60b914860f81..93381aae9363 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
@@ -7,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
+#include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
 
@@ -329,58 +331,86 @@ struct mem_size_stats {
 	unsigned long private_dirty;
 	unsigned long referenced;
 	unsigned long anonymous;
+	unsigned long anonymous_thp;
 	unsigned long swap;
 	u64 pss;
 };
 
-static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-			   struct mm_walk *walk)
+
+static void smaps_pte_entry(pte_t ptent, unsigned long addr,
+		unsigned long ptent_size, struct mm_walk *walk)
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = mss->vma;
-	pte_t *pte, ptent;
-	spinlock_t *ptl;
 	struct page *page;
 	int mapcount;
 
-	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (; addr != end; pte++, addr += PAGE_SIZE) {
-		ptent = *pte;
-
-		if (is_swap_pte(ptent)) {
-			mss->swap += PAGE_SIZE;
-			continue;
-		}
+	if (is_swap_pte(ptent)) {
+		mss->swap += ptent_size;
+		return;
+	}
 
-		if (!pte_present(ptent))
-			continue;
+	if (!pte_present(ptent))
+		return;
+
+	page = vm_normal_page(vma, addr, ptent);
+	if (!page)
+		return;
+
+	if (PageAnon(page))
+		mss->anonymous += ptent_size;
+
+	mss->resident += ptent_size;
+	/* Accumulate the size in pages that have been accessed. */
+	if (pte_young(ptent) || PageReferenced(page))
+		mss->referenced += ptent_size;
+	mapcount = page_mapcount(page);
+	if (mapcount >= 2) {
+		if (pte_dirty(ptent) || PageDirty(page))
+			mss->shared_dirty += ptent_size;
+		else
+			mss->shared_clean += ptent_size;
+		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
+	} else {
+		if (pte_dirty(ptent) || PageDirty(page))
+			mss->private_dirty += ptent_size;
+		else
+			mss->private_clean += ptent_size;
+		mss->pss += (ptent_size << PSS_SHIFT);
+	}
+}
 
-		page = vm_normal_page(vma, addr, ptent);
-		if (!page)
-			continue;
+static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			   struct mm_walk *walk)
+{
+	struct mem_size_stats *mss = walk->private;
+	struct vm_area_struct *vma = mss->vma;
+	pte_t *pte;
+	spinlock_t *ptl;
 
-		if (PageAnon(page))
-			mss->anonymous += PAGE_SIZE;
-
-		mss->resident += PAGE_SIZE;
-		/* Accumulate the size in pages that have been accessed. */
-		if (pte_young(ptent) || PageReferenced(page))
-			mss->referenced += PAGE_SIZE;
-		mapcount = page_mapcount(page);
-		if (mapcount >= 2) {
-			if (pte_dirty(ptent) || PageDirty(page))
-				mss->shared_dirty += PAGE_SIZE;
-			else
-				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
-		} else {
-			if (pte_dirty(ptent) || PageDirty(page))
-				mss->private_dirty += PAGE_SIZE;
-			else
-				mss->private_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT);
-		}
+	spin_lock(&walk->mm->page_table_lock);
+	if (pmd_trans_huge(*pmd)) {
+		if (pmd_trans_splitting(*pmd)) {
+			spin_unlock(&walk->mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			smaps_pte_entry(*(pte_t *)pmd, addr,
+					HPAGE_PMD_SIZE, walk);
+			spin_unlock(&walk->mm->page_table_lock);
+			mss->anonymous_thp += HPAGE_PMD_SIZE;
+			return 0;
+		}
+	} else {
+		spin_unlock(&walk->mm->page_table_lock);
 	}
+	/*
+	 * The mmap_sem held all the way back in m_start() is what
+	 * keeps khugepaged out of here and from collapsing things
+	 * in here.
+	 */
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	for (; addr != end; pte++, addr += PAGE_SIZE)
+		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 	return 0;
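
The pss field accumulated above is a fixed-point value: each mapped page (or huge pmd) contributes (size << PSS_SHIFT) / mapcount, so a page shared by N processes is charged 1/N to each, and show_smap() converts back with a right shift when printing kB. A minimal user-space sketch of the same arithmetic, assuming PSS_SHIFT == 12 as defined in this file; the page sizes and mapcounts below are made-up example inputs, not kernel data:

#include <stdio.h>

#define PSS_SHIFT 12	/* matches the definition in fs/proc/task_mmu.c */

int main(void)
{
	unsigned long long pss = 0;
	unsigned long long page_size = 4096;

	/* One page mapped by 3 processes: each is charged 1/3 page. */
	pss += (page_size << PSS_SHIFT) / 3;
	/* One page mapped only here: charged in full. */
	pss += page_size << PSS_SHIFT;
	/* One 2MB THP mapped only here (HPAGE_PMD_SIZE on x86). */
	pss += (2048ULL * 1024) << PSS_SHIFT;

	/* show_smap() reports pss >> (10 + PSS_SHIFT) as kB. */
	printf("Pss: %llu kB\n", pss >> (10 + PSS_SHIFT));
	return 0;
}
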
@@ -416,6 +446,7 @@ static int show_smap(struct seq_file *m, void *v)
416 "Private_Dirty: %8lu kB\n" 446 "Private_Dirty: %8lu kB\n"
417 "Referenced: %8lu kB\n" 447 "Referenced: %8lu kB\n"
418 "Anonymous: %8lu kB\n" 448 "Anonymous: %8lu kB\n"
449 "AnonHugePages: %8lu kB\n"
419 "Swap: %8lu kB\n" 450 "Swap: %8lu kB\n"
420 "KernelPageSize: %8lu kB\n" 451 "KernelPageSize: %8lu kB\n"
421 "MMUPageSize: %8lu kB\n" 452 "MMUPageSize: %8lu kB\n"
@@ -429,6 +460,7 @@ static int show_smap(struct seq_file *m, void *v)
 		   mss.private_dirty >> 10,
 		   mss.referenced >> 10,
 		   mss.anonymous >> 10,
+		   mss.anonymous_thp >> 10,
 		   mss.swap >> 10,
 		   vma_kernel_pagesize(vma) >> 10,
 		   vma_mmu_pagesize(vma) >> 10,
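
With the new field in place, user space can measure how much of each mapping is backed by transparent huge pages. A minimal sketch that totals the field across a process's mappings; it assumes only the "AnonHugePages: %8lu kB" format string added above, and takes a PID as an optional argument:

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], line[256];
	unsigned long kb, total = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/smaps",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	/* Sum the per-VMA AnonHugePages: lines. */
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "AnonHugePages: %lu kB", &kb) == 1)
			total += kb;
	fclose(f);
	printf("AnonHugePages total: %lu kB\n", total);
	return 0;
}
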
@@ -467,6 +499,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
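
clear_refs walks page tables at PTE granularity, so a huge pmd is split up front rather than handled in place. For context, this file pairs with the Referenced field in smaps: writing 1 to /proc/<pid>/clear_refs clears the referenced bits, and a later smaps read shows what has been touched since. A hedged sketch of that round trip (error handling trimmed; write permission on the target's clear_refs is assumed):

#include <stdio.h>

/* Clear the referenced bits for one task; returns 0 on success. */
static int clear_refs(const char *pid)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/clear_refs", pid);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs("1\n", f);	/* 1 = clear all referenced bits */
	fclose(f);
	return 0;
}

int main(void)
{
	if (clear_refs("self"))
		perror("clear_refs");
	/* ... run a workload, then parse Referenced: in smaps ... */
	return 0;
}
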
@@ -623,6 +657,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	int err = 0;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	/* find the first VMA at or above 'addr' */
 	vma = find_vma(walk->mm, addr);
 	for (; addr != end; addr += PAGE_SIZE) {
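
pagemap is likewise defined as one 64-bit entry per small page, hence the same split before the walk. A minimal sketch that fetches the entry for one known-mapped address, using the bit layout documented in Documentation/vm/pagemap.txt of this era (bit 63 present, bit 62 swapped, low 55 bits the PFN when present):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	volatile char probe = 1;	/* a stack page we know is mapped */
	off_t off = ((uintptr_t)&probe / psize) * sizeof(uint64_t);
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	/* One 64-bit entry per virtual page, indexed by vaddr/psize. */
	if (fd < 0 || pread(fd, &entry, sizeof(entry), off) != sizeof(entry)) {
		perror("pagemap");
		return 1;
	}
	printf("present=%d swapped=%d pfn=0x%llx\n",
	       (int)(entry >> 63), (int)((entry >> 62) & 1),
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	close(fd);
	return 0;
}
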