aboutsummaryrefslogtreecommitdiffstats
path: root/fs/proc
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2016-01-14 18:19:23 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-01-14 19:00:49 -0500
commit48131e03ca4ed71d73fbe55c311a258c6fa2a090 (patch)
tree2957b6c97152f31bd97e0943f36ab03ea49b6a19 /fs/proc
parent6a15a37097c7e02390bb08d83dac433c9f10144f (diff)
mm, proc: reduce cost of /proc/pid/smaps for unpopulated shmem mappings
Following the previous patch, further reduction of /proc/pid/smaps cost is possible for private writable shmem mappings with unpopulated areas where the page walk invokes the .pte_hole function. We can use radix tree iterator for each such area instead of calling find_get_entry() in a loop. This is possible at the extra maintenance cost of introducing another shmem function shmem_partial_swap_usage(). To demonstrate the difference, I have measured this on a process that creates a private writable 2GB mapping of a partially swapped out /dev/shm/file (which cannot employ the optimizations from the previous patch) and doesn't populate it at all. I time how long it takes to cat /proc/pid/smaps of this process 100 times. Before this patch: real 0m3.831s user 0m0.180s sys 0m3.212s After this patch: real 0m1.176s user 0m0.180s sys 0m0.684s The time is similar to the case where a radix tree iterator is employed on the whole mapping. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Cc: Hugh Dickins <hughd@google.com> Cc: Jerome Marchand <jmarchan@redhat.com> Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> Acked-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--fs/proc/task_mmu.c42
1 file changed, 13 insertions, 29 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5830b2e129ed..8a03759bda38 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -488,42 +488,16 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
488} 488}
489 489
490#ifdef CONFIG_SHMEM 490#ifdef CONFIG_SHMEM
491static unsigned long smaps_shmem_swap(struct vm_area_struct *vma,
492 unsigned long addr)
493{
494 struct page *page;
495
496 page = find_get_entry(vma->vm_file->f_mapping,
497 linear_page_index(vma, addr));
498 if (!page)
499 return 0;
500
501 if (radix_tree_exceptional_entry(page))
502 return PAGE_SIZE;
503
504 page_cache_release(page);
505 return 0;
506
507}
508
509static int smaps_pte_hole(unsigned long addr, unsigned long end, 491static int smaps_pte_hole(unsigned long addr, unsigned long end,
510 struct mm_walk *walk) 492 struct mm_walk *walk)
511{ 493{
512 struct mem_size_stats *mss = walk->private; 494 struct mem_size_stats *mss = walk->private;
513 495
514 while (addr < end) { 496 mss->swap += shmem_partial_swap_usage(
515 mss->swap += smaps_shmem_swap(walk->vma, addr); 497 walk->vma->vm_file->f_mapping, addr, end);
516 addr += PAGE_SIZE;
517 }
518 498
519 return 0; 499 return 0;
520} 500}
521#else
522static unsigned long smaps_shmem_swap(struct vm_area_struct *vma,
523 unsigned long addr)
524{
525 return 0;
526}
527#endif 501#endif
528 502
529static void smaps_pte_entry(pte_t *pte, unsigned long addr, 503static void smaps_pte_entry(pte_t *pte, unsigned long addr,
@@ -555,7 +529,17 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
555 page = migration_entry_to_page(swpent); 529 page = migration_entry_to_page(swpent);
556 } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap 530 } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
557 && pte_none(*pte))) { 531 && pte_none(*pte))) {
558 mss->swap += smaps_shmem_swap(vma, addr); 532 page = find_get_entry(vma->vm_file->f_mapping,
533 linear_page_index(vma, addr));
534 if (!page)
535 return;
536
537 if (radix_tree_exceptional_entry(page))
538 mss->swap += PAGE_SIZE;
539 else
540 page_cache_release(page);
541
542 return;
559 } 543 }
560 544
561 if (!page) 545 if (!page)