author		Jiri Kosina <jkosina@suse.cz>	2011-04-26 04:22:15 -0400
committer	Jiri Kosina <jkosina@suse.cz>	2011-04-26 04:22:59 -0400
commit		07f9479a40cc778bc1462ada11f95b01360ae4ff (patch)
tree		0676cf38df3844004bb3ebfd99dfa67a4a8998f5 /fs/proc/task_mmu.c
parent		9d5e6bdb3013acfb311ab407eeca0b6a6a3dedbf (diff)
parent		cd2e49e90f1cae7726c9a2c54488d881d7f1cd1c (diff)
Merge branch 'master' into for-next
Fast-forwarded to current state of Linus' tree as there are patches to be applied for files that didn't exist on the old branch.
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--	fs/proc/task_mmu.c	138
1 file changed, 87 insertions(+), 51 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 60b914860f81..2e7addfd9803 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
@@ -7,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
+#include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
 
@@ -119,14 +121,14 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 
 	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
 	if (!priv->task)
-		return NULL;
+		return ERR_PTR(-ESRCH);
 
 	mm = mm_for_maps(priv->task);
-	if (!mm)
-		return NULL;
+	if (!mm || IS_ERR(mm))
+		return mm;
 	down_read(&mm->mmap_sem);
 
-	tail_vma = get_gate_vma(priv->task);
+	tail_vma = get_gate_vma(priv->task->mm);
 	priv->tail_vma = tail_vma;
 
 	/* Start with last addr hint */
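The m_start() hunk above moves /proc/<pid>/maps from "return NULL on any failure" to the kernel's ERR_PTR convention, so seq_file callers can tell a vanished task (-ESRCH) or a refused mm_for_maps() apart from a normal end of iteration; the m_stop() hunk below guards vma_stop() accordingly. A minimal userspace sketch of that convention, with the linux/err.h macros inlined and lookup_task() as a hypothetical stand-in for the real task lookup:

/* Userspace sketch of the ERR_PTR convention from linux/err.h. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *lookup_task(int pid_alive)
{
	if (!pid_alive)
		return ERR_PTR(-ESRCH);	/* task already exited */
	return "task";
}

int main(void)
{
	void *task = lookup_task(0);

	if (IS_ERR(task)) {	/* distinguishable from NULL == end of iteration */
		printf("m_start failed: %ld\n", PTR_ERR(task));
		return 1;
	}
	return 0;
}

The trick works because the errno lives in the top 4095 values of the address space, which the kernel never hands out as real pointers, so a single return slot can carry either a pointer or an error code.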
@@ -180,7 +182,8 @@ static void m_stop(struct seq_file *m, void *v)
 	struct proc_maps_private *priv = m->private;
 	struct vm_area_struct *vma = v;
 
-	vma_stop(priv, vma);
+	if (!IS_ERR(vma))
+		vma_stop(priv, vma);
 	if (priv->task)
 		put_task_struct(priv->task);
 }
@@ -249,8 +252,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 	const char *name = arch_vma_name(vma);
 	if (!name) {
 		if (mm) {
-			if (vma->vm_start <= mm->start_brk &&
-			    vma->vm_end >= mm->brk) {
+			if (vma->vm_start <= mm->brk &&
+			    vma->vm_end >= mm->start_brk) {
 				name = "[heap]";
 			} else if (vma->vm_start <= mm->start_stack &&
 				   vma->vm_end >= mm->start_stack) {
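The [heap] hunk relaxes containment to overlap: previously a VMA was labeled [heap] only if it covered the whole [start_brk, brk] interval, so a heap split into several VMAs (e.g. by an mprotect() on part of it) lost the label. A toy check with invented addresses:

/* Old vs. new [heap] test; the heap runs from start_brk to brk but
 * this VMA only covers part of it. All values are made up. */
#include <stdio.h>

int main(void)
{
	unsigned long start_brk = 0x1000, brk = 0x3000;
	unsigned long vm_start = 0x2000, vm_end = 0x3000;

	/* old: the VMA must contain the whole brk interval */
	int old_heap = vm_start <= start_brk && vm_end >= brk;
	/* new: overlapping the brk interval is enough */
	int new_heap = vm_start <= brk && vm_end >= start_brk;

	printf("old=%d new=%d\n", old_heap, new_heap);	/* old=0 new=1 */
	return 0;
}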
@@ -277,7 +280,8 @@ static int show_map(struct seq_file *m, void *v)
 	show_map_vma(m, vma);
 
 	if (m->count < m->size)  /* vma is copied successfully */
-		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
+		m->version = (vma != get_gate_vma(task->mm))
+			? vma->vm_start : 0;
 	return 0;
 }
 
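Both show_map() here and show_smap() further down cache vma->vm_start in m->version once the entry fits in the seq_file buffer; the next read() can resume the VMA walk near that address instead of rescanning from the start, and the gate vma, which has no address inside the mm, resets the hint to 0. A toy resume-by-key loop over a sorted array (values invented):

/* Toy version of the m->version resume hint. */
#include <stdio.h>

int main(void)
{
	unsigned long vm_starts[] = { 0x1000, 0x2000, 0x3000 };
	unsigned long version = 0x2000;	/* saved by the previous read */

	for (int i = 0; i < 3; i++) {
		if (vm_starts[i] < version)
			continue;	/* already emitted last time */
		printf("emit vma at %#lx\n", vm_starts[i]);
	}
	return 0;
}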
@@ -329,58 +333,86 @@ struct mem_size_stats {
 	unsigned long private_dirty;
 	unsigned long referenced;
 	unsigned long anonymous;
+	unsigned long anonymous_thp;
 	unsigned long swap;
 	u64 pss;
 };
 
-static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-			   struct mm_walk *walk)
+
+static void smaps_pte_entry(pte_t ptent, unsigned long addr,
+		unsigned long ptent_size, struct mm_walk *walk)
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = mss->vma;
-	pte_t *pte, ptent;
-	spinlock_t *ptl;
 	struct page *page;
 	int mapcount;
 
-	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (; addr != end; pte++, addr += PAGE_SIZE) {
-		ptent = *pte;
-
-		if (is_swap_pte(ptent)) {
-			mss->swap += PAGE_SIZE;
-			continue;
-		}
+	if (is_swap_pte(ptent)) {
+		mss->swap += ptent_size;
+		return;
+	}
 
-		if (!pte_present(ptent))
-			continue;
+	if (!pte_present(ptent))
+		return;
+
+	page = vm_normal_page(vma, addr, ptent);
+	if (!page)
+		return;
+
+	if (PageAnon(page))
+		mss->anonymous += ptent_size;
+
+	mss->resident += ptent_size;
+	/* Accumulate the size in pages that have been accessed. */
+	if (pte_young(ptent) || PageReferenced(page))
+		mss->referenced += ptent_size;
+	mapcount = page_mapcount(page);
+	if (mapcount >= 2) {
+		if (pte_dirty(ptent) || PageDirty(page))
+			mss->shared_dirty += ptent_size;
+		else
+			mss->shared_clean += ptent_size;
+		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
+	} else {
+		if (pte_dirty(ptent) || PageDirty(page))
+			mss->private_dirty += ptent_size;
+		else
+			mss->private_clean += ptent_size;
+		mss->pss += (ptent_size << PSS_SHIFT);
+	}
+}
 
-		page = vm_normal_page(vma, addr, ptent);
-		if (!page)
-			continue;
+static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			   struct mm_walk *walk)
+{
+	struct mem_size_stats *mss = walk->private;
+	struct vm_area_struct *vma = mss->vma;
+	pte_t *pte;
+	spinlock_t *ptl;
 
-		if (PageAnon(page))
-			mss->anonymous += PAGE_SIZE;
-
-		mss->resident += PAGE_SIZE;
-		/* Accumulate the size in pages that have been accessed. */
-		if (pte_young(ptent) || PageReferenced(page))
-			mss->referenced += PAGE_SIZE;
-		mapcount = page_mapcount(page);
-		if (mapcount >= 2) {
-			if (pte_dirty(ptent) || PageDirty(page))
-				mss->shared_dirty += PAGE_SIZE;
-			else
-				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
-		} else {
-			if (pte_dirty(ptent) || PageDirty(page))
-				mss->private_dirty += PAGE_SIZE;
-			else
-				mss->private_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT);
-		}
+	spin_lock(&walk->mm->page_table_lock);
+	if (pmd_trans_huge(*pmd)) {
+		if (pmd_trans_splitting(*pmd)) {
+			spin_unlock(&walk->mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			smaps_pte_entry(*(pte_t *)pmd, addr,
+					HPAGE_PMD_SIZE, walk);
+			spin_unlock(&walk->mm->page_table_lock);
+			mss->anonymous_thp += HPAGE_PMD_SIZE;
+			return 0;
+		}
+	} else {
+		spin_unlock(&walk->mm->page_table_lock);
 	}
+	/*
+	 * The mmap_sem held all the way back in m_start() is what
+	 * keeps khugepaged out of here and from collapsing things
+	 * in here.
+	 */
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	for (; addr != end; pte++, addr += PAGE_SIZE)
+		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 	return 0;
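The refactor above breaks the per-pte accounting out of smaps_pte_range() into smaps_pte_entry(), parameterized by ptent_size, so a transparent huge pmd can be accounted in one call with HPAGE_PMD_SIZE instead of walking 512 ptes. The PSS ("proportional set size") arithmetic charges each page as size/mapcount, accumulated with PSS_SHIFT extra bits of fixed-point precision; a standalone sketch with invented page values (PSS_SHIFT is 12 in this file):

/* Sketch of the PSS fixed-point accounting used by smaps_pte_entry(). */
#include <stdio.h>

#define PSS_SHIFT	12
#define PAGE_SIZE	4096ULL

int main(void)
{
	unsigned long long pss = 0;
	int mapcount[] = { 1, 2, 3 };	/* three pages, increasingly shared */

	for (int i = 0; i < 3; i++)
		pss += (PAGE_SIZE << PSS_SHIFT) / mapcount[i];

	/* show_smap() prints mss.pss >> (10 + PSS_SHIFT): back to kB */
	printf("Pss: %llu kB\n", pss >> (10 + PSS_SHIFT));	/* 4 + 2 + 1.33 -> 7 */
	return 0;
}

Keeping the shift until display is what lets a page shared three ways contribute its fractional 1.33 kB without floating point.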
@@ -416,6 +448,7 @@ static int show_smap(struct seq_file *m, void *v)
 		   "Private_Dirty:  %8lu kB\n"
 		   "Referenced:     %8lu kB\n"
 		   "Anonymous:      %8lu kB\n"
+		   "AnonHugePages:  %8lu kB\n"
 		   "Swap:           %8lu kB\n"
 		   "KernelPageSize: %8lu kB\n"
 		   "MMUPageSize:    %8lu kB\n"
@@ -429,6 +462,7 @@ static int show_smap(struct seq_file *m, void *v)
 		   mss.private_dirty >> 10,
 		   mss.referenced >> 10,
 		   mss.anonymous >> 10,
+		   mss.anonymous_thp >> 10,
 		   mss.swap >> 10,
 		   vma_kernel_pagesize(vma) >> 10,
 		   vma_mmu_pagesize(vma) >> 10,
@@ -436,7 +470,8 @@ static int show_smap(struct seq_file *m, void *v)
 		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
 	if (m->count < m->size)  /* vma is copied successfully */
-		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+		m->version = (vma != get_gate_vma(task->mm))
+			? vma->vm_start : 0;
 	return 0;
 }
 
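With the new AnonHugePages field in place, a quick userspace sanity check is to sum it over a process's own smaps; the sketch below reads /proc/self/smaps and prints 0 on kernels without THP or without this patch:

/* Sum AnonHugePages across all VMAs of the current process. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/smaps", "r");
	char line[256];
	unsigned long kb, total = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "AnonHugePages: %lu kB", &kb) == 1)
			total += kb;
	fclose(f);
	printf("AnonHugePages total: %lu kB\n", total);
	return 0;
}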
@@ -467,6 +502,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
@@ -623,6 +660,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	int err = 0;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	/* find the first VMA at or above 'addr' */
 	vma = find_vma(walk->mm, addr);
 	for (; addr != end; addr += PAGE_SIZE) {
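clear_refs_pte_range() and pagemap_pte_range() in the two hunks above take the other THP strategy: rather than teaching their pte loops about huge pmds the way smaps did, they call split_huge_page_pmd() first, so the loop only ever sees normal ptes (a no-op when the pmd is not huge). A userspace analogy of that normalize-then-walk pattern, with all names invented:

/* A "huge" entry covering the whole range is broken into per-page
 * entries first, so the existing per-page loop never sees it. */
#include <stdio.h>
#include <stdbool.h>

#define NPAGES 4

struct toy_pmd {
	bool huge;
	unsigned long pages[NPAGES];
};

static void split_huge(struct toy_pmd *pmd)
{
	if (!pmd->huge)
		return;
	for (int i = 0; i < NPAGES; i++)	/* materialize small entries */
		pmd->pages[i] = 1;
	pmd->huge = false;
}

int main(void)
{
	struct toy_pmd pmd = { .huge = true };
	unsigned long resident = 0;

	split_huge(&pmd);			/* normalize first... */
	for (int i = 0; i < NPAGES; i++)	/* ...then walk per page */
		resident += pmd.pages[i];
	printf("resident pages: %lu\n", resident);
	return 0;
}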
@@ -728,8 +767,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!task)
 		goto out;
 
-	ret = -EACCES;
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
+	mm = mm_for_maps(task);
+	ret = PTR_ERR(mm);
+	if (!mm || IS_ERR(mm))
 		goto out_task;
 
 	ret = -EINVAL;
@@ -742,10 +782,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!count)
 		goto out_task;
 
-	mm = get_task_mm(task);
-	if (!mm)
-		goto out_task;
-
 	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
 	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
 	ret = -ENOMEM;
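The pagemap_read() hunks replace the bare ptrace_may_access() check plus a later get_task_mm() with a single mm_for_maps() call, which performs the same permission check and returns the mm, NULL for an mm-less task (e.g. a kernel thread), or an ERR_PTR on denial. Since PTR_ERR(NULL) is 0, the NULL case falls out as a 0-byte read (EOF) rather than an error. A sketch of that flow, with the err.h macros inlined and fake_read() standing in for the real function:

/* Sketch of the new pagemap_read() error flow. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static long fake_read(void *mm)
{
	long ret = PTR_ERR(mm);	/* PTR_ERR(NULL) == 0: NULL reads as EOF */

	if (!mm || IS_ERR(mm))
		return ret;
	return 42;		/* pretend some bytes were copied */
}

int main(void)
{
	printf("denied: %ld\n", fake_read(ERR_PTR(-EACCES)));	/* -13 */
	printf("no mm:  %ld\n", fake_read(NULL));		/* 0 (EOF) */
	printf("ok:     %ld\n", fake_read((void *)1));		/* 42 */
	return 0;
}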