Diffstat (limited to 'fs/proc/task_mmu.c')
 fs/proc/task_mmu.c | 155 ++++++++++++++++++++++++++-----------------------
 1 file changed, 80 insertions(+), 75 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2a1bef9203c6..47f5b145f56e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -4,6 +4,7 @@
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
+#include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
 #include <linux/swap.h>
@@ -16,7 +17,7 @@
 
 void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
-	unsigned long data, text, lib;
+	unsigned long data, text, lib, swap;
 	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
 
 	/*
@@ -36,6 +37,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
 	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
 	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
+	swap = get_mm_counter(mm, MM_SWAPENTS);
 	seq_printf(m,
 		"VmPeak:\t%8lu kB\n"
 		"VmSize:\t%8lu kB\n"
@@ -46,7 +48,8 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 		"VmStk:\t%8lu kB\n"
 		"VmExe:\t%8lu kB\n"
 		"VmLib:\t%8lu kB\n"
-		"VmPTE:\t%8lu kB\n",
+		"VmPTE:\t%8lu kB\n"
+		"VmSwap:\t%8lu kB\n",
 		hiwater_vm << (PAGE_SHIFT-10),
 		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
 		mm->locked_vm << (PAGE_SHIFT-10),
@@ -54,7 +57,8 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 		total_rss << (PAGE_SHIFT-10),
 		data << (PAGE_SHIFT-10),
 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
-		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
+		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
+		swap << (PAGE_SHIFT-10));
 }
 
 unsigned long task_vsize(struct mm_struct *mm)
@@ -65,11 +69,11 @@ unsigned long task_vsize(struct mm_struct *mm)
 int task_statm(struct mm_struct *mm, int *shared, int *text,
 	       int *data, int *resident)
 {
-	*shared = get_mm_counter(mm, file_rss);
+	*shared = get_mm_counter(mm, MM_FILEPAGES);
 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
 								>> PAGE_SHIFT;
 	*data = mm->total_vm - mm->shared_vm;
-	*resident = *shared + get_mm_counter(mm, anon_rss);
+	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
 	return mm->total_vm;
 }
 
@@ -243,25 +247,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 		} else if (vma->vm_start <= mm->start_stack &&
 			   vma->vm_end >= mm->start_stack) {
 			name = "[stack]";
-		} else {
-			unsigned long stack_start;
-			struct proc_maps_private *pmp;
-
-			pmp = m->private;
-			stack_start = pmp->task->stack_start;
-
-			if (vma->vm_start <= stack_start &&
-			    vma->vm_end >= stack_start) {
-				pad_len_spaces(m, len);
-				seq_printf(m,
-				 "[threadstack:%08lx]",
-#ifdef CONFIG_STACK_GROWSUP
-				 vma->vm_end - stack_start
-#else
-				 stack_start - vma->vm_start
-#endif
-				);
-			}
 		}
 	} else {
 		name = "[vdso]";
@@ -361,12 +346,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		if (!pte_present(ptent))
 			continue;
 
-		mss->resident += PAGE_SIZE;
-
 		page = vm_normal_page(vma, addr, ptent);
 		if (!page)
 			continue;
 
+		mss->resident += PAGE_SIZE;
 		/* Accumulate the size in pages that have been accessed. */
 		if (pte_young(ptent) || PageReferenced(page))
 			mss->referenced += PAGE_SIZE;
@@ -404,6 +388,7 @@ static int show_smap(struct seq_file *m, void *v)
 
 	memset(&mss, 0, sizeof mss);
 	mss.vma = vma;
+	/* mmap_sem is held in m_start */
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
 		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
 
@@ -550,7 +535,8 @@ const struct file_operations proc_clear_refs_operations = {
 };
 
 struct pagemapread {
-	u64 __user *out, *end;
+	int pos, len;
+	u64 *buffer;
 };
 
 #define PM_ENTRY_BYTES      sizeof(u64)
@@ -573,10 +559,8 @@ struct pagemapread {
 static int add_to_pagemap(unsigned long addr, u64 pfn,
 			  struct pagemapread *pm)
 {
-	if (put_user(pfn, pm->out))
-		return -EFAULT;
-	pm->out++;
-	if (pm->out >= pm->end)
+	pm->buffer[pm->pos++] = pfn;
+	if (pm->pos >= pm->len)
 		return PM_END_OF_BUFFER;
 	return 0;
 }
@@ -650,6 +634,37 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	return err;
 }
 
+static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
+{
+	u64 pme = 0;
+	if (pte_present(pte))
+		pme = PM_PFRAME(pte_pfn(pte) + offset)
+			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+	return pme;
+}
+
+/* This function walks within one hugetlb entry in a single call */
+static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
+				 unsigned long addr, unsigned long end,
+				 struct mm_walk *walk)
+{
+	struct pagemapread *pm = walk->private;
+	int err = 0;
+	u64 pfn;
+
+	for (; addr != end; addr += PAGE_SIZE) {
+		int offset = (addr & ~hmask) >> PAGE_SHIFT;
+		pfn = huge_pte_to_pagemap_entry(*pte, offset);
+		err = add_to_pagemap(addr, pfn, pm);
+		if (err)
+			return err;
+	}
+
+	cond_resched();
+
+	return err;
+}
+
 /*
  * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
@@ -674,21 +689,20 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
  * determine which areas of memory are actually mapped and llseek to
  * skip over unmapped regions.
  */
+#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
 static ssize_t pagemap_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
 	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
-	struct page **pages, *page;
-	unsigned long uaddr, uend;
 	struct mm_struct *mm;
 	struct pagemapread pm;
-	int pagecount;
 	int ret = -ESRCH;
 	struct mm_walk pagemap_walk = {};
 	unsigned long src;
 	unsigned long svpfn;
 	unsigned long start_vaddr;
 	unsigned long end_vaddr;
+	int copied = 0;
 
 	if (!task)
 		goto out;
@@ -711,37 +725,15 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!mm)
 		goto out_task;
 
-
-	uaddr = (unsigned long)buf & PAGE_MASK;
-	uend = (unsigned long)(buf + count);
-	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
-	ret = 0;
-	if (pagecount == 0)
-		goto out_mm;
-	pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
 	ret = -ENOMEM;
-	if (!pages)
+	if (!pm.buffer)
 		goto out_mm;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, uaddr, pagecount,
-			     1, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
-
-	if (ret < 0)
-		goto out_free;
-
-	if (ret != pagecount) {
-		pagecount = ret;
-		ret = -EFAULT;
-		goto out_pages;
-	}
-
-	pm.out = (u64 __user *)buf;
-	pm.end = (u64 __user *)(buf + count);
-
 	pagemap_walk.pmd_entry = pagemap_pte_range;
 	pagemap_walk.pte_hole = pagemap_pte_hole;
+	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
 	pagemap_walk.mm = mm;
 	pagemap_walk.private = &pm;
 
@@ -760,23 +752,36 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	 * user buffer is tracked in "pm", and the walk
 	 * will stop when we hit the end of the buffer.
 	 */
-	ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
-	if (ret == PM_END_OF_BUFFER)
-		ret = 0;
-	/* don't need mmap_sem for these, but this looks cleaner */
-	*ppos += (char __user *)pm.out - buf;
-	if (!ret)
-		ret = (char __user *)pm.out - buf;
-
-out_pages:
-	for (; pagecount; pagecount--) {
-		page = pages[pagecount-1];
-		if (!PageReserved(page))
-			SetPageDirty(page);
-		page_cache_release(page);
+	ret = 0;
+	while (count && (start_vaddr < end_vaddr)) {
+		int len;
+		unsigned long end;
+
+		pm.pos = 0;
+		end = start_vaddr + PAGEMAP_WALK_SIZE;
+		/* overflow? */
+		if (end < start_vaddr || end > end_vaddr)
+			end = end_vaddr;
+		down_read(&mm->mmap_sem);
+		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+		up_read(&mm->mmap_sem);
+		start_vaddr = end;
+
+		len = min(count, PM_ENTRY_BYTES * pm.pos);
+		if (copy_to_user(buf, pm.buffer, len)) {
+			ret = -EFAULT;
+			goto out_free;
+		}
+		copied += len;
+		buf += len;
+		count -= len;
 	}
+	*ppos += copied;
+	if (!ret || ret == PM_END_OF_BUFFER)
+		ret = copied;
 
 out_free:
-	kfree(pages);
+	kfree(pm.buffer);
 out_mm:
 	mmput(mm);
 out_task:
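
[Editor's note] The reworked pagemap_read() above fills a kernel-side buffer one PAGEMAP_WALK_SIZE chunk at a time under mmap_sem and copies each chunk out with copy_to_user(), so userspace still sees one u64 entry per virtual page. A minimal sketch of consuming the interface follows, assuming the PM_* layout this file uses (bit 63 = present, bit 62 = swapped, PFN in the low 55 bits); it is illustrative only, not part of the patch:

	/* Hypothetical example: decode the pagemap entry for one of our own pages. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>
	#include <unistd.h>

	int main(void)
	{
		long psize = sysconf(_SC_PAGESIZE);
		char *probe = malloc(psize);
		uint64_t entry;
		int fd;

		if (!probe)
			return 1;
		probe[0] = 1;			/* touch the page so it is mapped */

		fd = open("/proc/self/pagemap", O_RDONLY);
		if (fd < 0)
			return 1;

		/* one u64 entry per virtual page; seek to this page's slot */
		off_t slot = ((uintptr_t)probe / psize) * sizeof(entry);
		if (pread(fd, &entry, sizeof(entry), slot) != sizeof(entry))
			return 1;

		if (entry & (1ULL << 63))	/* PM_PRESENT */
			printf("%p -> pfn %llu\n", (void *)probe,
			       (unsigned long long)(entry & ((1ULL << 55) - 1)));
		else if (entry & (1ULL << 62))	/* PM_SWAP */
			printf("%p is swapped out\n", (void *)probe);
		else
			printf("%p is not mapped\n", (void *)probe);

		close(fd);
		free(probe);
		return 0;
	}

On much later kernels the PFN bits read back as zero without CAP_SYS_ADMIN, so treat the pfn output as best-effort.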