Diffstat (limited to 'fs/proc')
-rw-r--r-- | fs/proc/proc_misc.c | 16
-rw-r--r-- | fs/proc/task_mmu.c  | 80
2 files changed, 58 insertions, 38 deletions
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 7e277f2ad466..c652d469dc08 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -123,6 +123,11 @@ static int uptime_read_proc(char *page, char **start, off_t off,
 	return proc_calc_metrics(page, start, off, count, eof, len);
 }
 
+int __attribute__((weak)) arch_report_meminfo(char *page)
+{
+	return 0;
+}
+
 static int meminfo_read_proc(char *page, char **start, off_t off,
 				 int count, int *eof, void *data)
 {
@@ -221,6 +226,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 
 	len += hugetlb_report_meminfo(page + len);
 
+	len += arch_report_meminfo(page + len);
+
 	return proc_calc_metrics(page, start, off, count, eof, len);
 #undef K
 }
@@ -472,6 +479,13 @@ static const struct file_operations proc_vmalloc_operations = {
 };
 #endif
 
+#ifndef arch_irq_stat_cpu
+#define arch_irq_stat_cpu(cpu) 0
+#endif
+#ifndef arch_irq_stat
+#define arch_irq_stat() 0
+#endif
+
 static int show_stat(struct seq_file *p, void *v)
 {
 	int i;
@@ -509,7 +523,9 @@ static int show_stat(struct seq_file *p, void *v)
 			sum += temp;
 			per_irq_sum[j] += temp;
 		}
+		sum += arch_irq_stat_cpu(i);
 	}
+	sum += arch_irq_stat();
 
 	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
 		(unsigned long long)cputime64_to_clock_t(user),
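(Note, not part of the diff: the proc_misc.c hunks use two common fallback idioms so that architectures which provide nothing still build and report nothing extra: a __attribute__((weak)) default for arch_report_meminfo(), and #ifndef'd no-op macros for arch_irq_stat_cpu()/arch_irq_stat(). A minimal stand-alone sketch of the weak-symbol half, with illustrative names and a made-up counter, is:)

#include <stdio.h>

/* Weak default, as added to proc_misc.c: append nothing to /proc/meminfo. */
int __attribute__((weak)) arch_report_meminfo(char *page)
{
	return 0;
}

/*
 * A non-weak arch_report_meminfo() in another object file would replace the
 * weak default at link time, e.g. (hypothetical arch code, made-up counter):
 *
 *	int arch_report_meminfo(char *page)
 *	{
 *		return sprintf(page, "ArchSpecific:   %8lu kB\n", some_counter);
 *	}
 */

int main(void)
{
	char page[256];
	int len = 0;

	len += arch_report_meminfo(page + len);	/* mirrors the meminfo_read_proc() call site */
	printf("arch hook appended %d bytes\n", len);
	return 0;
}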
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ab8ccc9d14ff..c492449f3b45 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -476,10 +476,10 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		return -ESRCH;
 	mm = get_task_mm(task);
 	if (mm) {
-		static struct mm_walk clear_refs_walk;
-		memset(&clear_refs_walk, 0, sizeof(clear_refs_walk));
-		clear_refs_walk.pmd_entry = clear_refs_pte_range;
-		clear_refs_walk.mm = mm;
+		struct mm_walk clear_refs_walk = {
+			.pmd_entry = clear_refs_pte_range,
+			.mm = mm,
+		};
 		down_read(&mm->mmap_sem);
 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
 			clear_refs_walk.private = vma;
@@ -602,11 +602,6 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	return err;
 }
 
-static struct mm_walk pagemap_walk = {
-	.pmd_entry = pagemap_pte_range,
-	.pte_hole = pagemap_pte_hole
-};
-
 /*
  * /proc/pid/pagemap - an array mapping virtual pages to pfns
  *
@@ -641,6 +636,11 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	struct pagemapread pm;
 	int pagecount;
 	int ret = -ESRCH;
+	struct mm_walk pagemap_walk;
+	unsigned long src;
+	unsigned long svpfn;
+	unsigned long start_vaddr;
+	unsigned long end_vaddr;
 
 	if (!task)
 		goto out;
@@ -659,11 +659,15 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!mm)
 		goto out_task;
 
-	ret = -ENOMEM;
+
 	uaddr = (unsigned long)buf & PAGE_MASK;
 	uend = (unsigned long)(buf + count);
 	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
-	pages = kmalloc(pagecount * sizeof(struct page *), GFP_KERNEL);
+	ret = 0;
+	if (pagecount == 0)
+		goto out_mm;
+	pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+	ret = -ENOMEM;
 	if (!pages)
 		goto out_mm;
 
@@ -684,33 +688,33 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	pm.out = (u64 *)buf;
 	pm.end = (u64 *)(buf + count);
 
-	if (!ptrace_may_attach(task)) {
-		ret = -EIO;
-	} else {
-		unsigned long src = *ppos;
-		unsigned long svpfn = src / PM_ENTRY_BYTES;
-		unsigned long start_vaddr = svpfn << PAGE_SHIFT;
-		unsigned long end_vaddr = TASK_SIZE_OF(task);
-
-		/* watch out for wraparound */
-		if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
-			start_vaddr = end_vaddr;
-
-		/*
-		 * The odds are that this will stop walking way
-		 * before end_vaddr, because the length of the
-		 * user buffer is tracked in "pm", and the walk
-		 * will stop when we hit the end of the buffer.
-		 */
-		ret = walk_page_range(start_vaddr, end_vaddr,
-				      &pagemap_walk);
-		if (ret == PM_END_OF_BUFFER)
-			ret = 0;
-		/* don't need mmap_sem for these, but this looks cleaner */
-		*ppos += (char *)pm.out - buf;
-		if (!ret)
-			ret = (char *)pm.out - buf;
-	}
+	pagemap_walk.pmd_entry = pagemap_pte_range;
+	pagemap_walk.pte_hole = pagemap_pte_hole;
+	pagemap_walk.mm = mm;
+	pagemap_walk.private = &pm;
+
+	src = *ppos;
+	svpfn = src / PM_ENTRY_BYTES;
+	start_vaddr = svpfn << PAGE_SHIFT;
+	end_vaddr = TASK_SIZE_OF(task);
+
+	/* watch out for wraparound */
+	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
+		start_vaddr = end_vaddr;
+
+	/*
+	 * The odds are that this will stop walking way
+	 * before end_vaddr, because the length of the
+	 * user buffer is tracked in "pm", and the walk
+	 * will stop when we hit the end of the buffer.
+	 */
+	ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
+	if (ret == PM_END_OF_BUFFER)
+		ret = 0;
+	/* don't need mmap_sem for these, but this looks cleaner */
+	*ppos += (char *)pm.out - buf;
+	if (!ret)
+		ret = (char *)pm.out - buf;
 
 out_pages:
 	for (; pagecount; pagecount--) {
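(Note, not part of the diff: the task_mmu.c changes replace file-scope static mm_walk structures with walkers built per call, presumably so concurrent readers of /proc/pid/pagemap and /proc/pid/clear_refs do not share one walker; clear_refs_write() now uses C99 designated initializers instead of memset() plus field assignments. A stand-alone sketch of that initializer pattern, using a made-up struct in place of struct mm_walk:)

#include <stdio.h>
#include <stddef.h>

/* Stand-in for struct mm_walk: callbacks plus the state the walk needs. */
struct walker {
	int (*range_entry)(unsigned long addr, unsigned long end, void *private);
	void *mm;
	void *private;
};

static int count_range(unsigned long addr, unsigned long end, void *private)
{
	*(unsigned long *)private += end - addr;
	return 0;
}

int main(void)
{
	unsigned long total = 0;

	/*
	 * As in the clear_refs_write() hunk: members not named in the
	 * initializer (.private here) are implicitly zeroed, so no memset()
	 * is needed, and every call gets its own walker on the stack.
	 */
	struct walker w = {
		.range_entry = count_range,
		.mm = NULL,		/* would be the task's mm */
	};

	w.private = &total;		/* set per use, as the diff sets .private per vma */
	w.range_entry(0x1000, 0x3000, w.private);

	printf("walked %lu bytes\n", total);
	return 0;
}

(The kcalloc() change in pagemap_read() likewise swaps the open-coded pagecount * sizeof(struct page *) multiplication for the overflow-checking, zeroing allocator, and returns 0 early when the user buffer rounds to zero pages.)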