| author | Andrew Morton <akpm@linux-foundation.org> | 2008-07-05 04:02:01 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-05 16:13:44 -0400 |
| commit | 5d7e0d2bd98ef4f5a16ac9da1987ae655368dd6a (patch) | |
| tree | a910f821c544bba353324c63e2783852a1b95b11 /fs | |
| parent | ca31e146d5c2fe51498e619eb3a64782d02e310a (diff) | |
Fix pagemap_read() use of struct mm_walk
Fix some issues in pagemap_read() noted by Alexey:
- initialize pagemap_walk.mm to "mm", so the code starts working as
advertised
- initialize ->private to "&pm", so it doesn't immediately oops in
pagemap_pte_hole()
- unstatic struct pagemap_walk, so two threads won't fsckup each other
(including threads started by root, and including flipping ->mm when you
don't have permissions)
- drop the second of pagemap_read()'s two ptrace_may_attach() calls,
which looks unneeded
- avoid possible kmalloc(0) and integer wraparound in the allocation
size (a standalone sketch of the wraparound hazard follows this list)
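For context, the wraparound in the last bullet is the classic unchecked `count * size` allocation: a large enough read makes `pagecount * sizeof(struct page *)` wrap around, and kmalloc() then returns a buffer far smaller than `pagecount` implies. Below is a minimal userspace sketch of the hazard (illustrative only, not kernel code); calloc(), like the kcalloc() used in the fix, checks the multiplication and fails cleanly instead:

```c
/*
 * Userspace sketch of the allocation-size wraparound: an attacker-sized
 * element count makes n * sizeof(void *) wrap to a tiny value, so an
 * unchecked malloc-style call would "succeed" with a buffer far too
 * small.  calloc() (like the kernel's kcalloc()) checks the
 * multiplication and returns NULL instead.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = SIZE_MAX / sizeof(void *) + 2;  /* absurdly large count */
	size_t naive = n * sizeof(void *);         /* multiplication wraps */

	printf("%zu elements, naive size wraps to %zu bytes\n", n, naive);

	void **p = calloc(n, sizeof(void *));      /* overflow-checked */
	printf("calloc: %s\n", p ? "succeeded (!)" : "NULL, overflow caught");
	free(p);
	return 0;
}
```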
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[ Personally, I'd just remove the functionality entirely - Linus ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
| -rw-r--r-- | fs/proc/task_mmu.c | 72 |
1 file changed, 38 insertions, 34 deletions
```diff
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 05053d701ac5..c492449f3b45 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -602,11 +602,6 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	return err;
 }
 
-static struct mm_walk pagemap_walk = {
-	.pmd_entry = pagemap_pte_range,
-	.pte_hole = pagemap_pte_hole
-};
-
 /*
  * /proc/pid/pagemap - an array mapping virtual pages to pfns
  *
@@ -641,6 +636,11 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	struct pagemapread pm;
 	int pagecount;
 	int ret = -ESRCH;
+	struct mm_walk pagemap_walk;
+	unsigned long src;
+	unsigned long svpfn;
+	unsigned long start_vaddr;
+	unsigned long end_vaddr;
 
 	if (!task)
 		goto out;
@@ -659,11 +659,15 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!mm)
 		goto out_task;
 
-	ret = -ENOMEM;
+
 	uaddr = (unsigned long)buf & PAGE_MASK;
 	uend = (unsigned long)(buf + count);
 	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
-	pages = kmalloc(pagecount * sizeof(struct page *), GFP_KERNEL);
+	ret = 0;
+	if (pagecount == 0)
+		goto out_mm;
+	pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+	ret = -ENOMEM;
 	if (!pages)
 		goto out_mm;
 
@@ -684,33 +688,33 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	pm.out = (u64 *)buf;
 	pm.end = (u64 *)(buf + count);
 
-	if (!ptrace_may_attach(task)) {
-		ret = -EIO;
-	} else {
-		unsigned long src = *ppos;
-		unsigned long svpfn = src / PM_ENTRY_BYTES;
-		unsigned long start_vaddr = svpfn << PAGE_SHIFT;
-		unsigned long end_vaddr = TASK_SIZE_OF(task);
-
-		/* watch out for wraparound */
-		if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
-			start_vaddr = end_vaddr;
-
-		/*
-		 * The odds are that this will stop walking way
-		 * before end_vaddr, because the length of the
-		 * user buffer is tracked in "pm", and the walk
-		 * will stop when we hit the end of the buffer.
-		 */
-		ret = walk_page_range(start_vaddr, end_vaddr,
-					&pagemap_walk);
-		if (ret == PM_END_OF_BUFFER)
-			ret = 0;
-		/* don't need mmap_sem for these, but this looks cleaner */
-		*ppos += (char *)pm.out - buf;
-		if (!ret)
-			ret = (char *)pm.out - buf;
-	}
+	pagemap_walk.pmd_entry = pagemap_pte_range;
+	pagemap_walk.pte_hole = pagemap_pte_hole;
+	pagemap_walk.mm = mm;
+	pagemap_walk.private = &pm;
+
+	src = *ppos;
+	svpfn = src / PM_ENTRY_BYTES;
+	start_vaddr = svpfn << PAGE_SHIFT;
+	end_vaddr = TASK_SIZE_OF(task);
+
+	/* watch out for wraparound */
+	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
+		start_vaddr = end_vaddr;
+
+	/*
+	 * The odds are that this will stop walking way
+	 * before end_vaddr, because the length of the
+	 * user buffer is tracked in "pm", and the walk
+	 * will stop when we hit the end of the buffer.
+	 */
+	ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
+	if (ret == PM_END_OF_BUFFER)
+		ret = 0;
+	/* don't need mmap_sem for these, but this looks cleaner */
+	*ppos += (char *)pm.out - buf;
+	if (!ret)
+		ret = (char *)pm.out - buf;
 
 out_pages:
 	for (; pagecount; pagecount--) {
```
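The heart of the fix is in this last hunk: pagemap_walk moves from a file-scope static, shared by every concurrent reader of /proc/pid/pagemap, to a per-call stack object whose ->mm and ->private are finally filled in. The same per-call setup could equivalently be written as a C99 designated initializer; a sketch only, not what the commit itself does:

```c
/*
 * Sketch: equivalent initialization of the on-stack walker with a
 * designated initializer.  Any mm_walk fields not named here (unused
 * callbacks, for instance) are implicitly zeroed.
 */
struct mm_walk pagemap_walk = {
	.pmd_entry = pagemap_pte_range,
	.pte_hole  = pagemap_pte_hole,
	.mm        = mm,
	.private   = &pm,
};
```

Either way, each reader now walks with its own mm and its own pagemapread state, so concurrent reads can no longer clobber each other's ->mm.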
