aboutsummaryrefslogtreecommitdiffstats
path: root/fs/proc
diff options
context:
space:
mode:
authorKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>2011-05-26 19:25:53 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-05-26 20:12:37 -0400
commit98bc93e505c03403479c6669c4ff97301cee6199 (patch)
tree0b6bf39cd6107d536b55fd245b9905cb5baa4e74 /fs/proc
parent30cd8903913dac7b0918807cac46be3ecde5a5a7 (diff)
proc: fix pagemap_read() error case
Currently, pagemap_read() has three error and/or corner case handling mistakes. (1) If the ppos parameter is wrong, the mm refcount will be leaked. (2) If the count parameter is 0, the mm refcount will be leaked too. (3) If the current task is sleeping in kmalloc() and the system is out of memory and the oom-killer kills the proc-associated task, the mm refcount prevents the task from freeing its memory. Then the system may hang up. <Quote of Hugh's explanation of why we should call kmalloc() before get_mm()> check_mem_permission gets a reference to the mm. If we __get_free_page after check_mem_permission, imagine what happens if the system is out of memory, and the mm we're looking at is selected for killing by the OOM killer: while we wait in __get_free_page for more memory, no memory is freed from the selected mm because it cannot reach exit_mmap while we hold that reference. This patch fixes the above three. Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Hugh Dickins <hughd@google.com> Cc: Jovi Zhang <bookjovi@gmail.com> Acked-by: Hugh Dickins <hughd@google.com> Cc: Stephen Wilson <wilsons@start.ca> Cc: Alexey Dobriyan <adobriyan@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--fs/proc/task_mmu.c19
1 file changed, 9 insertions, 10 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 30a6a72d05b..25b6a887adb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -771,18 +771,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
771 if (!task) 771 if (!task)
772 goto out; 772 goto out;
773 773
774 mm = mm_for_maps(task);
775 ret = PTR_ERR(mm);
776 if (!mm || IS_ERR(mm))
777 goto out_task;
778
779 ret = -EINVAL; 774 ret = -EINVAL;
780 /* file position must be aligned */ 775 /* file position must be aligned */
781 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) 776 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
782 goto out_task; 777 goto out_task;
783 778
784 ret = 0; 779 ret = 0;
785
786 if (!count) 780 if (!count)
787 goto out_task; 781 goto out_task;
788 782
@@ -790,7 +784,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
790 pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); 784 pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
791 ret = -ENOMEM; 785 ret = -ENOMEM;
792 if (!pm.buffer) 786 if (!pm.buffer)
793 goto out_mm; 787 goto out_task;
788
789 mm = mm_for_maps(task);
790 ret = PTR_ERR(mm);
791 if (!mm || IS_ERR(mm))
792 goto out_free;
794 793
795 pagemap_walk.pmd_entry = pagemap_pte_range; 794 pagemap_walk.pmd_entry = pagemap_pte_range;
796 pagemap_walk.pte_hole = pagemap_pte_hole; 795 pagemap_walk.pte_hole = pagemap_pte_hole;
@@ -833,7 +832,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
833 len = min(count, PM_ENTRY_BYTES * pm.pos); 832 len = min(count, PM_ENTRY_BYTES * pm.pos);
834 if (copy_to_user(buf, pm.buffer, len)) { 833 if (copy_to_user(buf, pm.buffer, len)) {
835 ret = -EFAULT; 834 ret = -EFAULT;
836 goto out_free; 835 goto out_mm;
837 } 836 }
838 copied += len; 837 copied += len;
839 buf += len; 838 buf += len;
@@ -843,10 +842,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
843 if (!ret || ret == PM_END_OF_BUFFER) 842 if (!ret || ret == PM_END_OF_BUFFER)
844 ret = copied; 843 ret = copied;
845 844
846out_free:
847 kfree(pm.buffer);
848out_mm: 845out_mm:
849 mmput(mm); 846 mmput(mm);
847out_free:
848 kfree(pm.buffer);
850out_task: 849out_task:
851 put_task_struct(task); 850 put_task_struct(task);
852out: 851out: