author		Christopher Yeoh <cyeoh@au1.ibm.com>	2012-02-01 20:04:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-02-02 15:55:17 -0500
commit		8cdb878dcb359fd1137e9abdee9322f5e9bcfdf8 (patch)
tree		146afc01f3c1d7cbc944328484d077032bc53bfd
parent		24b36da33c64368775f4ef9386d44dce1d2bc8cf (diff)
Fix race in process_vm_rw_core
This fixes the race in process_vm_rw_core found by Oleg (see

  http://article.gmane.org/gmane.linux.kernel/1235667/

for details).

This has been updated since I last sent it, as the creation of the new
mm_access() function did almost exactly the same thing as parts of the
previous version of this patch.

In order to use mm_access() even when /proc isn't enabled, we move it to
kernel/fork.c, where other related process mm access functions already
are.
Signed-off-by: Chris Yeoh <yeohc@au1.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
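
The helper closes the window by doing the permission check and the mm
reference grab under task->signal->cred_guard_mutex, the mutex execve()
holds while it computes and commits new credentials; my reading of the
race (a reconstruction, the gmane thread is the authoritative source) is
that the old open-coded check could pass against pre-exec credentials
while the target was already exec'ing a setuid image. A minimal caller
sketch, assuming only the mm_access() interface added by this patch (the
surrounding function is hypothetical):

/*
 * Hypothetical caller (not part of this patch), showing the pattern
 * process_vm_rw_core switches to: mm_access() performs the
 * ptrace_may_access() check and takes the mm reference while holding
 * task->signal->cred_guard_mutex, so the check cannot race with a
 * concurrent execve() changing the target's credentials.
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

static int remote_mm_example(struct task_struct *task)
{
	struct mm_struct *mm = mm_access(task, PTRACE_MODE_ATTACH);

	if (IS_ERR(mm))		/* lock interrupted by fatal signal, or -EACCES */
		return PTR_ERR(mm);
	if (!mm)		/* kernel thread, or task already past exit_mm() */
		return -ESRCH;

	/* ... safe to operate on the remote address space here ... */

	mmput(mm);		/* drop the reference mm_access() took */
	return 0;
}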
-rw-r--r--	fs/proc/base.c		20
-rw-r--r--	include/linux/sched.h	6
-rw-r--r--	kernel/fork.c		20
-rw-r--r--	mm/process_vm_access.c	23
4 files changed, 35 insertions, 34 deletions
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d9512bd03e6c..d4548dd49b02 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -198,26 +198,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
 	return result;
 }
 
-static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
-{
-	struct mm_struct *mm;
-	int err;
-
-	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
-	if (err)
-		return ERR_PTR(err);
-
-	mm = get_task_mm(task);
-	if (mm && mm != current->mm &&
-			!ptrace_may_access(task, mode)) {
-		mmput(mm);
-		mm = ERR_PTR(-EACCES);
-	}
-	mutex_unlock(&task->signal->cred_guard_mutex);
-
-	return mm;
-}
-
 struct mm_struct *mm_for_maps(struct task_struct *task)
 {
 	return mm_access(task, PTRACE_MODE_READ);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2234985a5e65..7d379a6bfd88 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2259,6 +2259,12 @@ static inline void mmdrop(struct mm_struct * mm)
 extern void mmput(struct mm_struct *);
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
+/*
+ * Grab a reference to a task's mm, if it is not already going away
+ * and ptrace_may_access with the mode parameter passed to it
+ * succeeds.
+ */
+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
 /* Allocate a new mm structure and copy contents from tsk->mm */
diff --git a/kernel/fork.c b/kernel/fork.c
index 051f090d40c1..1b2ef3c23ae4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -647,6 +647,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);
 
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+	struct mm_struct *mm;
+	int err;
+
+	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	mm = get_task_mm(task);
+	if (mm && mm != current->mm &&
+			!ptrace_may_access(task, mode)) {
+		mmput(mm);
+		mm = ERR_PTR(-EACCES);
+	}
+	mutex_unlock(&task->signal->cred_guard_mutex);
+
+	return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
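
Two details of the moved function are worth noting: the lock is taken
with mutex_lock_killable() rather than mutex_lock(), so a caller that
blocks behind a target in the middle of exec can still be killed, with
the failure propagated as an ERR_PTR rather than a NULL; and the
mm != current->mm short-circuit lets a task take a reference to its own
mm without paying for a ptrace_may_access() check.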
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index e920aa3ce104..c20ff48994c2 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -298,23 +298,18 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 		goto free_proc_pages;
 	}
 
-	task_lock(task);
-	if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
-		task_unlock(task);
-		rc = -EPERM;
-		goto put_task_struct;
-	}
-	mm = task->mm;
-
-	if (!mm || (task->flags & PF_KTHREAD)) {
-		task_unlock(task);
-		rc = -EINVAL;
+	mm = mm_access(task, PTRACE_MODE_ATTACH);
+	if (!mm || IS_ERR(mm)) {
+		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+		/*
+		 * Explicitly map EACCES to EPERM, as EPERM is a more
+		 * appropriate error code for process_vm_readv/writev.
+		 */
+		if (rc == -EACCES)
+			rc = -EPERM;
 		goto put_task_struct;
 	}
 
-	atomic_inc(&mm->mm_users);
-	task_unlock(task);
-
 	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
 		rc = process_vm_rw_single_vec(
 			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
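
Seen from userspace, the EACCES-to-EPERM mapping above means a denied
call fails with errno set to EPERM. A minimal sketch (a hypothetical
test program; the target pid and remote address are placeholders, not
values from the patch):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/uio.h>

int main(void)
{
	char buf[64];
	struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
	/* Placeholder remote address; a real caller would use a known
	 * address in the target, e.g. parsed from /proc/<pid>/maps. */
	struct iovec remote = { .iov_base = (void *)0x1000, .iov_len = sizeof(buf) };

	/* pid 1 is normally not ptrace-accessible to an unprivileged
	 * user, so for such a caller this is expected to fail with
	 * EPERM after this patch (a privileged caller would instead
	 * likely see EFAULT for the bogus remote address). */
	if (process_vm_readv(1, &local, 1, &remote, 1, 0) < 0)
		perror("process_vm_readv");	/* e.g. "Operation not permitted" */
	return 0;
}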