author     Oleg Nesterov <oleg@redhat.com>                    2014-01-21 18:50:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>     2014-01-21 19:19:46 -0500
commit     4d4048be8a93769350efa31d2482a038b7de73d0
tree       9ee0a9d1e392117d1285641d82c1d537e0d13ec8
parent     ad96244179fbd55b40c00f10f399bc04739b8e1f
oom_kill: add rcu_read_lock() into find_lock_task_mm()
find_lock_task_mm() expects to be called under rcu or tasklist lock, but
it seems that at least oom_unkillable_task()->task_in_mem_cgroup() and
mem_cgroup_out_of_memory()->oom_badness() can call it locklessly.

Perhaps we could fix the callers, but this patch simply adds
rcu_read_lock() into find_lock_task_mm(). This also allows us to
slightly simplify one of its callers, oom_kill_process().
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Sergey Dyasly <dserrg@gmail.com>
Cc: Sameer Nanda <snanda@chromium.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mandeep Singh Baines <msb@chromium.org>
Cc: "Ma, Xindong" <xindong.ma@intel.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: "Tu, Xiaobing" <xiaobing.tu@intel.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   mm/oom_kill.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
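Reconstructed from the first hunk below (not a standalone, buildable snippet; the helpers are kernel-internal), find_lock_task_mm() reads like this after the patch. The whole thread walk now runs under rcu_read_lock(), and both the found and not-found paths funnel through a single unlock site:

struct task_struct *find_lock_task_mm(struct task_struct *p)
{
        struct task_struct *t;

        rcu_read_lock();

        for_each_thread(p, t) {
                task_lock(t);           /* a successful hit is returned still task_lock()ed */
                if (likely(t->mm))
                        goto found;
                task_unlock(t);
        }
        t = NULL;
found:
        rcu_read_unlock();

        return t;
}

RCU only needs to cover the for_each_thread() walk itself; the task_lock() taken on the returned thread is what lets callers dereference t->mm safely afterwards.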
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 0d8ad1ebd1d1..054ff47c4478 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -102,14 +102,19 @@ struct task_struct *find_lock_task_mm(struct task_struct *p)
 {
         struct task_struct *t;
 
+        rcu_read_lock();
+
         for_each_thread(p, t) {
                 task_lock(t);
                 if (likely(t->mm))
-                        return t;
+                        goto found;
                 task_unlock(t);
         }
+        t = NULL;
+found:
+        rcu_read_unlock();
 
-        return NULL;
+        return t;
 }
 
 /* return true if the task is not adequate as candidate victim task. */
@@ -461,10 +466,8 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         }
         read_unlock(&tasklist_lock);
 
-        rcu_read_lock();
         p = find_lock_task_mm(victim);
         if (!p) {
-                rcu_read_unlock();
                 put_task_struct(victim);
                 return;
         } else if (victim != p) {
@@ -490,6 +493,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
          * That thread will now get access to memory reserves since it has a
          * pending fatal signal.
          */
+        rcu_read_lock();
         for_each_process(p)
                 if (p->mm == mm && !same_thread_group(p, victim) &&
                     !(p->flags & PF_KTHREAD)) {
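
Taken together, the second and third hunks only move the caller-side rcu_read_lock() in oom_kill_process(): since find_lock_task_mm() now takes RCU itself, the caller needs it only around the later for_each_process() walk. A condensed sketch of the post-patch control flow, with the elided code paraphrased in comments rather than quoted:

        read_unlock(&tasklist_lock);

        p = find_lock_task_mm(victim);  /* takes and drops RCU internally */
        if (!p) {
                put_task_struct(victim);
                return;
        } else if (victim != p) {
                /* ... switch victim to the thread that actually owns the mm ... */
        }
        /* ... victim is still task_lock()ed here; grab mm, then task_unlock(victim) ... */

        rcu_read_lock();                /* now covers only the process-list walk */
        for_each_process(p)
                if (p->mm == mm && !same_thread_group(p, victim) &&
                    !(p->flags & PF_KTHREAD)) {
                        /* ... kill other userspace users of the same mm ... */
                }
        rcu_read_unlock();
        /* ... deliver SIGKILL to victim and drop the reference ... */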