diff options
author | Oleg Nesterov <oleg@redhat.com> | 2014-01-21 18:49:58 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-21 19:19:46 -0500 |
commit | 1da4db0cd5c8a31d4468ec906b413e75e604b465 (patch) | |
tree | 7b413ea40f6630376032d59bd7525e0372ea2d6e /mm/oom_kill.c | |
parent | 0c740d0afc3bff0a097ad03a1c8df92757516f5c (diff) |
oom_kill: change oom_kill.c to use for_each_thread()
Change oom_kill.c to use for_each_thread() rather than the racy
while_each_thread() which can loop forever if we race with exit.
Note also that most users were buggy even when while_each_thread() was
fine: the task can exit even _before_ rcu_read_lock().
Fortunately the new for_each_thread() only requires the stable
task_struct, so this change fixes both problems.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Sergey Dyasly <dserrg@gmail.com>
Tested-by: Sergey Dyasly <dserrg@gmail.com>
Reviewed-by: Sameer Nanda <snanda@chromium.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mandeep Singh Baines <msb@chromium.org>
Cc: "Ma, Xindong" <xindong.ma@intel.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: "Tu, Xiaobing" <xiaobing.tu@intel.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r-- | mm/oom_kill.c | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 1e4a600a6163..96d7945f75a6 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -59,7 +59,7 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, | |||
59 | { | 59 | { |
60 | struct task_struct *start = tsk; | 60 | struct task_struct *start = tsk; |
61 | 61 | ||
62 | do { | 62 | for_each_thread(start, tsk) { |
63 | if (mask) { | 63 | if (mask) { |
64 | /* | 64 | /* |
65 | * If this is a mempolicy constrained oom, tsk's | 65 | * If this is a mempolicy constrained oom, tsk's |
@@ -77,7 +77,7 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, | |||
77 | if (cpuset_mems_allowed_intersects(current, tsk)) | 77 | if (cpuset_mems_allowed_intersects(current, tsk)) |
78 | return true; | 78 | return true; |
79 | } | 79 | } |
80 | } while_each_thread(start, tsk); | 80 | } |
81 | 81 | ||
82 | return false; | 82 | return false; |
83 | } | 83 | } |
@@ -97,14 +97,14 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, | |||
97 | */ | 97 | */ |
98 | struct task_struct *find_lock_task_mm(struct task_struct *p) | 98 | struct task_struct *find_lock_task_mm(struct task_struct *p) |
99 | { | 99 | { |
100 | struct task_struct *t = p; | 100 | struct task_struct *t; |
101 | 101 | ||
102 | do { | 102 | for_each_thread(p, t) { |
103 | task_lock(t); | 103 | task_lock(t); |
104 | if (likely(t->mm)) | 104 | if (likely(t->mm)) |
105 | return t; | 105 | return t; |
106 | task_unlock(t); | 106 | task_unlock(t); |
107 | } while_each_thread(p, t); | 107 | } |
108 | 108 | ||
109 | return NULL; | 109 | return NULL; |
110 | } | 110 | } |
@@ -301,7 +301,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, | |||
301 | unsigned long chosen_points = 0; | 301 | unsigned long chosen_points = 0; |
302 | 302 | ||
303 | rcu_read_lock(); | 303 | rcu_read_lock(); |
304 | do_each_thread(g, p) { | 304 | for_each_process_thread(g, p) { |
305 | unsigned int points; | 305 | unsigned int points; |
306 | 306 | ||
307 | switch (oom_scan_process_thread(p, totalpages, nodemask, | 307 | switch (oom_scan_process_thread(p, totalpages, nodemask, |
@@ -323,7 +323,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, | |||
323 | chosen = p; | 323 | chosen = p; |
324 | chosen_points = points; | 324 | chosen_points = points; |
325 | } | 325 | } |
326 | } while_each_thread(g, p); | 326 | } |
327 | if (chosen) | 327 | if (chosen) |
328 | get_task_struct(chosen); | 328 | get_task_struct(chosen); |
329 | rcu_read_unlock(); | 329 | rcu_read_unlock(); |
@@ -406,7 +406,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
406 | { | 406 | { |
407 | struct task_struct *victim = p; | 407 | struct task_struct *victim = p; |
408 | struct task_struct *child; | 408 | struct task_struct *child; |
409 | struct task_struct *t = p; | 409 | struct task_struct *t; |
410 | struct mm_struct *mm; | 410 | struct mm_struct *mm; |
411 | unsigned int victim_points = 0; | 411 | unsigned int victim_points = 0; |
412 | static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL, | 412 | static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL, |
@@ -437,7 +437,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
437 | * still freeing memory. | 437 | * still freeing memory. |
438 | */ | 438 | */ |
439 | read_lock(&tasklist_lock); | 439 | read_lock(&tasklist_lock); |
440 | do { | 440 | for_each_thread(p, t) { |
441 | list_for_each_entry(child, &t->children, sibling) { | 441 | list_for_each_entry(child, &t->children, sibling) { |
442 | unsigned int child_points; | 442 | unsigned int child_points; |
443 | 443 | ||
@@ -455,7 +455,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
455 | get_task_struct(victim); | 455 | get_task_struct(victim); |
456 | } | 456 | } |
457 | } | 457 | } |
458 | } while_each_thread(p, t); | 458 | } |
459 | read_unlock(&tasklist_lock); | 459 | read_unlock(&tasklist_lock); |
460 | 460 | ||
461 | rcu_read_lock(); | 461 | rcu_read_lock(); |