path: root/mm/oom_kill.c
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--	mm/oom_kill.c	51
1 file changed, 29 insertions(+), 22 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 1e4a600a6163..054ff47c4478 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -47,19 +47,21 @@ static DEFINE_SPINLOCK(zone_scan_lock);
 #ifdef CONFIG_NUMA
 /**
  * has_intersects_mems_allowed() - check task eligiblity for kill
- * @tsk: task struct of which task to consider
+ * @start: task struct of which task to consider
  * @mask: nodemask passed to page allocator for mempolicy ooms
  *
  * Task eligibility is determined by whether or not a candidate task, @tsk,
  * shares the same mempolicy nodes as current if it is bound by such a policy
  * and whether or not it has the same set of allowed cpuset nodes.
  */
-static bool has_intersects_mems_allowed(struct task_struct *tsk,
+static bool has_intersects_mems_allowed(struct task_struct *start,
 					const nodemask_t *mask)
 {
-	struct task_struct *start = tsk;
+	struct task_struct *tsk;
+	bool ret = false;
 
-	do {
+	rcu_read_lock();
+	for_each_thread(start, tsk) {
 		if (mask) {
 			/*
 			 * If this is a mempolicy constrained oom, tsk's
@@ -67,19 +69,20 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
 			 * mempolicy intersects current, otherwise it may be
 			 * needlessly killed.
 			 */
-			if (mempolicy_nodemask_intersects(tsk, mask))
-				return true;
+			ret = mempolicy_nodemask_intersects(tsk, mask);
 		} else {
 			/*
 			 * This is not a mempolicy constrained oom, so only
 			 * check the mems of tsk's cpuset.
 			 */
-			if (cpuset_mems_allowed_intersects(current, tsk))
-				return true;
+			ret = cpuset_mems_allowed_intersects(current, tsk);
 		}
-	} while_each_thread(start, tsk);
+		if (ret)
+			break;
+	}
+	rcu_read_unlock();
 
-	return false;
+	return ret;
 }
 #else
 static bool has_intersects_mems_allowed(struct task_struct *tsk,
@@ -97,16 +100,21 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
  */
 struct task_struct *find_lock_task_mm(struct task_struct *p)
 {
-	struct task_struct *t = p;
+	struct task_struct *t;
 
-	do {
+	rcu_read_lock();
+
+	for_each_thread(p, t) {
 		task_lock(t);
 		if (likely(t->mm))
-			return t;
+			goto found;
 		task_unlock(t);
-	} while_each_thread(p, t);
+	}
+	t = NULL;
+found:
+	rcu_read_unlock();
 
-	return NULL;
+	return t;
 }
 
 /* return true if the task is not adequate as candidate victim task. */
@@ -301,7 +309,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 	unsigned long chosen_points = 0;
 
 	rcu_read_lock();
-	do_each_thread(g, p) {
+	for_each_process_thread(g, p) {
 		unsigned int points;
 
 		switch (oom_scan_process_thread(p, totalpages, nodemask,
@@ -323,7 +331,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 			chosen = p;
 			chosen_points = points;
 		}
-	} while_each_thread(g, p);
+	}
 	if (chosen)
 		get_task_struct(chosen);
 	rcu_read_unlock();
@@ -406,7 +414,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 {
 	struct task_struct *victim = p;
 	struct task_struct *child;
-	struct task_struct *t = p;
+	struct task_struct *t;
 	struct mm_struct *mm;
 	unsigned int victim_points = 0;
 	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
@@ -437,7 +445,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	 * still freeing memory.
 	 */
 	read_lock(&tasklist_lock);
-	do {
+	for_each_thread(p, t) {
 		list_for_each_entry(child, &t->children, sibling) {
 			unsigned int child_points;
 
@@ -455,13 +463,11 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 				get_task_struct(victim);
 			}
 		}
-	} while_each_thread(p, t);
+	}
 	read_unlock(&tasklist_lock);
 
-	rcu_read_lock();
 	p = find_lock_task_mm(victim);
 	if (!p) {
-		rcu_read_unlock();
 		put_task_struct(victim);
 		return;
 	} else if (victim != p) {
@@ -487,6 +493,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	 * That thread will now get access to memory reserves since it has a
 	 * pending fatal signal.
 	 */
+	rcu_read_lock();
 	for_each_process(p)
 		if (p->mm == mm && !same_thread_group(p, victim) &&
 		    !(p->flags & PF_KTHREAD)) {
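
The common theme of the hunks above is replacing open-coded do { ... } while_each_thread() loops over a thread group with for_each_thread()/for_each_process_thread(), which have a single, well-defined exit point. The following is a minimal user-space sketch of that iteration pattern, not kernel code: struct task, the for_each_thread macro shown here, and find_task_with_mm() are simplified stand-ins (the real kernel macro walks task_struct::thread_node under RCU), used only to illustrate how the loop in the new find_lock_task_mm() terminates at the first thread with an mm.

/* sketch.c: illustrative only; compile with `cc sketch.c` */
#include <stdio.h>

struct task {
	int pid;
	int has_mm;		/* stand-in for t->mm != NULL */
	struct task *next;	/* circular list of threads in the group */
};

/* Simplified analogue of for_each_thread(start, t): walk the circular
 * thread list once, starting at @start, and stop when we wrap around. */
#define for_each_thread(start, t) \
	for ((t) = (start); (t) != NULL; \
	     (t) = ((t)->next == (start) ? NULL : (t)->next))

/* Shape of the new find_lock_task_mm(): return the first thread that
 * still has an mm, or NULL if none does (locking omitted here). */
static struct task *find_task_with_mm(struct task *p)
{
	struct task *t;

	for_each_thread(p, t) {
		if (t->has_mm)
			return t;
	}
	return NULL;
}

int main(void)
{
	struct task a = { .pid = 100, .has_mm = 0 };
	struct task b = { .pid = 101, .has_mm = 1 };
	struct task c = { .pid = 102, .has_mm = 0 };
	struct task *t;

	/* three threads in one circular thread group */
	a.next = &b;
	b.next = &c;
	c.next = &a;

	t = find_task_with_mm(&a);
	printf("thread with mm: %d\n", t ? t->pid : -1);	/* prints 101 */
	return 0;
}

In the patch itself the same restructuring lets has_intersects_mems_allowed() accumulate its result in a local ret and break out of the loop, and lets oom_kill_process() drop the extra rcu_read_lock()/rcu_read_unlock() around find_lock_task_mm(), since that helper now takes the RCU read lock internally.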