author		David Rientjes <rientjes@google.com>	2012-07-31 19:43:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 21:42:44 -0400
commit		9cbb78bb314360a860a8b23723971cb6fcb54176 (patch)
tree		7983de03845b5914e0188ce119f9374711ffcce7 /mm/oom_kill.c
parent		462607ecc519b197f7b5cc6b024a1c26fa6fc0ac (diff)
mm, memcg: introduce own oom handler to iterate only over its own threads
The global oom killer is serialized by the per-zonelist try_set_zonelist_oom() which is used in the page allocator.  Concurrent oom kills are thus a rare event and only occur in systems using mempolicies and with a large number of nodes.

Memory controller oom kills, however, can frequently be concurrent since there is no serialization once the oom killer is called for oom conditions in several different memcgs in parallel.

This creates a massive contention on tasklist_lock since the oom killer requires the readside for the tasklist iteration.  If several memcgs are calling the oom killer, this lock can be held for a substantial amount of time, especially if threads continue to enter it as other threads are exiting.

Since the exit path grabs the writeside of the lock with irqs disabled in a few different places, this can cause a soft lockup on cpus as a result of tasklist_lock starvation.  The kernel lacks unfair writelocks, and successful calls to the oom killer usually result in at least one thread entering the exit path, so an alternative solution is needed.

This patch introduces a separate oom handler for memcgs so that they do not require tasklist_lock for as much time.  Instead, it iterates only over the threads attached to the oom memcg and grabs a reference to the selected thread before calling oom_kill_process() to ensure it doesn't prematurely exit.

This still requires tasklist_lock for the tasklist dump, iterating children of the selected process, and killing all other threads on the system sharing the same memory as the selected victim.  So while this isn't a complete solution to tasklist_lock starvation, it significantly reduces the amount of time that it is held.

Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Sha Zhengju <handai.szj@taobao.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
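For context, the new handler itself, __mem_cgroup_out_of_memory(), is added to mm/memcontrol.c by the same patch and so does not appear in the diff below, which is limited to mm/oom_kill.c. What follows is a condensed sketch of the approach the message describes (iterate only the tasks attached to the oom memcg hierarchy via the 3.6-era cgroup_iter_start()/cgroup_iter_next() task iterator, pin the chosen task with get_task_struct(), and take tasklist_lock only around the kill itself), not the verbatim memcontrol.c hunk:

/* Sketch of the mm/memcontrol.c side of this patch (condensed). */
static void __mem_cgroup_out_of_memory(struct mem_cgroup *memcg,
                                       gfp_t gfp_mask, int order)
{
        unsigned long totalpages, chosen_points = 0;
        struct task_struct *chosen = NULL;
        struct mem_cgroup *iter;
        unsigned int points = 0;

        totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
        /* Walk only the memcg hierarchy under oom, not the global tasklist. */
        for_each_mem_cgroup_tree(iter, memcg) {
                struct cgroup *cgroup = iter->css.cgroup;
                struct cgroup_iter it;
                struct task_struct *task;

                cgroup_iter_start(cgroup, &it);
                while ((task = cgroup_iter_next(cgroup, &it))) {
                        switch (oom_scan_process_thread(task, totalpages,
                                                        NULL, false)) {
                        case OOM_SCAN_SELECT:
                                if (chosen)
                                        put_task_struct(chosen);
                                chosen = task;
                                chosen_points = ULONG_MAX;
                                get_task_struct(chosen);
                                /* fall through */
                        case OOM_SCAN_CONTINUE:
                                continue;
                        case OOM_SCAN_ABORT:
                                cgroup_iter_end(cgroup, &it);
                                mem_cgroup_iter_break(memcg, iter);
                                if (chosen)
                                        put_task_struct(chosen);
                                return;
                        case OOM_SCAN_OK:
                                break;
                        };
                        points = oom_badness(task, memcg, NULL, totalpages);
                        if (points > chosen_points) {
                                if (chosen)
                                        put_task_struct(chosen);
                                chosen = task;
                                chosen_points = points;
                                /* Pin the victim so it cannot exit under us. */
                                get_task_struct(chosen);
                        }
                }
                cgroup_iter_end(cgroup, &it);
        }

        if (!chosen)
                return;
        points = chosen_points * 1000 / totalpages;
        /* tasklist_lock is still needed inside oom_kill_process(). */
        read_lock(&tasklist_lock);
        oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
                         NULL, "Memory cgroup out of memory");
        read_unlock(&tasklist_lock);
        put_task_struct(chosen);
}

The point of the refactor is that the expensive victim scan no longer touches tasklist_lock at all; the lock is reacquired only for oom_kill_process(), whose tasklist dump and child iteration still need it, exactly as the message above notes.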
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--	mm/oom_kill.c	48
1 file changed, 15 insertions(+), 33 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f8eba9651c0c..c0c97aea837f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -288,20 +288,13 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 }
 #endif
 
-enum oom_scan_t {
-        OOM_SCAN_OK,            /* scan thread and find its badness */
-        OOM_SCAN_CONTINUE,      /* do not consider thread for oom kill */
-        OOM_SCAN_ABORT,         /* abort the iteration and return */
-        OOM_SCAN_SELECT,        /* always select this thread first */
-};
-
-static enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
-                struct mem_cgroup *memcg, unsigned long totalpages,
-                const nodemask_t *nodemask, bool force_kill)
+enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
+                unsigned long totalpages, const nodemask_t *nodemask,
+                bool force_kill)
 {
         if (task->exit_state)
                 return OOM_SCAN_CONTINUE;
-        if (oom_unkillable_task(task, memcg, nodemask))
+        if (oom_unkillable_task(task, NULL, nodemask))
                 return OOM_SCAN_CONTINUE;
 
         /*
@@ -348,8 +341,8 @@ static enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
  * (not docbooked, we don't want this one cluttering up the manual)
  */
 static struct task_struct *select_bad_process(unsigned int *ppoints,
-                unsigned long totalpages, struct mem_cgroup *memcg,
-                const nodemask_t *nodemask, bool force_kill)
+                unsigned long totalpages, const nodemask_t *nodemask,
+                bool force_kill)
 {
         struct task_struct *g, *p;
         struct task_struct *chosen = NULL;
@@ -358,7 +351,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
         do_each_thread(g, p) {
                 unsigned int points;
 
-                switch (oom_scan_process_thread(p, memcg, totalpages, nodemask,
+                switch (oom_scan_process_thread(p, totalpages, nodemask,
                                                 force_kill)) {
                 case OOM_SCAN_SELECT:
                         chosen = p;
@@ -371,7 +364,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                 case OOM_SCAN_OK:
                         break;
                 };
-                points = oom_badness(p, memcg, nodemask, totalpages);
+                points = oom_badness(p, NULL, nodemask, totalpages);
                 if (points > chosen_points) {
                         chosen = p;
                         chosen_points = points;
@@ -443,10 +436,10 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
-static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
-                             unsigned int points, unsigned long totalpages,
-                             struct mem_cgroup *memcg, nodemask_t *nodemask,
-                             const char *message)
+void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+                      unsigned int points, unsigned long totalpages,
+                      struct mem_cgroup *memcg, nodemask_t *nodemask,
+                      const char *message)
 {
         struct task_struct *victim = p;
         struct task_struct *child;
@@ -564,10 +557,6 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                               int order)
 {
-        unsigned long limit;
-        unsigned int points = 0;
-        struct task_struct *p;
-
         /*
          * If current has a pending SIGKILL, then automatically select it. The
          * goal is to allow it to allocate so that it may quickly exit and free
@@ -579,13 +568,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
         }
 
         check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
-        limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
-        read_lock(&tasklist_lock);
-        p = select_bad_process(&points, limit, memcg, NULL, false);
-        if (p && PTR_ERR(p) != -1UL)
-                oom_kill_process(p, gfp_mask, order, points, limit, memcg, NULL,
-                                 "Memory cgroup out of memory");
-        read_unlock(&tasklist_lock);
+        __mem_cgroup_out_of_memory(memcg, gfp_mask, order);
 }
 #endif
 
@@ -710,7 +693,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
         struct task_struct *p;
         unsigned long totalpages;
         unsigned long freed = 0;
-        unsigned int points;
+        unsigned int uninitialized_var(points);
         enum oom_constraint constraint = CONSTRAINT_NONE;
         int killed = 0;
 
@@ -748,8 +731,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
                 goto out;
         }
 
-        p = select_bad_process(&points, totalpages, NULL, mpol_mask,
-                               force_kill);
+        p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
         /* Found nothing?!?! Either we hang forever, or we panic. */
         if (!p) {
                 dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
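A side effect visible above: the oom_scan_t definition is deleted from this file and oom_scan_process_thread()/oom_kill_process() lose their static qualifiers, so matching declarations must now live in a header shared with mm/memcontrol.c, presumably include/linux/oom.h, which this file-limited view does not show. Reconstructed from the removed and added lines, that header side would look like:

/* Sketch of the declarations moved to a shared header (assumed oom.h). */
enum oom_scan_t {
        OOM_SCAN_OK,            /* scan thread and find its badness */
        OOM_SCAN_CONTINUE,      /* do not consider thread for oom kill */
        OOM_SCAN_ABORT,         /* abort the iteration and return */
        OOM_SCAN_SELECT,        /* always select this thread first */
};

extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                             unsigned int points, unsigned long totalpages,
                             struct mem_cgroup *memcg, nodemask_t *nodemask,
                             const char *message);
extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
                unsigned long totalpages, const nodemask_t *nodemask,
                bool force_kill);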