author		David Rientjes <rientjes@google.com>	2012-07-31 19:43:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 21:42:44 -0400
commit		9cbb78bb314360a860a8b23723971cb6fcb54176 (patch)
tree		7983de03845b5914e0188ce119f9374711ffcce7 /mm
parent		462607ecc519b197f7b5cc6b024a1c26fa6fc0ac (diff)
mm, memcg: introduce own oom handler to iterate only over its own threads
The global oom killer is serialized by the per-zonelist try_set_zonelist_oom() which is used in the page allocator. Concurrent oom kills are thus a rare event and only occur in systems using mempolicies and with a large number of nodes.

Memory controller oom kills, however, can frequently be concurrent since there is no serialization once the oom killer is called for oom conditions in several different memcgs in parallel.

This creates massive contention on tasklist_lock since the oom killer requires the readside for the tasklist iteration. If several memcgs are calling the oom killer, this lock can be held for a substantial amount of time, especially if threads continue to enter it as other threads are exiting. Since the exit path grabs the writeside of the lock with irqs disabled in a few different places, this can cause a soft lockup on cpus as a result of tasklist_lock starvation. The kernel lacks unfair writelocks, and successful calls to the oom killer usually result in at least one thread entering the exit path, so an alternative solution is needed.

This patch introduces a separate oom handler for memcgs so that they do not require tasklist_lock for as much time. Instead, it iterates only over the threads attached to the oom memcg and grabs a reference to the selected thread before calling oom_kill_process() to ensure it doesn't prematurely exit.

This still requires tasklist_lock for the tasklist dump, iterating children of the selected process, and killing all other threads on the system sharing the same memory as the selected victim. So while this isn't a complete solution to tasklist_lock starvation, it significantly reduces the amount of time that it is held.

Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Sha Zhengju <handai.szj@taobao.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
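In short, the new handler pins the chosen task with a task reference and only takes tasklist_lock around the kill itself. The following is a condensed, commented excerpt of that pattern, drawn from the mm/memcontrol.c hunk below (comments added here for readability; this is a reading aid, not standalone code):

        /* While scanning only the oom memcg's own tasks (no tasklist_lock),
         * keep the current best candidate pinned so it cannot exit and be
         * freed underneath us before we get to kill it. */
        if (points > chosen_points) {
                if (chosen)
                        put_task_struct(chosen);        /* drop previous candidate */
                chosen = task;
                chosen_points = points;
                get_task_struct(chosen);                /* pin the new candidate */
        }

        /* ... selection loop ends ... */

        /* tasklist_lock is now held only for the kill, not for selection */
        read_lock(&tasklist_lock);
        oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
                         NULL, "Memory cgroup out of memory");
        read_unlock(&tasklist_lock);
        put_task_struct(chosen);                        /* release the pin */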
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	61
-rw-r--r--	mm/oom_kill.c	48
2 files changed, 75 insertions(+), 34 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4f73c823c59f..b78972e2f43f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1453,7 +1453,7 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
 /*
  * Return the memory (and swap, if configured) limit for a memcg.
  */
-u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
+static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
 {
         u64 limit;
         u64 memsw;
@@ -1469,6 +1469,65 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
         return min(limit, memsw);
 }
 
+void __mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+                                int order)
+{
+        struct mem_cgroup *iter;
+        unsigned long chosen_points = 0;
+        unsigned long totalpages;
+        unsigned int points = 0;
+        struct task_struct *chosen = NULL;
+
+        totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
+        for_each_mem_cgroup_tree(iter, memcg) {
+                struct cgroup *cgroup = iter->css.cgroup;
+                struct cgroup_iter it;
+                struct task_struct *task;
+
+                cgroup_iter_start(cgroup, &it);
+                while ((task = cgroup_iter_next(cgroup, &it))) {
+                        switch (oom_scan_process_thread(task, totalpages, NULL,
+                                                        false)) {
+                        case OOM_SCAN_SELECT:
+                                if (chosen)
+                                        put_task_struct(chosen);
+                                chosen = task;
+                                chosen_points = ULONG_MAX;
+                                get_task_struct(chosen);
+                                /* fall through */
+                        case OOM_SCAN_CONTINUE:
+                                continue;
+                        case OOM_SCAN_ABORT:
+                                cgroup_iter_end(cgroup, &it);
+                                mem_cgroup_iter_break(memcg, iter);
+                                if (chosen)
+                                        put_task_struct(chosen);
+                                return;
+                        case OOM_SCAN_OK:
+                                break;
+                        };
+                        points = oom_badness(task, memcg, NULL, totalpages);
+                        if (points > chosen_points) {
+                                if (chosen)
+                                        put_task_struct(chosen);
+                                chosen = task;
+                                chosen_points = points;
+                                get_task_struct(chosen);
+                        }
+                }
+                cgroup_iter_end(cgroup, &it);
+        }
+
+        if (!chosen)
+                return;
+        points = chosen_points * 1000 / totalpages;
+        read_lock(&tasklist_lock);
+        oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
+                         NULL, "Memory cgroup out of memory");
+        read_unlock(&tasklist_lock);
+        put_task_struct(chosen);
+}
+
 static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
                                         gfp_t gfp_mask,
                                         unsigned long flags)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f8eba9651c0c..c0c97aea837f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -288,20 +288,13 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 }
 #endif
 
-enum oom_scan_t {
-        OOM_SCAN_OK,            /* scan thread and find its badness */
-        OOM_SCAN_CONTINUE,      /* do not consider thread for oom kill */
-        OOM_SCAN_ABORT,         /* abort the iteration and return */
-        OOM_SCAN_SELECT,        /* always select this thread first */
-};
-
-static enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
-                struct mem_cgroup *memcg, unsigned long totalpages,
-                const nodemask_t *nodemask, bool force_kill)
+enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
+                unsigned long totalpages, const nodemask_t *nodemask,
+                bool force_kill)
 {
         if (task->exit_state)
                 return OOM_SCAN_CONTINUE;
-        if (oom_unkillable_task(task, memcg, nodemask))
+        if (oom_unkillable_task(task, NULL, nodemask))
                 return OOM_SCAN_CONTINUE;
 
         /*
@@ -348,8 +341,8 @@ static enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
  * (not docbooked, we don't want this one cluttering up the manual)
  */
 static struct task_struct *select_bad_process(unsigned int *ppoints,
-                unsigned long totalpages, struct mem_cgroup *memcg,
-                const nodemask_t *nodemask, bool force_kill)
+                unsigned long totalpages, const nodemask_t *nodemask,
+                bool force_kill)
 {
         struct task_struct *g, *p;
         struct task_struct *chosen = NULL;
@@ -358,7 +351,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
         do_each_thread(g, p) {
                 unsigned int points;
 
-                switch (oom_scan_process_thread(p, memcg, totalpages, nodemask,
+                switch (oom_scan_process_thread(p, totalpages, nodemask,
                                                 force_kill)) {
                 case OOM_SCAN_SELECT:
                         chosen = p;
@@ -371,7 +364,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                 case OOM_SCAN_OK:
                         break;
                 };
-                points = oom_badness(p, memcg, nodemask, totalpages);
+                points = oom_badness(p, NULL, nodemask, totalpages);
                 if (points > chosen_points) {
                         chosen = p;
                         chosen_points = points;
@@ -443,10 +436,10 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
-static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                       unsigned int points, unsigned long totalpages,
                       struct mem_cgroup *memcg, nodemask_t *nodemask,
                       const char *message)
 {
         struct task_struct *victim = p;
         struct task_struct *child;
@@ -564,10 +557,6 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                               int order)
 {
-        unsigned long limit;
-        unsigned int points = 0;
-        struct task_struct *p;
-
         /*
          * If current has a pending SIGKILL, then automatically select it. The
          * goal is to allow it to allocate so that it may quickly exit and free
@@ -579,13 +568,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
         }
 
         check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
-        limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
-        read_lock(&tasklist_lock);
-        p = select_bad_process(&points, limit, memcg, NULL, false);
-        if (p && PTR_ERR(p) != -1UL)
-                oom_kill_process(p, gfp_mask, order, points, limit, memcg, NULL,
-                                 "Memory cgroup out of memory");
-        read_unlock(&tasklist_lock);
+        __mem_cgroup_out_of_memory(memcg, gfp_mask, order);
 }
 #endif
 
@@ -710,7 +693,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
         struct task_struct *p;
         unsigned long totalpages;
         unsigned long freed = 0;
-        unsigned int points;
+        unsigned int uninitialized_var(points);
         enum oom_constraint constraint = CONSTRAINT_NONE;
         int killed = 0;
 
@@ -748,8 +731,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
                 goto out;
         }
 
-        p = select_bad_process(&points, totalpages, NULL, mpol_mask,
-                               force_kill);
+        p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
         /* Found nothing?!?! Either we hang forever, or we panic. */
         if (!p) {
                 dump_header(NULL, gfp_mask, order, NULL, mpol_mask);