path: root/mm/memcontrol.c
author    David Rientjes <rientjes@google.com>    2012-07-31 19:43:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-07-31 21:42:44 -0400
commit    9cbb78bb314360a860a8b23723971cb6fcb54176 (patch)
tree      7983de03845b5914e0188ce119f9374711ffcce7 /mm/memcontrol.c
parent    462607ecc519b197f7b5cc6b024a1c26fa6fc0ac (diff)
mm, memcg: introduce own oom handler to iterate only over its own threads
The global oom killer is serialized by the per-zonelist try_set_zonelist_oom() which is used in the page allocator. Concurrent oom kills are thus a rare event and only occur in systems using mempolicies and with a large number of nodes.

Memory controller oom kills, however, can frequently be concurrent since there is no serialization once the oom killer is called for oom conditions in several different memcgs in parallel.

This creates massive contention on tasklist_lock since the oom killer requires the readside for the tasklist iteration. If several memcgs are calling the oom killer, this lock can be held for a substantial amount of time, especially if threads continue to enter it as other threads are exiting. Since the exit path grabs the writeside of the lock with irqs disabled in a few different places, this can cause a soft lockup on cpus as a result of tasklist_lock starvation.

The kernel lacks unfair writelocks, and successful calls to the oom killer usually result in at least one thread entering the exit path, so an alternative solution is needed.

This patch introduces a separate oom handler for memcgs so that they do not require tasklist_lock for as much time. Instead, it iterates only over the threads attached to the oom memcg and grabs a reference to the selected thread before calling oom_kill_process() to ensure it doesn't prematurely exit.

This still requires tasklist_lock for the tasklist dump, iterating children of the selected process, and killing all other threads on the system sharing the same memory as the selected victim. So while this isn't a complete solution to tasklist_lock starvation, it significantly reduces the amount of time that it is held.

Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Sha Zhengju <handai.szj@taobao.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
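Note that only the mm/memcontrol.c half of the change appears in the diff below; the entry point that actually reaches the new handler lives in mm/oom_kill.c, which is outside this diffstat. As a rough sketch of how the pieces are expected to fit together (the wrapper body here is an editorial assumption for illustration, not quoted from the patch):

    /*
     * Sketch only: approximate shape of the memcg OOM entry point in
     * mm/oom_kill.c once __mem_cgroup_out_of_memory() exists.  This
     * body is an assumption, not part of the diff shown on this page.
     */
    void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                  int order)
    {
            /*
             * A task with a pending SIGKILL is already exiting; let it
             * run so it can free its memory rather than picking a new
             * victim.
             */
            if (fatal_signal_pending(current)) {
                    set_thread_flag(TIF_MEMDIE);
                    return;
            }
            check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
            /* Victim selection now walks only the memcg's own threads. */
            __mem_cgroup_out_of_memory(memcg, gfp_mask, order);
    }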
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--    mm/memcontrol.c    61
1 file changed, 60 insertions, 1 deletion
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4f73c823c59f..b78972e2f43f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1453,7 +1453,7 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
 /*
  * Return the memory (and swap, if configured) limit for a memcg.
  */
-u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
+static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
 {
 	u64 limit;
 	u64 memsw;
@@ -1469,6 +1469,65 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
 	return min(limit, memsw);
 }
 
+void __mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+				int order)
+{
+	struct mem_cgroup *iter;
+	unsigned long chosen_points = 0;
+	unsigned long totalpages;
+	unsigned int points = 0;
+	struct task_struct *chosen = NULL;
+
+	totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
+	for_each_mem_cgroup_tree(iter, memcg) {
+		struct cgroup *cgroup = iter->css.cgroup;
+		struct cgroup_iter it;
+		struct task_struct *task;
+
+		cgroup_iter_start(cgroup, &it);
+		while ((task = cgroup_iter_next(cgroup, &it))) {
+			switch (oom_scan_process_thread(task, totalpages, NULL,
+							false)) {
+			case OOM_SCAN_SELECT:
+				if (chosen)
+					put_task_struct(chosen);
+				chosen = task;
+				chosen_points = ULONG_MAX;
+				get_task_struct(chosen);
+				/* fall through */
+			case OOM_SCAN_CONTINUE:
+				continue;
+			case OOM_SCAN_ABORT:
+				cgroup_iter_end(cgroup, &it);
+				mem_cgroup_iter_break(memcg, iter);
+				if (chosen)
+					put_task_struct(chosen);
+				return;
+			case OOM_SCAN_OK:
+				break;
+			};
+			points = oom_badness(task, memcg, NULL, totalpages);
+			if (points > chosen_points) {
+				if (chosen)
+					put_task_struct(chosen);
+				chosen = task;
+				chosen_points = points;
+				get_task_struct(chosen);
+			}
+		}
+		cgroup_iter_end(cgroup, &it);
+	}
+
+	if (!chosen)
+		return;
+	points = chosen_points * 1000 / totalpages;
+	read_lock(&tasklist_lock);
+	oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
+			 NULL, "Memory cgroup out of memory");
+	read_unlock(&tasklist_lock);
+	put_task_struct(chosen);
+}
+
 static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
 					gfp_t gfp_mask,
 					unsigned long flags)
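One idiom in the new handler worth calling out: the totalpages initialization uses GNU C's two-operand conditional. A minimal editorial illustration (the comment is an annotation, not from the patch):

    /*
     * GNU C extension: "x ? : y" evaluates x once and yields x when it
     * is nonzero, y otherwise.  Clamping totalpages to at least 1 page
     * keeps the later "chosen_points * 1000 / totalpages" from dividing
     * by zero when a memcg's limit is smaller than a page.
     */
    totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;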