author    he, bo <bo.he@intel.com>          2012-04-25 07:59:21 -0400
committer Ingo Molnar <mingo@kernel.org>    2012-04-26 06:54:53 -0400
commit    fb2cf2c660971bea0ad86a9a5c19ad39eab61344
tree      75343c1ef32657416dbef2dd5a683391ff8b00c9 /kernel/sched
parent    eb95308ee2a69403909e111837b9068c64cfc349
sched: Fix OOPS when build_sched_domains() percpu allocation fails
Under extreme memory pressure, percpu allocation can fail. We hit
this when the system goes into suspend-to-ram, causing a kworker
panic:
EIP: [<c124411a>] build_sched_domains+0x23a/0xad0
Kernel panic - not syncing: Fatal exception
Pid: 3026, comm: kworker/u:3 3.0.8-137473-gf42fbef #1
Call Trace:
[<c18cc4f2>] panic+0x66/0x16c
[...]
[<c1244c37>] partition_sched_domains+0x287/0x4b0
[<c12a77be>] cpuset_update_active_cpus+0x1fe/0x210
[<c123712d>] cpuset_cpu_inactive+0x1d/0x30
[...]
With this fix applied, build_sched_domains() returns -ENOMEM and the
suspend attempt fails cleanly instead of crashing.
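
The underlying pattern is easier to see outside the scheduler code. Below is
a minimal, self-contained userspace sketch of the defensive teardown the
patch introduces in __sdt_free(): check that each parent allocation actually
succeeded before touching its contents, and reset pointers to NULL after
freeing so the teardown can be re-run safely from the error path. It uses
plain malloc/free instead of the kernel's percpu allocator, and the demo_*
names are illustrative, not kernel APIs.

/*
 * Userspace sketch only, not kernel code. demo_data stands in for
 * sd_data; the two pointer arrays stand in for the percpu sched_domain
 * and sched_group pointers.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_ITEMS 4

struct demo_data {
	int **sd;	/* stand-in for sdd->sd */
	int **sg;	/* stand-in for sdd->sg */
};

/* Free whatever was allocated; safe on partially initialized state. */
static void demo_free(struct demo_data *d)
{
	int i;

	for (i = 0; i < NR_ITEMS; i++) {
		if (d->sd)		/* the array itself may never have been allocated */
			free(d->sd[i]);
		if (d->sg)
			free(d->sg[i]);
	}
	free(d->sd);
	d->sd = NULL;		/* make repeated teardown a no-op */
	free(d->sg);
	d->sg = NULL;
}

/* Allocate everything; on any failure, unwind and report -ENOMEM. */
static int demo_alloc(struct demo_data *d)
{
	int i;

	d->sd = calloc(NR_ITEMS, sizeof(*d->sd));
	d->sg = calloc(NR_ITEMS, sizeof(*d->sg));
	if (!d->sd || !d->sg)
		goto fail;

	for (i = 0; i < NR_ITEMS; i++) {
		d->sd[i] = calloc(1, sizeof(int));
		d->sg[i] = calloc(1, sizeof(int));
		if (!d->sd[i] || !d->sg[i])
			goto fail;
	}
	return 0;

fail:
	demo_free(d);		/* tolerates the half-built state */
	return -ENOMEM;
}

int main(void)
{
	struct demo_data d = { 0 };

	if (demo_alloc(&d)) {
		fprintf(stderr, "allocation failed: -ENOMEM\n");
		return 1;
	}
	demo_free(&d);
	return 0;
}

Nulling sdd->sd, sdd->sg and sdd->sgp after free_percpu() in the real patch
serves the same purpose: __sdt_free() can then run again on a partially
built sd_data, for example from the error-unwinding path after a failed
allocation, without dereferencing NULL percpu pointers.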
Signed-off-by: he, bo <bo.he@intel.com>
Reviewed-by: Zhang, Yanmin <yanmin.zhang@intel.com>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: <stable@kernel.org>
Link: http://lkml.kernel.org/r/1335355161.5892.17.camel@hebo
[ So, we fail to deallocate a CPU because we cannot allocate RAM :-/
I don't like that kind of sad behavior but nevertheless it should
not crash under high memory load. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
 kernel/sched/core.c | 22
 1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4603b9d8f30a..0533a688ce22 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6405,16 +6405,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-			if (sd && (sd->flags & SD_OVERLAP))
-				free_sched_groups(sd->groups, 0);
-			kfree(*per_cpu_ptr(sdd->sd, j));
-			kfree(*per_cpu_ptr(sdd->sg, j));
-			kfree(*per_cpu_ptr(sdd->sgp, j));
+			struct sched_domain *sd;
+
+			if (sdd->sd) {
+				sd = *per_cpu_ptr(sdd->sd, j);
+				if (sd && (sd->flags & SD_OVERLAP))
+					free_sched_groups(sd->groups, 0);
+				kfree(*per_cpu_ptr(sdd->sd, j));
+			}
+
+			if (sdd->sg)
+				kfree(*per_cpu_ptr(sdd->sg, j));
+			if (sdd->sgp)
+				kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
+		sdd->sd = NULL;
 		free_percpu(sdd->sg);
+		sdd->sg = NULL;
 		free_percpu(sdd->sgp);
+		sdd->sgp = NULL;
 	}
 }
 