diff options
| author | Rik van Riel <riel@redhat.com> | 2013-11-11 19:29:25 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-11-13 07:33:51 -0500 |
| commit | 46a73e8a1c1720f7713b5e2df68e9dd272015b5d (patch) | |
| tree | a119f949799e3cd7382548eac2b41fa060c29ac5 /kernel | |
| parent | 106dd5afde3cd10db7e1370b6ddc77f0b2496a75 (diff) | |
sched/numa: Fix NULL pointer dereference in task_numa_migrate()
The cpusets code can split up the scheduler's domain tree into
smaller domains. Some of those smaller domains may not cross
NUMA nodes at all, leading to a NULL pointer dereference on the
per-cpu sd_numa pointer.
Tasks cannot be migrated out of their domain, so the patch
also sets p->numa_preferred_nid to wherever they are, to
prevent the migration from being retried over and over again.
Reported-by: Prarit Bhargava <prarit@redhat.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Link: http://lkml.kernel.org/n/tip-oosqomw0Jput0Jkvoowhrqtu@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/fair.c | 14 |
1 file changed, 13 insertions, 1 deletion
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index df77c605c7a6..c11e36ff5ea0 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -1201,9 +1201,21 @@ static int task_numa_migrate(struct task_struct *p) | |||
| 1201 | */ | 1201 | */ |
| 1202 | rcu_read_lock(); | 1202 | rcu_read_lock(); |
| 1203 | sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); | 1203 | sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); |
| 1204 | env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; | 1204 | if (sd) |
| 1205 | env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; | ||
| 1205 | rcu_read_unlock(); | 1206 | rcu_read_unlock(); |
| 1206 | 1207 | ||
| 1208 | /* | ||
| 1209 | * Cpusets can break the scheduler domain tree into smaller | ||
| 1210 | * balance domains, some of which do not cross NUMA boundaries. | ||
| 1211 | * Tasks that are "trapped" in such domains cannot be migrated | ||
| 1212 | * elsewhere, so there is no point in (re)trying. | ||
| 1213 | */ | ||
| 1214 | if (unlikely(!sd)) { | ||
| 1215 | p->numa_preferred_nid = cpu_to_node(task_cpu(p)); | ||
| 1216 | return -EINVAL; | ||
| 1217 | } | ||
| 1218 | |||
| 1207 | taskweight = task_weight(p, env.src_nid); | 1219 | taskweight = task_weight(p, env.src_nid); |
| 1208 | groupweight = group_weight(p, env.src_nid); | 1220 | groupweight = group_weight(p, env.src_nid); |
| 1209 | update_numa_stats(&env.src_stats, env.src_nid); | 1221 | update_numa_stats(&env.src_stats, env.src_nid); |
