Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	| 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df77c605c7a6..c11e36ff5ea0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1201,9 +1201,21 @@ static int task_numa_migrate(struct task_struct *p)
 	 */
 	rcu_read_lock();
 	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
-	env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
+	if (sd)
+		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
 	rcu_read_unlock();
 
+	/*
+	 * Cpusets can break the scheduler domain tree into smaller
+	 * balance domains, some of which do not cross NUMA boundaries.
+	 * Tasks that are "trapped" in such domains cannot be migrated
+	 * elsewhere, so there is no point in (re)trying.
+	 */
+	if (unlikely(!sd)) {
+		p->numa_preferred_nid = cpu_to_node(task_cpu(p));
+		return -EINVAL;
+	}
+
 	taskweight = task_weight(p, env.src_nid);
 	groupweight = group_weight(p, env.src_nid);
 	update_numa_stats(&env.src_stats, env.src_nid);
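Taken together, the hunk guards against a NULL pointer dereference: before the change, sd->imbalance_pct was read unconditionally, but per_cpu(sd_numa, env.src_cpu) can be NULL when cpusets have confined the task's CPU to a balance domain that never crosses a NUMA boundary. For readability, the patched region of task_numa_migrate() reads as follows once the hunk is applied (a reconstruction from the diff above; the rest of the function is elided):

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
	if (sd)
		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
	rcu_read_unlock();

	/*
	 * Cpusets can break the scheduler domain tree into smaller
	 * balance domains, some of which do not cross NUMA boundaries.
	 * Tasks that are "trapped" in such domains cannot be migrated
	 * elsewhere, so there is no point in (re)trying.
	 */
	if (unlikely(!sd)) {
		p->numa_preferred_nid = cpu_to_node(task_cpu(p));
		return -EINVAL;
	}

	taskweight = task_weight(p, env.src_nid);
	groupweight = group_weight(p, env.src_nid);
	update_numa_stats(&env.src_stats, env.src_nid);

Note that recording the task's current node in p->numa_preferred_nid before returning -EINVAL keeps the NUMA placement code from repeatedly retrying a migration that can never succeed, as the new comment explains.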