author     Peter Zijlstra <peterz@infradead.org>    2013-11-06 12:47:57 -0500
committer  Ingo Molnar <mingo@kernel.org>           2013-11-13 07:33:52 -0500
commit     5eca82a9ac2c961cfbd26a4b6f43e6e3747a71dd
tree       c63a3495a562c32b447bd4dec093eca98e9a584b /kernel
parent     46a73e8a1c1720f7713b5e2df68e9dd272015b5d
sched/numa: Cure update_numa_stats() vs. hotplug
Because we're completely unserialized against hotplug, it's quite
possible to try to generate NUMA stats for an offlined node.
Bail out early (and avoid a division by zero) in this case. The resulting
stats are all 0, which should make the node an undesirable balance
target -- not to mention that actually trying to migrate to an offline
CPU will fail.
Reported-by: Prarit Bhargava <prarit@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Link: http://lkml.kernel.org/n/tip-orja0qylcvyhxfsuebcyL5sI@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  15
1 file changed, 14 insertions(+), 1 deletion(-)
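For readers without the kernel sources at hand, the shape of the fix applied by the diff below can be shown in isolation: accumulate per-CPU contributions, count how many CPUs actually contributed, and return before the division when that count is zero. The following is a minimal stand-alone sketch of that pattern, not the kernel code itself; struct node_stats, the online/cpu_* arrays, NODE_CPUS and SCALE are invented for the illustration and stand in for the real for_each_cpu()/weighted_cpuload()/power_of() machinery.

#include <stdbool.h>
#include <stdio.h>

#define NODE_CPUS 4
#define SCALE     1024      /* stand-in for SCHED_POWER_SCALE */

struct node_stats {
	unsigned long nr_running;
	unsigned long load;
	unsigned long power;
	unsigned long capacity;
	bool has_capacity;
};

/*
 * Hypothetical per-CPU data for one node; online[cpu] == false models a CPU
 * that was hot-unplugged while we were looking at the node.  Every entry is
 * false here, reproducing the race the patch cures.
 */
static const bool online[NODE_CPUS] = { false, false, false, false };
static const unsigned long cpu_nr_running[NODE_CPUS] = { 2, 1, 0, 3 };
static const unsigned long cpu_load[NODE_CPUS] = { 300, 500, 200, 100 };
static const unsigned long cpu_power[NODE_CPUS] = { SCALE, SCALE, SCALE, SCALE };

static void update_node_stats(struct node_stats *ns)
{
	int cpu, cpus = 0;

	*ns = (struct node_stats){ 0 };

	for (cpu = 0; cpu < NODE_CPUS; cpu++) {
		if (!online[cpu])
			continue;
		ns->nr_running += cpu_nr_running[cpu];
		ns->load += cpu_load[cpu];
		ns->power += cpu_power[cpu];
		cpus++;
	}

	/*
	 * No online CPUs contributed, so ns->power is 0 and the division
	 * below would fault.  Returning here leaves the stats zeroed, which
	 * makes the node look unattractive to the balancer.
	 */
	if (!cpus)
		return;

	ns->load = ns->load * SCALE / ns->power;
	ns->capacity = (ns->power + SCALE / 2) / SCALE;  /* rounded divide */
	ns->has_capacity = ns->nr_running < ns->capacity;
}

int main(void)
{
	struct node_stats ns;

	update_node_stats(&ns);
	printf("load=%lu capacity=%lu has_capacity=%d\n",
	       ns.load, ns.capacity, (int)ns.has_capacity);
	return 0;
}

With every entry of online[] false, the early return is what prevents the ns->load division from dividing by zero; flip any entry to true and the stats are computed as before.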
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c11e36ff5ea0..201be782b5b3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1000,7 +1000,7 @@ struct numa_stats {
  */
 static void update_numa_stats(struct numa_stats *ns, int nid)
 {
-	int cpu;
+	int cpu, cpus = 0;
 
 	memset(ns, 0, sizeof(*ns));
 	for_each_cpu(cpu, cpumask_of_node(nid)) {
@@ -1009,8 +1009,21 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
 		ns->nr_running += rq->nr_running;
 		ns->load += weighted_cpuload(cpu);
 		ns->power += power_of(cpu);
+
+		cpus++;
 	}
 
+	/*
+	 * If we raced with hotplug and there are no CPUs left in our mask
+	 * the @ns structure is NULL'ed and task_numa_compare() will
+	 * not find this node attractive.
+	 *
+	 * We'll either bail at !has_capacity, or we'll detect a huge imbalance
+	 * and bail there.
+	 */
+	if (!cpus)
+		return;
+
 	ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
 	ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
 	ns->has_capacity = (ns->nr_running < ns->capacity);
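The comment added by the second hunk explains why simply returning with zeroed stats is sufficient: task_numa_compare() will either bail at the !has_capacity test or see a huge imbalance and bail there. As a rough stand-alone sketch of that idea (the names below are invented for illustration and are not the kernel's actual selection code):

#include <stdbool.h>

/* Minimal stand-in for the handful of fields the comment talks about. */
struct numa_target {
	unsigned long nr_running;
	unsigned long capacity;   /* left at 0 when the update raced with hotplug */
	bool has_capacity;        /* nr_running < capacity, so false here */
};

/*
 * Hypothetical target-selection check: a node whose stats were left zeroed
 * fails the capacity test outright, so no migration to its offline CPUs is
 * ever attempted.
 */
static bool worth_migrating_to(const struct numa_target *t)
{
	if (!t->has_capacity)
		return false;
	/* ... further load/imbalance comparisons would go here ... */
	return true;
}

int main(void)
{
	/* All-zero stats, as left behind by the early return in the patch. */
	struct numa_target offlined = { 0 };

	return worth_migrating_to(&offlined) ? 1 : 0;   /* exits 0: rejected */
}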