author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-11-25 07:31:39 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-06 15:10:56 -0500
commit		6ad4c18884e864cf4c77f9074d3d1816063f99cd (patch)
tree		f09643f6148b576fa2d23bf7d4b37d082d94e267 /include
parent		e1b8090bdf125f8b2e192149547fead7f302a89c (diff)
sched: Fix balance vs hotplug race
Since (e761b77: cpu hotplug, sched: Introduce cpu_active_map and redo
sched domain management) we have cpu_active_mask, which is supposed to
rule scheduler migration and load-balancing, except it never (fully) did.
The particular problem being solved here is a crash in try_to_wake_up(),
where select_task_rq() ends up selecting an offline cpu because
select_task_rq_fair() trusts the sched_domain tree to reflect the
current state of affairs; similarly, select_task_rq_rt() trusts the
root_domain.
However, the sched_domains are updated from CPU_DEAD, which is after the
cpu is taken offline and after stop_machine is done. Therefore it can
race perfectly well with code assuming the domains are right.
Cure this by building the domains from cpu_active_mask on
CPU_DOWN_PREPARE.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
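As a rough illustration of the cure described above (not the patch itself, which lives in kernel/cpu.c, kernel/sched.c and kernel/cpuset.c), the sketch below shows a hotplug notifier that rebuilds the default sched domain while the dying CPU is already cleared from cpu_active_mask, i.e. on CPU_DOWN_PREPARE rather than waiting for CPU_DEAD, and again on CPU_DOWN_FAILED/CPU_ONLINE. register_cpu_notifier(), partition_sched_domains() and the CPU_* actions are existing kernel interfaces of this era; the notifier itself is purely hypothetical and assumes it is built into the kernel.

/* Illustrative sketch only -- not the code changed by this patch.
 * Rebuild the default sched domain (constructed from cpu_active_mask)
 * as soon as a CPU is about to go down, and again if the down
 * operation fails or a CPU comes back online. */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>

static int domain_rebuild_callback(struct notifier_block *nb,
				   unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:	/* cpu already cleared from cpu_active_mask */
	case CPU_DOWN_FAILED:	/* cpu marked active again */
	case CPU_ONLINE:
		/* Rebuild the single default domain from the active set. */
		partition_sched_domains(1, NULL, NULL);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block domain_rebuild_nb = {
	.notifier_call = domain_rebuild_callback,
};

static int __init domain_rebuild_init(void)
{
	register_cpu_notifier(&domain_rebuild_nb);
	return 0;
}
early_initcall(domain_rebuild_init);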
Diffstat (limited to 'include')
-rw-r--r--	include/linux/cpumask.h | 2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 789cf5f920ce..d77b54733c5b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask;
 #define num_online_cpus()	cpumask_weight(cpu_online_mask)
 #define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
 #define num_present_cpus()	cpumask_weight(cpu_present_mask)
+#define num_active_cpus()	cpumask_weight(cpu_active_mask)
 #define cpu_online(cpu)	cpumask_test_cpu((cpu), cpu_online_mask)
 #define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
 #define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask;
 #define num_online_cpus()	1
 #define num_possible_cpus()	1
 #define num_present_cpus()	1
+#define num_active_cpus()	1
 #define cpu_online(cpu)	((cpu) == 0)
 #define cpu_possible(cpu)	((cpu) == 0)
 #define cpu_present(cpu)	((cpu) == 0)
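For a usage sketch of the new macro: num_active_cpus() counts the CPUs in cpu_active_mask, i.e. the CPUs the scheduler is still allowed to balance onto, whereas num_online_cpus() can briefly include a CPU that has already been marked inactive during CPU_DOWN_PREPARE. The helper below is hypothetical and only shows the intended distinction.

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Hypothetical helper: size per-CPU work by the set of CPUs the
 * scheduler may actually use, capped at an arbitrary limit of 4. */
static unsigned int sketch_nr_workers(void)
{
	return min_t(unsigned int, 4, num_active_cpus());
}

Likewise, cpumask_test_cpu(cpu, cpu_active_mask) can be used to check whether a candidate CPU is still a valid migration target.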