diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-04-18 05:24:34 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-04-19 04:56:54 -0400 |
commit | 057f3fadb347e9c51b07e1b277bbdda79f976768 (patch) | |
tree | 12f637ad30129245ef51836df1d134257d827dd5 /kernel/sched.c | |
parent | 2f36825b176f67e5c5228aa33d828bc39718811f (diff) |
sched: Fix sched_domain iterations vs. RCU
Valdis Kletnieks reported a new RCU debug warning in the scheduler.
Since commit dce840a08702b ("sched: Dynamically allocate sched_domain/
sched_group data-structures") the sched_domain trees are protected by
RCU instead of RCU-sched.
This means that we need to include rcu_read_lock() protection when we
iterate them since disabling preemption doesn't suffice anymore.
Reported-by: Valdis.Kletnieks@vt.edu
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1302882741.2388.241.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 14 |
1 files changed, 11 insertions, 3 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 0cfe0310ed5d..27d3e73a2af6 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -1208,11 +1208,17 @@ int get_nohz_timer_target(void) | |||
1208 | int i; | 1208 | int i; |
1209 | struct sched_domain *sd; | 1209 | struct sched_domain *sd; |
1210 | 1210 | ||
1211 | rcu_read_lock(); | ||
1211 | for_each_domain(cpu, sd) { | 1212 | for_each_domain(cpu, sd) { |
1212 | for_each_cpu(i, sched_domain_span(sd)) | 1213 | for_each_cpu(i, sched_domain_span(sd)) { |
1213 | if (!idle_cpu(i)) | 1214 | if (!idle_cpu(i)) { |
1214 | return i; | 1215 | cpu = i; |
1216 | goto unlock; | ||
1217 | } | ||
1218 | } | ||
1215 | } | 1219 | } |
1220 | unlock: | ||
1221 | rcu_read_unlock(); | ||
1216 | return cpu; | 1222 | return cpu; |
1217 | } | 1223 | } |
1218 | /* | 1224 | /* |
@@ -2415,12 +2421,14 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags) | |||
2415 | struct sched_domain *sd; | 2421 | struct sched_domain *sd; |
2416 | 2422 | ||
2417 | schedstat_inc(p, se.statistics.nr_wakeups_remote); | 2423 | schedstat_inc(p, se.statistics.nr_wakeups_remote); |
2424 | rcu_read_lock(); | ||
2418 | for_each_domain(this_cpu, sd) { | 2425 | for_each_domain(this_cpu, sd) { |
2419 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { | 2426 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
2420 | schedstat_inc(sd, ttwu_wake_remote); | 2427 | schedstat_inc(sd, ttwu_wake_remote); |
2421 | break; | 2428 | break; |
2422 | } | 2429 | } |
2423 | } | 2430 | } |
2431 | rcu_read_unlock(); | ||
2424 | } | 2432 | } |
2425 | #endif /* CONFIG_SMP */ | 2433 | #endif /* CONFIG_SMP */ |
2426 | 2434 | ||