aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael Wang <wangyun@linux.vnet.ibm.com>2013-11-12 22:10:56 -0500
committerIngo Molnar <mingo@kernel.org>2013-11-13 07:33:50 -0500
commit106dd5afde3cd10db7e1370b6ddc77f0b2496a75 (patch)
tree5a83870657588c076e17d59a19a3c27f20fe38cb
parent9b66bfb28049594fe2bb2b91607ba302f511ce8b (diff)
sched: Fix endless sync_sched/rcu() loop inside _cpu_down()
Commit 6acce3ef8 ("sched: Remove get_online_cpus() usage") tries to do sync_sched/rcu() inside _cpu_down() but triggers: INFO: task swapper/0:1 blocked for more than 120 seconds. ... [<ffffffff811263dc>] synchronize_rcu+0x2c/0x30 [<ffffffff81d1bd82>] _cpu_down+0x2b2/0x340 ... It was caused by that in the rcu boost case we rely on smpboot thread to finish the rcu callback, which has already been parked before sync in here and leads to the endless sync_sched/rcu(). This patch exchanges the sequence of smpboot_park_threads() and sync_sched/rcu() to fix the bug. Reported-by: Fengguang Wu <fengguang.wu@intel.com> Tested-by: Fengguang Wu <fengguang.wu@intel.com> Signed-off-by: Michael Wang <wangyun@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/5282EDC0.6060003@linux.vnet.ibm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/cpu.c5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 63aa50d7ce1e..2227b58734a7 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -306,7 +306,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 			__func__, cpu);
 		goto out_release;
 	}
-	smpboot_park_threads(cpu);
 
 	/*
 	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
@@ -315,12 +314,16 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	 *
 	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
 	 * not imply sync_sched(), so explicitly call both.
+	 *
+	 * Do sync before park smpboot threads to take care the rcu boost case.
 	 */
 #ifdef CONFIG_PREEMPT
 	synchronize_sched();
 #endif
 	synchronize_rcu();
 
+	smpboot_park_threads(cpu);
+
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */