path: root/kernel/rcutree_plugin.h
author: Peter Zijlstra <peterz@infradead.org>  2011-05-30 07:34:51 -0400
committer: Ingo Molnar <mingo@elte.hu>  2011-05-31 04:01:48 -0400
commit: d72bce0e67e8afc6eb959f656013cbb577426f1e (patch)
tree: 9c93d4df9aa895d6f2f555e0cf50e7ae5ebaded4 /kernel/rcutree_plugin.h
parent: 55922c9d1b84b89cb946c777fddccb3247e7df2c (diff)
rcu: Cure load woes
Commit cc3ce5176d83 (rcu: Start RCU kthreads in TASK_INTERRUPTIBLE state)
fudges a sleeping task's state, resulting in the scheduler seeing a
TASK_UNINTERRUPTIBLE task going to sleep, but a TASK_INTERRUPTIBLE task
waking up. The result is an unbalanced load calculation.

The problem that patch tried to address is that the RCU threads could
stay in UNINTERRUPTIBLE state for quite a while and trigger the hung
task detector due to on-demand wake-ups.

Cure the problem differently by always giving the tasks at least one
wake-up once the CPU is fully up and running; this will kick them out of
the initial UNINTERRUPTIBLE state and into the regular INTERRUPTIBLE
wait state.

[ The alternative would be teaching kthread_create() to start threads as
  INTERRUPTIBLE but that needs a tad more thought. ]

Reported-by: Damien Wyart <damien.wyart@free.fr>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul E. McKenney <paul.mckenney@linaro.org>
Link: http://lkml.kernel.org/r/1306755291.1200.2872.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
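The fix relies on the standard kthread sleep/wake pattern: the thread
parks itself in TASK_INTERRUPTIBLE inside its own loop, and one explicit
wake_up_process() after the CPU is fully up guarantees it leaves the
UNINTERRUPTIBLE state that kthread_create() starts it in. A minimal,
self-contained sketch of that pattern (all demo_* names are hypothetical
illustrations, not taken from the patch):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *unused)
{
	while (!kthread_should_stop()) {
		/* Sleep in INTERRUPTIBLE state so long idle periods do
		 * not trip the hung task detector. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();
		__set_current_state(TASK_RUNNING);
		/* ... do one unit of on-demand work here ... */
	}
	return 0;
}

static void demo_start_and_kick(void)
{
	/* kthread_create() leaves the new thread UNINTERRUPTIBLE; one
	 * unconditional wake-up moves it into the regular INTERRUPTIBLE
	 * wait loop above. */
	demo_task = kthread_create(demo_thread_fn, NULL, "demo");
	if (!IS_ERR(demo_task))
		wake_up_process(demo_task);
}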
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index a767b7dac365..c8bff3099a89 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1295,7 +1295,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	set_task_state(t, TASK_INTERRUPTIBLE);
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	sp.sched_priority = RCU_KTHREAD_PRIO;
@@ -1303,6 +1302,12 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
+static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+{
+	if (rnp->boost_kthread_task)
+		wake_up_process(rnp->boost_kthread_task);
+}
+
 #else /* #ifdef CONFIG_RCU_BOOST */
 
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
@@ -1326,6 +1331,10 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
+static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+{
+}
+
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifndef CONFIG_SMP
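The empty rcu_wake_one_boost_kthread() stub in the !CONFIG_RCU_BOOST
branch lets callers issue the wake-up unconditionally, with no #ifdef at
the call site. A hedged sketch of such a caller (demo_cpu_online() is a
hypothetical name; the real call sites live in kernel/rcutree.c, which
this per-file diff does not show):

/* Assumes kernel-internal rcu_state/rcu_data definitions from
 * kernel/rcutree.h; illustration only. */
static void __cpuinit demo_cpu_online(struct rcu_state *rsp, int cpu)
{
	struct rcu_node *rnp = per_cpu_ptr(rsp->rda, cpu)->mynode;

	/* No-op when CONFIG_RCU_BOOST=n, a real wake-up otherwise. */
	rcu_wake_one_boost_kthread(rnp);
}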