author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-07-13 15:00:53 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-09-16 13:08:01 -0400
commit	9386c0b75dda05f535a10ea1abf1817fe292c81c (patch)
tree	85bd2cdd5f2f4c11b94230bce3a1ebb220cfc181 /kernel
parent	f4aa84ba24872e3a8e59b58bc8533cae95597f2e (diff)
rcu: Rationalize kthread spawning
Currently, RCU spawns kthreads from several different early_initcall()
functions.  Although this has served RCU well for quite some time, as
more kthreads are added a more deterministic approach is required.
This commit therefore causes all of RCU's early-boot kthreads to be
spawned from a single early_initcall() function.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Tested-by: Paul Gortmaker <paul.gortmaker@windriver.com>
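To make the pattern concrete, here is a minimal userspace C sketch of what the commit does: helpers that previously registered their own early_initcall() become plain functions invoked from a single spawn routine, so every early-boot thread starts from one place in a deterministic order.  Everything below (spawn_all_threads() and friends, pthreads standing in for kthread_run()/early_initcall()) is a hypothetical illustration, not the kernel's actual API.

/*
 * Minimal userspace sketch of the "one initializer spawns everything"
 * pattern.  All names are hypothetical stand-ins for illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *gp_thread(void *arg)    { (void)arg; puts("grace-period thread running"); return NULL; }
static void *boost_thread(void *arg) { (void)arg; puts("boost thread running");        return NULL; }
static void *nocb_thread(void *arg)  { (void)arg; puts("no-CBs thread running");       return NULL; }

/* Formerly an independent initializer, now a plain helper. */
static void spawn_boost_threads(pthread_t *t)
{
	if (pthread_create(t, NULL, boost_thread, NULL))
		abort();	/* loosely mirrors the kernel's BUG_ON(IS_ERR(t)) */
}

/* Likewise a plain helper called from the single spawn routine. */
static void spawn_nocb_threads(pthread_t *t)
{
	if (pthread_create(t, NULL, nocb_thread, NULL))
		abort();
}

/* Single entry point: every early-boot thread is spawned here, in order. */
static int spawn_all_threads(pthread_t t[3])
{
	if (pthread_create(&t[0], NULL, gp_thread, NULL))
		abort();
	spawn_nocb_threads(&t[1]);
	spawn_boost_threads(&t[2]);	/* previously its own initializer */
	return 0;
}

int main(void)
{
	pthread_t t[3];
	int i;

	spawn_all_threads(t);
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Because there is now exactly one spawn routine, the relative startup order of the threads is fixed by the order of calls inside it, rather than by the registration order of several independent initializers.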
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/tree.c	4
-rw-r--r--	kernel/rcu/tree.h	1
-rw-r--r--	kernel/rcu/tree_plugin.h	12
3 files changed, 7 insertions, 10 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1b70cb6fbe3c..9be47f43903b 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3489,7 +3489,7 @@ static int rcu_pm_notify(struct notifier_block *self,
 }
 
 /*
- * Spawn the kthread that handles this RCU flavor's grace periods.
+ * Spawn the kthreads that handle each RCU flavor's grace periods.
  */
 static int __init rcu_spawn_gp_kthread(void)
 {
@@ -3498,6 +3498,7 @@ static int __init rcu_spawn_gp_kthread(void)
 	struct rcu_state *rsp;
 	struct task_struct *t;
 
+	rcu_scheduler_fully_active = 1;
 	for_each_rcu_flavor(rsp) {
 		t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
 		BUG_ON(IS_ERR(t));
@@ -3507,6 +3508,7 @@ static int __init rcu_spawn_gp_kthread(void)
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		rcu_spawn_nocb_kthreads(rsp);
 	}
+	rcu_spawn_boost_kthreads();
 	return 0;
 }
 early_initcall(rcu_spawn_gp_kthread);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 6a86eb7bac45..a966092fdfd7 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -572,6 +572,7 @@ static void rcu_preempt_do_callbacks(void);
 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 				       struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
+static void __init rcu_spawn_boost_kthreads(void);
 static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
 static void rcu_prepare_for_idle(int cpu);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 4c1af96836f6..410c74424d96 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1435,14 +1435,13 @@ static struct smp_hotplug_thread rcu_cpu_thread_spec = {
 };
 
 /*
- * Spawn all kthreads -- called as soon as the scheduler is running.
+ * Spawn boost kthreads -- called as soon as the scheduler is running.
  */
-static int __init rcu_spawn_kthreads(void)
+static void __init rcu_spawn_boost_kthreads(void)
 {
 	struct rcu_node *rnp;
 	int cpu;
 
-	rcu_scheduler_fully_active = 1;
 	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
@@ -1452,9 +1451,7 @@ static int __init rcu_spawn_kthreads(void)
 	rcu_for_each_leaf_node(rcu_state_p, rnp)
 		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 	}
-	return 0;
 }
-early_initcall(rcu_spawn_kthreads);
 
 static void rcu_prepare_kthreads(int cpu)
 {
@@ -1492,12 +1489,9 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
-static int __init rcu_scheduler_really_started(void)
+static void __init rcu_spawn_boost_kthreads(void)
 {
-	rcu_scheduler_fully_active = 1;
-	return 0;
 }
-early_initcall(rcu_scheduler_really_started);
 
 static void rcu_prepare_kthreads(int cpu)
 {