Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c         26
-rw-r--r--  kernel/rcutree_plugin.h  15
2 files changed, 36 insertions, 5 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7e59ffb3d0ba..ba06207b1dd3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -84,9 +84,32 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
 
+/*
+ * The rcu_scheduler_active variable transitions from zero to one just
+ * before the first task is spawned. So when this variable is zero, RCU
+ * can assume that there is but one task, allowing RCU to (for example)
+ * optimize synchronize_sched() to a simple barrier(). When this variable
+ * is one, RCU must actually do all the hard work required to detect real
+ * grace periods. This variable is also used to suppress boot-time false
+ * positives from lockdep-RCU error checking.
+ */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+/*
+ * The rcu_scheduler_fully_active variable transitions from zero to one
+ * during the early_initcall() processing, which is after the scheduler
+ * is capable of creating new tasks. So RCU processing (for example,
+ * creating tasks for RCU priority boosting) must be delayed until after
+ * rcu_scheduler_fully_active transitions from zero to one. We also
+ * currently delay invocation of any RCU callbacks until after this point.
+ *
+ * It might later prove better for people registering RCU callbacks during
+ * early boot to take responsibility for these callbacks, but one step at
+ * a time.
+ */
+static int rcu_scheduler_fully_active __read_mostly;
+
 #ifdef CONFIG_RCU_BOOST
 
 /*
@@ -98,7 +121,6 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
-static char rcu_kthreads_spawnable;
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
@@ -1467,6 +1489,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
+	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+		return;
 	if (likely(!rsp->boost)) {
 		rcu_do_batch(rsp, rdp);
 		return;
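
The comments and the new check above describe a two-stage boot sequence: rcu_scheduler_active becomes one just before the first task is spawned, while rcu_scheduler_fully_active becomes one only during early_initcall() processing, and invoke_rcu_callbacks() now returns immediately until that second flag is set, so callbacks registered earlier simply stay queued. Below is a minimal standalone sketch of that gating idea; it is not kernel code, and every name in it (scheduler_fully_active, invoke_callbacks, and so on) is invented for illustration.

#include <stdio.h>

static int scheduler_fully_active;	/* 0 during "early boot", as in the patch */

struct callback {
	void (*func)(void);
	struct callback *next;
};

static struct callback *pending;	/* callbacks queued before it is safe to run them */

static void invoke_callbacks(void)
{
	/* Mirrors the new check in invoke_rcu_callbacks(): bail out until fully active. */
	if (!scheduler_fully_active)
		return;
	while (pending) {
		struct callback *cb = pending;

		pending = cb->next;
		cb->func();
	}
}

static void hello(void)
{
	puts("deferred callback ran");
}

int main(void)
{
	static struct callback cb = { hello, NULL };

	pending = &cb;
	invoke_callbacks();		/* too early: the flag is still zero, nothing runs */
	scheduler_fully_active = 1;	/* what the early_initcall() in the hunks below does for RCU */
	invoke_callbacks();		/* now the queued callback is finally invoked */
	return 0;
}
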
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 14dc7dd00902..75113cb7c4fb 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1532,7 +1532,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (!rcu_kthreads_spawnable ||
+	if (!rcu_scheduler_fully_active ||
 	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
 		return 0;
 	t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
@@ -1639,7 +1639,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (!rcu_kthreads_spawnable ||
+	if (!rcu_scheduler_fully_active ||
 	    rnp->qsmaskinit == 0)
 		return 0;
 	if (rnp->node_kthread_task == NULL) {
@@ -1665,7 +1665,7 @@ static int __init rcu_spawn_kthreads(void)
 	int cpu;
 	struct rcu_node *rnp;
 
-	rcu_kthreads_spawnable = 1;
+	rcu_scheduler_fully_active = 1;
 	for_each_possible_cpu(cpu) {
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 		if (cpu_online(cpu))
@@ -1687,7 +1687,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-	if (rcu_kthreads_spawnable) {
+	if (rcu_scheduler_fully_active) {
 		(void)rcu_spawn_one_cpu_kthread(cpu);
 		if (rnp->node_kthread_task == NULL)
 			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
@@ -1726,6 +1726,13 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
 }
 
+static int __init rcu_scheduler_really_started(void)
+{
+	rcu_scheduler_fully_active = 1;
+	return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
 static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
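
The rcu_scheduler_really_started() function added above is an ordinary early_initcall(): it runs once, after the scheduler is able to create new tasks (as the comment in rcutree.c puts it), which is what makes it a safe point to flip rcu_scheduler_fully_active. For reference, a minimal sketch of that registration pattern, using a hypothetical function name and assuming only the standard early_initcall() macro from <linux/init.h>:

#include <linux/init.h>

/* Hypothetical example, not part of this patch. */
static int __init my_early_setup(void)
{
	/*
	 * Runs during early_initcall() processing, after the scheduler can
	 * spawn tasks, which is the same point at which this patch sets
	 * rcu_scheduler_fully_active to one.
	 */
	return 0;	/* zero indicates success */
}
early_initcall(my_early_setup);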