diff options
author:    Paul E. McKenney <paul.mckenney@linaro.org>  2011-08-07 23:26:31 -0400
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-09-29 00:38:40 -0400
commit:    037067a1b6f9a70f862f3ed9d59fe28b7cd55ac4 (patch)
tree:      0f54deff3994de1bdb54707b7943307244ed692f /kernel/rcutree.c
parent:    82e78d80fc392ac7e98326bc8beeb8a679913ffd (diff)
rcu: Prohibit grace periods during early boot
Greater use of RCU during early boot (before the scheduler is operating)
is causing RCU to attempt to start grace periods during that time, which
in turn is resulting in both RCU and the callback functions attempting
to use the scheduler before it is ready.
This commit prevents these problems by prohibiting RCU grace periods
until after the scheduler has spawned the first non-idle task.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c | 7 ++++---
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 0051dbf6958e..9970116163ba 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -838,8 +838,11 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
-		if (cpu_needs_another_gp(rsp, rdp))
+	if (!rcu_scheduler_fully_active ||
+	    !cpu_needs_another_gp(rsp, rdp) ||
+	    rsp->fqs_active) {
+		if (rcu_scheduler_fully_active &&
+		    cpu_needs_another_gp(rsp, rdp))
 			rsp->fqs_need_gp = 1;
 		if (rnp->completed == rsp->completed) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);