author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2016-06-30 16:58:26 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2016-08-22 12:36:57 -0400
commit    7ec99de36f402618ae44147ac7fa9a07e4757a5f (patch)
tree      47d88c0cca429331ec1d20e89435fa5fefda972c
parent    29b4817d4018df78086157ea3a55c1d9424a7cfc (diff)
rcu: Provide exact CPU-online tracking for RCU
Up to now, RCU has assumed that the CPU-online process makes it from CPU_UP_PREPARE to set_cpu_online() within one jiffy. Given the recent rise of virtualized environments, this assumption is very clearly obsolete. Failing to meet this deadline can result in RCU paying attention to an incoming CPU for one jiffy, then ignoring it until the grace period following the one in which that CPU sets itself online. This situation might prove to be fatally disappointing to any RCU read-side critical sections that had the misfortune to execute during the time in which RCU was ignoring the slow-to-come-online CPU.

This commit therefore updates RCU's internal CPU state-tracking information at notify_cpu_starting() time, thus providing RCU with an exact transition of the CPU's state from offline to online.

Note that this means that incoming CPUs must not use RCU read-side critical sections (other than those of SRCU) until notify_cpu_starting() time. Note also that the CPU_STARTING notifiers -are- allowed to use RCU read-side critical sections. (Of course, CPU-hotplug notifiers are rapidly becoming obsolete, so you need to act fast!)

If a given architecture or CPU family needs to use RCU read-side critical sections earlier, the call to rcu_cpu_starting() from notify_cpu_starting() will need to be architecture-specific, with architectures that need early use being required to hand-place the call to rcu_cpu_starting() at some point preceding the call to notify_cpu_starting().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
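For an architecture or CPU family that does need RCU readers before notify_cpu_starting() runs, the hand-placed call described above might look roughly like the sketch below. This is not part of the patch: the function name example_arch_secondary_start_kernel() and the surrounding bring-up steps are hypothetical stand-ins for whatever the architecture's real secondary-CPU startup path does.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>

/* Hypothetical secondary-CPU entry point; the name and surrounding steps are illustrative only. */
void example_arch_secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();

	/* Early arch-specific setup: no RCU read-side critical sections allowed yet. */

	rcu_cpu_starting(cpu);		/* Hand-placed: grace periods now wait on this CPU. */

	rcu_read_lock();		/* Read-side critical sections are safe from this point on. */
	/* ... arch code that needs RCU protection ... */
	rcu_read_unlock();

	notify_cpu_starting(cpu);	/* Its built-in rcu_cpu_starting() call just re-sets the same mask bits. */
	set_cpu_online(cpu, true);
}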
-rw-r--r--  include/linux/rcupdate.h |  1
-rw-r--r--  kernel/cpu.c             |  1
-rw-r--r--  kernel/rcu/tree.c        | 32
3 files changed, 31 insertions, 3 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1aa62e1a761b..321f9ed552a9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -334,6 +334,7 @@ void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
 void rcu_report_dead(unsigned int cpu);
+void rcu_cpu_starting(unsigned int cpu);
 
 #ifndef CONFIG_TINY_RCU
 void rcu_end_inkernel_boot(void);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 341bf80f80bd..9482ceb928e0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -889,6 +889,7 @@ void notify_cpu_starting(unsigned int cpu)
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
 
+	rcu_cpu_starting(cpu);	/* All CPU_STARTING notifiers can use RCU. */
 	while (st->state < target) {
 		struct cpuhp_step *step;
 
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5d80925e7fc8..d2973fb85e8c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3792,8 +3792,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rnp = rdp->mynode;
 	mask = rdp->grpmask;
 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
-	rnp->qsmaskinitnext |= mask;
-	rnp->expmaskinitnext |= mask;
 	if (!rdp->beenonline)
 		WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
 	rdp->beenonline = true;	 /* We have now been online. */
@@ -3860,6 +3858,32 @@ int rcutree_dead_cpu(unsigned int cpu)
 	return 0;
 }
 
+/*
+ * Mark the specified CPU as being online so that subsequent grace periods
+ * (both expedited and normal) will wait on it.  Note that this means that
+ * incoming CPUs are not allowed to use RCU read-side critical sections
+ * until this function is called.  Failing to observe this restriction
+ * will result in lockdep splats.
+ */
+void rcu_cpu_starting(unsigned int cpu)
+{
+	unsigned long flags;
+	unsigned long mask;
+	struct rcu_data *rdp;
+	struct rcu_node *rnp;
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp) {
+		rdp = this_cpu_ptr(rsp->rda);
+		rnp = rdp->mynode;
+		mask = rdp->grpmask;
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
+		rnp->qsmaskinitnext |= mask;
+		rnp->expmaskinitnext |= mask;
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+	}
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
@@ -4209,8 +4233,10 @@ void __init rcu_init(void)
 	 * or the scheduler are operational.
 	 */
 	pm_notifier(rcu_pm_notify, 0);
-	for_each_online_cpu(cpu)
+	for_each_online_cpu(cpu) {
 		rcutree_prepare_cpu(cpu);
+		rcu_cpu_starting(cpu);
+	}
 }
 
 #include "tree_exp.h"