Diffstat (limited to 'kernel/rcu/tree.c')

 kernel/rcu/tree.c | 46 ++++++++++++++++++++++++++++++++++++----------
 1 file changed, 36 insertions(+), 10 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5d80925e7fc8..69a5611a7e7c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -41,7 +41,6 @@
 #include <linux/export.h>
 #include <linux/completion.h>
 #include <linux/moduleparam.h>
-#include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
@@ -60,7 +59,6 @@
 #include "tree.h"
 #include "rcu.h"
 
-MODULE_ALIAS("rcutree");
 #ifdef MODULE_PARAM_PREFIX
 #undef MODULE_PARAM_PREFIX
 #endif
@@ -1848,6 +1846,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                              struct rcu_data *rdp)
 {
        bool ret;
+       bool need_gp;
 
        /* Handle the ends of any preceding grace periods first. */
        if (rdp->completed == rnp->completed &&
@@ -1874,9 +1873,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                 */
                rdp->gpnum = rnp->gpnum;
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
-               rdp->cpu_no_qs.b.norm = true;
+               need_gp = !!(rnp->qsmask & rdp->grpmask);
+               rdp->cpu_no_qs.b.norm = need_gp;
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-               rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
+               rdp->core_needs_qs = need_gp;
                zero_cpu_stall_ticks(rdp);
                WRITE_ONCE(rdp->gpwrap, false);
        }
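The new need_gp local makes the two per-CPU fields agree. Previously, at the start of each grace period, rdp->cpu_no_qs.b.norm was unconditionally set to true even when rnp->qsmask showed that this CPU was not expected to report a quiescent state, while rdp->core_needs_qs got the qsmask-derived value. Both now come from the same test.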
@@ -2344,7 +2344,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
        raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
-       swake_up(&rsp->gp_wq);  /* Memory barrier implied by swake_up() path. */
+       rcu_gp_kthread_wake(rsp);
 }
 
 /*
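Both this hunk and the following one replace the bare swake_up() with rcu_gp_kthread_wake(). That helper is defined elsewhere in tree.c and is not part of this diff; as a rough sketch of its behavior (the exact body may differ), it suppresses wakeups that would be self-wakeups or that would target a grace-period kthread that does not yet exist:

        static void rcu_gp_kthread_wake(struct rcu_state *rsp)
        {
                if (current == rsp->gp_kthread ||       /* Self-wakeup is pointless... */
                    !READ_ONCE(rsp->gp_flags) ||        /* ...as is waking with no work to do... */
                    !rsp->gp_kthread)                   /* ...or before the kthread is spawned. */
                        return;
                swake_up(&rsp->gp_wq);
        }

The early-boot case is the interesting one: before the grace-period kthread exists, a direct swake_up() on rsp->gp_wq would be a wasted wakeup on an unwatched queue.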
@@ -2970,7 +2970,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
        }
        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
        raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
-       swake_up(&rsp->gp_wq);  /* Memory barrier implied by swake_up() path. */
+       rcu_gp_kthread_wake(rsp);
 }
 
 /*
@@ -3013,7 +3013,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 /*
  * Do RCU core processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
        struct rcu_state *rsp;
 
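The __latent_entropy annotation marks this softirq handler for the latent_entropy gcc plugin (CONFIG_GCC_PLUGIN_LATENT_ENTROPY), which instruments annotated functions so that their execution contributes to the kernel's entropy pool; it does not change the function's logic.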
@@ -3792,8 +3792,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rnp = rdp->mynode;
        mask = rdp->grpmask;
        raw_spin_lock_rcu_node(rnp);            /* irqs already disabled. */
-       rnp->qsmaskinitnext |= mask;
-       rnp->expmaskinitnext |= mask;
        if (!rdp->beenonline)
                WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
        rdp->beenonline = true;  /* We have now been online. */
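The qsmaskinitnext and expmaskinitnext updates are not deleted; they move into the new rcu_cpu_starting() function added below, decoupling the "mark this CPU online for grace periods" step from the rest of per-CPU data initialization.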
@@ -3860,6 +3858,32 @@ int rcutree_dead_cpu(unsigned int cpu)
        return 0;
 }
 
+/*
+ * Mark the specified CPU as being online so that subsequent grace periods
+ * (both expedited and normal) will wait on it.  Note that this means that
+ * incoming CPUs are not allowed to use RCU read-side critical sections
+ * until this function is called.  Failing to observe this restriction
+ * will result in lockdep splats.
+ */
+void rcu_cpu_starting(unsigned int cpu)
+{
+       unsigned long flags;
+       unsigned long mask;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp) {
+               rdp = this_cpu_ptr(rsp->rda);
+               rnp = rdp->mynode;
+               mask = rdp->grpmask;
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+               rnp->qsmaskinitnext |= mask;
+               rnp->expmaskinitnext |= mask;
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       }
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
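For hot-plugged CPUs, rcu_cpu_starting() is presumably invoked from the early CPU-bringup path on the incoming CPU (its call site is in another file and not part of this diff), before that CPU can execute its first RCU read-side critical section; hence the lockdep warning promised in the header comment.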
@@ -4209,8 +4233,10 @@ void __init rcu_init(void)
         * or the scheduler are operational.
         */
        pm_notifier(rcu_pm_notify, 0);
-       for_each_online_cpu(cpu)
+       for_each_online_cpu(cpu) {
                rcutree_prepare_cpu(cpu);
+               rcu_cpu_starting(cpu);
+       }
 }
 
 #include "tree_exp.h"
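The rcu_init() change covers the boot CPU: rcu_init() runs before interrupts or the scheduler are operational, so no hotplug callback will ever fire for CPUs already online at that point, and rcu_cpu_starting() must be called for them by hand.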