author		Ingo Molnar <mingo@kernel.org>	2016-03-15 04:00:12 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-03-15 04:01:06 -0400
commit		8bc6782fe20bd2584c73a35c47329c9fd0a8d34c (patch)
tree		c7fc6f467ee212e4ef442e70843c48fcf3c67c17
parent		e23604edac2a7be6a8808a5d13fac6b9df4eb9a8 (diff)
parent		3500efae4410454522697c94c23fc40323c0cee9 (diff)

Merge commit 'fixes.2015.02.23a' into core/rcu

Conflicts:
	kernel/rcu/tree.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	include/linux/compiler.h	|  12
-rw-r--r--	include/linux/irq.h		|   6
-rw-r--r--	include/linux/rcupdate.h	|   2
-rw-r--r--	include/linux/srcu.h		|  19
-rw-r--r--	kernel/irq/internals.h		|   4
-rw-r--r--	kernel/rcu/rcutorture.c		|  14
-rw-r--r--	kernel/rcu/tiny_plugin.h	|  15
-rw-r--r--	kernel/rcu/tree.c		| 143
-rw-r--r--	kernel/rcu/tree.h		|  42
-rw-r--r--	kernel/rcu/tree_plugin.h	|  27
-rwxr-xr-x	scripts/checkpatch.pl		|   3
11 files changed, 159 insertions(+), 128 deletions(-)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a27f4f17c382..b5ff9881bef8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -20,12 +20,14 @@
 # define __pmem	__attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
-#else
+#else /* CONFIG_SPARSE_RCU_POINTER */
 # define __rcu
-#endif
+#endif /* CONFIG_SPARSE_RCU_POINTER */
+# define __private	__attribute__((noderef))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
-#else
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
 # define __user
 # define __kernel
 # define __safe
@@ -44,7 +46,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __percpu
 # define __rcu
 # define __pmem
-#endif
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+#endif /* __CHECKER__ */
 
 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
 #define ___PASTE(a,b) a##b
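
For illustration, a minimal sketch (not taken from this commit; the struct and accessor names are hypothetical) of how the new __private annotation and ACCESS_PRIVATE() macro are meant to pair up. Under sparse (__CHECKER__), a direct dereference of a __private member triggers a noderef warning, while ACCESS_PRIVATE() force-casts the annotation away; in normal builds both reduce to plain accesses.

	/* Hypothetical example only. */
	struct foo {
		int __private internal_state;	/* meant to be touched only via accessors */
		int public_count;
	};

	static inline int foo_get_state(struct foo *f)
	{
		/* Accessor owned by the defining file: strips the __private marker. */
		return ACCESS_PRIVATE(f, internal_state);
	}
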
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3c1c96786248..cd14cd4a22b4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -137,7 +137,7 @@ struct irq_domain;
  * @msi_desc:		MSI descriptor
  */
 struct irq_common_data {
-	unsigned int		state_use_accessors;
+	unsigned int		__private state_use_accessors;
 #ifdef CONFIG_NUMA
 	unsigned int		node;
 #endif
@@ -208,7 +208,7 @@ enum {
 	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
 };
 
-#define __irqd_to_state(d)	((d)->common->state_use_accessors)
+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
 
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
 {
@@ -299,6 +299,8 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
 	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }
 
+#undef __irqd_to_state
+
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
 	return d->hwirq;
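
The #undef __irqd_to_state added after the accessor block is what keeps state_use_accessors effectively private: only the irqd_*() helpers defined between the #define and the #undef can reach the field. A hedged sketch of that pattern, with a made-up flag name:

	/* Illustration only; IRQD_EXAMPLE_FLAG is not a real flag. */
	#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

	static inline bool irqd_example_flag_set(struct irq_data *d)
	{
		return __irqd_to_state(d) & IRQD_EXAMPLE_FLAG;
	}

	#undef __irqd_to_state	/* code below this point must use the helpers above */
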
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 14e6f47ee16f..b5d48bd56e3f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -360,8 +360,6 @@ void rcu_user_exit(void);
 #else
 static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
-static inline void rcu_user_hooks_switch(struct task_struct *prev,
-					 struct task_struct *next) { }
 #endif /* CONFIG_NO_HZ_FULL */
 
 #ifdef CONFIG_RCU_NOCB_CPU
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index f5f80c5643ac..dc8eb63c6568 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,8 +99,23 @@ void process_srcu(struct work_struct *work);
 }
 
 /*
- * define and init a srcu struct at build time.
- * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique.  These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function.  If these rules are too
+ * restrictive, declare the srcu_struct manually.  For example, in
+ * each file:
+ *
+ *	static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ *	init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
  */
 #define __DEFINE_SRCU(name, is_static)	\
 	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
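
For illustration, a short sketch of the two options the new comment describes (the names here are illustrative, not part of this commit):

	/* Option 1: build-time definition; the name must be globally unique
	 * and DEFINE_STATIC_SRCU() may not be used inside a function. */
	DEFINE_STATIC_SRCU(my_static_srcu);

	/* Option 2 (from the comment above): declare manually, then
	 * initialize at run time before first use. */
	static struct srcu_struct my_srcu;

	static int __init my_srcu_setup(void)
	{
		return init_srcu_struct(&my_srcu);
	}
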
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index fcab63c66905..3d182932d2d1 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -160,6 +160,8 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
 	__irq_put_desc_unlock(desc, flags, false);
 }
 
+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+
 /*
  * Manipulation functions for irq_data.state
  */
@@ -188,6 +190,8 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
 	return __irqd_to_state(d) & mask;
 }
 
+#undef __irqd_to_state
+
 static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
 {
 	__this_cpu_inc(*desc->kstat_irqs);
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index d2988d047d66..65ae0e5c35da 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -932,12 +932,14 @@ rcu_torture_writer(void *arg)
 	int nsynctypes = 0;
 
 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
-	pr_alert("%s" TORTURE_FLAG
-		 " Grace periods expedited from boot/sysfs for %s,\n",
-		 torture_type, cur_ops->name);
-	pr_alert("%s" TORTURE_FLAG
-		 " Testing of dynamic grace-period expediting diabled.\n",
-		 torture_type);
+	if (!can_expedite) {
+		pr_alert("%s" TORTURE_FLAG
+			 " Grace periods expedited from boot/sysfs for %s,\n",
+			 torture_type, cur_ops->name);
+		pr_alert("%s" TORTURE_FLAG
+			 " Disabled dynamic grace-period expediting.\n",
+			 torture_type);
+	}
 
 	/* Initialize synctype[] array.  If none set, take default. */
 	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index e492a5253e0f..196f0302e2f4 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -23,7 +23,7 @@
  */
 
 #include <linux/kthread.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
@@ -122,18 +122,7 @@ free_out:
 	debugfs_remove_recursive(rcudir);
 	return 1;
 }
-
-static void __exit rcutiny_trace_cleanup(void)
-{
-	debugfs_remove_recursive(rcudir);
-}
-
-module_init(rcutiny_trace_init);
-module_exit(rcutiny_trace_cleanup);
-
-MODULE_AUTHOR("Paul E. McKenney");
-MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
-MODULE_LICENSE("GPL");
+device_initcall(rcutiny_trace_init);
 
 static void check_cpu_stall(struct rcu_ctrlblk *rcp)
 {
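
The conversion above follows the usual pattern for code that is only ever built in: once modular builds are off the table, the __exit path and MODULE_* metadata are dead weight, and module_init() collapses to an ordinary initcall. A hedged before/after sketch with illustrative names:

	/* Before: modular boilerplate. */
	module_init(example_trace_init);
	module_exit(example_trace_cleanup);

	/* After: plain boot-time initcall; no exit path or MODULE_* tags needed. */
	device_initcall(example_trace_init);
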
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9fd5b628a88d..55cea189783f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -108,7 +108,6 @@ RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
 
 static struct rcu_state *const rcu_state_p;
-static struct rcu_data __percpu *const rcu_data_p;
 LIST_HEAD(rcu_struct_flavors);
 
 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1083,13 +1082,12 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 	rcu_sysidle_check_cpu(rdp, isidle, maxj);
 	if ((rdp->dynticks_snap & 0x1) == 0) {
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
-		return 1;
-	} else {
 		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
 				 rdp->mynode->gpnum))
 			WRITE_ONCE(rdp->gpwrap, true);
-		return 0;
+		return 1;
 	}
+	return 0;
 }
 
 /*
@@ -1173,15 +1171,16 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
 			WRITE_ONCE(*rcrmp,
 				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
-			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
-		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-			/* Time to beat on that CPU again! */
-			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
 		}
+		rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
 	}
 
+	/* And if it has been a really long time, kick the CPU as well. */
+	if (ULONG_CMP_GE(jiffies,
+			 rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
+	    ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
+		resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+
 	return 0;
 }
 
@@ -1246,7 +1245,7 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 			if (rnp->qsmask & (1UL << cpu))
 				dump_cpu_task(rnp->grplo + cpu);
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -1266,12 +1265,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	WRITE_ONCE(rsp->jiffies_stall,
 		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	/*
 	 * OK, time to rat on our buddy...
@@ -1292,7 +1291,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 				ndetected++;
 			}
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 
 	print_cpu_stall_info_end();
@@ -1357,7 +1356,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
 		WRITE_ONCE(rsp->jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	/*
 	 * Attempt to revive the RCU machinery by forcing a context switch.
@@ -1595,7 +1594,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	}
 unlock_out:
 	if (rnp != rnp_root)
-		raw_spin_unlock(&rnp_root->lock);
+		raw_spin_unlock_rcu_node(rnp_root);
 out:
 	if (c_out != NULL)
 		*c_out = c;
@@ -1814,7 +1813,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 		return;
 	}
 	needwake = __note_gp_changes(rsp, rnp, rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rsp);
 }
@@ -1839,7 +1838,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	raw_spin_lock_irq_rcu_node(rnp);
 	if (!READ_ONCE(rsp->gp_flags)) {
 		/* Spurious wakeup, tell caller to go back to sleep. */
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
 	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
@@ -1849,7 +1848,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		 * Grace period already in progress, don't start another.
 		 * Not supposed to be able to happen.
 		 */
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
 
@@ -1858,7 +1857,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	/* Record GP times before starting GP, hence smp_store_release(). */
 	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
 	 * Apply per-leaf buffered online and offline operations to the
@@ -1872,7 +1871,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
-			raw_spin_unlock_irq(&rnp->lock);
+			raw_spin_unlock_irq_rcu_node(rnp);
 			continue;
 		}
 
@@ -1906,7 +1905,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 				rcu_cleanup_dead_rnp(rnp);
 		}
 
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 	}
 
 	/*
@@ -1937,7 +1936,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
@@ -1995,7 +1994,7 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
 		raw_spin_lock_irq_rcu_node(rnp);
 		WRITE_ONCE(rsp->gp_flags,
 			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 	}
 }
 
@@ -2025,7 +2024,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	 * safe for us to drop the lock in order to mark the grace
 	 * period as completed in all of the rcu_node structures.
 	 */
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
 	 * Propagate new ->completed value to rcu_node structures so
@@ -2047,7 +2046,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		sq = rcu_nocb_gp_get(rnp);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2070,7 +2069,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("newreq"));
 	}
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 }
 
 /*
@@ -2236,18 +2235,20 @@ static bool rcu_start_gp(struct rcu_state *rsp)
 }
 
 /*
- * Report a full set of quiescent states to the specified rcu_state
- * data structure.  This involves cleaning up after the prior grace
- * period and letting rcu_start_gp() start up the next grace period
- * if one is needed.  Note that the caller must hold rnp->lock, which
- * is released before return.
+ * Report a full set of quiescent states to the specified rcu_state data
+ * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
+ * kthread if another grace period is required.  Whether we wake
+ * the grace-period kthread or it awakens itself for the next round
+ * of quiescent-state forcing, that kthread will clean up after the
+ * just-completed grace period.  Note that the caller must hold rnp->lock,
+ * which is released before return.
  */
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 	swake_up(&rsp->gp_wq);  /* Memory barrier implied by swake_up() path. */
 }
 
@@ -2277,7 +2278,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			 * Our bit has already been cleared, or the
 			 * relevant grace period is already over, so done.
 			 */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
@@ -2289,7 +2290,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
 			/* Other bits still set at this level, so done. */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
 		mask = rnp->grpmask;
@@ -2299,7 +2300,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
 			break;
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		rnp_c = rnp;
 		rnp = rnp->parent;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2331,7 +2332,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 
 	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
 	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;  /* Still need more quiescent states! */
 	}
 
@@ -2348,19 +2349,14 @@
 	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
 	gps = rnp->gpnum;
 	mask = rnp->grpmask;
-	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
+	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
 	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }
 
 /*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
- * structure.  This must be either called from the specified CPU, or
- * called when the specified CPU is known to be offline (and when it is
- * also known that no other CPU is concurrently trying to help the offline
- * CPU).  The lastcomp argument is used to make sure we are still in the
- * grace period of interest.  We don't want to end the current grace period
- * based on quiescent states detected in an earlier grace period!
+ * structure.  This must be called from the specified CPU.
  */
 static void
 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
@@ -2385,14 +2381,14 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 		 */
 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	mask = rdp->grpmask;
 	if ((rnp->qsmask & mask) == 0) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	} else {
-		rdp->core_needs_qs = 0;
+		rdp->core_needs_qs = false;
 
 		/*
 		 * This GP can't end until cpu checks in, so all of our
@@ -2601,10 +2597,11 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 		rnp->qsmaskinit &= ~mask;
 		rnp->qsmask &= ~mask;
 		if (rnp->qsmaskinit) {
-			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+			raw_spin_unlock_rcu_node(rnp);
+			/* irqs remain disabled. */
 			return;
 		}
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	}
 }
 
@@ -2627,7 +2624,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 	mask = rdp->grpmask;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
 	rnp->qsmaskinitnext &= ~mask;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -2861,7 +2858,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
 		} else {
 			/* Nothing to do here, so just drop the lock. */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 	}
 }
@@ -2897,11 +2894,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		rsp->n_force_qs_lh++;
-		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 		return;  /* Someone beat us to it. */
 	}
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 	swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
 }
 
@@ -2927,7 +2924,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	if (cpu_needs_another_gp(rsp, rdp)) {
 		raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
 		needwake = rcu_start_gp(rsp);
-		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	} else {
@@ -3018,7 +3015,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
 			raw_spin_lock_rcu_node(rnp_root);
 			needwake = rcu_start_gp(rsp);
-			raw_spin_unlock(&rnp_root->lock);
+			raw_spin_unlock_rcu_node(rnp_root);
 			if (needwake)
 				rcu_gp_kthread_wake(rsp);
 		} else {
@@ -3438,14 +3435,14 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			continue;  /* No new CPUs, nothing to do. */
 		}
 
 		/* Update this node's mask, track old value for propagation. */
 		oldmask = rnp->expmaskinit;
 		rnp->expmaskinit = rnp->expmaskinitnext;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 		/* If was already nonzero, nothing to propagate. */
 		if (oldmask)
@@ -3460,7 +3457,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 			if (rnp_up->expmaskinit)
 				done = true;
 			rnp_up->expmaskinit |= mask;
-			raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
 			if (done)
 				break;
 			mask = rnp_up->grpmask;
@@ -3483,7 +3480,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		WARN_ON_ONCE(rnp->expmask);
 		rnp->expmask = rnp->expmaskinit;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -3524,11 +3521,11 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			if (!rnp->expmask)
 				rcu_initiate_boost(rnp, flags);
 			else
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			break;
 		}
 		if (rnp->parent == NULL) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
 				swake_up(&rsp->expedited_wq);
@@ -3536,7 +3533,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			break;
 		}
 		mask = rnp->grpmask;
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
 		rnp = rnp->parent;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
 		WARN_ON_ONCE(!(rnp->expmask & mask));
@@ -3571,7 +3568,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!(rnp->expmask & mask)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	rnp->expmask &= ~mask;
@@ -3732,7 +3729,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 		 */
 		if (rcu_preempt_has_tasks(rnp))
 			rnp->exp_tasks = rnp->blkd_tasks.next;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 		/* IPI the remaining CPUs for expedited quiescent state. */
 		mask = 1;
@@ -3749,7 +3746,7 @@ retry_ipi:
 			raw_spin_lock_irqsave_rcu_node(rnp, flags);
 			if (cpu_online(cpu) &&
 			    (rnp->expmask & mask)) {
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 				schedule_timeout_uninterruptible(1);
 				if (cpu_online(cpu) &&
 				    (rnp->expmask & mask))
@@ -3758,7 +3755,7 @@ retry_ipi:
 			}
 			if (!(rnp->expmask & mask))
 				mask_ofl_ipi &= ~mask;
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 		/* Report quiescent states for those that went offline. */
 		mask_ofl_test |= mask_ofl_ipi;
@@ -4165,7 +4162,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 			return;
 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
 		rnp->qsmaskinit |= mask;
-		raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
 	}
 }
 
@@ -4189,7 +4186,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->rsp = rsp;
 	mutex_init(&rdp->exp_funnel_mutex);
 	rcu_boot_init_nocb_percpu_data(rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -4217,7 +4214,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rcu_sysidle_init_percpu_data(rdp->dynticks);
 	atomic_set(&rdp->dynticks->dynticks,
 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
-	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
+	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
 
 	/*
 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
@@ -4238,7 +4235,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
 	rdp->core_needs_qs = false;
 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void rcu_prepare_cpu(int cpu)
@@ -4360,7 +4357,7 @@ static int __init rcu_spawn_gp_kthread(void)
 			sp.sched_priority = kthread_prio;
 			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		wake_up_process(t);
 	}
 	rcu_spawn_nocb_kthreads();
@@ -4451,8 +4448,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		cpustride *= levelspread[i];
 		rnp = rsp->level[i];
 		for (j = 0; j < levelcnt[i]; j++, rnp++) {
-			raw_spin_lock_init(&rnp->lock);
-			lockdep_set_class_and_name(&rnp->lock,
+			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
+			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
 						   &rcu_node_class[i], buf[i]);
 			raw_spin_lock_init(&rnp->fqslock);
 			lockdep_set_class_and_name(&rnp->fqslock,
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bbd235d0e71f..df668c0f9e64 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -150,8 +150,9 @@ struct rcu_dynticks {
  * Definition for node within the RCU grace-period-detection hierarchy.
  */
 struct rcu_node {
-	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
-				/*  rcu_state fields as well as following. */
+	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
+					/*  some rcu_state fields as well as */
+					/*  following. */
 	unsigned long gpnum;	/* Current grace period for this node. */
 				/*  This will either be equal to or one */
 				/*  behind the root rcu_node's gpnum. */
@@ -682,7 +683,7 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 #endif /* #else #ifdef CONFIG_PPC */
 
 /*
- * Wrappers for the rcu_node::lock acquire.
+ * Wrappers for the rcu_node::lock acquire and release.
  *
  * Because the rcu_nodes form a tree, the tree traversal locking will observe
  * different lock values, this in turn means that an UNLOCK of one level
@@ -691,29 +692,48 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
  *
  * In order to restore full ordering between tree levels, augment the regular
  * lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * As ->lock of struct rcu_node is a __private field, therefore one should use
+ * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
  */
 static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock(&rnp->lock);
+	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }
 
+static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
+}
+
 static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock_irq(&rnp->lock);
+	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }
 
-#define raw_spin_lock_irqsave_rcu_node(rnp, flags)	\
-do {							\
-	typecheck(unsigned long, flags);		\
-	raw_spin_lock_irqsave(&(rnp)->lock, flags);	\
-	smp_mb__after_unlock_lock();			\
+static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
+}
+
+#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
+do {									\
+	typecheck(unsigned long, flags);				\
+	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
+	smp_mb__after_unlock_lock();					\
+} while (0)
+
+#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
+do {									\
+	typecheck(unsigned long, flags);				\
+	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
 } while (0)
 
 static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
 {
-	bool locked = raw_spin_trylock(&rnp->lock);
+	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
 
 	if (locked)
 		smp_mb__after_unlock_lock();
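
For reference, a minimal sketch (illustrative only, not part of this commit) of how the new acquire/release wrappers are expected to pair up once rcu_node::lock is __private; the lock side also supplies smp_mb__after_unlock_lock() to restore ordering across tree levels:

	static void example_rnp_update(struct rcu_node *rnp)
	{
		unsigned long flags;

		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		/* ... update fields protected by rnp->lock ... */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
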
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 080bd202d360..efdf7b61ce12 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -235,7 +235,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 		rnp->gp_tasks = &t->rcu_node_entry;
 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 		rnp->exp_tasks = &t->rcu_node_entry;
-	raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
+	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
 
 	/*
 	 * Report the quiescent state for the expedited GP.  This expedited
@@ -489,7 +489,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 						 !!rnp->gp_tasks);
 			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
 		} else {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 
 		/* Unboost if we were boosted. */
@@ -518,14 +518,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
 		sched_show_task(t);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -807,7 +807,6 @@ void exit_rcu(void)
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
-static struct rcu_data __percpu *const rcu_data_p = &rcu_sched_data;
 
 /*
  * Tell them what RCU they are running.
@@ -991,7 +990,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	 * might exit their RCU read-side critical sections on their own.
 	 */
 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return 0;
 	}
 
@@ -1028,7 +1027,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	/* Lock only for side effect: boosts task t's priority. */
 	rt_mutex_lock(&rnp->boost_mtx);
 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
@@ -1088,7 +1087,7 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		rnp->n_balk_exp_gp_tasks++;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	if (rnp->exp_tasks != NULL ||
@@ -1098,13 +1097,13 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
 		if (rnp->exp_tasks == NULL)
 			rnp->boost_tasks = rnp->gp_tasks;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		t = rnp->boost_kthread_task;
 		if (t)
 			rcu_wake_cond(t, rnp->boost_kthread_status);
 	} else {
 		rcu_initiate_boost_trace(rnp);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -1172,7 +1171,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rnp->boost_kthread_task = t;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
@@ -1308,7 +1307,7 @@ static void rcu_prepare_kthreads(int cpu)
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	__releases(rnp->lock)
 {
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void invoke_rcu_callbacks_kthread(void)
@@ -1559,7 +1558,7 @@ static void rcu_prepare_for_idle(void)
 		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	}
@@ -2064,7 +2063,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	needwake = rcu_start_future_gp(rnp, rdp, &c);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rdp->rsp);
 
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 0147c91fa549..874132b26d23 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -269,7 +269,8 @@ our $Sparse = qr{
 			__init_refok|
 			__kprobes|
 			__ref|
-			__rcu
+			__rcu|
+			__private
 		}x;
 our $InitAttributePrefix = qr{__(?:mem|cpu|dev|net_|)};
 our $InitAttributeData = qr{$InitAttributePrefix(?:initdata\b)};