author     Ingo Molnar <mingo@kernel.org>    2016-03-15 04:00:12 -0400
committer  Ingo Molnar <mingo@kernel.org>    2016-03-15 04:01:06 -0400
commit     8bc6782fe20bd2584c73a35c47329c9fd0a8d34c (patch)
tree       c7fc6f467ee212e4ef442e70843c48fcf3c67c17 /kernel/rcu/tree.c
parent     e23604edac2a7be6a8808a5d13fac6b9df4eb9a8 (diff)
parent     3500efae4410454522697c94c23fc40323c0cee9 (diff)

Merge commit 'fixes.2015.02.23a' into core/rcu

Conflicts:
	kernel/rcu/tree.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 143
1 file changed, 70 insertions, 73 deletions
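Nearly every hunk below converts an open-coded operation on an rcu_node structure's ->lock into one of the *_rcu_node() helpers, and the final hunk switches lock initialization to the ACCESS_PRIVATE() accessor. For orientation, here is a minimal sketch of what those helpers look like, assuming the kernel/rcu/tree.h definitions of this time frame; the exact bodies are an assumption for illustration and are not part of this diff. The lock-side helpers add smp_mb__after_unlock_lock() so that unlock-then-lock sequences on rcu_node locks provide the full memory ordering that grace-period detection depends on, while ACCESS_PRIVATE() pairs with marking ->lock __private so that sparse can flag any remaining direct use.

/*
 * Sketch of the rcu_node ->lock wrappers (assumed from kernel/rcu/tree.h
 * of this era; illustrative only, not part of this commit).
 */
#define raw_spin_lock_rcu_node(p)					\
do {									\
	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();	/* Grace-period memory ordering. */ \
} while (0)

#define raw_spin_unlock_rcu_node(p)					\
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();	/* Grace-period memory ordering. */ \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/* The _irq variants follow the same pattern. */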
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9fd5b628a88d..55cea189783f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -108,7 +108,6 @@ RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
 
 static struct rcu_state *const rcu_state_p;
-static struct rcu_data __percpu *const rcu_data_p;
 LIST_HEAD(rcu_struct_flavors);
 
 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1083,13 +1082,12 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 	rcu_sysidle_check_cpu(rdp, isidle, maxj);
 	if ((rdp->dynticks_snap & 0x1) == 0) {
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
-		return 1;
-	} else {
 		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
 				 rdp->mynode->gpnum))
 			WRITE_ONCE(rdp->gpwrap, true);
-		return 0;
+		return 1;
 	}
+	return 0;
 }
 
 /*
@@ -1173,15 +1171,16 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
 			WRITE_ONCE(*rcrmp,
 				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
-			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
-		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-			/* Time to beat on that CPU again! */
-			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
 		}
+		rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
 	}
 
+	/* And if it has been a really long time, kick the CPU as well. */
+	if (ULONG_CMP_GE(jiffies,
+			 rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
+	    ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
+		resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+
 	return 0;
 }
 
@@ -1246,7 +1245,7 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 				if (rnp->qsmask & (1UL << cpu))
 					dump_cpu_task(rnp->grplo + cpu);
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -1266,12 +1265,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	WRITE_ONCE(rsp->jiffies_stall,
 		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	/*
 	 * OK, time to rat on our buddy...
@@ -1292,7 +1291,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 					ndetected++;
 				}
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 
 	print_cpu_stall_info_end();
@@ -1357,7 +1356,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
 		WRITE_ONCE(rsp->jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	/*
 	 * Attempt to revive the RCU machinery by forcing a context switch.
@@ -1595,7 +1594,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	}
 unlock_out:
 	if (rnp != rnp_root)
-		raw_spin_unlock(&rnp_root->lock);
+		raw_spin_unlock_rcu_node(rnp_root);
 out:
 	if (c_out != NULL)
 		*c_out = c;
@@ -1814,7 +1813,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 		return;
 	}
 	needwake = __note_gp_changes(rsp, rnp, rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rsp);
 }
@@ -1839,7 +1838,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	raw_spin_lock_irq_rcu_node(rnp);
 	if (!READ_ONCE(rsp->gp_flags)) {
 		/* Spurious wakeup, tell caller to go back to sleep. */
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
 	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
@@ -1849,7 +1848,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		 * Grace period already in progress, don't start another.
 		 * Not supposed to be able to happen.
 		 */
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
 
@@ -1858,7 +1857,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	/* Record GP times before starting GP, hence smp_store_release(). */
 	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
 	 * Apply per-leaf buffered online and offline operations to the
@@ -1872,7 +1871,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
-			raw_spin_unlock_irq(&rnp->lock);
+			raw_spin_unlock_irq_rcu_node(rnp);
 			continue;
 		}
 
@@ -1906,7 +1905,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 				rcu_cleanup_dead_rnp(rnp);
 		}
 
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 	}
 
 	/*
@@ -1937,7 +1936,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
@@ -1995,7 +1994,7 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
 		raw_spin_lock_irq_rcu_node(rnp);
 		WRITE_ONCE(rsp->gp_flags,
 			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 	}
 }
 
@@ -2025,7 +2024,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	 * safe for us to drop the lock in order to mark the grace
 	 * period as completed in all of the rcu_node structures.
 	 */
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
 	 * Propagate new ->completed value to rcu_node structures so
@@ -2047,7 +2046,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		sq = rcu_nocb_gp_get(rnp);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2070,7 +2069,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 				       READ_ONCE(rsp->gpnum),
 				       TPS("newreq"));
 	}
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 }
 
 /*
@@ -2236,18 +2235,20 @@ static bool rcu_start_gp(struct rcu_state *rsp)
 }
 
 /*
- * Report a full set of quiescent states to the specified rcu_state
- * data structure. This involves cleaning up after the prior grace
- * period and letting rcu_start_gp() start up the next grace period
- * if one is needed. Note that the caller must hold rnp->lock, which
- * is released before return.
+ * Report a full set of quiescent states to the specified rcu_state data
+ * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period
+ * kthread if another grace period is required. Whether we wake
+ * the grace-period kthread or it awakens itself for the next round
+ * of quiescent-state forcing, that kthread will clean up after the
+ * just-completed grace period. Note that the caller must hold rnp->lock,
+ * which is released before return.
  */
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 	swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
 }
 
@@ -2277,7 +2278,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			 * Our bit has already been cleared, or the
 			 * relevant grace period is already over, so done.
 			 */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
@@ -2289,7 +2290,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
 			/* Other bits still set at this level, so done. */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
 		mask = rnp->grpmask;
@@ -2299,7 +2300,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
 			break;
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		rnp_c = rnp;
 		rnp = rnp->parent;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2331,7 +2332,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 
 	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
 	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;	/* Still need more quiescent states! */
 	}
 
@@ -2348,19 +2349,14 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
 	gps = rnp->gpnum;
 	mask = rnp->grpmask;
-	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
+	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
 	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }
 
 /*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
- * structure. This must be either called from the specified CPU, or
- * called when the specified CPU is known to be offline (and when it is
- * also known that no other CPU is concurrently trying to help the offline
- * CPU). The lastcomp argument is used to make sure we are still in the
- * grace period of interest. We don't want to end the current grace period
- * based on quiescent states detected in an earlier grace period!
+ * structure. This must be called from the specified CPU.
  */
 static void
 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
@@ -2385,14 +2381,14 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 		 */
 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	mask = rdp->grpmask;
 	if ((rnp->qsmask & mask) == 0) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	} else {
-		rdp->core_needs_qs = 0;
+		rdp->core_needs_qs = false;
 
 		/*
 		 * This GP can't end until cpu checks in, so all of our
@@ -2601,10 +2597,11 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 		rnp->qsmaskinit &= ~mask;
 		rnp->qsmask &= ~mask;
 		if (rnp->qsmaskinit) {
-			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+			raw_spin_unlock_rcu_node(rnp);
+			/* irqs remain disabled. */
 			return;
 		}
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	}
 }
 
@@ -2627,7 +2624,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 	mask = rdp->grpmask;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
 	rnp->qsmaskinitnext &= ~mask;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -2861,7 +2858,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
 		} else {
 			/* Nothing to do here, so just drop the lock. */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 	}
 }
@@ -2897,11 +2894,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		rsp->n_force_qs_lh++;
-		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 		return;	/* Someone beat us to it. */
 	}
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 	swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
 }
 
@@ -2927,7 +2924,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	if (cpu_needs_another_gp(rsp, rdp)) {
 		raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
 		needwake = rcu_start_gp(rsp);
-		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	} else {
@@ -3018,7 +3015,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
 			raw_spin_lock_rcu_node(rnp_root);
 			needwake = rcu_start_gp(rsp);
-			raw_spin_unlock(&rnp_root->lock);
+			raw_spin_unlock_rcu_node(rnp_root);
 			if (needwake)
 				rcu_gp_kthread_wake(rsp);
 		} else {
@@ -3438,14 +3435,14 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			continue;  /* No new CPUs, nothing to do. */
 		}
 
 		/* Update this node's mask, track old value for propagation. */
 		oldmask = rnp->expmaskinit;
 		rnp->expmaskinit = rnp->expmaskinitnext;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 		/* If was already nonzero, nothing to propagate. */
 		if (oldmask)
@@ -3460,7 +3457,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 			if (rnp_up->expmaskinit)
 				done = true;
 			rnp_up->expmaskinit |= mask;
-			raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
 			if (done)
 				break;
 			mask = rnp_up->grpmask;
@@ -3483,7 +3480,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		WARN_ON_ONCE(rnp->expmask);
 		rnp->expmask = rnp->expmaskinit;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -3524,11 +3521,11 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			if (!rnp->expmask)
 				rcu_initiate_boost(rnp, flags);
 			else
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			break;
 		}
 		if (rnp->parent == NULL) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
 				swake_up(&rsp->expedited_wq);
@@ -3536,7 +3533,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			break;
 		}
 		mask = rnp->grpmask;
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
 		rnp = rnp->parent;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
 		WARN_ON_ONCE(!(rnp->expmask & mask));
@@ -3571,7 +3568,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!(rnp->expmask & mask)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	rnp->expmask &= ~mask;
@@ -3732,7 +3729,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 		 */
 		if (rcu_preempt_has_tasks(rnp))
 			rnp->exp_tasks = rnp->blkd_tasks.next;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 		/* IPI the remaining CPUs for expedited quiescent state. */
 		mask = 1;
@@ -3749,7 +3746,7 @@ retry_ipi:
 			raw_spin_lock_irqsave_rcu_node(rnp, flags);
 			if (cpu_online(cpu) &&
 			    (rnp->expmask & mask)) {
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 				schedule_timeout_uninterruptible(1);
 				if (cpu_online(cpu) &&
 				    (rnp->expmask & mask))
@@ -3758,7 +3755,7 @@ retry_ipi:
 			}
 			if (!(rnp->expmask & mask))
 				mask_ofl_ipi &= ~mask;
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 		/* Report quiescent states for those that went offline. */
 		mask_ofl_test |= mask_ofl_ipi;
@@ -4165,7 +4162,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 			return;
 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
 		rnp->qsmaskinit |= mask;
-		raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
 	}
 }
 
@@ -4189,7 +4186,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->rsp = rsp;
 	mutex_init(&rdp->exp_funnel_mutex);
 	rcu_boot_init_nocb_percpu_data(rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -4217,7 +4214,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rcu_sysidle_init_percpu_data(rdp->dynticks);
 	atomic_set(&rdp->dynticks->dynticks,
 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
-	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
+	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
 
 	/*
 	 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
@@ -4238,7 +4235,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
 	rdp->core_needs_qs = false;
 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void rcu_prepare_cpu(int cpu)
@@ -4360,7 +4357,7 @@ static int __init rcu_spawn_gp_kthread(void)
 			sp.sched_priority = kthread_prio;
 			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		wake_up_process(t);
 	}
 	rcu_spawn_nocb_kthreads();
@@ -4451,8 +4448,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		cpustride *= levelspread[i];
 		rnp = rsp->level[i];
 		for (j = 0; j < levelcnt[i]; j++, rnp++) {
-			raw_spin_lock_init(&rnp->lock);
-			lockdep_set_class_and_name(&rnp->lock,
+			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
+			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
 						   &rcu_node_class[i], buf[i]);
 			raw_spin_lock_init(&rnp->fqslock);
 			lockdep_set_class_and_name(&rnp->fqslock,