author     Linus Torvalds <torvalds@linux-foundation.org>    2017-11-13 15:18:10 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2017-11-13 15:18:10 -0500
commit     6098850e7e6978f95a958f79a645a653228d0002
tree       42e347ddd93cef05099b93157c32b80593572f02    /kernel/rcu/tree.c
parent     f08d8bcc12de5a153e587027e77de83662eefb8a
parent     72bc286b81d21404cdfecddf76b64c7163aac764
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:
"The main changes in this cycle are:
- Documentation updates
- RCU CPU stall-warning updates
- Torture-test updates
- Miscellaneous fixes
Size-wise the biggest updates are to documentation. Excluding
documentation, most of the code increase comes from a single commit
which expands debugging"
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
srcu: Add parameters to SRCU docbook comments
doc: Rewrite confusing statement about memory barriers
memory-barriers.txt: Fix typo in pairing example
rcu/segcblist: Include rcupdate.h
rcu: Add extended-quiescent-state testing advice
rcu: Suppress lockdep false-positive ->boost_mtx complaints
rcu: Do not include rtmutex_common.h unconditionally
torture: Provide TMPDIR environment variable to specify tmpdir
rcutorture: Dump writer stack if stalled
rcutorture: Add interrupt-disable capability to stall-warning tests
rcu: Suppress RCU CPU stall warnings while dumping trace
rcu: Turn off tracing before dumping trace
rcu: Make RCU CPU stall warnings check for irq-disabled CPUs
sched,rcu: Make cond_resched() provide RCU quiescent state
sched: Make resched_cpu() unconditional
irq_work: Map irq_work_on_queue() to irq_work_on() in !SMP
rcu: Create call_rcu_tasks() kthread at boot time
rcu: Fix up pending cbs check in rcu_prepare_for_idle
memory-barriers: Rework multicopy-atomicity section
memory-barriers: Replace uses of "transitive"
...
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--   kernel/rcu/tree.c   159
1 file changed, 127 insertions, 32 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3e3650e94ae6..e4fe06d42385 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -534,8 +534,8 @@ module_param(rcu_kick_kthreads, bool, 0644);
  * How long the grace period must be before we start recruiting
  * quiescent-state help from rcu_note_context_switch().
  */
-static ulong jiffies_till_sched_qs = HZ / 20;
-module_param(jiffies_till_sched_qs, ulong, 0644);
+static ulong jiffies_till_sched_qs = HZ / 10;
+module_param(jiffies_till_sched_qs, ulong, 0444);
 
 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
                                   struct rcu_data *rdp);
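(Aside: the hunk above both raises the default and, by changing the mode argument from 0644 to 0444, turns jiffies_till_sched_qs into a boot-time-only tunable. A minimal sketch of the module_param() pattern involved follows; "example_delay" is a hypothetical name, not part of this patch.)

/*
 * Sketch only.  module_param()'s third argument is the sysfs permission
 * mask: 0644 allows runtime writes via /sys/module/<module>/parameters/<name>,
 * while 0444 (as now used for jiffies_till_sched_qs) leaves the value
 * visible but settable only on the kernel command line.
 */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static ulong example_delay = HZ / 10;          /* default, in jiffies */
module_param(example_delay, ulong, 0444);      /* read-only through sysfs */
MODULE_PARM_DESC(example_delay, "Example delay in jiffies (boot-time only)");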
@@ -837,6 +837,9 @@ static void rcu_eqs_enter(bool user)
  * We crowbar the ->dynticks_nesting field to zero to allow for
  * the possibility of usermode upcalls having messed up our count
  * of interrupt nesting level during the prior busy period.
+ *
+ * If you add or remove a call to rcu_idle_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_idle_enter(void)
 {
@@ -852,6 +855,9 @@ void rcu_idle_enter(void)
  * is permitted between this call and rcu_user_exit(). This way the
  * CPU doesn't need to maintain the tick for RCU maintenance purposes
  * when the CPU runs in userspace.
+ *
+ * If you add or remove a call to rcu_user_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_user_enter(void)
 {
@@ -875,6 +881,9 @@ void rcu_user_enter(void)
  * Use things like work queues to work around this limitation.
  *
  * You have been warned.
+ *
+ * If you add or remove a call to rcu_irq_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_irq_exit(void)
 {
@@ -899,6 +908,9 @@ void rcu_irq_exit(void)
 
 /*
  * Wrapper for rcu_irq_exit() where interrupts are enabled.
+ *
+ * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_irq_exit_irqson(void)
 {
@@ -971,6 +983,9 @@ static void rcu_eqs_exit(bool user)
  * allow for the possibility of usermode upcalls messing up our count
  * of interrupt nesting level during the busy period that is just
  * now starting.
+ *
+ * If you add or remove a call to rcu_idle_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_idle_exit(void)
 {
@@ -987,6 +1002,9 @@ void rcu_idle_exit(void)
  *
  * Exit RCU idle mode while entering the kernel because it can
  * run a RCU read side critical section anytime.
+ *
+ * If you add or remove a call to rcu_user_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_user_exit(void)
 {
@@ -1012,6 +1030,9 @@ void rcu_user_exit(void)
  * Use things like work queues to work around this limitation.
  *
  * You have been warned.
+ *
+ * If you add or remove a call to rcu_irq_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_irq_enter(void)
 {
@@ -1037,6 +1058,9 @@ void rcu_irq_enter(void)
 
 /*
  * Wrapper for rcu_irq_enter() where interrupts are enabled.
+ *
+ * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_irq_enter_irqson(void)
 {
@@ -1055,6 +1079,9 @@ void rcu_irq_enter_irqson(void)
  * that the CPU is active. This implementation permits nested NMIs, as
  * long as the nesting level does not overflow an int. (You will probably
  * run out of stack space first.)
+ *
+ * If you add or remove a call to rcu_nmi_enter(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_nmi_enter(void)
 {
@@ -1087,6 +1114,9 @@ void rcu_nmi_enter(void)
  * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
  * to let the RCU grace-period handling know that the CPU is back to
  * being RCU-idle.
+ *
+ * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_nmi_exit(void)
 {
@@ -1207,6 +1237,22 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 }
 
 /*
+ * We are reporting a quiescent state on behalf of some other CPU, so
+ * it is our responsibility to check for and handle potential overflow
+ * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
+ * After all, the CPU might be in deep idle state, and thus executing no
+ * code whatsoever.
+ */
+static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
+{
+        lockdep_assert_held(&rnp->lock);
+        if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
+                WRITE_ONCE(rdp->gpwrap, true);
+        if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
+                rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+}
+
+/*
  * Snapshot the specified CPU's dynticks counter so that we can later
  * credit them with an implicit quiescent state. Return 1 if this CPU
  * is in dynticks idle mode, which is an extended quiescent state.
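(Aside: rcu_gpnum_ovf() above depends on wrap-safe unsigned comparison. A standalone userspace sketch of the same check follows; it assumes ULONG_CMP_LT() keeps its usual kernel definition.)

/* Userspace sketch; assumes ULONG_CMP_LT() matches its kernel definition. */
#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_LT(a, b)  (ULONG_MAX / 2 < (a) - (b))  /* wrap-safe "a precedes b" */

int main(void)
{
        unsigned long rdp_gpnum = 10;                         /* CPU's last-seen ->gpnum */
        unsigned long rnp_gpnum = 10 + ULONG_MAX / 4 + 1;     /* node counter raced far ahead */

        /* Mirrors rcu_gpnum_ovf(): flag a wrap once the gap exceeds ULONG_MAX / 4. */
        if (ULONG_CMP_LT(rdp_gpnum + ULONG_MAX / 4, rnp_gpnum))
                printf("->gpwrap would be set; the CPU must resync its grace-period view\n");
        return 0;
}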
@@ -1216,15 +1262,34 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
         rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
         if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
                 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
-                if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
-                                 rdp->mynode->gpnum))
-                        WRITE_ONCE(rdp->gpwrap, true);
+                rcu_gpnum_ovf(rdp->mynode, rdp);
                 return 1;
         }
         return 0;
 }
 
 /*
+ * Handler for the irq_work request posted when a grace period has
+ * gone on for too long, but not yet long enough for an RCU CPU
+ * stall warning.  Set state appropriately, but just complain if
+ * there is unexpected state on entry.
+ */
+static void rcu_iw_handler(struct irq_work *iwp)
+{
+        struct rcu_data *rdp;
+        struct rcu_node *rnp;
+
+        rdp = container_of(iwp, struct rcu_data, rcu_iw);
+        rnp = rdp->mynode;
+        raw_spin_lock_rcu_node(rnp);
+        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
+                rdp->rcu_iw_gpnum = rnp->gpnum;
+                rdp->rcu_iw_pending = false;
+        }
+        raw_spin_unlock_rcu_node(rnp);
+}
+
+/*
  * Return true if the specified CPU has passed through a quiescent
  * state by virtue of being in or having passed through an dynticks
  * idle state since the last call to dyntick_save_progress_counter()
@@ -1235,8 +1300,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         unsigned long jtsq;
         bool *rnhqp;
         bool *ruqp;
-        unsigned long rjtsc;
-        struct rcu_node *rnp;
+        struct rcu_node *rnp = rdp->mynode;
 
         /*
          * If the CPU passed through or entered a dynticks idle phase with
@@ -1249,34 +1313,25 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
                 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
                 rdp->dynticks_fqs++;
+                rcu_gpnum_ovf(rnp, rdp);
                 return 1;
         }
 
-        /* Compute and saturate jiffies_till_sched_qs. */
-        jtsq = jiffies_till_sched_qs;
-        rjtsc = rcu_jiffies_till_stall_check();
-        if (jtsq > rjtsc / 2) {
-                WRITE_ONCE(jiffies_till_sched_qs, rjtsc);
-                jtsq = rjtsc / 2;
-        } else if (jtsq < 1) {
-                WRITE_ONCE(jiffies_till_sched_qs, 1);
-                jtsq = 1;
-        }
-
         /*
          * Has this CPU encountered a cond_resched_rcu_qs() since the
          * beginning of the grace period? For this to be the case,
          * the CPU has to have noticed the current grace period. This
          * might not be the case for nohz_full CPUs looping in the kernel.
          */
-        rnp = rdp->mynode;
+        jtsq = jiffies_till_sched_qs;
         ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
         if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
             READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
             READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
                 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+                rcu_gpnum_ovf(rnp, rdp);
                 return 1;
-        } else {
+        } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
                 /* Load rcu_qs_ctr before store to rcu_urgent_qs. */
                 smp_store_release(ruqp, true);
         }
@@ -1285,6 +1340,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
                 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
                 rdp->offline_fqs++;
+                rcu_gpnum_ovf(rnp, rdp);
                 return 1;
         }
 
@@ -1304,10 +1360,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
          * updates are only once every few jiffies, the probability of
          * lossage (and thus of slight grace-period extension) is
          * quite low.
-         *
-         * Note that if the jiffies_till_sched_qs boot/sysfs parameter
-         * is set too high, we override with half of the RCU CPU stall
-         * warning delay.
          */
         rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
         if (!READ_ONCE(*rnhqp) &&
@@ -1316,15 +1368,26 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
                 WRITE_ONCE(*rnhqp, true);
                 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
                 smp_store_release(ruqp, true);
-                rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
+                rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
         }
 
         /*
-         * If more than halfway to RCU CPU stall-warning time, do
-         * a resched_cpu() to try to loosen things up a bit.
+         * If more than halfway to RCU CPU stall-warning time, do a
+         * resched_cpu() to try to loosen things up a bit.  Also check to
+         * see if the CPU is getting hammered with interrupts, but only
+         * once per grace period, just to keep the IPIs down to a dull roar.
          */
-        if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2)
+        if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
                 resched_cpu(rdp->cpu);
+                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
+                    !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+                    (rnp->ffmask & rdp->grpmask)) {
+                        init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
+                        rdp->rcu_iw_pending = true;
+                        rdp->rcu_iw_gpnum = rnp->gpnum;
+                        irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
+                }
+        }
 
         return 0;
 }
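(Aside: the hunk above posts an irq_work request so the target CPU must either take an IPI or be flagged as running with interrupts disabled. A minimal sketch of that irq_work pattern follows; struct stall_probe and its fields are hypothetical stand-ins for the rcu_data fields used in the patch.)

/*
 * Sketch only: post work that runs in hard-interrupt context on a chosen CPU.
 */
#include <linux/irq_work.h>
#include <linux/kernel.h>

struct stall_probe {
        struct irq_work iw;
        bool pending;
};

static void stall_probe_handler(struct irq_work *iwp)
{
        struct stall_probe *sp = container_of(iwp, struct stall_probe, iw);

        sp->pending = false;    /* ran in hard-irq context on the target CPU */
}

static void stall_probe_kick(struct stall_probe *sp, int cpu)
{
        if (IS_ENABLED(CONFIG_IRQ_WORK) && !sp->pending) {
                init_irq_work(&sp->iw, stall_probe_handler);
                sp->pending = true;
                irq_work_queue_on(&sp->iw, cpu);    /* sends an IPI to @cpu */
        }
}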
@@ -1513,6 +1576,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 {
         int cpu;
         unsigned long flags;
+        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
         struct rcu_node *rnp = rcu_get_root(rsp);
         long totqlen = 0;
 
@@ -1528,7 +1592,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
          */
         pr_err("INFO: %s self-detected stall on CPU", rsp->name);
         print_cpu_stall_info_begin();
+        raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
         print_cpu_stall_info(rsp, smp_processor_id());
+        raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
         print_cpu_stall_info_end();
         for_each_possible_cpu(cpu)
                 totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
@@ -1922,6 +1988,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                 rdp->core_needs_qs = need_gp;
                 zero_cpu_stall_ticks(rdp);
                 WRITE_ONCE(rdp->gpwrap, false);
+                rcu_gpnum_ovf(rnp, rdp);
         }
         return ret;
 }
@@ -3702,6 +3769,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
         rdp->cpu_no_qs.b.norm = true;
         rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
         rdp->core_needs_qs = false;
+        rdp->rcu_iw_pending = false;
+        rdp->rcu_iw_gpnum = rnp->gpnum - 1;
         trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
@@ -3739,10 +3808,24 @@ static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
  */
 int rcutree_online_cpu(unsigned int cpu)
 {
-        sync_sched_exp_online_cleanup(cpu);
-        rcutree_affinity_setting(cpu, -1);
+        unsigned long flags;
+        struct rcu_data *rdp;
+        struct rcu_node *rnp;
+        struct rcu_state *rsp;
+
+        for_each_rcu_flavor(rsp) {
+                rdp = per_cpu_ptr(rsp->rda, cpu);
+                rnp = rdp->mynode;
+                raw_spin_lock_irqsave_rcu_node(rnp, flags);
+                rnp->ffmask |= rdp->grpmask;
+                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+        }
         if (IS_ENABLED(CONFIG_TREE_SRCU))
                 srcu_online_cpu(cpu);
+        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+                return 0; /* Too early in boot for scheduler work. */
+        sync_sched_exp_online_cleanup(cpu);
+        rcutree_affinity_setting(cpu, -1);
         return 0;
 }
 
@@ -3752,6 +3835,19 @@ int rcutree_online_cpu(unsigned int cpu)
  */
 int rcutree_offline_cpu(unsigned int cpu)
 {
+        unsigned long flags;
+        struct rcu_data *rdp;
+        struct rcu_node *rnp;
+        struct rcu_state *rsp;
+
+        for_each_rcu_flavor(rsp) {
+                rdp = per_cpu_ptr(rsp->rda, cpu);
+                rnp = rdp->mynode;
+                raw_spin_lock_irqsave_rcu_node(rnp, flags);
+                rnp->ffmask &= ~rdp->grpmask;
+                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+        }
+
         rcutree_affinity_setting(cpu, cpu);
         if (IS_ENABLED(CONFIG_TREE_SRCU))
                 srcu_offline_cpu(cpu);
@@ -4200,8 +4296,7 @@ void __init rcu_init(void)
         for_each_online_cpu(cpu) {
                 rcutree_prepare_cpu(cpu);
                 rcu_cpu_starting(cpu);
-                if (IS_ENABLED(CONFIG_TREE_SRCU))
-                        srcu_online_cpu(cpu);
+                rcutree_online_cpu(cpu);
         }
 }
 