-rw-r--r--	kernel/rcu/tree.c	14
-rw-r--r--	kernel/rcu/tree.h	4
-rw-r--r--	kernel/rcu/tree_plugin.h	2
-rw-r--r--	kernel/rcu/tree_trace.c	4
4 files changed, 12 insertions, 12 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d2cdcada6fe0..7c158ffc7769 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1746,7 +1746,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
 		rdp->passed_quiesce = 0;
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
+		rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
 		zero_cpu_stall_ticks(rdp);
 		WRITE_ONCE(rdp->gpwrap, false);
 	}
@@ -2357,7 +2357,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 	if ((rnp->qsmask & mask) == 0) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	} else {
-		rdp->qs_pending = 0;
+		rdp->core_needs_qs = 0;
 
 		/*
 		 * This GP can't end until cpu checks in, so all of our
@@ -2388,7 +2388,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * Does this CPU still need to do its part for current grace period?
 	 * If no, return and let the other CPUs do their part as well.
 	 */
-	if (!rdp->qs_pending)
+	if (!rdp->core_needs_qs)
 		return;
 
 	/*
@@ -3828,10 +3828,10 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
 	if (rcu_scheduler_fully_active &&
-	    rdp->qs_pending && !rdp->passed_quiesce &&
+	    rdp->core_needs_qs && !rdp->passed_quiesce &&
 	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
-		rdp->n_rp_qs_pending++;
-	} else if (rdp->qs_pending &&
+		rdp->n_rp_core_needs_qs++;
+	} else if (rdp->core_needs_qs &&
 		   (rdp->passed_quiesce ||
 		    rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
 		rdp->n_rp_report_qs++;
@@ -4157,7 +4157,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->completed = rnp->completed;
 	rdp->passed_quiesce = false;
 	rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
-	rdp->qs_pending = false;
+	rdp->core_needs_qs = false;
 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index efe361c764ab..4a0f30676ba8 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -303,7 +303,7 @@ struct rcu_data {
 	unsigned long rcu_qs_ctr_snap;	/* Snapshot of rcu_qs_ctr to check */
 					/*  for rcu_all_qs() invocations. */
 	bool passed_quiesce;		/* User-mode/idle loop etc. */
-	bool qs_pending;		/* Core waits for quiesc state. */
+	bool core_needs_qs;		/* Core waits for quiesc state. */
 	bool beenonline;		/* CPU online at least once. */
 	bool gpwrap;			/* Possible gpnum/completed wrap. */
 	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
@@ -368,7 +368,7 @@ struct rcu_data {
 
 	/* 5) __rcu_pending() statistics. */
 	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
-	unsigned long n_rp_qs_pending;
+	unsigned long n_rp_core_needs_qs;
 	unsigned long n_rp_report_qs;
 	unsigned long n_rp_cb_ready;
 	unsigned long n_rp_cpu_needs_gp;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6f7500f9387c..e33b4f3b8e0a 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -619,7 +619,7 @@ static void rcu_preempt_check_callbacks(void)
 		return;
 	}
 	if (t->rcu_read_lock_nesting > 0 &&
-	    __this_cpu_read(rcu_data_p->qs_pending) &&
+	    __this_cpu_read(rcu_data_p->core_needs_qs) &&
 	    !__this_cpu_read(rcu_data_p->passed_quiesce))
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 6fc4c5ff3bb5..4ac25f8520d6 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 		   ulong2long(rdp->completed), ulong2long(rdp->gpnum),
 		   rdp->passed_quiesce,
 		   rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
-		   rdp->qs_pending);
+		   rdp->core_needs_qs);
 	seq_printf(m, " dt=%d/%llx/%d df=%lu",
 		   atomic_read(&rdp->dynticks->dynticks),
 		   rdp->dynticks->dynticks_nesting,
@@ -361,7 +361,7 @@ static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
 		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
 		   rdp->n_rcu_pending);
 	seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ",
-		   rdp->n_rp_qs_pending,
+		   rdp->n_rp_core_needs_qs,
 		   rdp->n_rp_report_qs,
 		   rdp->n_rp_cb_ready,
 		   rdp->n_rp_cpu_needs_gp);