aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2015-08-06 14:31:51 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2015-09-21 00:16:20 -0400
commit97c668b8e983b722e2ed765b98b05f644aff1b13 (patch)
tree98ff9c8a31e2eb957703a0e547dc87e1c1178640 /kernel/rcu/tree.c
parentbce5fa12aad148e15efd9bc0015dc4898b6e723b (diff)
rcu: Rename qs_pending to core_needs_qs
An upcoming commit needs to invert the sense of the ->passed_quiesce rcu_data structure field, so this commit is taking this opportunity to clarify things a bit by renaming ->qs_pending to ->core_needs_qs. So if !rdp->core_needs_qs, then this CPU need not concern itself with quiescent states, in particular, it need not acquire its leaf rcu_node structure's ->lock to check. Otherwise, it needs to report the next quiescent state. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d2cdcada6fe0..7c158ffc7769 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1746,7 +1746,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
 		rdp->passed_quiesce = 0;
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
+		rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
 		zero_cpu_stall_ticks(rdp);
 		WRITE_ONCE(rdp->gpwrap, false);
 	}
@@ -2357,7 +2357,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 	if ((rnp->qsmask & mask) == 0) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	} else {
-		rdp->qs_pending = 0;
+		rdp->core_needs_qs = 0;
 
 		/*
 		 * This GP can't end until cpu checks in, so all of our
@@ -2388,7 +2388,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * Does this CPU still need to do its part for current grace period?
 	 * If no, return and let the other CPUs do their part as well.
 	 */
-	if (!rdp->qs_pending)
+	if (!rdp->core_needs_qs)
 		return;
 
 	/*
@@ -3828,10 +3828,10 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
 	if (rcu_scheduler_fully_active &&
-	    rdp->qs_pending && !rdp->passed_quiesce &&
+	    rdp->core_needs_qs && !rdp->passed_quiesce &&
 	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
-		rdp->n_rp_qs_pending++;
-	} else if (rdp->qs_pending &&
+		rdp->n_rp_core_needs_qs++;
+	} else if (rdp->core_needs_qs &&
 		   (rdp->passed_quiesce ||
 		    rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
 		rdp->n_rp_report_qs++;
@@ -4157,7 +4157,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->completed = rnp->completed;
 	rdp->passed_quiesce = false;
 	rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
-	rdp->qs_pending = false;
+	rdp->core_needs_qs = false;
 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }