| field | value | date |
|---|---|---|
| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-01-26 19:18:07 -0500 |
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-04-18 14:38:17 -0400 |
| commit | 9577df9a3122af08fff84b8a1a60dccf524a3891 (patch) | |
| tree | 3c5f01e4cbdef694343d839b696a5ecbeb66472f /kernel/rcu/tree.c | |
| parent | abb06b99484a9f5af05c7147c289faf835f68e8e (diff) | |
rcu: Pull rcu_qs_ctr into rcu_dynticks structure
The rcu_qs_ctr variable is yet another isolated per-CPU variable,
so this commit pulls it into the pre-existing rcu_dynticks per-CPU
structure.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
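The diff below is limited to kernel/rcu/tree.c, so the structure side of the change is not shown here. As a rough sketch only (the surrounding members and layout are assumptions, not taken from this commit), the accesses in tree.c imply that the per-CPU struct rcu_dynticks in kernel/rcu/tree.h gains the counter as an ordinary field:

```c
/* Sketch, not the actual tree.h hunk: only the field name and type are
 * implied by this commit (rcu_dynticks.rcu_qs_ctr, previously an isolated
 * unsigned long per-CPU variable); the other members are elided here. */
struct rcu_dynticks {
        /* ... existing dynticks/nesting state ... */
        unsigned long rcu_qs_ctr;       /* Bumped in rcu_all_qs() to record
                                         * a quiescent state on this CPU. */
};
DECLARE_PER_CPU(struct rcu_dynticks, rcu_dynticks);
```

The commit message's "yet another isolated per-CPU variable" suggests this is one step in a series that consolidates RCU's scattered per-CPU state into the pre-existing rcu_dynticks structure, keeping the data touched on the quiescent-state path together.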
Diffstat (limited to 'kernel/rcu/tree.c')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/rcu/tree.c | 15 |

1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3a0703035874..82a86a67c92a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -436,9 +436,6 @@ bool rcu_eqs_special_set(int cpu)
         return true;
 }
 
-DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
-EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
-
 /*
  * Let the RCU core know that this CPU has gone through the scheduler,
  * which is a quiescent state.  This is called when the need for a
@@ -542,7 +539,7 @@ void rcu_all_qs(void)
                 rcu_sched_qs();
                 preempt_enable();
         }
-        this_cpu_inc(rcu_qs_ctr);
+        this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
         barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
 EXPORT_SYMBOL_GPL(rcu_all_qs);
@@ -1315,7 +1312,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
          */
         rnp = rdp->mynode;
         if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
-            READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_qs_ctr, rdp->cpu) &&
+            READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
             READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
                 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
                 return 1;
@@ -2024,7 +2021,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
                 need_gp = !!(rnp->qsmask & rdp->grpmask);
                 rdp->cpu_no_qs.b.norm = need_gp;
-                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
+                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
                 rdp->core_needs_qs = need_gp;
                 zero_cpu_stall_ticks(rdp);
                 WRITE_ONCE(rdp->gpwrap, false);
@@ -2622,7 +2619,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                  * within the current grace period.
                  */
                 rdp->cpu_no_qs.b.norm = true;   /* need qs for new gp. */
-                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
+                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                 return;
         }
@@ -3620,7 +3617,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
         /* Is the RCU core waiting for a quiescent state from this CPU? */
         if (rcu_scheduler_fully_active &&
             rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
-            rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
+            rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) {
                 rdp->n_rp_core_needs_qs++;
         } else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
                 rdp->n_rp_report_qs++;
@@ -3933,7 +3930,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
         rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
         rdp->completed = rnp->completed;
         rdp->cpu_no_qs.b.norm = true;
-        rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
+        rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
         rdp->core_needs_qs = false;
         trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
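All of the replacements above lean on the fact that the kernel's per-CPU accessors accept a member of a per-CPU structure, so folding the counter into rcu_dynticks does not change how it is incremented or read. A minimal, self-contained illustration of that idiom with made-up names (only DEFINE_PER_CPU, this_cpu_inc(), and per_cpu() are real kernel APIs; nothing here is RCU code):

```c
#include <linux/percpu.h>

/* Hypothetical per-CPU structure standing in for rcu_dynticks. */
struct example_state {
        unsigned long qs_ctr;           /* plays the role of rcu_qs_ctr */
};
static DEFINE_PER_CPU(struct example_state, example_state);

static void example_note_qs(void)
{
        /* Same form as this_cpu_inc(rcu_dynticks.rcu_qs_ctr) in rcu_all_qs(). */
        this_cpu_inc(example_state.qs_ctr);
}

static unsigned long example_snapshot(int cpu)
{
        /* Same form as per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) in
         * rcu_implicit_dynticks_qs(). */
        return per_cpu(example_state.qs_ctr, cpu);
}
```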
