about | summary | refs | log | tree | commit | diff | stats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-08-14 19:38:46 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-09-07 19:27:35 -0400
commit284a8c93af47306beed967a303d84730b32bab39 (patch)
tree7744a2f51d5f59b2a7241c09fbc5325da37dc800 /kernel/rcu/tree.c
parent1d082fd061884a587c490c4fc8a2056ce1e47624 (diff)
rcu: Per-CPU operation cleanups to rcu_*_qs() functions
The rcu_bh_qs(), rcu_preempt_qs(), and rcu_sched_qs() functions use old-style per-CPU variable access and write to ->passed_quiesce even if it is already set. This commit therefore updates to use the new-style per-CPU variable access functions and avoids the spurious writes. This commit also eliminates the "cpu" argument to these functions because they are always invoked on the indicated CPU. Reported-by: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--kernel/rcu/tree.c34
1 file changed, 18 insertions, 16 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c880f5387b1f..4c340625ffd4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -188,22 +188,24 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  * one since the start of the grace period, this just sets a flag.
  * The caller must have disabled preemption.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
+	if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_sched"),
+				       __this_cpu_read(rcu_sched_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_sched_data.passed_quiesce, 1);
+	}
 }
 
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
+	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_bh"),
+				       __this_cpu_read(rcu_bh_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+	}
 }
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
@@ -278,7 +280,7 @@ static void rcu_momentary_dyntick_idle(void)
 void rcu_note_context_switch(int cpu)
 {
 	trace_rcu_utilization(TPS("Start context switch"));
-	rcu_sched_qs(cpu);
+	rcu_sched_qs();
 	rcu_preempt_note_context_switch(cpu);
 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
 		rcu_momentary_dyntick_idle();
@@ -2395,8 +2397,8 @@ void rcu_check_callbacks(int cpu, int user)
 		 * at least not while the corresponding CPU is online.
 		 */
 
-		rcu_sched_qs(cpu);
-		rcu_bh_qs(cpu);
+		rcu_sched_qs();
+		rcu_bh_qs();
 
 	} else if (!in_softirq()) {
 
@@ -2407,7 +2409,7 @@ void rcu_check_callbacks(int cpu, int user)
 		 * critical section, so note it.
 		 */
 
-		rcu_bh_qs(cpu);
+		rcu_bh_qs();
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))