author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-08-14 19:38:46 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-09-07 19:27:35 -0400
commit		284a8c93af47306beed967a303d84730b32bab39 (patch)
tree		7744a2f51d5f59b2a7241c09fbc5325da37dc800 /kernel/rcu
parent		1d082fd061884a587c490c4fc8a2056ce1e47624 (diff)
rcu: Per-CPU operation cleanups to rcu_*_qs() functions
The rcu_bh_qs(), rcu_preempt_qs(), and rcu_sched_qs() functions use
old-style per-CPU variable access and write to ->passed_quiesce even
if it is already set. This commit therefore updates to use the
new-style per-CPU variable access functions and avoids the spurious
writes. This commit also eliminates the "cpu" argument to these
functions because they are always invoked on the indicated CPU.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
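For reference, the shape of this cleanup can be modeled in plain
userspace C. This is a minimal sketch, not kernel code: the
rcu_data_model struct, the NR_CPUS array standing in for real per-CPU
storage, and printf() standing in for trace_rcu_grace_period() are all
illustrative stand-ins.

#include <stdio.h>

#define NR_CPUS 4

/* Illustrative stand-in for the struct rcu_data fields used here. */
struct rcu_data_model {
	unsigned long gpnum;	/* current grace-period number */
	int passed_quiesce;	/* nonzero once this CPU's QS is recorded */
};

/* Array indexed by CPU number models per-CPU storage. */
static struct rcu_data_model rcu_sched_data_model[NR_CPUS];
static int this_cpu;	/* stand-in for the implicit "current CPU" */

/* Old style: explicit cpu argument, per_cpu()-like lookup, and an
 * unconditional store that dirties the cache line even when the
 * flag is already set. */
static void rcu_sched_qs_old(int cpu)
{
	struct rcu_data_model *rdp = &rcu_sched_data_model[cpu];

	if (rdp->passed_quiesce == 0)
		printf("trace: rcu_sched gp %lu cpuqs\n", rdp->gpnum);
	rdp->passed_quiesce = 1;	/* spurious write if already 1 */
}

/* New style: no cpu argument (the function always runs on the CPU it
 * reports for, which is what __this_cpu_read()/__this_cpu_write()
 * exploit in the kernel), and the store happens only when the flag
 * actually changes. */
static void rcu_sched_qs_new(void)
{
	struct rcu_data_model *rdp = &rcu_sched_data_model[this_cpu];

	if (!rdp->passed_quiesce) {
		printf("trace: rcu_sched gp %lu cpuqs\n", rdp->gpnum);
		rdp->passed_quiesce = 1;	/* at most one write per GP */
	}
}

int main(void)
{
	rcu_sched_qs_new();	/* records the QS and emits the trace */
	rcu_sched_qs_new();	/* no-op: flag already set, no write */
	rcu_sched_qs_old(0);	/* old style rewrites the flag anyway */
	return 0;
}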
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tiny.c	10
-rw-r--r--	kernel/rcu/tree.c	34
-rw-r--r--	kernel/rcu/tree_plugin.h	27
3 files changed, 38 insertions, 33 deletions
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 717f00854fc0..61b8d2ccc2cb 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -72,7 +72,7 @@ static void rcu_idle_enter_common(long long newval)
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
 	}
-	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+	rcu_sched_qs(); /* implies rcu_bh_inc() */
 	barrier();
 	rcu_dynticks_nesting = newval;
 }
@@ -217,7 +217,7 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state. Use "+" instead of "||" to defeat short circuiting.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
 	unsigned long flags;
 
@@ -231,7 +231,7 @@ void rcu_sched_qs(int cpu)
 /*
  * Record an rcu_bh quiescent state.
  */
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
 	unsigned long flags;
 
@@ -251,9 +251,9 @@ void rcu_check_callbacks(int cpu, int user)
 {
 	RCU_TRACE(check_cpu_stalls());
 	if (user || rcu_is_cpu_rrupt_from_idle())
-		rcu_sched_qs(cpu);
+		rcu_sched_qs();
 	else if (!in_softirq())
-		rcu_bh_qs(cpu);
+		rcu_bh_qs();
 	if (user)
 		rcu_note_voluntary_context_switch(current);
 }
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c880f5387b1f..4c340625ffd4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -188,22 +188,24 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  * one since the start of the grace period, this just sets a flag.
  * The caller must have disabled preemption.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
+	if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_sched"),
+				       __this_cpu_read(rcu_sched_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_sched_data.passed_quiesce, 1);
+	}
 }
 
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
+	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_bh"),
+				       __this_cpu_read(rcu_bh_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+	}
 }
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
@@ -278,7 +280,7 @@ static void rcu_momentary_dyntick_idle(void)
 void rcu_note_context_switch(int cpu)
 {
 	trace_rcu_utilization(TPS("Start context switch"));
-	rcu_sched_qs(cpu);
+	rcu_sched_qs();
 	rcu_preempt_note_context_switch(cpu);
 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
 		rcu_momentary_dyntick_idle();
@@ -2395,8 +2397,8 @@ void rcu_check_callbacks(int cpu, int user)
 	 * at least not while the corresponding CPU is online.
 	 */
 
-		rcu_sched_qs(cpu);
-		rcu_bh_qs(cpu);
+		rcu_sched_qs();
+		rcu_bh_qs();
 
 	} else if (!in_softirq()) {
 
@@ -2407,7 +2409,7 @@ void rcu_check_callbacks(int cpu, int user)
 	 * critical section, so note it.
 	 */
 
-		rcu_bh_qs(cpu);
+		rcu_bh_qs();
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0981c0cd70fe..25e692a36280 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -158,14 +158,16 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * As with the other rcu_*_qs() functions, callers to this function
  * must disable preemption.
  */
-static void rcu_preempt_qs(int cpu)
+static void rcu_preempt_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
-	current->rcu_read_unlock_special.b.need_qs = false;
+	if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_preempt"),
+				       __this_cpu_read(rcu_preempt_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
+		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
+		current->rcu_read_unlock_special.b.need_qs = false;
+	}
 }
 
 /*
@@ -256,7 +258,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	 * grace period, then the fact that the task has been enqueued
 	 * means that we continue to block the current grace period.
 	 */
-	rcu_preempt_qs(cpu);
+	rcu_preempt_qs();
 }
 
 /*
@@ -352,7 +354,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 	 */
 	special = t->rcu_read_unlock_special;
 	if (special.b.need_qs) {
-		rcu_preempt_qs(smp_processor_id());
+		rcu_preempt_qs();
 		if (!t->rcu_read_unlock_special.s) {
 			local_irq_restore(flags);
 			return;
@@ -651,11 +653,12 @@ static void rcu_preempt_check_callbacks(int cpu)
 	struct task_struct *t = current;
 
 	if (t->rcu_read_lock_nesting == 0) {
-		rcu_preempt_qs(cpu);
+		rcu_preempt_qs();
 		return;
 	}
 	if (t->rcu_read_lock_nesting > 0 &&
-	    per_cpu(rcu_preempt_data, cpu).qs_pending)
+	    per_cpu(rcu_preempt_data, cpu).qs_pending &&
+	    !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
 
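The last hunk pairs with the barrier() added to rcu_preempt_qs() above:
need_qs is now requested only while a quiescent state is both still
pending and not yet recorded, so a QS that has already been reported no
longer re-arms the unlock-time slow path. A rough userspace model of
that decision follows; the qs_state_model struct and the need_qs()
helper are hypothetical names standing in for the kernel structures,
not the kernel's API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the per-CPU fields consulted by
 * rcu_preempt_check_callbacks(); not the kernel structures. */
struct qs_state_model {
	bool qs_pending;	/* grace period still needs a QS from this CPU */
	bool passed_quiesce;	/* this CPU already recorded its QS */
};

/* Decide whether the outermost rcu_read_unlock() should take the
 * slow path and report a quiescent state (the new, stricter test). */
static bool need_qs(const struct qs_state_model *qs, int read_lock_nesting)
{
	return read_lock_nesting > 0 &&
	       qs->qs_pending &&
	       !qs->passed_quiesce;	/* new clause: QS already reported? */
}

int main(void)
{
	struct qs_state_model qs = {
		.qs_pending = true,
		.passed_quiesce = true,
	};

	/* Before this commit the equivalent test ignored passed_quiesce
	 * and would have returned true here, re-arming the slow path for
	 * a quiescent state that was already recorded. */
	printf("need_qs: %d\n", need_qs(&qs, 1));	/* prints 0 */
	return 0;
}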