author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-01-26 16:45:38 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-04-18 14:38:17 -0400
commit	abb06b99484a9f5af05c7147c289faf835f68e8e (patch)
tree	ff5a96680bf5e7049ab9a8ee9c687fc88ca59eef /kernel/rcu/tree.c
parent	88a4976d0e37c0797ff3e6579a5f91cb7dced90d (diff)
rcu: Pull rcu_sched_qs_mask into rcu_dynticks structure
The rcu_sched_qs_mask variable is yet another isolated per-CPU variable, so this commit pulls it into the pre-existing rcu_dynticks per-CPU structure.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
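The change follows the usual per-CPU consolidation idiom: delete the standalone DEFINE_PER_CPU() variable, add a field to the existing per-CPU structure, and name that field through the structure in the per-CPU accessors. Below is a minimal sketch of the idiom, not kernel code: the rcu_dynticks layout shown is illustrative (only the rcu_sched_qs_mask field comes from this commit) and example_read_and_clear() is a hypothetical helper.

	#include <linux/percpu.h>

	/* Before: yet another isolated per-CPU variable:
	 *
	 *	static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
	 */

	/* After: the flag becomes a field of the pre-existing per-CPU
	 * structure (other fields elided; this layout is illustrative).
	 */
	struct rcu_dynticks {
		long long dynticks_nesting;	/* pre-existing state */
		int rcu_sched_qs_mask;		/* the relocated flag */
	};
	static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks);

	/* Accessors then name the member through the structure; the
	 * per-CPU macros accept such member expressions directly.
	 */
	static void example_read_and_clear(void)	/* hypothetical */
	{
		int resched_mask;

		resched_mask = raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask);
		raw_cpu_write(rcu_dynticks.rcu_sched_qs_mask, 0);
		(void)resched_mask;
	}

The same member expression works for remote-CPU access via per_cpu(), as the rcu_implicit_dynticks_qs() hunk in the diff below shows.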
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	12	+++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3747277aae67..3a0703035874 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -272,8 +272,6 @@ void rcu_bh_qs(void)
 	}
 }
 
-static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
-
 /*
  * Steal a bit from the bottom of ->dynticks for idle entry/exit
  * control.  Initially this is for TLB flushing.
@@ -464,8 +462,8 @@ static void rcu_momentary_dyntick_idle(void)
 	 * Yes, we can lose flag-setting operations.  This is OK, because
 	 * the flag will be set again after some delay.
 	 */
-	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
-	raw_cpu_write(rcu_sched_qs_mask, 0);
+	resched_mask = raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask);
+	raw_cpu_write(rcu_dynticks.rcu_sched_qs_mask, 0);
 
 	/* Find the flavor that needs a quiescent state. */
 	for_each_rcu_flavor(rsp) {
@@ -499,7 +497,7 @@ void rcu_note_context_switch(void)
 	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_sched_qs();
 	rcu_preempt_note_context_switch();
-	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask)))
 		rcu_momentary_dyntick_idle();
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -524,7 +522,7 @@ void rcu_all_qs(void)
 	unsigned long flags;
 
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
-	if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
+	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask))) {
 		local_irq_save(flags);
 		rcu_momentary_dyntick_idle();
 		local_irq_restore(flags);
@@ -1351,7 +1349,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 	 * is set too high, we override with half of the RCU CPU stall
 	 * warning delay.
 	 */
-	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
+	rcrmp = &per_cpu(rcu_dynticks.rcu_sched_qs_mask, rdp->cpu);
 	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
 	    time_after(jiffies, rdp->rsp->jiffies_resched)) {
 		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {