author    Paul E. McKenney <paulmck@linux.ibm.com>    2019-03-27 18:51:25 -0400
committer Paul E. McKenney <paulmck@linux.ibm.com>    2019-05-28 11:48:19 -0400
commit    1bb336443cde1154600bd147a45a30baa59c57db (patch)
tree      e70aecaa5867cf0948a756c3dd5df6900df927cc
parent    eddded80121f2a7bda810f65bf7cb648a709ed11 (diff)
rcu: Rename rcu_data's ->deferred_qs to ->exp_deferred_qs
The rcu_data structure's ->deferred_qs field is used to indicate that the current CPU is blocking an expedited grace period (perhaps a future one). Given that it is used only for expedited grace periods, its current name is misleading, so this commit renames it to ->exp_deferred_qs.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
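A minimal standalone sketch of the usage pattern this field supports, for orientation only (the struct and helper names below are made up for illustration and are not kernel code): a per-CPU flag records that an expedited quiescent state (QS) had to be deferred, and reporting the QS later clears the flag.

/*
 * Illustrative sketch only -- plain userspace C, not the kernel implementation.
 * It mimics the pattern visible in the diff below: set the flag when the
 * expedited QS must be deferred, clear it when the QS is finally reported
 * (cf. rcu_report_exp_rdp() in tree_exp.h).
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_data {
        bool exp_deferred_qs;   /* This CPU owes an expedited QS report. */
};

/* Defer the expedited QS, e.g. because the CPU is in a read-side critical section. */
static void defer_exp_qs(struct cpu_data *cd)
{
        cd->exp_deferred_qs = true;
}

/* Report the expedited QS and clear the deferral flag. */
static void report_exp_qs(struct cpu_data *cd)
{
        cd->exp_deferred_qs = false;
        printf("expedited QS reported\n");
}

int main(void)
{
        struct cpu_data cd = { .exp_deferred_qs = false };

        defer_exp_qs(&cd);              /* Expedited GP handler finds CPU busy: defer. */
        if (cd.exp_deferred_qs)         /* Later, e.g. at context switch... */
                report_exp_qs(&cd);     /* ...the deferred QS is reported. */
        return 0;
}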
-rw-r--r--  kernel/rcu/tree.h         |  2 +-
-rw-r--r--  kernel/rcu/tree_exp.h     |  8 ++++----
-rw-r--r--  kernel/rcu/tree_plugin.h  | 14 +++++++-------
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 21d740f0b8dc..7acaf3a62d39 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -154,7 +154,7 @@ struct rcu_data {
         bool core_needs_qs;     /* Core waits for quiesc state. */
         bool beenonline;        /* CPU online at least once. */
         bool gpwrap;            /* Possible ->gp_seq wrap. */
-        bool deferred_qs;       /* This CPU awaiting a deferred QS? */
+        bool exp_deferred_qs;   /* This CPU awaiting a deferred QS? */
         struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
         unsigned long grpmask;  /* Mask to apply to leaf qsmask. */
         unsigned long ticks_this_gp;    /* The number of scheduling-clock */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index de1b4acf6979..e0c928d04be5 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -250,7 +250,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
  */
 static void rcu_report_exp_rdp(struct rcu_data *rdp)
 {
-        WRITE_ONCE(rdp->deferred_qs, false);
+        WRITE_ONCE(rdp->exp_deferred_qs, false);
         rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
 }
 
@@ -616,7 +616,7 @@ static void rcu_exp_handler(void *unused)
             rcu_dynticks_curr_cpu_in_eqs()) {
                 rcu_report_exp_rdp(rdp);
         } else {
-                rdp->deferred_qs = true;
+                rdp->exp_deferred_qs = true;
                 set_tsk_need_resched(t);
                 set_preempt_need_resched();
         }
@@ -638,7 +638,7 @@ static void rcu_exp_handler(void *unused)
         if (t->rcu_read_lock_nesting > 0) {
                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
                 if (rnp->expmask & rdp->grpmask) {
-                        rdp->deferred_qs = true;
+                        rdp->exp_deferred_qs = true;
                         t->rcu_read_unlock_special.b.exp_hint = true;
                 }
                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -661,7 +661,7 @@ static void rcu_exp_handler(void *unused)
          *
          * Otherwise, force a context switch after the CPU enables everything.
          */
-        rdp->deferred_qs = true;
+        rdp->exp_deferred_qs = true;
         if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
             WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
                 rcu_preempt_deferred_qs(t);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 58c7853f19e7..1aeb4ae187ce 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -237,10 +237,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
          * no need to check for a subsequent expedited GP. (Though we are
          * still in a quiescent state in any case.)
          */
-        if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
+        if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
                 rcu_report_exp_rdp(rdp);
         else
-                WARN_ON_ONCE(rdp->deferred_qs);
+                WARN_ON_ONCE(rdp->exp_deferred_qs);
 }
 
 /*
@@ -337,7 +337,7 @@ void rcu_note_context_switch(bool preempt)
          * means that we continue to block the current grace period.
          */
         rcu_qs();
-        if (rdp->deferred_qs)
+        if (rdp->exp_deferred_qs)
                 rcu_report_exp_rdp(rdp);
         trace_rcu_utilization(TPS("End context switch"));
         barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -451,7 +451,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
          */
         special = t->rcu_read_unlock_special;
         rdp = this_cpu_ptr(&rcu_data);
-        if (!special.s && !rdp->deferred_qs) {
+        if (!special.s && !rdp->exp_deferred_qs) {
                 local_irq_restore(flags);
                 return;
         }
@@ -459,7 +459,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
         if (special.b.need_qs) {
                 rcu_qs();
                 t->rcu_read_unlock_special.b.need_qs = false;
-                if (!t->rcu_read_unlock_special.s && !rdp->deferred_qs) {
+                if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) {
                         local_irq_restore(flags);
                         return;
                 }
@@ -471,7 +471,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
          * tasks are handled when removing the task from the
          * blocked-tasks list below.
          */
-        if (rdp->deferred_qs) {
+        if (rdp->exp_deferred_qs) {
                 rcu_report_exp_rdp(rdp);
                 if (!t->rcu_read_unlock_special.s) {
                         local_irq_restore(flags);
@@ -560,7 +560,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
  */
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
-        return (__this_cpu_read(rcu_data.deferred_qs) ||
+        return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
                 READ_ONCE(t->rcu_read_unlock_special.s)) &&
                t->rcu_read_lock_nesting <= 0;
 }