author     Paul E. McKenney <paulmck@linux.ibm.com>    2019-03-27 18:51:25 -0400
committer  Paul E. McKenney <paulmck@linux.ibm.com>    2019-05-28 11:48:19 -0400
commit     1bb336443cde1154600bd147a45a30baa59c57db (patch)
tree       e70aecaa5867cf0948a756c3dd5df6900df927cc /kernel/rcu/tree_exp.h
parent     eddded80121f2a7bda810f65bf7cb648a709ed11 (diff)
rcu: Rename rcu_data's ->deferred_qs to ->exp_deferred_qs
The rcu_data structure's ->deferred_qs field is used to indicate that the
current CPU is blocking an expedited grace period (perhaps a future one).
Given that it is used only for expedited grace periods, its current name
is misleading, so this commit renames it to ->exp_deferred_qs.
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
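For readers unfamiliar with the deferred-quiescent-state mechanism, the sketch below is a minimal, stand-alone user-space illustration (not the kernel code) of the role this flag plays: the expedited-grace-period IPI handler sets ->exp_deferred_qs when the CPU cannot report an expedited quiescent state immediately, and the flag is cleared when the quiescent state is finally reported, as rcu_report_exp_rdp() does in the diff below. Everything other than the exp_deferred_qs field name is a hypothetical simplification.

/*
 * Simplified user-space sketch of the renamed flag's role.  The real code
 * lives in kernel/rcu/tree_exp.h; struct and function names here (other
 * than exp_deferred_qs) are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct rcu_data_sketch {
	bool exp_deferred_qs;	/* CPU owes a quiescent state to an expedited GP */
};

/* Report the expedited QS and clear the deferral flag (cf. rcu_report_exp_rdp()). */
static void report_exp_qs(struct rcu_data_sketch *rdp)
{
	rdp->exp_deferred_qs = false;	/* the kernel uses WRITE_ONCE() here */
	printf("expedited QS reported\n");
}

/* IPI-handler-like path: defer the QS if the CPU is inside an RCU reader. */
static void exp_handler(struct rcu_data_sketch *rdp, bool in_reader)
{
	if (!in_reader)
		report_exp_qs(rdp);		/* safe to report immediately */
	else
		rdp->exp_deferred_qs = true;	/* report once the reader exits */
}

int main(void)
{
	struct rcu_data_sketch rdp = { .exp_deferred_qs = false };

	exp_handler(&rdp, true);		/* inside a reader: defer */
	printf("deferred: %d\n", rdp.exp_deferred_qs);
	report_exp_qs(&rdp);			/* reader done: report */
	return 0;
}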
Diffstat (limited to 'kernel/rcu/tree_exp.h')
-rw-r--r--   kernel/rcu/tree_exp.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index de1b4acf6979..e0c928d04be5 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -250,7 +250,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
  */
 static void rcu_report_exp_rdp(struct rcu_data *rdp)
 {
-	WRITE_ONCE(rdp->deferred_qs, false);
+	WRITE_ONCE(rdp->exp_deferred_qs, false);
 	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
 }
 
@@ -616,7 +616,7 @@ static void rcu_exp_handler(void *unused)
 		    rcu_dynticks_curr_cpu_in_eqs()) {
 			rcu_report_exp_rdp(rdp);
 		} else {
-			rdp->deferred_qs = true;
+			rdp->exp_deferred_qs = true;
 			set_tsk_need_resched(t);
 			set_preempt_need_resched();
 		}
@@ -638,7 +638,7 @@ static void rcu_exp_handler(void *unused)
 	if (t->rcu_read_lock_nesting > 0) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmask & rdp->grpmask) {
-			rdp->deferred_qs = true;
+			rdp->exp_deferred_qs = true;
 			t->rcu_read_unlock_special.b.exp_hint = true;
 		}
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -661,7 +661,7 @@ static void rcu_exp_handler(void *unused)
 	 *
 	 * Otherwise, force a context switch after the CPU enables everything.
 	 */
-	rdp->deferred_qs = true;
+	rdp->exp_deferred_qs = true;
 	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
 		rcu_preempt_deferred_qs(t);