Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--  kernel/rcu/tree_plugin.h | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 58c7853f19e7..1aeb4ae187ce 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -237,10 +237,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
          * no need to check for a subsequent expedited GP. (Though we are
          * still in a quiescent state in any case.)
          */
-        if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
+        if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
                 rcu_report_exp_rdp(rdp);
         else
-                WARN_ON_ONCE(rdp->deferred_qs);
+                WARN_ON_ONCE(rdp->exp_deferred_qs);
 }
 
 /*
@@ -337,7 +337,7 @@ void rcu_note_context_switch(bool preempt)
          * means that we continue to block the current grace period.
          */
         rcu_qs();
-        if (rdp->deferred_qs)
+        if (rdp->exp_deferred_qs)
                 rcu_report_exp_rdp(rdp);
         trace_rcu_utilization(TPS("End context switch"));
         barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -451,7 +451,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
          */
         special = t->rcu_read_unlock_special;
         rdp = this_cpu_ptr(&rcu_data);
-        if (!special.s && !rdp->deferred_qs) {
+        if (!special.s && !rdp->exp_deferred_qs) {
                 local_irq_restore(flags);
                 return;
         }
@@ -459,7 +459,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
         if (special.b.need_qs) {
                 rcu_qs();
                 t->rcu_read_unlock_special.b.need_qs = false;
-                if (!t->rcu_read_unlock_special.s && !rdp->deferred_qs) {
+                if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) {
                         local_irq_restore(flags);
                         return;
                 }
@@ -471,7 +471,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
          * tasks are handled when removing the task from the
          * blocked-tasks list below.
          */
-        if (rdp->deferred_qs) {
+        if (rdp->exp_deferred_qs) {
                 rcu_report_exp_rdp(rdp);
                 if (!t->rcu_read_unlock_special.s) {
                         local_irq_restore(flags);
@@ -560,7 +560,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
  */
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
-        return (__this_cpu_read(rcu_data.deferred_qs) ||
+        return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
                 READ_ONCE(t->rcu_read_unlock_special.s)) &&
                t->rcu_read_lock_nesting <= 0;
 }
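
For readers outside the kernel tree, below is a minimal user-space sketch of the check shown in the last hunk: a deferred quiescent state is needed when either the per-CPU expedited flag (renamed here from deferred_qs to exp_deferred_qs) or the task's rcu_read_unlock_special word is set, and the task is not inside an RCU read-side critical section. The stub types, the single-CPU stand-in for the per-CPU rcu_data, and the two-field rcu_special union are illustrative assumptions, not the kernel's actual definitions.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical, simplified stand-ins for the kernel structures touched
     * by this diff; the real definitions live under kernel/rcu/ and
     * include/linux/sched.h. */
    struct rcu_data {
            bool exp_deferred_qs;           /* renamed from deferred_qs */
    };

    union rcu_special {
            struct {
                    uint8_t need_qs;
                    uint8_t exp_hint;
            } b;
            uint32_t s;
    };

    struct task_struct {
            int rcu_read_lock_nesting;
            union rcu_special rcu_read_unlock_special;
    };

    /* Single-CPU stand-in for the kernel's per-CPU rcu_data instance. */
    static struct rcu_data rcu_data;

    /* Mirrors the condition in the final hunk above. */
    static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
    {
            return (rcu_data.exp_deferred_qs ||
                    t->rcu_read_unlock_special.s) &&
                   t->rcu_read_lock_nesting <= 0;
    }

    int main(void)
    {
            struct task_struct t = { .rcu_read_lock_nesting = 0 };

            rcu_data.exp_deferred_qs = true;
            return rcu_preempt_need_deferred_qs(&t) ? 0 : 1;
    }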