path: root/kernel/rcu/tree.c
author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-10-03 19:51:47 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-11-27 11:42:03 -0500
commit	844ccdd7dce2c1a6ea9b437fcf8c3265b136e4a5 (patch)
tree	849c27c829544f9cde57bd2fd311060d5206c1d6 /kernel/rcu/tree.c
parent	51a1fd30f13090be7750fed86cf3728afaf4e394 (diff)
rcu: Eliminate rcu_irq_enter_disabled()
Now that the irq path uses the rcu_nmi_{enter,exit}() algorithm, rcu_irq_enter() and rcu_irq_exit() may be used from any context. There is thus no need for rcu_irq_enter_disabled() and for the checks using it. This commit therefore eliminates rcu_irq_enter_disabled().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
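For context, callers outside the irq-entry path (mainly the tracing infrastructure) previously had to test rcu_irq_enter_disabled() before asking RCU to start watching; after this change rcu_irq_enter() may simply be called unconditionally. The snippet below is a sketch of that caller-side pattern, not code from this commit: trace_event_example() is an invented name, and the include is assumed to be the usual RCU header for such callers.

#include <linux/rcupdate.h>	/* RCU entry/exit hooks */

/* Hypothetical caller, shown for illustration only. */
static void trace_event_example(void)
{
	/*
	 * Before this commit, callers had to bail out during the narrow
	 * per-CPU windows flagged by disable_rcu_irq_enter:
	 *
	 *	if (rcu_irq_enter_disabled())
	 *		return;
	 *
	 * Interrupts are assumed to be disabled here, as rcu_irq_enter()
	 * requires.
	 */
	rcu_irq_enter();	/* Safe from any context: make RCU watch. */
	/* ... record the event while RCU is watching ... */
	rcu_irq_exit();
}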
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	22
1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d123474fe829..444aa2b3f24d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -271,20 +271,6 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 };
 
 /*
- * There's a few places, currently just in the tracing infrastructure,
- * that uses rcu_irq_enter() to make sure RCU is watching. But there's
- * a small location where that will not even work. In those cases
- * rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter()
- * can be called.
- */
-static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
-
-bool rcu_irq_enter_disabled(void)
-{
-	return this_cpu_read(disable_rcu_irq_enter);
-}
-
-/*
  * Record entry into an extended quiescent state. This is only to be
  * called when not already in an extended quiescent state.
  */
@@ -792,10 +778,8 @@ static void rcu_eqs_enter_common(bool user)
 		do_nocb_deferred_wakeup(rdp);
 	}
 	rcu_prepare_for_idle();
-	__this_cpu_inc(disable_rcu_irq_enter);
-	rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
-	rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
-	__this_cpu_dec(disable_rcu_irq_enter);
+	rdtp->dynticks_nesting = 0;
+	rcu_dynticks_eqs_enter();
 	rcu_dynticks_task_enter();
 
 	/*
@@ -1001,10 +985,8 @@ static void rcu_eqs_exit(bool user)
 	if (oldval) {
 		rdtp->dynticks_nesting++;
 	} else {
-		__this_cpu_inc(disable_rcu_irq_enter);
 		rcu_eqs_exit_common(1, user);
 		rdtp->dynticks_nesting = 1;
-		__this_cpu_dec(disable_rcu_irq_enter);
 		WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 	}
 }