author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-10-03 17:43:40 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-11-27 11:42:03 -0500
commit		51a1fd30f13090be7750fed86cf3728afaf4e394 (patch)
tree		346dbd94f1ec39ed399403112a12698e33ee1dc3 /kernel/rcu/tree.c
parent		58721f5da4bcd5187566f4159a4fc88f70bf74f6 (diff)
rcu: Make ->dynticks_nesting be a simple counter
Now that ->dynticks_nesting counts only process-level dyntick-idle entry
and exit, there is no need for the elaborate segmented counter with its
guard fields and overflow checking.  This commit therefore makes
->dynticks_nesting be a simple counter.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
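As a rough illustration of the resulting scheme, here is a standalone
model, not kernel code: the helper names are invented and the real work
of rcu_eqs_enter_common() is reduced to a printf.  ->dynticks_nesting
becomes a plain count of process-level reasons the CPU is not in an
extended quiescent state (EQS); 0 means idle or nohz_full usermode,
1 means running.

/* Standalone model of the new simple-counter scheme (invented names). */
#include <assert.h>
#include <stdio.h>

static long dynticks_nesting = 1;	/* matches the new initializer */

static void model_eqs_enter(void)
{
	assert(dynticks_nesting > 0);	/* analogue of the WARN_ON_ONCE() */
	if (dynticks_nesting == 1)
		printf("outermost exit: really entering EQS\n");
	dynticks_nesting--;		/* plain decrement, no mask arithmetic */
}

static void model_eqs_exit(void)
{
	if (dynticks_nesting == 0)
		printf("outermost entry: really exiting EQS\n");
	dynticks_nesting++;		/* plain increment */
}

int main(void)
{
	model_eqs_enter();	/* 1 -> 0: CPU enters the EQS */
	model_eqs_exit();	/* 0 -> 1: CPU exits the EQS */
	return 0;
}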
Diffstat (limited to 'kernel/rcu/tree.c')
 kernel/rcu/tree.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index fde0e840563f..d123474fe829 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -265,7 +265,7 @@ void rcu_bh_qs(void)
 #endif
 
 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+	.dynticks_nesting = 1,
 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 };
@@ -813,6 +813,10 @@ static void rcu_eqs_enter_common(bool user)
 /*
  * Enter an RCU extended quiescent state, which can be either the
  * idle loop or adaptive-tickless usermode execution.
+ *
+ * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
+ * the possibility of usermode upcalls having messed up our count
+ * of interrupt nesting level during the prior busy period.
  */
 static void rcu_eqs_enter(bool user)
 {
@@ -821,11 +825,11 @@ static void rcu_eqs_enter(bool user)
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		     rdtp->dynticks_nesting == 0);
+	if (rdtp->dynticks_nesting == 1)
 		rcu_eqs_enter_common(user);
 	else
-		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
+		rdtp->dynticks_nesting--;
 }
 
 /**
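The mask arithmetic deleted above came from the old segmented layout of
->dynticks_nesting.  A compile-only sketch of that layout, reconstructed
from memory of kernel/rcu/tree.h in this era (treat the exact values as
an assumption), shows why the replacement tests are equivalent:

/*
 * Old segmented encoding of ->dynticks_nesting (values approximate;
 * see kernel/rcu/tree.h before this series).  Process-level nesting
 * lived in the upper bits so the lower bits could carry guard/flag
 * state and catch overflow.
 */
#include <limits.h>

#define DYNTICK_TASK_NEST_WIDTH 7
#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)

/*
 * Old test for "outermost process level":
 *	(nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE
 * Old pop of one level:
 *	nesting -= DYNTICK_TASK_NEST_VALUE
 * With only process-level entries and exits left to count, these
 * collapse to (nesting == 1) and nesting--, as in the hunk above.
 */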
@@ -836,10 +840,6 @@ static void rcu_eqs_enter(bool user)
  * critical sections can occur in irq handlers in idle, a possibility
  * handled by irq_enter() and irq_exit().)
  *
- * We crowbar the ->dynticks_nesting field to zero to allow for
- * the possibility of usermode upcalls having messed up our count
- * of interrupt nesting level during the prior busy period.
- *
  * If you add or remove a call to rcu_idle_enter(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
@@ -984,6 +984,10 @@ static void rcu_eqs_exit_common(long long newval, int user)
 /*
  * Exit an RCU extended quiescent state, which can be either the
  * idle loop or adaptive-tickless usermode execution.
+ *
+ * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
+ * allow for the possibility of usermode upcalls messing up our count of
+ * interrupt nesting level during the busy period that is just now starting.
  */
 static void rcu_eqs_exit(bool user)
 {
@@ -994,12 +998,12 @@ static void rcu_eqs_exit(bool user)
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
-	if (oldval & DYNTICK_TASK_NEST_MASK) {
-		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
+	if (oldval) {
+		rdtp->dynticks_nesting++;
 	} else {
 		__this_cpu_inc(disable_rcu_irq_enter);
-		rcu_eqs_exit_common(DYNTICK_TASK_EXIT_IDLE, user);
-		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+		rcu_eqs_exit_common(1, user);
+		rdtp->dynticks_nesting = 1;
 		__this_cpu_dec(disable_rcu_irq_enter);
 		WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 	}
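The ->dynticks_nmi_nesting "crowbar" writes in these two paths (zero on
EQS entry, DYNTICK_IRQ_NONIDLE on EQS exit) deliberately overwrite
whatever the interrupt-nesting count had drifted to.  A small model of
that resynchronization; the sentinel value below is an assumption, since
the kernel's actual DYNTICK_IRQ_NONIDLE definition is not part of this
diff:

/* Model of the crowbar resynchronization (sentinel value assumed). */
#include <assert.h>
#include <limits.h>

#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)	/* "definitely not idle" */

static long dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE;

int main(void)
{
	dynticks_nmi_nesting += 3;		/* usermode upcalls skewed the count */
	dynticks_nmi_nesting = 0;		/* eqs_enter(): crowbar to zero */
	assert(dynticks_nmi_nesting == 0);	/* skew cannot leak into idle */
	dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE; /* eqs_exit(): re-arm sentinel */
	assert(dynticks_nmi_nesting >= 1);	/* busy period starts clean */
	return 0;
}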
@@ -1011,11 +1015,6 @@ static void rcu_eqs_exit(bool user)
  * Exit idle mode, in other words, -enter- the mode in which RCU
  * read-side critical sections can occur.
  *
- * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
- * allow for the possibility of usermode upcalls messing up our count
- * of interrupt nesting level during the busy period that is just
- * now starting.
- *
  * If you add or remove a call to rcu_idle_exit(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
@@ -1219,7 +1218,8 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  */
 static int rcu_is_cpu_rrupt_from_idle(void)
 {
-	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
+	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
+	       __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
 }
 
 /*
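With the process-level and interrupt-level counts now separate, the
reworked predicate must consult both: the process level must be idle,
and at most one interrupt level (the interrupt currently being handled)
may be active.  A sketch, with an invented wrapper name:

/* Sketch of the new "interrupt taken from idle?" test (invented name). */
#include <assert.h>
#include <stdbool.h>

static bool rrupt_from_idle(long nesting, long nmi_nesting)
{
	/* Idle at process level, and at most the one in-flight irq. */
	return nesting <= 0 && nmi_nesting <= 1;
}

int main(void)
{
	assert(rrupt_from_idle(0, 1));	/* irq taken while the CPU idled */
	assert(!rrupt_from_idle(1, 1));	/* irq taken from a running task */
	assert(!rrupt_from_idle(0, 2));	/* nested irq over idle: excluded */
	return 0;
}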
@@ -3709,7 +3709,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
+	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
@@ -3738,7 +3738,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	if (rcu_segcblist_empty(&rdp->cblist) &&     /* No early-boot CBs? */
 	    !init_nocb_callback_list(rdp))
 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
-	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+	rdp->dynticks->dynticks_nesting = 1;
 	rcu_dynticks_eqs_online();
 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
 