author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-10-03 17:43:40 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-11-27 11:42:03 -0500
commit		51a1fd30f13090be7750fed86cf3728afaf4e394
tree		346dbd94f1ec39ed399403112a12698e33ee1dc3 /kernel
parent		58721f5da4bcd5187566f4159a4fc88f70bf74f6
rcu: Make ->dynticks_nesting be a simple counter
Now that ->dynticks_nesting counts only process-level dyntick-idle
entry and exit, there is no need for the elaborate segmented counter
with its guard fields and overflow checking. This commit therefore
makes ->dynticks_nesting be a simple counter.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
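
To see concretely what is being discarded, the removed macros can be evaluated in isolation. The following standalone userspace program (an illustrative sketch, not kernel code) reproduces the old encoding and prints the resulting bit layout:

/* Sketch: evaluate the OLD (removed) ->dynticks_nesting encoding. */
#include <limits.h>
#include <stdio.h>

#define DYNTICK_TASK_NEST_WIDTH 7
#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
#define DYNTICK_TASK_FLAG       ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
#define DYNTICK_TASK_MASK       ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
#define DYNTICK_TASK_EXIT_IDLE  (DYNTICK_TASK_NEST_VALUE + DYNTICK_TASK_FLAG)

int main(void)
{
	/* One process-level "reason not idle" is worth 2^56. */
	printf("NEST_VALUE = %#llx\n", (unsigned long long)DYNTICK_TASK_NEST_VALUE);
	/* The 7-bit nesting field occupies bits 56..62. */
	printf("NEST_MASK  = %#llx\n", (unsigned long long)DYNTICK_TASK_NEST_MASK);
	/* The two-bit guard field (bits 53..54) keeps an underflowing
	 * interrupt count from zeroing the nesting field above it. */
	printf("TASK_MASK  = %#llx\n", (unsigned long long)DYNTICK_TASK_MASK);
	/* Combined value stored on the initial exit from idle. */
	printf("EXIT_IDLE  = %#llx\n", (unsigned long long)DYNTICK_TASK_EXIT_IDLE);
	return 0;
}

With this commit, all of that structure reduces to 1 for "running, not nested," 0 for idle, and plain increments and decrements in between.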
Diffstat (limited to 'kernel')
 kernel/rcu/rcu.h  | 27 +--------------------------
 kernel/rcu/tree.c | 40 ++++++++++++++++++++--------------------
 kernel/rcu/tree.h |  1 -
 3 files changed, 21 insertions(+), 47 deletions(-)
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index f4a411964c41..afe0559d1867 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -30,32 +30,7 @@
 #define RCU_TRACE(stmt)
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
-/*
- * Process-level increment to ->dynticks_nesting field.  This allows for
- * architectures that use half-interrupts and half-exceptions from
- * process context.
- *
- * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
- * that counts the number of process-based reasons why RCU cannot
- * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
- * is the value used to increment or decrement this field.
- *
- * The rest of the bits could in principle be used to count interrupts,
- * but this would mean that a negative-one value in the interrupt
- * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
- * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
- * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
- * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
- * initial exit from idle.
- */
-#define DYNTICK_TASK_NEST_WIDTH 7
-#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
-#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
-#define DYNTICK_TASK_FLAG	((DYNTICK_TASK_NEST_VALUE / 8) * 2)
-#define DYNTICK_TASK_MASK	((DYNTICK_TASK_NEST_VALUE / 8) * 3)
-#define DYNTICK_TASK_EXIT_IDLE	(DYNTICK_TASK_NEST_VALUE + \
-				 DYNTICK_TASK_FLAG)
-
+/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
 #define DYNTICK_IRQ_NONIDLE	((INT_MAX / 2) + 1)
 
 
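The one surviving constant, DYNTICK_IRQ_NONIDLE, is the offset that rcu_eqs_exit() (below) crowbars into ->dynticks_nmi_nesting on exit from idle. Because it sits at roughly half of INT_MAX, a bounded number of unmatched rcu_irq_enter()/rcu_irq_exit() calls can neither drive the counter to zero (which would falsely signal idle) nor overflow it. A small userspace sketch of that arithmetic (illustrative only):

/* Sketch: the DYNTICK_IRQ_NONIDLE bias against unmatched irq enter/exit. */
#include <limits.h>
#include <stdio.h>

#define DYNTICK_IRQ_NONIDLE ((INT_MAX / 2) + 1)

int main(void)
{
	int nmi_nesting = DYNTICK_IRQ_NONIDLE;	/* set on exit from idle */

	/* A few unmatched rcu_irq_exit()s (e.g. via usermode upcalls)
	 * decrement the counter but leave it far above zero. */
	nmi_nesting -= 3;
	printf("after 3 unmatched exits: %d\n", nmi_nesting);

	/* Headroom is roughly INT_MAX/2 in each direction. */
	printf("bias value:            %d\n", DYNTICK_IRQ_NONIDLE);
	printf("room before overflow:  %d\n", INT_MAX - DYNTICK_IRQ_NONIDLE);
	return 0;
}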
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index fde0e840563f..d123474fe829 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -265,7 +265,7 @@ void rcu_bh_qs(void)
 #endif
 
 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+	.dynticks_nesting = 1,
 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 };
@@ -813,6 +813,10 @@ static void rcu_eqs_enter_common(bool user)
 /*
  * Enter an RCU extended quiescent state, which can be either the
  * idle loop or adaptive-tickless usermode execution.
+ *
+ * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
+ * the possibility of usermode upcalls having messed up our count
+ * of interrupt nesting level during the prior busy period.
  */
 static void rcu_eqs_enter(bool user)
 {
@@ -821,11 +825,11 @@ static void rcu_eqs_enter(bool user)
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		     rdtp->dynticks_nesting == 0);
+	if (rdtp->dynticks_nesting == 1)
 		rcu_eqs_enter_common(user);
 	else
-		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
+		rdtp->dynticks_nesting--;
 }
 
 /**
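The test against 1 means "this is the outermost process-level entry into idle"; anything greater is a nested entry that only needs a decrement. A minimal userspace model of the new counting (hypothetical model_* names; the real function defers the actual idle transition to rcu_eqs_enter_common()):

/* Userspace model of the new rcu_eqs_enter() counting (sketch only). */
#include <stdio.h>

static long long dynticks_nesting = 1;	/* 1 == running, not nested */

static void model_eqs_enter(void)
{
	if (dynticks_nesting == 1)
		dynticks_nesting = 0;	/* outermost: actually go idle */
	else
		dynticks_nesting--;	/* nested: stay non-idle */
}

int main(void)
{
	model_eqs_enter();
	printf("nesting = %lld\n", dynticks_nesting);	/* 0: idle */
	return 0;
}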
@@ -836,10 +840,6 @@ static void rcu_eqs_enter(bool user)
  * critical sections can occur in irq handlers in idle, a possibility
  * handled by irq_enter() and irq_exit().)
  *
- * We crowbar the ->dynticks_nesting field to zero to allow for
- * the possibility of usermode upcalls having messed up our count
- * of interrupt nesting level during the prior busy period.
- *
  * If you add or remove a call to rcu_idle_enter(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
@@ -984,6 +984,10 @@ static void rcu_eqs_exit_common(long long newval, int user)
 /*
  * Exit an RCU extended quiescent state, which can be either the
  * idle loop or adaptive-tickless usermode execution.
+ *
+ * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
+ * allow for the possibility of usermode upcalls messing up our count of
+ * interrupt nesting level during the busy period that is just now starting.
  */
 static void rcu_eqs_exit(bool user)
 {
@@ -994,12 +998,12 @@ static void rcu_eqs_exit(bool user)
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
-	if (oldval & DYNTICK_TASK_NEST_MASK) {
-		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
+	if (oldval) {
+		rdtp->dynticks_nesting++;
 	} else {
 		__this_cpu_inc(disable_rcu_irq_enter);
-		rcu_eqs_exit_common(DYNTICK_TASK_EXIT_IDLE, user);
-		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+		rcu_eqs_exit_common(1, user);
+		rdtp->dynticks_nesting = 1;
 		__this_cpu_dec(disable_rcu_irq_enter);
 		WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 	}
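The exit path is symmetric: a nested exit just increments, while the outermost exit from idle re-arms the counter at 1 and crowbars ->dynticks_nmi_nesting to DYNTICK_IRQ_NONIDLE. A matching userspace model (same caveats as the enter sketch above):

/* Userspace model of the new rcu_eqs_exit() counting (sketch only). */
#include <limits.h>
#include <stdio.h>

#define DYNTICK_IRQ_NONIDLE ((INT_MAX / 2) + 1)

static long long dynticks_nesting;	/* 0 == idle */
static int dynticks_nmi_nesting;

static void model_eqs_exit(void)
{
	if (dynticks_nesting) {
		/* Already non-idle: just count the extra nesting level. */
		dynticks_nesting++;
	} else {
		/* Outermost exit from idle: become non-idle and re-bias
		 * the interrupt-nesting count against unmatched
		 * rcu_irq_{enter,exit}() calls. */
		dynticks_nesting = 1;
		dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE;
	}
}

int main(void)
{
	model_eqs_exit();	/* idle -> running */
	model_eqs_exit();	/* nested, e.g. usermode upcall */
	printf("nesting = %lld, nmi_nesting = %d\n",
	       dynticks_nesting, dynticks_nmi_nesting);	/* 2, INT_MAX/2+1 */
	return 0;
}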
@@ -1011,11 +1015,6 @@ static void rcu_eqs_exit(bool user)
  * Exit idle mode, in other words, -enter- the mode in which RCU
  * read-side critical sections can occur.
  *
- * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
- * allow for the possibility of usermode upcalls messing up our count
- * of interrupt nesting level during the busy period that is just
- * now starting.
- *
  * If you add or remove a call to rcu_idle_exit(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
@@ -1219,7 +1218,8 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  */
static int rcu_is_cpu_rrupt_from_idle(void)
 {
-	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
+	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
+	       __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
 }
 
 /*
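With process-level and interrupt-level nesting now in separate counters, "interrupt taken from idle" becomes two independent checks: idle at process level (nesting <= 0) and exactly one interrupt level in progress (nmi_nesting <= 1). A sketch of the predicate over the two model counters (illustrative, not the kernel's per-CPU accessors):

/* Sketch: "did this interrupt arrive from idle?" over split counters. */
#include <stdbool.h>

static long long dynticks_nesting;	/* process nesting; 0 == idle */
static int dynticks_nmi_nesting;	/* irq/NMI nesting */

static bool model_rrupt_from_idle(void)
{
	/* Idle at process level, and this irq is the sole level. */
	return dynticks_nesting <= 0 && dynticks_nmi_nesting <= 1;
}

int main(void)
{
	dynticks_nmi_nesting = 1;	/* one interrupt, taken from idle */
	return !model_rrupt_from_idle();	/* exit status 0 == true */
}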
@@ -3709,7 +3709,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
+	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
@@ -3738,7 +3738,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	if (rcu_segcblist_empty(&rdp->cblist) &&	/* No early-boot CBs? */
 	    !init_nocb_callback_list(rdp))
 		rcu_segcblist_init(&rdp->cblist);	/* Re-enable callbacks. */
-	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+	rdp->dynticks->dynticks_nesting = 1;
 	rcu_dynticks_eqs_online();
 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
 
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 46a5d1991450..dbd7e3753bed 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -39,7 +39,6 @@
  */
 struct rcu_dynticks {
 	long long dynticks_nesting;	/* Track irq/process nesting level. */
-					/* Process level is worth LLONG_MAX/2. */
 	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
 	atomic_t dynticks;		/* Even value for idle, else odd. */
 	bool rcu_need_heavy_qs;		/* GP old, need heavy quiescent state. */
