author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-10-03 13:42:22 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-11-27 11:42:03 -0500
commit		58721f5da4bcd5187566f4159a4fc88f70bf74f6
tree		2223b1e8a89ff15021cc6b925ec070668939090a /kernel/rcu/tree.c
parent		6136d6e48a0138f6be5bb3427dbeb0ba07a546a4
rcu: Define rcu_irq_{enter,exit}() in terms of rcu_nmi_{enter,exit}()
RCU currently uses two different mechanisms for tracking irqs and NMIs. This is unnecessary complexity: Given that NMIs can nest and given that RCU's tracking handles such nesting, the NMI tracking mechanism can also be used to track irqs. This commit therefore defines rcu_irq_enter() in terms of rcu_nmi_enter() and rcu_irq_exit() in terms of rcu_nmi_exit().

Unfortunately, callers must still distinguish between the irq and NMI functions because additional actions are taken when an irq interrupts idle or nohz_full usermode execution, and these actions cannot always be taken from NMI handlers.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
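For orientation, this is the post-patch shape of the two irq-side functions, taken from the hunks below; the inline comments are added here as an aid and are not part of the kernel source.

void rcu_irq_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	lockdep_assert_irqs_disabled();
	/* Outermost irq that interrupted idle or nohz_full usermode:
	 * get ready to drop back into the extended quiescent state. */
	if (rdtp->dynticks_nmi_nesting == 1)
		rcu_prepare_for_idle();
	rcu_nmi_exit();			/* shared irq/NMI nesting bookkeeping */
	/* Counter reached zero: this CPU is back in an RCU-idle state. */
	if (rdtp->dynticks_nmi_nesting == 0)
		rcu_dynticks_task_enter();
}

void rcu_irq_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	lockdep_assert_irqs_disabled();
	/* Leaving an RCU extended quiescent state: undo nohz_full task accounting. */
	if (rdtp->dynticks_nmi_nesting == 0)
		rcu_dynticks_task_exit();
	rcu_nmi_enter();		/* shared irq/NMI nesting bookkeeping */
	/* Outermost irq that interrupted an idle period: clean up after it. */
	if (rdtp->dynticks_nmi_nesting == 1)
		rcu_cleanup_after_idle();
}

The calls wrapped around rcu_nmi_enter() and rcu_nmi_exit() here are the extra idle/nohz_full actions the commit message refers to, and they are the reason the irq and NMI entry points remain distinct.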
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	59
1 file changed, 21 insertions(+), 38 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 142cdd4a50c9..fde0e840563f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -266,6 +266,7 @@ void rcu_bh_qs(void)
 
 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 };
 
@@ -914,8 +915,8 @@ void rcu_nmi_exit(void)
  *
  * This code assumes that the idle loop never does anything that might
  * result in unbalanced calls to irq_enter() and irq_exit(). If your
- * architecture violates this assumption, RCU will give you what you
- * deserve, good and hard. But very infrequently and irreproducibly.
+ * architecture's idle loop violates this assumption, RCU will give you what
+ * you deserve, good and hard. But very infrequently and irreproducibly.
  *
  * Use things like work queues to work around this limitation.
  *
@@ -926,23 +927,14 @@ void rcu_nmi_exit(void)
  */
 void rcu_irq_exit(void)
 {
-	struct rcu_dynticks *rdtp;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	lockdep_assert_irqs_disabled();
-	rdtp = this_cpu_ptr(&rcu_dynticks);
-
-	/* Page faults can happen in NMI handlers, so check... */
-	if (rdtp->dynticks_nmi_nesting)
-		return;
-
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting < 1);
-	if (rdtp->dynticks_nesting <= 1) {
-		rcu_eqs_enter_common(true);
-	} else {
-		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
-		rdtp->dynticks_nesting--;
-	}
+	if (rdtp->dynticks_nmi_nesting == 1)
+		rcu_prepare_for_idle();
+	rcu_nmi_exit();
+	if (rdtp->dynticks_nmi_nesting == 0)
+		rcu_dynticks_task_enter();
 }
 
 /*
@@ -1097,12 +1089,12 @@ void rcu_nmi_enter(void)
  * sections can occur. The caller must have disabled interrupts.
  *
  * Note that the Linux kernel is fully capable of entering an interrupt
- * handler that it never exits, for example when doing upcalls to
- * user mode! This code assumes that the idle loop never does upcalls to
- * user mode. If your architecture does do upcalls from the idle loop (or
- * does anything else that results in unbalanced calls to the irq_enter()
- * and irq_exit() functions), RCU will give you what you deserve, good
- * and hard. But very infrequently and irreproducibly.
+ * handler that it never exits, for example when doing upcalls to user mode!
+ * This code assumes that the idle loop never does upcalls to user mode.
+ * If your architecture's idle loop does do upcalls to user mode (or does
+ * anything else that results in unbalanced calls to the irq_enter() and
+ * irq_exit() functions), RCU will give you what you deserve, good and hard.
+ * But very infrequently and irreproducibly.
  *
  * Use things like work queues to work around this limitation.
  *
@@ -1113,23 +1105,14 @@ void rcu_nmi_enter(void)
  */
 void rcu_irq_enter(void)
 {
-	struct rcu_dynticks *rdtp;
-	long long newval;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	lockdep_assert_irqs_disabled();
-	rdtp = this_cpu_ptr(&rcu_dynticks);
-
-	/* Page faults can happen in NMI handlers, so check... */
-	if (rdtp->dynticks_nmi_nesting)
-		return;
-
-	newval = rdtp->dynticks_nesting + 1;
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && newval == 0);
-	if (rdtp->dynticks_nesting)
-		trace_rcu_dyntick(TPS("++="), rdtp->dynticks_nesting, newval);
-	else
-		rcu_eqs_exit_common(newval, true);
-	rdtp->dynticks_nesting++;
+	if (rdtp->dynticks_nmi_nesting == 0)
+		rcu_dynticks_task_exit();
+	rcu_nmi_enter();
+	if (rdtp->dynticks_nmi_nesting == 1)
+		rcu_cleanup_after_idle();
 }
 
 /*
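As the commit message notes, callers must still use the irq-flavored functions from irq context and the NMI-flavored functions from NMI context, even though both now share the same nesting counter. A minimal sketch of the pairing, using hypothetical architecture handlers (arch_do_irq(), arch_do_nmi(), handle_arch_irq_event(), and handle_arch_nmi_event() are placeholders; real kernels reach these calls through irq_enter()/irq_exit() and nmi_enter()/nmi_exit(), which perform additional bookkeeping):

/* Hypothetical low-level irq path: it may interrupt idle or nohz_full
 * usermode, so the irq-flavored hooks do the extra idle/nohz_full work. */
void arch_do_irq(struct pt_regs *regs)
{
	rcu_irq_enter();		/* wraps rcu_nmi_enter() plus idle/nohz_full actions */
	handle_arch_irq_event(regs);	/* placeholder for the real handler */
	rcu_irq_exit();			/* wraps rcu_nmi_exit() plus idle/nohz_full actions */
}

/* Hypothetical NMI path: the idle/nohz_full actions cannot safely be taken
 * here, so it uses the NMI-flavored hooks directly. */
void arch_do_nmi(struct pt_regs *regs)
{
	rcu_nmi_enter();
	handle_arch_nmi_event(regs);	/* placeholder for the real handler */
	rcu_nmi_exit();
}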