| field | value | date |
|---|---|---|
| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-10-03 00:56:20 -0400 |
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-11-27 11:40:10 -0500 |
| commit | fd581a91ac16187625ec509414d08d37827472c4 | |
| tree | 7690c7656dba03c57ab93b9b801709184365bef5 /kernel/rcu/tree.c | |
| parent | a0eb22bf64a755bb162b421120b9fbe7d012b85f | |
rcu: Move rcu_nmi_{enter,exit}() to prepare for consolidation
This is a code-motion-only commit that prepares to define rcu_irq_enter()
in terms of rcu_nmi_enter() and rcu_irq_exit() in terms of rcu_nmi_exit().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
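The consolidation this prepares for is easiest to see as code. The sketch below is an assumption drawn from the commit message, not part of this patch: with the NMI-path functions defined first, the irq-path hooks could delegate their ->dynticks bookkeeping to them (the lockdep assertion is likewise illustrative).

```c
/*
 * Hypothetical sketch of the intended consolidation (assumption based
 * on the commit message above; not part of this patch).
 */
void rcu_irq_enter(void)
{
	lockdep_assert_irqs_disabled();	/* irq entry runs with irqs off */
	rcu_nmi_enter();		/* reuse the NMI-safe nesting bookkeeping */
}

void rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();			/* outermost exit re-enters RCU-idle */
}
```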
Diffstat (limited to 'kernel/rcu/tree.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/rcu/tree.c | 150 |

1 file changed, 75 insertions(+), 75 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 49f661bb8ffe..419f3c38e1b6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -867,6 +867,44 @@ void rcu_user_enter(void)
 #endif /* CONFIG_NO_HZ_FULL */
 
 /**
+ * rcu_nmi_exit - inform RCU of exit from NMI context
+ *
+ * If we are returning from the outermost NMI handler that interrupted an
+ * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
+ * to let the RCU grace-period handling know that the CPU is back to
+ * being RCU-idle.
+ *
+ * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
+void rcu_nmi_exit(void)
+{
+        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
+        /*
+         * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
+         * (We are exiting an NMI handler, so RCU better be paying attention
+         * to us!)
+         */
+        WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+        WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
+
+        /*
+         * If the nesting level is not 1, the CPU wasn't RCU-idle, so
+         * leave it in non-RCU-idle state.
+         */
+        if (rdtp->dynticks_nmi_nesting != 1) {
+                WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
+                           rdtp->dynticks_nmi_nesting - 2);
+                return;
+        }
+
+        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
+        WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+        rcu_dynticks_eqs_enter();
+}
+
+/**
  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
  *
  * Exit from an interrupt handler, which might possibly result in entering
@@ -1013,6 +1051,43 @@ void rcu_user_exit(void)
 #endif /* CONFIG_NO_HZ_FULL */
 
 /**
+ * rcu_nmi_enter - inform RCU of entry to NMI context
+ *
+ * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
+ * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * that the CPU is active.  This implementation permits nested NMIs, as
+ * long as the nesting level does not overflow an int.  (You will probably
+ * run out of stack space first.)
+ *
+ * If you add or remove a call to rcu_nmi_enter(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
+void rcu_nmi_enter(void)
+{
+        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+        int incby = 2;
+
+        /* Complain about underflow. */
+        WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
+
+        /*
+         * If idle from RCU viewpoint, atomically increment ->dynticks
+         * to mark non-idle and increment ->dynticks_nmi_nesting by one.
+         * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
+         * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
+         * to be in the outermost NMI handler that interrupted an RCU-idle
+         * period (observation due to Andy Lutomirski).
+         */
+        if (rcu_dynticks_curr_cpu_in_eqs()) {
+                rcu_dynticks_eqs_exit();
+                incby = 1;
+        }
+        WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
+                   rdtp->dynticks_nmi_nesting + incby);
+        barrier();
+}
+
+/**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
  *
  * Enter an interrupt handler, which might possibly result in exiting
@@ -1071,81 +1146,6 @@ void rcu_irq_enter_irqson(void)
 }
 
 /**
- * rcu_nmi_enter - inform RCU of entry to NMI context
- *
- * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
- * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
- * that the CPU is active.  This implementation permits nested NMIs, as
- * long as the nesting level does not overflow an int.  (You will probably
- * run out of stack space first.)
- *
- * If you add or remove a call to rcu_nmi_enter(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_nmi_enter(void)
-{
-        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-        int incby = 2;
-
-        /* Complain about underflow. */
-        WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
-
-        /*
-         * If idle from RCU viewpoint, atomically increment ->dynticks
-         * to mark non-idle and increment ->dynticks_nmi_nesting by one.
-         * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
-         * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
-         * to be in the outermost NMI handler that interrupted an RCU-idle
-         * period (observation due to Andy Lutomirski).
-         */
-        if (rcu_dynticks_curr_cpu_in_eqs()) {
-                rcu_dynticks_eqs_exit();
-                incby = 1;
-        }
-        WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
-                   rdtp->dynticks_nmi_nesting + incby);
-        barrier();
-}
-
-/**
- * rcu_nmi_exit - inform RCU of exit from NMI context
- *
- * If we are returning from the outermost NMI handler that interrupted an
- * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
- * to let the RCU grace-period handling know that the CPU is back to
- * being RCU-idle.
- *
- * If you add or remove a call to rcu_nmi_exit(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_nmi_exit(void)
-{
-        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-
-        /*
-         * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
-         * (We are exiting an NMI handler, so RCU better be paying attention
-         * to us!)
-         */
-        WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
-        WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
-
-        /*
-         * If the nesting level is not 1, the CPU wasn't RCU-idle, so
-         * leave it in non-RCU-idle state.
-         */
-        if (rdtp->dynticks_nmi_nesting != 1) {
-                WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
-                           rdtp->dynticks_nmi_nesting - 2);
-                return;
-        }
-
-        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
-        WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
-        rcu_dynticks_eqs_enter();
-}
-
-/**
  * rcu_is_watching - see if RCU thinks that the current CPU is idle
  *
  * Return true if RCU is watching the running CPU, which means that this
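The increment-by-one-versus-two trick in rcu_nmi_enter() rewards a worked example. Below is a standalone model (illustration only; the names mirror the kernel's fields, but none of this is kernel code, and it ignores the atomics and the store-tearing concerns the real WRITE_ONCE() calls address). Entering from RCU-idle adds 1 while every other entry adds 2, so the counter is odd exactly when an RCU-idle period was interrupted, and equals 1 exactly in the outermost such handler.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Standalone model of the nesting bookkeeping (illustration only). */
static int nmi_nesting;         /* models rdtp->dynticks_nmi_nesting */
static bool rcu_idle = true;    /* models rcu_dynticks_curr_cpu_in_eqs() */

static void model_nmi_enter(void)
{
        int incby = 2;

        if (rcu_idle) {         /* outermost NMI from idle: 0 -> 1 */
                rcu_idle = false;
                incby = 1;
        }
        nmi_nesting += incby;   /* nested NMIs from idle land on 3, 5, ... */
}

static void model_nmi_exit(void)
{
        assert(nmi_nesting > 0 && !rcu_idle);
        if (nmi_nesting != 1) { /* still nested, or interrupted non-idle code */
                nmi_nesting -= 2;
                return;
        }
        nmi_nesting = 0;        /* outermost exit: restore RCU-idleness */
        rcu_idle = true;
}

int main(void)
{
        /* An NMI arrives while idle, then a nested NMI arrives on top. */
        model_nmi_enter();      /* nesting = 1: outermost, interrupted idle */
        model_nmi_enter();      /* nesting = 3: nested */
        model_nmi_exit();       /* nesting = 1 again */
        model_nmi_exit();       /* nesting = 0, back to RCU-idle */
        assert(nmi_nesting == 0 && rcu_idle);
        printf("nesting model OK\n");
        return 0;
}
```

Running the model walks the counter through 1, 3, 1, 0; an NMI that interrupts non-idle code instead moves it 2 -> 0 without ever touching the idle state, which is why only the value 1 triggers the eqs transition in the real code.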
