Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r-- | kernel/rcutree.c | 260
1 file changed, 183 insertions, 77 deletions
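
For orientation before reading the diff: the patch tags each segment of the per-CPU callback list with the number of the grace period that must complete before the callbacks in that segment may be invoked. A rough sketch of the struct rcu_data fields involved follows; the nxtcompleted[] array is introduced by the companion kernel/rcutree.h change, which is outside this diffstat, so the layout shown here is an approximation rather than a quote of the header:

    /* Abridged sketch of the segmented per-CPU callback list. */
    #define RCU_DONE_TAIL		0	/* Grace period ended; ready to invoke. */
    #define RCU_WAIT_TAIL		1	/* Waiting for the current grace period. */
    #define RCU_NEXT_READY_TAIL	2	/* Waiting for the next grace period. */
    #define RCU_NEXT_TAIL		3	/* Not yet associated with a grace period. */
    #define RCU_NEXT_SIZE		4

    struct rcu_data_sketch {
    	struct rcu_head *nxtlist;                  /* One list holds all callbacks...       */
    	struct rcu_head **nxttail[RCU_NEXT_SIZE];  /* ...split into segments by tail ptrs.  */
    	unsigned long nxtcompleted[RCU_NEXT_SIZE]; /* ->completed value each segment awaits. */
    };

In the diff below, rcu_accelerate_cbs() assigns nxtcompleted[] values to newly queued callbacks, and rcu_advance_cbs() moves segments whose value has been reached into the RCU_DONE_TAIL segment.
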
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e441b77b614e..5b8ad827fd86 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -105,7 +105,7 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ | |||
105 | * The rcu_scheduler_active variable transitions from zero to one just | 105 | * The rcu_scheduler_active variable transitions from zero to one just |
106 | * before the first task is spawned. So when this variable is zero, RCU | 106 | * before the first task is spawned. So when this variable is zero, RCU |
107 | * can assume that there is but one task, allowing RCU to (for example) | 107 | * can assume that there is but one task, allowing RCU to (for example) |
108 | * optimized synchronize_sched() to a simple barrier(). When this variable | 108 | * optimize synchronize_sched() to a simple barrier(). When this variable |
109 | * is one, RCU must actually do all the hard work required to detect real | 109 | * is one, RCU must actually do all the hard work required to detect real |
110 | * grace periods. This variable is also used to suppress boot-time false | 110 | * grace periods. This variable is also used to suppress boot-time false |
111 | * positives from lockdep-RCU error checking. | 111 | * positives from lockdep-RCU error checking. |
@@ -217,12 +217,6 @@ module_param(blimit, long, 0444); | |||
217 | module_param(qhimark, long, 0444); | 217 | module_param(qhimark, long, 0444); |
218 | module_param(qlowmark, long, 0444); | 218 | module_param(qlowmark, long, 0444); |
219 | 219 | ||
220 | int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */ | ||
221 | int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; | ||
222 | |||
223 | module_param(rcu_cpu_stall_suppress, int, 0644); | ||
224 | module_param(rcu_cpu_stall_timeout, int, 0644); | ||
225 | |||
226 | static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS; | 220 | static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS; |
227 | static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS; | 221 | static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS; |
228 | 222 | ||
@@ -305,17 +299,27 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp) | |||
305 | } | 299 | } |
306 | 300 | ||
307 | /* | 301 | /* |
308 | * Does the current CPU require a yet-as-unscheduled grace period? | 302 | * Does the current CPU require a not-yet-started grace period? |
303 | * The caller must have disabled interrupts to prevent races with | ||
304 | * normal callback registry. | ||
309 | */ | 305 | */ |
310 | static int | 306 | static int |
311 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) | 307 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) |
312 | { | 308 | { |
313 | struct rcu_head **ntp; | 309 | int i; |
314 | 310 | ||
315 | ntp = rdp->nxttail[RCU_DONE_TAIL + | 311 | if (rcu_gp_in_progress(rsp)) |
316 | (ACCESS_ONCE(rsp->completed) != rdp->completed)]; | 312 | return 0; /* No, a grace period is already in progress. */ |
317 | return rdp->nxttail[RCU_DONE_TAIL] && ntp && *ntp && | 313 | if (!rdp->nxttail[RCU_NEXT_TAIL]) |
318 | !rcu_gp_in_progress(rsp); | 314 | return 0; /* No, this is a no-CBs (or offline) CPU. */ |
315 | if (*rdp->nxttail[RCU_NEXT_READY_TAIL]) | ||
316 | return 1; /* Yes, this CPU has newly registered callbacks. */ | ||
317 | for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) | ||
318 | if (rdp->nxttail[i - 1] != rdp->nxttail[i] && | ||
319 | ULONG_CMP_LT(ACCESS_ONCE(rsp->completed), | ||
320 | rdp->nxtcompleted[i])) | ||
321 | return 1; /* Yes, CBs for future grace period. */ | ||
322 | return 0; /* No grace period needed. */ | ||
319 | } | 323 | } |
320 | 324 | ||
321 | /* | 325 | /* |
@@ -336,7 +340,7 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp) | |||
336 | static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, | 340 | static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, |
337 | bool user) | 341 | bool user) |
338 | { | 342 | { |
339 | trace_rcu_dyntick("Start", oldval, 0); | 343 | trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting); |
340 | if (!user && !is_idle_task(current)) { | 344 | if (!user && !is_idle_task(current)) { |
341 | struct task_struct *idle = idle_task(smp_processor_id()); | 345 | struct task_struct *idle = idle_task(smp_processor_id()); |
342 | 346 | ||
@@ -727,7 +731,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); | |||
727 | * interrupt from idle, return true. The caller must have at least | 731 | * interrupt from idle, return true. The caller must have at least |
728 | * disabled preemption. | 732 | * disabled preemption. |
729 | */ | 733 | */ |
730 | int rcu_is_cpu_rrupt_from_idle(void) | 734 | static int rcu_is_cpu_rrupt_from_idle(void) |
731 | { | 735 | { |
732 | return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1; | 736 | return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1; |
733 | } | 737 | } |
@@ -793,28 +797,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
793 | return 0; | 797 | return 0; |
794 | } | 798 | } |
795 | 799 | ||
796 | static int jiffies_till_stall_check(void) | ||
797 | { | ||
798 | int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout); | ||
799 | |||
800 | /* | ||
801 | * Limit check must be consistent with the Kconfig limits | ||
802 | * for CONFIG_RCU_CPU_STALL_TIMEOUT. | ||
803 | */ | ||
804 | if (till_stall_check < 3) { | ||
805 | ACCESS_ONCE(rcu_cpu_stall_timeout) = 3; | ||
806 | till_stall_check = 3; | ||
807 | } else if (till_stall_check > 300) { | ||
808 | ACCESS_ONCE(rcu_cpu_stall_timeout) = 300; | ||
809 | till_stall_check = 300; | ||
810 | } | ||
811 | return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; | ||
812 | } | ||
813 | |||
814 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 800 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
815 | { | 801 | { |
816 | rsp->gp_start = jiffies; | 802 | rsp->gp_start = jiffies; |
817 | rsp->jiffies_stall = jiffies + jiffies_till_stall_check(); | 803 | rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check(); |
818 | } | 804 | } |
819 | 805 | ||
820 | /* | 806 | /* |
@@ -857,7 +843,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
857 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 843 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
858 | return; | 844 | return; |
859 | } | 845 | } |
860 | rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3; | 846 | rsp->jiffies_stall = jiffies + 3 * rcu_jiffies_till_stall_check() + 3; |
861 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 847 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
862 | 848 | ||
863 | /* | 849 | /* |
@@ -935,7 +921,7 @@ static void print_cpu_stall(struct rcu_state *rsp) | |||
935 | raw_spin_lock_irqsave(&rnp->lock, flags); | 921 | raw_spin_lock_irqsave(&rnp->lock, flags); |
936 | if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall)) | 922 | if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall)) |
937 | rsp->jiffies_stall = jiffies + | 923 | rsp->jiffies_stall = jiffies + |
938 | 3 * jiffies_till_stall_check() + 3; | 924 | 3 * rcu_jiffies_till_stall_check() + 3; |
939 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 925 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
940 | 926 | ||
941 | set_need_resched(); /* kick ourselves to get things going. */ | 927 | set_need_resched(); /* kick ourselves to get things going. */ |
@@ -966,12 +952,6 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
966 | } | 952 | } |
967 | } | 953 | } |
968 | 954 | ||
969 | static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) | ||
970 | { | ||
971 | rcu_cpu_stall_suppress = 1; | ||
972 | return NOTIFY_DONE; | ||
973 | } | ||
974 | |||
975 | /** | 955 | /** |
976 | * rcu_cpu_stall_reset - prevent further stall warnings in current grace period | 956 | * rcu_cpu_stall_reset - prevent further stall warnings in current grace period |
977 | * | 957 | * |
@@ -989,15 +969,6 @@ void rcu_cpu_stall_reset(void) | |||
989 | rsp->jiffies_stall = jiffies + ULONG_MAX / 2; | 969 | rsp->jiffies_stall = jiffies + ULONG_MAX / 2; |
990 | } | 970 | } |
991 | 971 | ||
992 | static struct notifier_block rcu_panic_block = { | ||
993 | .notifier_call = rcu_panic, | ||
994 | }; | ||
995 | |||
996 | static void __init check_cpu_stall_init(void) | ||
997 | { | ||
998 | atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); | ||
999 | } | ||
1000 | |||
1001 | /* | 972 | /* |
1002 | * Update CPU-local rcu_data state to record the newly noticed grace period. | 973 | * Update CPU-local rcu_data state to record the newly noticed grace period. |
1003 | * This is used both when we started the grace period and when we notice | 974 | * This is used both when we started the grace period and when we notice |
@@ -1071,6 +1042,145 @@ static void init_callback_list(struct rcu_data *rdp) | |||
1071 | } | 1042 | } |
1072 | 1043 | ||
1073 | /* | 1044 | /* |
1045 | * Determine the value that ->completed will have at the end of the | ||
1046 | * next subsequent grace period. This is used to tag callbacks so that | ||
1047 | * a CPU can invoke callbacks in a timely fashion even if that CPU has | ||
1048 | * been dyntick-idle for an extended period with callbacks under the | ||
1049 | * influence of RCU_FAST_NO_HZ. | ||
1050 | * | ||
1051 | * The caller must hold rnp->lock with interrupts disabled. | ||
1052 | */ | ||
1053 | static unsigned long rcu_cbs_completed(struct rcu_state *rsp, | ||
1054 | struct rcu_node *rnp) | ||
1055 | { | ||
1056 | /* | ||
1057 | * If RCU is idle, we just wait for the next grace period. | ||
1058 | * But we can only be sure that RCU is idle if we are looking | ||
1059 | * at the root rcu_node structure -- otherwise, a new grace | ||
1060 | * period might have started, but just not yet gotten around | ||
1061 | * to initializing the current non-root rcu_node structure. | ||
1062 | */ | ||
1063 | if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed) | ||
1064 | return rnp->completed + 1; | ||
1065 | |||
1066 | /* | ||
1067 | * Otherwise, wait for a possible partial grace period and | ||
1068 | * then the subsequent full grace period. | ||
1069 | */ | ||
1070 | return rnp->completed + 2; | ||
1071 | } | ||
1072 | |||
1073 | /* | ||
1074 | * If there is room, assign a ->completed number to any callbacks on | ||
1075 | * this CPU that have not already been assigned. Also accelerate any | ||
1076 | * callbacks that were previously assigned a ->completed number that has | ||
1077 | * since proven to be too conservative, which can happen if callbacks get | ||
1078 | * assigned a ->completed number while RCU is idle, but with reference to | ||
1079 | * a non-root rcu_node structure. This function is idempotent, so it does | ||
1080 | * not hurt to call it repeatedly. | ||
1081 | * | ||
1082 | * The caller must hold rnp->lock with interrupts disabled. | ||
1083 | */ | ||
1084 | static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp, | ||
1085 | struct rcu_data *rdp) | ||
1086 | { | ||
1087 | unsigned long c; | ||
1088 | int i; | ||
1089 | |||
1090 | /* If the CPU has no callbacks, nothing to do. */ | ||
1091 | if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL]) | ||
1092 | return; | ||
1093 | |||
1094 | /* | ||
1095 | * Starting from the sublist containing the callbacks most | ||
1096 | * recently assigned a ->completed number and working down, find the | ||
1097 | * first sublist that is not assignable to an upcoming grace period. | ||
1098 | * Such a sublist has something in it (first two tests) and has | ||
1099 | * a ->completed number assigned that will complete sooner than | ||
1100 | * the ->completed number for newly arrived callbacks (last test). | ||
1101 | * | ||
1102 | * The key point is that any later sublist can be assigned the | ||
1103 | * same ->completed number as the newly arrived callbacks, which | ||
1104 | * means that the callbacks in any of these later sublist can be | ||
1105 | * grouped into a single sublist, whether or not they have already | ||
1106 | * been assigned a ->completed number. | ||
1107 | */ | ||
1108 | c = rcu_cbs_completed(rsp, rnp); | ||
1109 | for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--) | ||
1110 | if (rdp->nxttail[i] != rdp->nxttail[i - 1] && | ||
1111 | !ULONG_CMP_GE(rdp->nxtcompleted[i], c)) | ||
1112 | break; | ||
1113 | |||
1114 | /* | ||
1115 | * If there are no sublist for unassigned callbacks, leave. | ||
1116 | * At the same time, advance "i" one sublist, so that "i" will | ||
1117 | * index into the sublist where all the remaining callbacks should | ||
1118 | * be grouped into. | ||
1119 | */ | ||
1120 | if (++i >= RCU_NEXT_TAIL) | ||
1121 | return; | ||
1122 | |||
1123 | /* | ||
1124 | * Assign all subsequent callbacks' ->completed number to the next | ||
1125 | * full grace period and group them all in the sublist initially | ||
1126 | * indexed by "i". | ||
1127 | */ | ||
1128 | for (; i <= RCU_NEXT_TAIL; i++) { | ||
1129 | rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
1130 | rdp->nxtcompleted[i] = c; | ||
1131 | } | ||
1132 | |||
1133 | /* Trace depending on how much we were able to accelerate. */ | ||
1134 | if (!*rdp->nxttail[RCU_WAIT_TAIL]) | ||
1135 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccWaitCB"); | ||
1136 | else | ||
1137 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccReadyCB"); | ||
1138 | } | ||
1139 | |||
1140 | /* | ||
1141 | * Move any callbacks whose grace period has completed to the | ||
1142 | * RCU_DONE_TAIL sublist, then compact the remaining sublists and | ||
1143 | * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL | ||
1144 | * sublist. This function is idempotent, so it does not hurt to | ||
1145 | * invoke it repeatedly. As long as it is not invoked -too- often... | ||
1146 | * | ||
1147 | * The caller must hold rnp->lock with interrupts disabled. | ||
1148 | */ | ||
1149 | static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, | ||
1150 | struct rcu_data *rdp) | ||
1151 | { | ||
1152 | int i, j; | ||
1153 | |||
1154 | /* If the CPU has no callbacks, nothing to do. */ | ||
1155 | if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL]) | ||
1156 | return; | ||
1157 | |||
1158 | /* | ||
1159 | * Find all callbacks whose ->completed numbers indicate that they | ||
1160 | * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. | ||
1161 | */ | ||
1162 | for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) { | ||
1163 | if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i])) | ||
1164 | break; | ||
1165 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i]; | ||
1166 | } | ||
1167 | /* Clean up any sublist tail pointers that were misordered above. */ | ||
1168 | for (j = RCU_WAIT_TAIL; j < i; j++) | ||
1169 | rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL]; | ||
1170 | |||
1171 | /* Copy down callbacks to fill in empty sublists. */ | ||
1172 | for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { | ||
1173 | if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL]) | ||
1174 | break; | ||
1175 | rdp->nxttail[j] = rdp->nxttail[i]; | ||
1176 | rdp->nxtcompleted[j] = rdp->nxtcompleted[i]; | ||
1177 | } | ||
1178 | |||
1179 | /* Classify any remaining callbacks. */ | ||
1180 | rcu_accelerate_cbs(rsp, rnp, rdp); | ||
1181 | } | ||
1182 | |||
1183 | /* | ||
1074 | * Advance this CPU's callbacks, but only if the current grace period | 1184 | * Advance this CPU's callbacks, but only if the current grace period |
1075 | * has ended. This may be called only from the CPU to whom the rdp | 1185 | * has ended. This may be called only from the CPU to whom the rdp |
1076 | * belongs. In addition, the corresponding leaf rcu_node structure's | 1186 | * belongs. In addition, the corresponding leaf rcu_node structure's |
@@ -1080,12 +1190,15 @@ static void | |||
1080 | __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | 1190 | __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) |
1081 | { | 1191 | { |
1082 | /* Did another grace period end? */ | 1192 | /* Did another grace period end? */ |
1083 | if (rdp->completed != rnp->completed) { | 1193 | if (rdp->completed == rnp->completed) { |
1084 | 1194 | ||
1085 | /* Advance callbacks. No harm if list empty. */ | 1195 | /* No, so just accelerate recent callbacks. */ |
1086 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | 1196 | rcu_accelerate_cbs(rsp, rnp, rdp); |
1087 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | 1197 | |
1088 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 1198 | } else { |
1199 | |||
1200 | /* Advance callbacks. */ | ||
1201 | rcu_advance_cbs(rsp, rnp, rdp); | ||
1089 | 1202 | ||
1090 | /* Remember that we saw this grace-period completion. */ | 1203 | /* Remember that we saw this grace-period completion. */ |
1091 | rdp->completed = rnp->completed; | 1204 | rdp->completed = rnp->completed; |
@@ -1392,17 +1505,10 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
1392 | /* | 1505 | /* |
1393 | * Because there is no grace period in progress right now, | 1506 | * Because there is no grace period in progress right now, |
1394 | * any callbacks we have up to this point will be satisfied | 1507 | * any callbacks we have up to this point will be satisfied |
1395 | * by the next grace period. So promote all callbacks to be | 1508 | * by the next grace period. So this is a good place to |
1396 | * handled after the end of the next grace period. If the | 1509 | * assign a grace period number to recently posted callbacks. |
1397 | * CPU is not yet aware of the end of the previous grace period, | ||
1398 | * we need to allow for the callback advancement that will | ||
1399 | * occur when it does become aware. Deadlock prevents us from | ||
1400 | * making it aware at this point: We cannot acquire a leaf | ||
1401 | * rcu_node ->lock while holding the root rcu_node ->lock. | ||
1402 | */ | 1510 | */ |
1403 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 1511 | rcu_accelerate_cbs(rsp, rnp, rdp); |
1404 | if (rdp->completed == rsp->completed) | ||
1405 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
1406 | 1512 | ||
1407 | rsp->gp_flags = RCU_GP_FLAG_INIT; | 1513 | rsp->gp_flags = RCU_GP_FLAG_INIT; |
1408 | raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */ | 1514 | raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */ |
@@ -1527,7 +1633,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) | |||
1527 | * This GP can't end until cpu checks in, so all of our | 1633 | * This GP can't end until cpu checks in, so all of our |
1528 | * callbacks can be processed during the next GP. | 1634 | * callbacks can be processed during the next GP. |
1529 | */ | 1635 | */ |
1530 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 1636 | rcu_accelerate_cbs(rsp, rnp, rdp); |
1531 | 1637 | ||
1532 | rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */ | 1638 | rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */ |
1533 | } | 1639 | } |
@@ -1779,7 +1885,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1779 | long bl, count, count_lazy; | 1885 | long bl, count, count_lazy; |
1780 | int i; | 1886 | int i; |
1781 | 1887 | ||
1782 | /* If no callbacks are ready, just return.*/ | 1888 | /* If no callbacks are ready, just return. */ |
1783 | if (!cpu_has_callbacks_ready_to_invoke(rdp)) { | 1889 | if (!cpu_has_callbacks_ready_to_invoke(rdp)) { |
1784 | trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0); | 1890 | trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0); |
1785 | trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist), | 1891 | trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist), |
@@ -2008,19 +2114,19 @@ __rcu_process_callbacks(struct rcu_state *rsp) | |||
2008 | 2114 | ||
2009 | WARN_ON_ONCE(rdp->beenonline == 0); | 2115 | WARN_ON_ONCE(rdp->beenonline == 0); |
2010 | 2116 | ||
2011 | /* | 2117 | /* Handle the end of a grace period that some other CPU ended. */ |
2012 | * Advance callbacks in response to end of earlier grace | ||
2013 | * period that some other CPU ended. | ||
2014 | */ | ||
2015 | rcu_process_gp_end(rsp, rdp); | 2118 | rcu_process_gp_end(rsp, rdp); |
2016 | 2119 | ||
2017 | /* Update RCU state based on any recent quiescent states. */ | 2120 | /* Update RCU state based on any recent quiescent states. */ |
2018 | rcu_check_quiescent_state(rsp, rdp); | 2121 | rcu_check_quiescent_state(rsp, rdp); |
2019 | 2122 | ||
2020 | /* Does this CPU require a not-yet-started grace period? */ | 2123 | /* Does this CPU require a not-yet-started grace period? */ |
2124 | local_irq_save(flags); | ||
2021 | if (cpu_needs_another_gp(rsp, rdp)) { | 2125 | if (cpu_needs_another_gp(rsp, rdp)) { |
2022 | raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags); | 2126 | raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */ |
2023 | rcu_start_gp(rsp, flags); /* releases above lock */ | 2127 | rcu_start_gp(rsp, flags); /* releases above lock */ |
2128 | } else { | ||
2129 | local_irq_restore(flags); | ||
2024 | } | 2130 | } |
2025 | 2131 | ||
2026 | /* If there are callbacks ready, invoke them. */ | 2132 | /* If there are callbacks ready, invoke them. */ |
@@ -2719,9 +2825,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
2719 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); | 2825 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); |
2720 | WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); | 2826 | WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); |
2721 | WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); | 2827 | WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); |
2722 | #ifdef CONFIG_RCU_USER_QS | ||
2723 | WARN_ON_ONCE(rdp->dynticks->in_user); | ||
2724 | #endif | ||
2725 | rdp->cpu = cpu; | 2828 | rdp->cpu = cpu; |
2726 | rdp->rsp = rsp; | 2829 | rdp->rsp = rsp; |
2727 | rcu_boot_init_nocb_percpu_data(rdp); | 2830 | rcu_boot_init_nocb_percpu_data(rdp); |
@@ -2938,6 +3041,10 @@ static void __init rcu_init_one(struct rcu_state *rsp, | |||
2938 | 3041 | ||
2939 | BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ | 3042 | BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ |
2940 | 3043 | ||
3044 | /* Silence gcc 4.8 warning about array index out of range. */ | ||
3045 | if (rcu_num_lvls > RCU_NUM_LVLS) | ||
3046 | panic("rcu_init_one: rcu_num_lvls overflow"); | ||
3047 | |||
2941 | /* Initialize the level-tracking arrays. */ | 3048 | /* Initialize the level-tracking arrays. */ |
2942 | 3049 | ||
2943 | for (i = 0; i < rcu_num_lvls; i++) | 3050 | for (i = 0; i < rcu_num_lvls; i++) |
@@ -3074,7 +3181,6 @@ void __init rcu_init(void) | |||
3074 | cpu_notifier(rcu_cpu_notify, 0); | 3181 | cpu_notifier(rcu_cpu_notify, 0); |
3075 | for_each_online_cpu(cpu) | 3182 | for_each_online_cpu(cpu) |
3076 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); | 3183 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); |
3077 | check_cpu_stall_init(); | ||
3078 | } | 3184 | } |
3079 | 3185 | ||
3080 | #include "rcutree_plugin.h" | 3186 | #include "rcutree_plugin.h" |
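
The new grace-period-number comparisons above (for example the "CBs for future grace period" check in cpu_needs_another_gp() and the ready-to-invoke check in rcu_advance_cbs()) use ULONG_CMP_LT()/ULONG_CMP_GE() rather than plain relational operators so that they stay correct when ->completed wraps around. A minimal stand-alone sketch of why that matters, with the macro bodies reproduced as found in include/linux/rcupdate.h of this era (worth re-checking against your tree):

    #include <limits.h>
    #include <stdio.h>

    /* Wraparound-safe counter comparisons, as in include/linux/rcupdate.h. */
    #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
    #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

    int main(void)
    {
    	unsigned long completed = ULONG_MAX - 1;	/* counter about to wrap */
    	unsigned long assigned  = completed + 2;	/* wraps around to 0 */

    	/* Plain comparison: is "assigned" still ahead of "completed"?  Wrongly says no. */
    	printf("plain >:       %d\n", assigned > completed);              /* 0 (wrong) */
    	/* Modular comparison: correctly reports a future grace period despite the wrap. */
    	printf("ULONG_CMP_LT:  %d\n", ULONG_CMP_LT(completed, assigned)); /* 1 (right) */
    	return 0;
    }
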