Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	273
1 file changed, 177 insertions(+), 96 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7717b95c2027..71bc79791cd9 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,6 +46,8 @@
 #include <linux/mutex.h>
 #include <linux/time.h>
 
+#include "rcutree.h"
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
 struct lockdep_map rcu_lock_map =
@@ -72,30 +74,59 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 	.n_force_qs_ngp = 0, \
 }
 
-struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state);
-DEFINE_PER_CPU(struct rcu_data, rcu_data);
+struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+extern long rcu_batches_completed_sched(void);
+static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
+static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
+			  struct rcu_node *rnp, unsigned long flags);
+static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
+#ifdef CONFIG_HOTPLUG_CPU
+static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+static void __rcu_process_callbacks(struct rcu_state *rsp,
+				    struct rcu_data *rdp);
+static void __call_rcu(struct rcu_head *head,
+		       void (*func)(struct rcu_head *rcu),
+		       struct rcu_state *rsp);
+static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
+					   int preemptable);
+
+#include "rcutree_plugin.h"
+
 /*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
+ * Note a quiescent state.  Because we do not need to know
  * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
+ * one since the start of the grace period, this just sets a flag.
  */
-void rcu_qsctr_inc(int cpu)
+void rcu_sched_qs(int cpu)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	local_irq_save(flags);
+	rdp = &per_cpu(rcu_sched_data, cpu);
 	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
+	rcu_preempt_qs(cpu);
+	local_irq_restore(flags);
 }
 
-void rcu_bh_qsctr_inc(int cpu)
+void rcu_bh_qs(int cpu)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	local_irq_save(flags);
+	rdp = &per_cpu(rcu_bh_data, cpu);
 	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -110,15 +141,16 @@ static int qhimark = 10000;	/* If this many pending, ignore blimit. */
 static int qlowmark = 100;	/* Once only this many pending, use blimit. */
 
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
+static int rcu_pending(int cpu);
 
 /*
- * Return the number of RCU batches processed thus far for debug & stats.
+ * Return the number of RCU-sched batches processed thus far for debug & stats.
  */
-long rcu_batches_completed(void)
+long rcu_batches_completed_sched(void)
 {
-	return rcu_state.completed;
+	return rcu_sched_state.completed;
 }
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
+EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
 /*
  * Return the number of RCU BH batches processed thus far for debug & stats.
@@ -181,6 +213,10 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 		return 1;
 	}
 
+	/* If preemptable RCU, no point in sending reschedule IPI. */
+	if (rdp->preemptable)
+		return 0;
+
 	/* The CPU is online, so send it a reschedule IPI. */
 	if (rdp->cpu != smp_processor_id())
 		smp_send_reschedule(rdp->cpu);
@@ -193,7 +229,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 #endif /* #ifdef CONFIG_SMP */
 
 #ifdef CONFIG_NO_HZ
-static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
 
 /**
  * rcu_enter_nohz - inform RCU that current CPU is entering nohz
@@ -213,7 +248,7 @@ void rcu_enter_nohz(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	rdtp->dynticks++;
 	rdtp->dynticks_nesting--;
-	WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 	local_irq_restore(flags);
 }
 
@@ -232,7 +267,7 @@ void rcu_exit_nohz(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	rdtp->dynticks++;
 	rdtp->dynticks_nesting++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
 	local_irq_restore(flags);
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
@@ -251,7 +286,7 @@ void rcu_nmi_enter(void)
 	if (rdtp->dynticks & 0x1)
 		return;
 	rdtp->dynticks_nmi++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -270,7 +305,7 @@ void rcu_nmi_exit(void)
 		return;
 	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	rdtp->dynticks_nmi++;
-	WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
 }
 
 /**
@@ -286,7 +321,7 @@ void rcu_irq_enter(void)
 	if (rdtp->dynticks_nesting++)
 		return;
 	rdtp->dynticks++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -305,10 +340,10 @@ void rcu_irq_exit(void)
 		return;
 	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	rdtp->dynticks++;
-	WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
 	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (__get_cpu_var(rcu_data).nxtlist ||
+	if (__get_cpu_var(rcu_sched_data).nxtlist ||
 	    __get_cpu_var(rcu_bh_data).nxtlist)
 		set_need_resched();
 }
@@ -461,6 +496,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 
 	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
 	for (; rnp_cur < rnp_end; rnp_cur++) {
+		rcu_print_task_stall(rnp);
 		if (rnp_cur->qsmask == 0)
 			continue;
 		for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
@@ -674,6 +710,19 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 
 /*
+ * Clean up after the prior grace period and let rcu_start_gp() start up
+ * the next grace period if one is needed.  Note that the caller must
+ * hold rnp->lock, as required by rcu_start_gp(), which will release it.
+ */
+static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
+	__releases(rnp->lock)
+{
+	rsp->completed = rsp->gpnum;
+	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
+	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
+}
+
+/*
  * Similar to cpu_quiet(), for which it is a helper function.  Allows
  * a group of CPUs to be quieted at one go, though all the CPUs in the
  * group must be represented by the same leaf rcu_node structure.
@@ -694,7 +743,7 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 		return;
 	}
 	rnp->qsmask &= ~mask;
-	if (rnp->qsmask != 0) {
+	if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
 
 		/* Other bits still set at this level, so done. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
@@ -714,14 +763,10 @@
 
 	/*
 	 * Get here if we are the last CPU to pass through a quiescent
-	 * state for this grace period.  Clean up and let rcu_start_gp()
-	 * start up the next grace period if one is needed.  Note that
-	 * we still hold rnp->lock, as required by rcu_start_gp(), which
-	 * will release it.
+	 * state for this grace period.  Invoke cpu_quiet_msk_finish()
+	 * to clean up and start the next grace period if one is needed.
 	 */
-	rsp->completed = rsp->gpnum;
-	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
-	rcu_start_gp(rsp, flags); /* releases rnp->lock. */
+	cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */
 }
 
 /*
@@ -828,11 +873,12 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		spin_lock(&rnp->lock);		/* irqs already disabled. */
 		rnp->qsmaskinit &= ~mask;
 		if (rnp->qsmaskinit != 0) {
-			spin_unlock(&rnp->lock); /* irqs already disabled. */
+			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
+		rcu_preempt_offline_tasks(rsp, rnp);
 		mask = rnp->grpmask;
-		spin_unlock(&rnp->lock);	/* irqs already disabled. */
+		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
 	} while (rnp != NULL);
 	lastcomp = rsp->completed;
@@ -845,7 +891,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	/*
 	 * Move callbacks from the outgoing CPU to the running CPU.
 	 * Note that the outgoing CPU is now quiscent, so it is now
-	 * (uncharacteristically) safe to access it rcu_data structure.
+	 * (uncharacteristically) safe to access its rcu_data structure.
 	 * Note also that we must carefully retain the order of the
 	 * outgoing CPU's callbacks in order for rcu_barrier() to work
 	 * correctly.  Finally, note that we start all the callbacks
@@ -876,8 +922,9 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
  */
 static void rcu_offline_cpu(int cpu)
 {
-	__rcu_offline_cpu(cpu, &rcu_state);
+	__rcu_offline_cpu(cpu, &rcu_sched_state);
 	__rcu_offline_cpu(cpu, &rcu_bh_state);
+	rcu_preempt_offline_cpu(cpu);
 }
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -963,6 +1010,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
+	if (!rcu_pending(cpu))
+		return; /* if nothing for RCU to do. */
 	if (user ||
 	    (idle_cpu(cpu) && rcu_scheduler_active &&
 	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -971,17 +1020,16 @@ void rcu_check_callbacks(int cpu, int user)
 		 * Get here if this CPU took its interrupt from user
 		 * mode or from the idle loop, and if this is not a
 		 * nested interrupt.  In this case, the CPU is in
-		 * a quiescent state, so count it.
+		 * a quiescent state, so note it.
 		 *
 		 * No memory barrier is required here because both
-		 * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference
-		 * only CPU-local variables that other CPUs neither
-		 * access nor modify, at least not while the corresponding
-		 * CPU is online.
+		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
+		 * variables that other CPUs neither access nor modify,
+		 * at least not while the corresponding CPU is online.
 		 */
 
-		rcu_qsctr_inc(cpu);
-		rcu_bh_qsctr_inc(cpu);
+		rcu_sched_qs(cpu);
+		rcu_bh_qs(cpu);
 
 	} else if (!in_softirq()) {
 
@@ -989,11 +1037,12 @@
 		 * Get here if this CPU did not take its interrupt from
 		 * softirq, in other words, if it is not interrupting
 		 * a rcu_bh read-side critical section.  This is an _bh
-		 * critical section, so count it.
+		 * critical section, so note it.
 		 */
 
-		rcu_bh_qsctr_inc(cpu);
+		rcu_bh_qs(cpu);
 	}
+	rcu_preempt_check_callbacks(cpu);
 	raise_softirq(RCU_SOFTIRQ);
 }
 
@@ -1132,6 +1181,8 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 
+	WARN_ON_ONCE(rdp->beenonline == 0);
+
 	/*
 	 * If an RCU GP has gone long enough, go check for dyntick
 	 * idle CPUs and, if needed, send resched IPIs.
@@ -1170,8 +1221,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 	 */
 	smp_mb(); /* See above block comment. */
 
-	__rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data));
+	__rcu_process_callbacks(&rcu_sched_state,
+				&__get_cpu_var(rcu_sched_data));
 	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+	rcu_preempt_process_callbacks();
 
 	/*
 	 * Memory references from any later RCU read-side critical sections
@@ -1227,13 +1280,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 }
 
 /*
- * Queue an RCU callback for invocation after a grace period.
+ * Queue an RCU-sched callback for invocation after a grace period.
  */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-	__call_rcu(head, func, &rcu_state);
+	__call_rcu(head, func, &rcu_sched_state);
 }
-EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_sched);
 
 /*
  * Queue an RCU for invocation after a quicker grace period.
@@ -1305,10 +1358,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  * by the current CPU, returning 1 if so.  This function is part of the
  * RCU implementation; it is -not- an exported member of the RCU API.
  */
-int rcu_pending(int cpu)
+static int rcu_pending(int cpu)
 {
-	return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) ||
-	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
+	return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
+	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
+	       rcu_preempt_pending(cpu);
 }
 
 /*
@@ -1320,27 +1374,46 @@ int rcu_pending(int cpu)
 int rcu_needs_cpu(int cpu)
 {
 	/* RCU callbacks either ready or pending? */
-	return per_cpu(rcu_data, cpu).nxtlist ||
-	       per_cpu(rcu_bh_data, cpu).nxtlist;
+	return per_cpu(rcu_sched_data, cpu).nxtlist ||
+	       per_cpu(rcu_bh_data, cpu).nxtlist ||
+	       rcu_preempt_needs_cpu(cpu);
 }
 
 /*
- * Initialize a CPU's per-CPU RCU data.  We take this "scorched earth"
- * approach so that we don't have to worry about how long the CPU has
- * been gone, or whether it ever was online previously.  We do trust the
- * ->mynode field, as it is constant for a given struct rcu_data and
- * initialized during early boot.
- *
- * Note that only one online or offline event can be happening at a given
- * time.  Note also that we can accept some slop in the rsp->completed
- * access due to the fact that this CPU cannot possibly have any RCU
- * callbacks in flight yet.
+ * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
-static void __cpuinit
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+static void __init
+rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
 	int i;
+	struct rcu_data *rdp = rsp->rda[cpu];
+	struct rcu_node *rnp = rcu_get_root(rsp);
+
+	/* Set up local state, ensuring consistent view of global state. */
+	spin_lock_irqsave(&rnp->lock, flags);
+	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
+	rdp->nxtlist = NULL;
+	for (i = 0; i < RCU_NEXT_SIZE; i++)
+		rdp->nxttail[i] = &rdp->nxtlist;
+	rdp->qlen = 0;
+#ifdef CONFIG_NO_HZ
+	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
+#endif /* #ifdef CONFIG_NO_HZ */
+	rdp->cpu = cpu;
+	spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
+ * Initialize a CPU's per-CPU RCU data.  Note that only one online or
+ * offline event can be happening at a given time.  Note also that we
+ * can accept some slop in the rsp->completed access due to the fact
+ * that this CPU cannot possibly have any RCU callbacks in flight yet.
+ */
+static void __cpuinit
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
+{
+	unsigned long flags;
 	long lastcomp;
 	unsigned long mask;
 	struct rcu_data *rdp = rsp->rda[cpu];
@@ -1354,17 +1427,9 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->passed_quiesc = 0;  /* We could be racing with new GP, */
 	rdp->qs_pending = 1;	 /*  so set up to respond to current GP. */
 	rdp->beenonline = 1;	 /* We have now been online. */
+	rdp->preemptable = preemptable;
 	rdp->passed_quiesc_completed = lastcomp - 1;
-	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
-	rdp->nxtlist = NULL;
-	for (i = 0; i < RCU_NEXT_SIZE; i++)
-		rdp->nxttail[i] = &rdp->nxtlist;
-	rdp->qlen = 0;
 	rdp->blimit = blimit;
-#ifdef CONFIG_NO_HZ
-	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-#endif /* #ifdef CONFIG_NO_HZ */
-	rdp->cpu = cpu;
 	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
 	/*
@@ -1405,16 +1470,16 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 
 static void __cpuinit rcu_online_cpu(int cpu)
 {
-	rcu_init_percpu_data(cpu, &rcu_state);
-	rcu_init_percpu_data(cpu, &rcu_bh_state);
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+	rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
+	rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
+	rcu_preempt_init_percpu_data(cpu);
 }
 
 /*
- * Handle CPU online/offline notifcation events.
+ * Handle CPU online/offline notification events.
  */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
+int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+			     unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
 
@@ -1486,6 +1551,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		rnp = rsp->level[i];
 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
 			spin_lock_init(&rnp->lock);
+			rnp->gpnum = 0;
 			rnp->qsmask = 0;
 			rnp->qsmaskinit = 0;
 			rnp->grplo = j * cpustride;
@@ -1503,16 +1569,20 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 				j / rsp->levelspread[i - 1];
 		}
 		rnp->level = i;
+		INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
+		INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
 	}
 }
 
 /*
- * Helper macro for __rcu_init().  To be used nowhere else!
- * Assigns leaf node pointers into each CPU's rcu_data structure.
+ * Helper macro for __rcu_init() and __rcu_init_preempt().  To be used
+ * nowhere else!  Assigns leaf node pointers into each CPU's rcu_data
+ * structure.
  */
-#define RCU_DATA_PTR_INIT(rsp, rcu_data) \
+#define RCU_INIT_FLAVOR(rsp, rcu_data) \
 do { \
+	rcu_init_one(rsp); \
 	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
 	j = 0; \
 	for_each_possible_cpu(i) { \
@@ -1520,32 +1590,43 @@ do { \
 		j++; \
 		per_cpu(rcu_data, i).mynode = &rnp[j]; \
 		(rsp)->rda[i] = &per_cpu(rcu_data, i); \
+		rcu_boot_init_percpu_data(i, rsp); \
 	} \
 } while (0)
 
-static struct notifier_block __cpuinitdata rcu_nb = {
-	.notifier_call	= rcu_cpu_notify,
-};
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+void __init __rcu_init_preempt(void)
+{
+	int i;			/* All used by RCU_INIT_FLAVOR(). */
+	int j;
+	struct rcu_node *rnp;
+
+	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
+}
+
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+void __init __rcu_init_preempt(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 void __init __rcu_init(void)
 {
-	int i;			/* All used by RCU_DATA_PTR_INIT(). */
+	int i;			/* All used by RCU_INIT_FLAVOR(). */
 	int j;
 	struct rcu_node *rnp;
 
-	printk(KERN_INFO "Hierarchical RCU implementation.\n");
+	rcu_bootup_announce();
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-	rcu_init_one(&rcu_state);
-	RCU_DATA_PTR_INIT(&rcu_state, rcu_data);
-	rcu_init_one(&rcu_bh_state);
-	RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
-
-	for_each_online_cpu(i)
-		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
-	/* Register notifier for non-boot CPUs */
-	register_cpu_notifier(&rcu_nb);
+	RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
+	RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
+	__rcu_init_preempt();
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
 module_param(blimit, int, 0);
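
For context, a minimal sketch of how a caller might use the call_rcu_sched()
API introduced above (the renamed sched-flavor call_rcu()).  This sketch is
illustrative only and is not part of the patch; "struct foo" and its helpers
are hypothetical, while rcu_assign_pointer(), call_rcu_sched(), and
container_of() are the real kernel interfaces.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;		/* embedded for deferred free */
};

static struct foo *global_foo;		/* RCU-protected pointer */

/* Invoked once all pre-existing RCU-sched readers have finished. */
static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Updaters must be serialized by the caller (e.g., under a mutex). */
static void foo_update(int new_data)
{
	struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	struct foo *old_fp;

	if (!new_fp)
		return;
	new_fp->data = new_data;
	old_fp = global_foo;
	rcu_assign_pointer(global_foo, new_fp);	/* publish new version */
	if (old_fp)
		call_rcu_sched(&old_fp->rcu, foo_reclaim); /* deferred free */
}

Readers would bracket their accesses with rcu_read_lock_sched() and
rcu_read_unlock_sched() and fetch the pointer via rcu_dereference(), which
is what makes the deferred kfree() above safe.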