author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 16:20:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 16:20:18 -0400
commit    eee2775d9924b22643bd89b2e568cc5eed7e8a04 (patch)
tree      095ad7851895c5d39596f3ff7ee1e078235a2501 /kernel/rcutree.c
parent    53e16fbd30005905168d9b75555fdc7e0a2eac58 (diff)
parent    7db905e636f08ea5bc9825c1f73d77802e8ccad5 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (28 commits)
rcu: Move end of special early-boot RCU operation earlier
rcu: Changes from reviews: avoid casts, fix/add warnings, improve comments
rcu: Create rcutree plugins to handle hotplug CPU for multi-level trees
rcu: Remove lockdep annotations from RCU's _notrace() API members
rcu: Add #ifdef to suppress __rcu_offline_cpu() warning in !HOTPLUG_CPU builds
rcu: Add CPU-offline processing for single-node configurations
rcu: Add "notrace" to RCU function headers used by ftrace
rcu: Remove CONFIG_PREEMPT_RCU
rcu: Merge preemptable-RCU functionality into hierarchical RCU
rcu: Simplify rcu_pending()/rcu_check_callbacks() API
rcu: Use debugfs_remove_recursive() simplify code.
rcu: Merge per-RCU-flavor initialization into pre-existing macro
rcu: Fix online/offline indication for rcudata.csv trace file
rcu: Consolidate sparse and lockdep declarations in include/linux/rcupdate.h
rcu: Renamings to increase RCU clarity
rcu: Move private definitions from include/linux/rcutree.h to kernel/rcutree.h
rcu: Expunge lingering references to CONFIG_CLASSIC_RCU, optimize on !SMP
rcu: Delay rcu_barrier() wait until beginning of next CPU-hotunplug operation.
rcu: Fix typo in rcu_irq_exit() comment header
rcu: Make rcupreempt_trace.c look at offline CPUs
...
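The thread running through these commits is that tree RCU's "classic" flavor becomes RCU-sched: rcu_qsctr_inc() is renamed rcu_sched_qs() and this file's call_rcu() becomes call_rcu_sched(), as the diff below shows. As a reference point, an updater written against the renamed API could look like the following sketch (struct foo, foo_reclaim(), and foo_release() are illustrative names, not part of this merge):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {				/* hypothetical RCU-protected object */
		int data;
		struct rcu_head rcu;
	};

	static void foo_reclaim(struct rcu_head *head)
	{
		/* Recover the enclosing object and free it after the grace period. */
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_release(struct foo *fp)
	{
		/*
		 * Defer the free until every pre-existing RCU-sched
		 * read-side critical section has completed.
		 */
		call_rcu_sched(&fp->rcu, foo_reclaim);
	}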
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c | 273
1 file changed, 177 insertions(+), 96 deletions(-)
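On the read side, the grace periods that call_rcu_sched() waits for are delimited by preemption-disabled regions. A minimal hedged example of such a reader, reusing the hypothetical struct foo above (global_foo is likewise a stand-in, not from this commit); rcu_read_lock_sched() and rcu_read_unlock_sched() were already part of the RCU API at this point:

	static struct foo *global_foo;		/* hypothetical RCU-protected pointer */

	static int read_foo_data(void)
	{
		struct foo *fp;
		int val;

		rcu_read_lock_sched();		/* begin RCU-sched reader (preemption off) */
		fp = rcu_dereference(global_foo);
		val = fp ? fp->data : -1;
		rcu_read_unlock_sched();	/* end reader; grace periods may now elapse */
		return val;
	}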
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 9c5fa9fc57ec..6b11b07cfe7f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -47,6 +47,8 @@
 #include <linux/mutex.h>
 #include <linux/time.h>
 
+#include "rcutree.h"
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
 struct lockdep_map rcu_lock_map =
@@ -73,30 +75,59 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 	.n_force_qs_ngp = 0, \
 }
 
-struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state);
-DEFINE_PER_CPU(struct rcu_data, rcu_data);
+struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+extern long rcu_batches_completed_sched(void);
+static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
+static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
+			  struct rcu_node *rnp, unsigned long flags);
+static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
+#ifdef CONFIG_HOTPLUG_CPU
+static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+static void __rcu_process_callbacks(struct rcu_state *rsp,
+				    struct rcu_data *rdp);
+static void __call_rcu(struct rcu_head *head,
+		       void (*func)(struct rcu_head *rcu),
+		       struct rcu_state *rsp);
+static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
+					   int preemptable);
+
+#include "rcutree_plugin.h"
+
 /*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
+ * Note a quiescent state.  Because we do not need to know
  * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
+ * one since the start of the grace period, this just sets a flag.
  */
-void rcu_qsctr_inc(int cpu)
+void rcu_sched_qs(int cpu)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	local_irq_save(flags);
+	rdp = &per_cpu(rcu_sched_data, cpu);
 	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
+	rcu_preempt_qs(cpu);
+	local_irq_restore(flags);
 }
 
-void rcu_bh_qsctr_inc(int cpu)
+void rcu_bh_qs(int cpu)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	local_irq_save(flags);
+	rdp = &per_cpu(rcu_bh_data, cpu);
 	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -111,15 +142,16 @@ static int qhimark = 10000;	/* If this many pending, ignore blimit. */
 static int qlowmark = 100;	/* Once only this many pending, use blimit. */
 
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
+static int rcu_pending(int cpu);
 
 /*
- * Return the number of RCU batches processed thus far for debug & stats.
+ * Return the number of RCU-sched batches processed thus far for debug & stats.
  */
-long rcu_batches_completed(void)
+long rcu_batches_completed_sched(void)
 {
-	return rcu_state.completed;
+	return rcu_sched_state.completed;
 }
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
+EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
 /*
  * Return the number of RCU BH batches processed thus far for debug & stats.
@@ -182,6 +214,10 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 		return 1;
 	}
 
+	/* If preemptable RCU, no point in sending reschedule IPI. */
+	if (rdp->preemptable)
+		return 0;
+
 	/* The CPU is online, so send it a reschedule IPI. */
 	if (rdp->cpu != smp_processor_id())
 		smp_send_reschedule(rdp->cpu);
@@ -194,7 +230,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 #endif /* #ifdef CONFIG_SMP */
 
 #ifdef CONFIG_NO_HZ
-static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
 
 /**
  * rcu_enter_nohz - inform RCU that current CPU is entering nohz
@@ -214,7 +249,7 @@ void rcu_enter_nohz(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	rdtp->dynticks++;
 	rdtp->dynticks_nesting--;
-	WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 	local_irq_restore(flags);
 }
 
@@ -233,7 +268,7 @@ void rcu_exit_nohz(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	rdtp->dynticks++;
 	rdtp->dynticks_nesting++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
 	local_irq_restore(flags);
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
@@ -252,7 +287,7 @@ void rcu_nmi_enter(void)
 	if (rdtp->dynticks & 0x1)
 		return;
 	rdtp->dynticks_nmi++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -271,7 +306,7 @@ void rcu_nmi_exit(void)
 		return;
 	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	rdtp->dynticks_nmi++;
-	WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
 }
 
 /**
@@ -287,7 +322,7 @@ void rcu_irq_enter(void)
 	if (rdtp->dynticks_nesting++)
 		return;
 	rdtp->dynticks++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -306,10 +341,10 @@ void rcu_irq_exit(void)
 		return;
 	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	rdtp->dynticks++;
-	WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
 	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (__get_cpu_var(rcu_data).nxtlist ||
+	if (__get_cpu_var(rcu_sched_data).nxtlist ||
 	    __get_cpu_var(rcu_bh_data).nxtlist)
 		set_need_resched();
 }
@@ -462,6 +497,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 
 	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
 	for (; rnp_cur < rnp_end; rnp_cur++) {
+		rcu_print_task_stall(rnp);
 		if (rnp_cur->qsmask == 0)
 			continue;
 		for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
@@ -679,6 +715,19 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 
 /*
+ * Clean up after the prior grace period and let rcu_start_gp() start up
+ * the next grace period if one is needed.  Note that the caller must
+ * hold rnp->lock, as required by rcu_start_gp(), which will release it.
+ */
+static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
+	__releases(rnp->lock)
+{
+	rsp->completed = rsp->gpnum;
+	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
+	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
+}
+
+/*
  * Similar to cpu_quiet(), for which it is a helper function.  Allows
  * a group of CPUs to be quieted at one go, though all the CPUs in the
  * group must be represented by the same leaf rcu_node structure.
@@ -699,7 +748,7 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 		return;
 	}
 	rnp->qsmask &= ~mask;
-	if (rnp->qsmask != 0) {
+	if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
 
 		/* Other bits still set at this level, so done. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
@@ -719,14 +768,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 
 	/*
 	 * Get here if we are the last CPU to pass through a quiescent
-	 * state for this grace period.  Clean up and let rcu_start_gp()
-	 * start up the next grace period if one is needed.  Note that
-	 * we still hold rnp->lock, as required by rcu_start_gp(), which
-	 * will release it.
+	 * state for this grace period.  Invoke cpu_quiet_msk_finish()
+	 * to clean up and start the next grace period if one is needed.
 	 */
-	rsp->completed = rsp->gpnum;
-	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
-	rcu_start_gp(rsp, flags); /* releases rnp->lock. */
+	cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */
 }
 
 /*
@@ -833,11 +878,12 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		spin_lock(&rnp->lock);		/* irqs already disabled. */
 		rnp->qsmaskinit &= ~mask;
 		if (rnp->qsmaskinit != 0) {
-			spin_unlock(&rnp->lock); /* irqs already disabled. */
+			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
+		rcu_preempt_offline_tasks(rsp, rnp);
 		mask = rnp->grpmask;
-		spin_unlock(&rnp->lock);	/* irqs already disabled. */
+		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
 	} while (rnp != NULL);
 	lastcomp = rsp->completed;
@@ -850,7 +896,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	/*
 	 * Move callbacks from the outgoing CPU to the running CPU.
 	 * Note that the outgoing CPU is now quiscent, so it is now
-	 * (uncharacteristically) safe to access it rcu_data structure.
+	 * (uncharacteristically) safe to access its rcu_data structure.
 	 * Note also that we must carefully retain the order of the
 	 * outgoing CPU's callbacks in order for rcu_barrier() to work
 	 * correctly.  Finally, note that we start all the callbacks
@@ -881,8 +927,9 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
  */
 static void rcu_offline_cpu(int cpu)
 {
-	__rcu_offline_cpu(cpu, &rcu_state);
+	__rcu_offline_cpu(cpu, &rcu_sched_state);
 	__rcu_offline_cpu(cpu, &rcu_bh_state);
+	rcu_preempt_offline_cpu(cpu);
 }
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -968,6 +1015,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
+	if (!rcu_pending(cpu))
+		return; /* if nothing for RCU to do. */
 	if (user ||
 	    (idle_cpu(cpu) && rcu_scheduler_active &&
 	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -976,17 +1025,16 @@ void rcu_check_callbacks(int cpu, int user)
 		 * Get here if this CPU took its interrupt from user
 		 * mode or from the idle loop, and if this is not a
 		 * nested interrupt.  In this case, the CPU is in
-		 * a quiescent state, so count it.
+		 * a quiescent state, so note it.
 		 *
 		 * No memory barrier is required here because both
-		 * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference
-		 * only CPU-local variables that other CPUs neither
-		 * access nor modify, at least not while the corresponding
-		 * CPU is online.
+		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
+		 * variables that other CPUs neither access nor modify,
+		 * at least not while the corresponding CPU is online.
 		 */
 
-		rcu_qsctr_inc(cpu);
-		rcu_bh_qsctr_inc(cpu);
+		rcu_sched_qs(cpu);
+		rcu_bh_qs(cpu);
 
 	} else if (!in_softirq()) {
 
@@ -994,11 +1042,12 @@ void rcu_check_callbacks(int cpu, int user)
 		 * Get here if this CPU did not take its interrupt from
 		 * softirq, in other words, if it is not interrupting
 		 * a rcu_bh read-side critical section.  This is an _bh
-		 * critical section, so count it.
+		 * critical section, so note it.
 		 */
 
-		rcu_bh_qsctr_inc(cpu);
+		rcu_bh_qs(cpu);
 	}
+	rcu_preempt_check_callbacks(cpu);
 	raise_softirq(RCU_SOFTIRQ);
 }
 
@@ -1137,6 +1186,8 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 
+	WARN_ON_ONCE(rdp->beenonline == 0);
+
 	/*
 	 * If an RCU GP has gone long enough, go check for dyntick
 	 * idle CPUs and, if needed, send resched IPIs.
@@ -1175,8 +1226,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 	 */
 	smp_mb(); /* See above block comment. */
 
-	__rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data));
+	__rcu_process_callbacks(&rcu_sched_state,
+				&__get_cpu_var(rcu_sched_data));
 	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+	rcu_preempt_process_callbacks();
 
 	/*
 	 * Memory references from any later RCU read-side critical sections
@@ -1232,13 +1285,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 }
 
 /*
- * Queue an RCU callback for invocation after a grace period.
+ * Queue an RCU-sched callback for invocation after a grace period.
  */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-	__call_rcu(head, func, &rcu_state);
+	__call_rcu(head, func, &rcu_sched_state);
 }
-EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_sched);
 
 /*
  * Queue an RCU for invocation after a quicker grace period.
@@ -1310,10 +1363,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
-int rcu_pending(int cpu)
+static int rcu_pending(int cpu)
 {
-	return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) ||
-	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
+	return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
+	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
+	       rcu_preempt_pending(cpu);
 }
 
 /*
@@ -1325,27 +1379,46 @@ int rcu_pending(int cpu)
 int rcu_needs_cpu(int cpu)
 {
 	/* RCU callbacks either ready or pending? */
-	return per_cpu(rcu_data, cpu).nxtlist ||
-	       per_cpu(rcu_bh_data, cpu).nxtlist;
+	return per_cpu(rcu_sched_data, cpu).nxtlist ||
+	       per_cpu(rcu_bh_data, cpu).nxtlist ||
+	       rcu_preempt_needs_cpu(cpu);
 }
 
 /*
- * Initialize a CPU's per-CPU RCU data.  We take this "scorched earth"
- * approach so that we don't have to worry about how long the CPU has
- * been gone, or whether it ever was online previously.  We do trust the
- * ->mynode field, as it is constant for a given struct rcu_data and
- * initialized during early boot.
- *
- * Note that only one online or offline event can be happening at a given
- * time.  Note also that we can accept some slop in the rsp->completed
- * access due to the fact that this CPU cannot possibly have any RCU
- * callbacks in flight yet.
+ * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
-static void __cpuinit
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+static void __init
+rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
 	int i;
+	struct rcu_data *rdp = rsp->rda[cpu];
+	struct rcu_node *rnp = rcu_get_root(rsp);
+
+	/* Set up local state, ensuring consistent view of global state. */
+	spin_lock_irqsave(&rnp->lock, flags);
+	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
+	rdp->nxtlist = NULL;
+	for (i = 0; i < RCU_NEXT_SIZE; i++)
+		rdp->nxttail[i] = &rdp->nxtlist;
+	rdp->qlen = 0;
+#ifdef CONFIG_NO_HZ
+	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
+#endif /* #ifdef CONFIG_NO_HZ */
+	rdp->cpu = cpu;
+	spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
+ * Initialize a CPU's per-CPU RCU data.  Note that only one online or
+ * offline event can be happening at a given time.  Note also that we
+ * can accept some slop in the rsp->completed access due to the fact
+ * that this CPU cannot possibly have any RCU callbacks in flight yet.
+ */
+static void __cpuinit
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
+{
+	unsigned long flags;
 	long lastcomp;
 	unsigned long mask;
 	struct rcu_data *rdp = rsp->rda[cpu];
@@ -1359,17 +1432,9 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->passed_quiesc = 0;  /* We could be racing with new GP, */
 	rdp->qs_pending = 1;	 /* so set up to respond to current GP. */
 	rdp->beenonline = 1;	 /* We have now been online. */
+	rdp->preemptable = preemptable;
 	rdp->passed_quiesc_completed = lastcomp - 1;
-	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
-	rdp->nxtlist = NULL;
-	for (i = 0; i < RCU_NEXT_SIZE; i++)
-		rdp->nxttail[i] = &rdp->nxtlist;
-	rdp->qlen = 0;
 	rdp->blimit = blimit;
-#ifdef CONFIG_NO_HZ
-	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-#endif /* #ifdef CONFIG_NO_HZ */
-	rdp->cpu = cpu;
 	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
 	/*
@@ -1410,16 +1475,16 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 
 static void __cpuinit rcu_online_cpu(int cpu)
 {
-	rcu_init_percpu_data(cpu, &rcu_state);
-	rcu_init_percpu_data(cpu, &rcu_bh_state);
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+	rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
+	rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
+	rcu_preempt_init_percpu_data(cpu);
 }
 
 /*
- * Handle CPU online/offline notifcation events.
+ * Handle CPU online/offline notification events.
  */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
 
@@ -1491,6 +1556,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		rnp = rsp->level[i];
 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
 			spin_lock_init(&rnp->lock);
+			rnp->gpnum = 0;
 			rnp->qsmask = 0;
 			rnp->qsmaskinit = 0;
 			rnp->grplo = j * cpustride;
@@ -1508,16 +1574,20 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 				j / rsp->levelspread[i - 1];
 			}
 			rnp->level = i;
+			INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
+			INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
 		}
 	}
 }
 
 /*
- * Helper macro for __rcu_init().  To be used nowhere else!
- * Assigns leaf node pointers into each CPU's rcu_data structure.
+ * Helper macro for __rcu_init() and __rcu_init_preempt().  To be used
+ * nowhere else!  Assigns leaf node pointers into each CPU's rcu_data
+ * structure.
  */
-#define RCU_DATA_PTR_INIT(rsp, rcu_data) \
+#define RCU_INIT_FLAVOR(rsp, rcu_data) \
 do { \
+	rcu_init_one(rsp); \
 	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
 	j = 0; \
 	for_each_possible_cpu(i) { \
@@ -1525,32 +1595,43 @@ do { \
 		j++; \
 		per_cpu(rcu_data, i).mynode = &rnp[j]; \
 		(rsp)->rda[i] = &per_cpu(rcu_data, i); \
+		rcu_boot_init_percpu_data(i, rsp); \
 	} \
 } while (0)
 
-static struct notifier_block __cpuinitdata rcu_nb = {
-	.notifier_call	= rcu_cpu_notify,
-};
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+void __init __rcu_init_preempt(void)
+{
+	int i;			/* All used by RCU_INIT_FLAVOR(). */
+	int j;
+	struct rcu_node *rnp;
+
+	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
+}
+
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+void __init __rcu_init_preempt(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 void __init __rcu_init(void)
 {
-	int i;			/* All used by RCU_DATA_PTR_INIT(). */
+	int i;			/* All used by RCU_INIT_FLAVOR(). */
 	int j;
 	struct rcu_node *rnp;
 
-	printk(KERN_INFO "Hierarchical RCU implementation.\n");
+	rcu_bootup_announce();
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-	rcu_init_one(&rcu_state);
-	RCU_DATA_PTR_INIT(&rcu_state, rcu_data);
-	rcu_init_one(&rcu_bh_state);
-	RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
-
-	for_each_online_cpu(i)
-		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
-	/* Register notifier for non-boot CPUs */
-	register_cpu_notifier(&rcu_nb);
+	RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
+	RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
+	__rcu_init_preempt();
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
 module_param(blimit, int, 0);
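A closing note on the "Simplify rcu_pending()/rcu_check_callbacks() API" commit visible above: rcu_pending() becomes static, rcu_check_callbacks() now returns early when no flavor has work, and each flavor contributes one predicate to the OR. A user-space toy model of that dispatch, with stub predicates standing in for the real per-flavor checks (nothing below is kernel code):

	#include <stdio.h>

	/* Stub per-flavor "work pending?" predicates (assumption: only cpu 0 has work). */
	static int sched_pending(int cpu)   { return cpu == 0; }
	static int bh_pending(int cpu)      { return 0; }
	static int preempt_pending(int cpu) { return 0; }

	/* Mirrors the new rcu_pending(): the OR of the per-flavor checks. */
	static int rcu_pending_model(int cpu)
	{
		return sched_pending(cpu) || bh_pending(cpu) || preempt_pending(cpu);
	}

	/* Mirrors the early return added to rcu_check_callbacks(). */
	static void rcu_check_callbacks_model(int cpu)
	{
		if (!rcu_pending_model(cpu))
			return;			/* nothing for RCU to do */
		printf("cpu %d: raise RCU softirq\n", cpu);
	}

	int main(void)
	{
		rcu_check_callbacks_model(0);	/* has work: raises the softirq */
		rcu_check_callbacks_model(1);	/* no work: early return, no output */
		return 0;
	}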