diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-11 16:20:18 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-11 16:20:18 -0400 |
| commit | eee2775d9924b22643bd89b2e568cc5eed7e8a04 (patch) | |
| tree | 095ad7851895c5d39596f3ff7ee1e078235a2501 /kernel/rcupdate.c | |
| parent | 53e16fbd30005905168d9b75555fdc7e0a2eac58 (diff) | |
| parent | 7db905e636f08ea5bc9825c1f73d77802e8ccad5 (diff) | |
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (28 commits)
rcu: Move end of special early-boot RCU operation earlier
rcu: Changes from reviews: avoid casts, fix/add warnings, improve comments
rcu: Create rcutree plugins to handle hotplug CPU for multi-level trees
rcu: Remove lockdep annotations from RCU's _notrace() API members
rcu: Add #ifdef to suppress __rcu_offline_cpu() warning in !HOTPLUG_CPU builds
rcu: Add CPU-offline processing for single-node configurations
rcu: Add "notrace" to RCU function headers used by ftrace
rcu: Remove CONFIG_PREEMPT_RCU
rcu: Merge preemptable-RCU functionality into hierarchical RCU
rcu: Simplify rcu_pending()/rcu_check_callbacks() API
rcu: Use debugfs_remove_recursive() to simplify code.
rcu: Merge per-RCU-flavor initialization into pre-existing macro
rcu: Fix online/offline indication for rcudata.csv trace file
rcu: Consolidate sparse and lockdep declarations in include/linux/rcupdate.h
rcu: Renamings to increase RCU clarity
rcu: Move private definitions from include/linux/rcutree.h to kernel/rcutree.h
rcu: Expunge lingering references to CONFIG_CLASSIC_RCU, optimize on !SMP
rcu: Delay rcu_barrier() wait until beginning of next CPU-hotunplug operation.
rcu: Fix typo in rcu_irq_exit() comment header
rcu: Make rcupreempt_trace.c look at offline CPUs
...
Diffstat (limited to 'kernel/rcupdate.c')
| -rw-r--r-- | kernel/rcupdate.c | 44 |
1 files changed, 42 insertions, 2 deletions
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index a967c9feb90a..bd5d5c8e5140 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
| @@ -98,6 +98,30 @@ void synchronize_rcu(void) | |||
| 98 | } | 98 | } |
| 99 | EXPORT_SYMBOL_GPL(synchronize_rcu); | 99 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
| 100 | 100 | ||
| 101 | /** | ||
| 102 | * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. | ||
| 103 | * | ||
| 104 | * Control will return to the caller some time after a full rcu_bh grace | ||
| 105 | * period has elapsed, in other words after all currently executing rcu_bh | ||
| 106 | * read-side critical sections have completed. RCU read-side critical | ||
| 107 | * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), | ||
| 108 | * and may be nested. | ||
| 109 | */ | ||
| 110 | void synchronize_rcu_bh(void) | ||
| 111 | { | ||
| 112 | struct rcu_synchronize rcu; | ||
| 113 | |||
| 114 | if (rcu_blocking_is_gp()) | ||
| 115 | return; | ||
| 116 | |||
| 117 | init_completion(&rcu.completion); | ||
| 118 | /* Will wake me after RCU finished. */ | ||
| 119 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | ||
| 120 | /* Wait for it. */ | ||
| 121 | wait_for_completion(&rcu.completion); | ||
| 122 | } | ||
| 123 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
| 124 | |||
| 101 | static void rcu_barrier_callback(struct rcu_head *notused) | 125 | static void rcu_barrier_callback(struct rcu_head *notused) |
| 102 | { | 126 | { |
| 103 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | 127 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) |
| @@ -129,6 +153,7 @@ static void rcu_barrier_func(void *type) | |||
| 129 | static inline void wait_migrated_callbacks(void) | 153 | static inline void wait_migrated_callbacks(void) |
| 130 | { | 154 | { |
| 131 | wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); | 155 | wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); |
| 156 | smp_mb(); /* In case we didn't sleep. */ | ||
| 132 | } | 157 | } |
| 133 | 158 | ||
| 134 | /* | 159 | /* |
| @@ -192,9 +217,13 @@ static void rcu_migrate_callback(struct rcu_head *notused) | |||
| 192 | wake_up(&rcu_migrate_wq); | 217 | wake_up(&rcu_migrate_wq); |
| 193 | } | 218 | } |
| 194 | 219 | ||
| 220 | extern int rcu_cpu_notify(struct notifier_block *self, | ||
| 221 | unsigned long action, void *hcpu); | ||
| 222 | |||
| 195 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, | 223 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, |
| 196 | unsigned long action, void *hcpu) | 224 | unsigned long action, void *hcpu) |
| 197 | { | 225 | { |
| 226 | rcu_cpu_notify(self, action, hcpu); | ||
| 198 | if (action == CPU_DYING) { | 227 | if (action == CPU_DYING) { |
| 199 | /* | 228 | /* |
| 200 | * preempt_disable() in on_each_cpu() prevents stop_machine(), | 229 | * preempt_disable() in on_each_cpu() prevents stop_machine(), |
| @@ -209,7 +238,8 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, | |||
| 209 | call_rcu_bh(rcu_migrate_head, rcu_migrate_callback); | 238 | call_rcu_bh(rcu_migrate_head, rcu_migrate_callback); |
| 210 | call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback); | 239 | call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback); |
| 211 | call_rcu(rcu_migrate_head + 2, rcu_migrate_callback); | 240 | call_rcu(rcu_migrate_head + 2, rcu_migrate_callback); |
| 212 | } else if (action == CPU_POST_DEAD) { | 241 | } else if (action == CPU_DOWN_PREPARE) { |
| 242 | /* Don't need to wait until next removal operation. */ | ||
| 213 | /* rcu_migrate_head is protected by cpu_add_remove_lock */ | 243 | /* rcu_migrate_head is protected by cpu_add_remove_lock */ |
| 214 | wait_migrated_callbacks(); | 244 | wait_migrated_callbacks(); |
| 215 | } | 245 | } |
| @@ -219,8 +249,18 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, | |||
| 219 | 249 | ||
| 220 | void __init rcu_init(void) | 250 | void __init rcu_init(void) |
| 221 | { | 251 | { |
| 252 | int i; | ||
| 253 | |||
| 222 | __rcu_init(); | 254 | __rcu_init(); |
| 223 | hotcpu_notifier(rcu_barrier_cpu_hotplug, 0); | 255 | cpu_notifier(rcu_barrier_cpu_hotplug, 0); |
| 256 | |||
| 257 | /* | ||
| 258 | * We don't need protection against CPU-hotplug here because | ||
| 259 | * this is called early in boot, before either interrupts | ||
| 260 | * or the scheduler are operational. | ||
| 261 | */ | ||
| 262 | for_each_online_cpu(i) | ||
| 263 | rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i); | ||
| 224 | } | 264 | } |
| 225 | 265 | ||
| 226 | void rcu_scheduler_starting(void) | 266 | void rcu_scheduler_starting(void) |
