Diffstat (limited to 'kernel/rcupdate.c')
-rw-r--r--  kernel/rcupdate.c  44
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a967c9feb90a..bd5d5c8e5140 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -98,6 +98,30 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
+/**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu_bh grace
+ * period has elapsed, in other words after all currently executing rcu_bh
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ * and may be nested.
+ */
+void synchronize_rcu_bh(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_bh(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
 static void rcu_barrier_callback(struct rcu_head *notused)
 {
 	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
@@ -129,6 +153,7 @@ static void rcu_barrier_func(void *type)
 static inline void wait_migrated_callbacks(void)
 {
 	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+	smp_mb(); /* In case we didn't sleep. */
 }
 
 /*
@@ -192,9 +217,13 @@ static void rcu_migrate_callback(struct rcu_head *notused)
 	wake_up(&rcu_migrate_wq);
 }
 
+extern int rcu_cpu_notify(struct notifier_block *self,
+			  unsigned long action, void *hcpu);
+
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
+	rcu_cpu_notify(self, action, hcpu);
 	if (action == CPU_DYING) {
 		/*
 		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
@@ -209,7 +238,8 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
 		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
 		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
-	} else if (action == CPU_POST_DEAD) {
+	} else if (action == CPU_DOWN_PREPARE) {
+		/* Don't need to wait until next removal operation. */
 		/* rcu_migrate_head is protected by cpu_add_remove_lock */
 		wait_migrated_callbacks();
 	}
@@ -219,8 +249,18 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 
 void __init rcu_init(void)
 {
+	int i;
+
 	__rcu_init();
-	hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
+	cpu_notifier(rcu_barrier_cpu_hotplug, 0);
+
+	/*
+	 * We don't need protection against CPU-hotplug here because
+	 * this is called early in boot, before either interrupts
+	 * or the scheduler are operational.
+	 */
+	for_each_online_cpu(i)
+		rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
 }
 
 void rcu_scheduler_starting(void)
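
The synchronize_rcu_bh() function added above follows the usual RCU update pattern, just for the _bh flavor: readers bracket their accesses with rcu_read_lock_bh()/rcu_read_unlock_bh(), and an updater publishes a new version of the data and then calls synchronize_rcu_bh() before freeing the old one. The sketch below is not part of the patch; the my_cfg structure and the my_cfg_read()/my_cfg_update() helpers are hypothetical names for illustration, while the RCU, allocation, and error-code calls are the standard kernel APIs of this era.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_cfg {				/* hypothetical RCU-protected data */
	int value;
};

static struct my_cfg *cfg_ptr;		/* published by my_cfg_update() */

/* Reader, e.g. from softirq context: hence the _bh read-side primitives. */
static int my_cfg_read(void)
{
	struct my_cfg *p;
	int v = 0;

	rcu_read_lock_bh();
	p = rcu_dereference(cfg_ptr);
	if (p)
		v = p->value;
	rcu_read_unlock_bh();
	return v;
}

/*
 * Updater: publish a new version, wait out pre-existing rcu_bh readers,
 * then free the old version.  Concurrent updaters must serialize among
 * themselves, e.g. with a mutex (omitted here).
 */
static int my_cfg_update(int value)
{
	struct my_cfg *new_cfg, *old_cfg;

	new_cfg = kmalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg)
		return -ENOMEM;
	new_cfg->value = value;

	old_cfg = cfg_ptr;
	rcu_assign_pointer(cfg_ptr, new_cfg);

	/* Returns after all pre-existing rcu_bh read-side sections finish. */
	synchronize_rcu_bh();
	kfree(old_cfg);
	return 0;
}

An updater that cannot block would instead use call_rcu_bh(), as the patch itself does for the migration callbacks, with a callback that frees the old structure once the grace period has elapsed.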
