aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcupdate.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/rcupdate.c')
-rw-r--r--  kernel/rcupdate.c  42
1 file changed, 42 insertions, 0 deletions
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index cae8a059cf47..a967c9feb90a 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex);
58static struct completion rcu_barrier_completion; 58static struct completion rcu_barrier_completion;
59int rcu_scheduler_active __read_mostly; 59int rcu_scheduler_active __read_mostly;
60 60
61static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
62static struct rcu_head rcu_migrate_head[3];
63static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
64
61/* 65/*
62 * Awaken the corresponding synchronize_rcu() instance now that a 66 * Awaken the corresponding synchronize_rcu() instance now that a
63 * grace period has elapsed. 67 * grace period has elapsed.
@@ -122,6 +126,11 @@ static void rcu_barrier_func(void *type)
122 } 126 }
123} 127}
124 128
129static inline void wait_migrated_callbacks(void)
130{
131 wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
132}
133
125/* 134/*
126 * Orchestrate the specified type of RCU barrier, waiting for all 135 * Orchestrate the specified type of RCU barrier, waiting for all
127 * RCU callbacks of the specified type to complete. 136 * RCU callbacks of the specified type to complete.
@@ -147,6 +156,7 @@ static void _rcu_barrier(enum rcu_barrier type)
147 complete(&rcu_barrier_completion); 156 complete(&rcu_barrier_completion);
148 wait_for_completion(&rcu_barrier_completion); 157 wait_for_completion(&rcu_barrier_completion);
149 mutex_unlock(&rcu_barrier_mutex); 158 mutex_unlock(&rcu_barrier_mutex);
159 wait_migrated_callbacks();
150} 160}
151 161
152/** 162/**
@@ -176,9 +186,41 @@ void rcu_barrier_sched(void)
176} 186}
177EXPORT_SYMBOL_GPL(rcu_barrier_sched); 187EXPORT_SYMBOL_GPL(rcu_barrier_sched);
178 188
189static void rcu_migrate_callback(struct rcu_head *notused)
190{
191 if (atomic_dec_and_test(&rcu_migrate_type_count))
192 wake_up(&rcu_migrate_wq);
193}
194
195static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
196 unsigned long action, void *hcpu)
197{
198 if (action == CPU_DYING) {
199 /*
200 * preempt_disable() in on_each_cpu() prevents stop_machine(),
201 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
202 * returns, all online cpus have queued rcu_barrier_func(),
 203 * and the dead cpu (if it exists) queues rcu_migrate_callback()s.
204 *
205 * These callbacks ensure _rcu_barrier() waits for all
206 * RCU callbacks of the specified type to complete.
207 */
208 atomic_set(&rcu_migrate_type_count, 3);
209 call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
210 call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
211 call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
212 } else if (action == CPU_POST_DEAD) {
213 /* rcu_migrate_head is protected by cpu_add_remove_lock */
214 wait_migrated_callbacks();
215 }
216
217 return NOTIFY_OK;
218}
219
179void __init rcu_init(void) 220void __init rcu_init(void)
180{ 221{
181 __rcu_init(); 222 __rcu_init();
223 hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
182} 224}
183 225
184void rcu_scheduler_starting(void) 226void rcu_scheduler_starting(void)