about summary refs log tree commit diff stats
path: root/kernel/rcupdate.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/rcupdate.c')
-rw-r--r--  kernel/rcupdate.c  54
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index d92a76a881aa..a967c9feb90a 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,6 +44,7 @@
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/kernel_stat.h>
47 48
48enum rcu_barrier { 49enum rcu_barrier {
49 RCU_BARRIER_STD, 50 RCU_BARRIER_STD,
@@ -55,6 +56,11 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
55static atomic_t rcu_barrier_cpu_count; 56static atomic_t rcu_barrier_cpu_count;
56static DEFINE_MUTEX(rcu_barrier_mutex); 57static DEFINE_MUTEX(rcu_barrier_mutex);
57static struct completion rcu_barrier_completion; 58static struct completion rcu_barrier_completion;
59int rcu_scheduler_active __read_mostly;
60
61static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
62static struct rcu_head rcu_migrate_head[3];
63static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
58 64
59/* 65/*
60 * Awaken the corresponding synchronize_rcu() instance now that a 66 * Awaken the corresponding synchronize_rcu() instance now that a
@@ -80,6 +86,10 @@ void wakeme_after_rcu(struct rcu_head *head)
80void synchronize_rcu(void) 86void synchronize_rcu(void)
81{ 87{
82 struct rcu_synchronize rcu; 88 struct rcu_synchronize rcu;
89
90 if (rcu_blocking_is_gp())
91 return;
92
83 init_completion(&rcu.completion); 93 init_completion(&rcu.completion);
84 /* Will wake me after RCU finished. */ 94 /* Will wake me after RCU finished. */
85 call_rcu(&rcu.head, wakeme_after_rcu); 95 call_rcu(&rcu.head, wakeme_after_rcu);
@@ -116,6 +126,11 @@ static void rcu_barrier_func(void *type)
116 } 126 }
117} 127}
118 128
129static inline void wait_migrated_callbacks(void)
130{
131 wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
132}
133
119/* 134/*
120 * Orchestrate the specified type of RCU barrier, waiting for all 135 * Orchestrate the specified type of RCU barrier, waiting for all
121 * RCU callbacks of the specified type to complete. 136 * RCU callbacks of the specified type to complete.
@@ -141,6 +156,7 @@ static void _rcu_barrier(enum rcu_barrier type)
141 complete(&rcu_barrier_completion); 156 complete(&rcu_barrier_completion);
142 wait_for_completion(&rcu_barrier_completion); 157 wait_for_completion(&rcu_barrier_completion);
143 mutex_unlock(&rcu_barrier_mutex); 158 mutex_unlock(&rcu_barrier_mutex);
159 wait_migrated_callbacks();
144} 160}
145 161
146/** 162/**
@@ -170,8 +186,46 @@ void rcu_barrier_sched(void)
170} 186}
171EXPORT_SYMBOL_GPL(rcu_barrier_sched); 187EXPORT_SYMBOL_GPL(rcu_barrier_sched);
172 188
189static void rcu_migrate_callback(struct rcu_head *notused)
190{
191 if (atomic_dec_and_test(&rcu_migrate_type_count))
192 wake_up(&rcu_migrate_wq);
193}
194
195static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
196 unsigned long action, void *hcpu)
197{
198 if (action == CPU_DYING) {
199 /*
200 * preempt_disable() in on_each_cpu() prevents stop_machine(),
201 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
202 * returns, all online cpus have queued rcu_barrier_func(),
203 * and the dead cpu(if it exist) queues rcu_migrate_callback()s.
204 *
205 * These callbacks ensure _rcu_barrier() waits for all
206 * RCU callbacks of the specified type to complete.
207 */
208 atomic_set(&rcu_migrate_type_count, 3);
209 call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
210 call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
211 call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
212 } else if (action == CPU_POST_DEAD) {
213 /* rcu_migrate_head is protected by cpu_add_remove_lock */
214 wait_migrated_callbacks();
215 }
216
217 return NOTIFY_OK;
218}
219
173void __init rcu_init(void) 220void __init rcu_init(void)
174{ 221{
175 __rcu_init(); 222 __rcu_init();
223 hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
176} 224}
177 225
226void rcu_scheduler_starting(void)
227{
228 WARN_ON(num_online_cpus() != 1);
229 WARN_ON(nr_context_switches() > 0);
230 rcu_scheduler_active = 1;
231}