about summary refs log tree commit diff stats
path: root/kernel/rcupdate.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2009-10-07 00:48:16 -0400
committerIngo Molnar <mingo@elte.hu>2009-10-07 02:11:20 -0400
commitd0ec774cb2599c858be9d923bb873cf6697520d8 (patch)
tree0897fd843622033a6db6ab43167e47c3236aa22d /kernel/rcupdate.c
parent322a2c100a8998158445599ea437fb556aa95b11 (diff)
rcu: Move rcu_barrier() to rcutree
Move the existing rcu_barrier() implementation to rcutree.c, consistent with the fact that the rcu_barrier() implementation is tied quite tightly to the RCU implementation. This opens the way to simplify and fix rcutree.c's rcu_barrier() implementation in a later patch. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <12548908982563-git-send-email-> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcupdate.c')
-rw-r--r--kernel/rcupdate.c120
1 file changed, 1 insertion, 119 deletions
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index e43242274466..400183346ad2 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -53,16 +53,8 @@ struct lockdep_map rcu_lock_map =
53EXPORT_SYMBOL_GPL(rcu_lock_map); 53EXPORT_SYMBOL_GPL(rcu_lock_map);
54#endif 54#endif
55 55
56static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
57static atomic_t rcu_barrier_cpu_count;
58static DEFINE_MUTEX(rcu_barrier_mutex);
59static struct completion rcu_barrier_completion;
60int rcu_scheduler_active __read_mostly; 56int rcu_scheduler_active __read_mostly;
61 57
62static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
63static struct rcu_head rcu_migrate_head[3];
64static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
65
66/* 58/*
67 * Awaken the corresponding synchronize_rcu() instance now that a 59 * Awaken the corresponding synchronize_rcu() instance now that a
68 * grace period has elapsed. 60 * grace period has elapsed.
@@ -165,120 +157,10 @@ void synchronize_rcu_bh(void)
165} 157}
166EXPORT_SYMBOL_GPL(synchronize_rcu_bh); 158EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
167 159
168static void rcu_barrier_callback(struct rcu_head *notused)
169{
170 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
171 complete(&rcu_barrier_completion);
172}
173
174/*
175 * Called with preemption disabled, and from cross-cpu IRQ context.
176 */
177static void rcu_barrier_func(void *type)
178{
179 int cpu = smp_processor_id();
180 struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
181 void (*call_rcu_func)(struct rcu_head *head,
182 void (*func)(struct rcu_head *head));
183
184 atomic_inc(&rcu_barrier_cpu_count);
185 call_rcu_func = type;
186 call_rcu_func(head, rcu_barrier_callback);
187}
188
189static inline void wait_migrated_callbacks(void)
190{
191 wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
192 smp_mb(); /* In case we didn't sleep. */
193}
194
195/*
196 * Orchestrate the specified type of RCU barrier, waiting for all
197 * RCU callbacks of the specified type to complete.
198 */
199static void _rcu_barrier(void (*call_rcu_func)(struct rcu_head *head,
200 void (*func)(struct rcu_head *head)))
201{
202 BUG_ON(in_interrupt());
203 /* Take cpucontrol mutex to protect against CPU hotplug */
204 mutex_lock(&rcu_barrier_mutex);
205 init_completion(&rcu_barrier_completion);
206 /*
207 * Initialize rcu_barrier_cpu_count to 1, then invoke
208 * rcu_barrier_func() on each CPU, so that each CPU also has
209 * incremented rcu_barrier_cpu_count. Only then is it safe to
210 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
211 * might complete its grace period before all of the other CPUs
212 * did their increment, causing this function to return too
213 * early.
214 */
215 atomic_set(&rcu_barrier_cpu_count, 1);
216 on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
217 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
218 complete(&rcu_barrier_completion);
219 wait_for_completion(&rcu_barrier_completion);
220 mutex_unlock(&rcu_barrier_mutex);
221 wait_migrated_callbacks();
222}
223
224/**
225 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
226 */
227void rcu_barrier(void)
228{
229 _rcu_barrier(call_rcu);
230}
231EXPORT_SYMBOL_GPL(rcu_barrier);
232
233/**
234 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
235 */
236void rcu_barrier_bh(void)
237{
238 _rcu_barrier(call_rcu_bh);
239}
240EXPORT_SYMBOL_GPL(rcu_barrier_bh);
241
242/**
243 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
244 */
245void rcu_barrier_sched(void)
246{
247 _rcu_barrier(call_rcu_sched);
248}
249EXPORT_SYMBOL_GPL(rcu_barrier_sched);
250
251static void rcu_migrate_callback(struct rcu_head *notused)
252{
253 if (atomic_dec_and_test(&rcu_migrate_type_count))
254 wake_up(&rcu_migrate_wq);
255}
256
257static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, 160static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
258 unsigned long action, void *hcpu) 161 unsigned long action, void *hcpu)
259{ 162{
260 rcu_cpu_notify(self, action, hcpu); 163 return rcu_cpu_notify(self, action, hcpu);
261 if (action == CPU_DYING) {
262 /*
263 * preempt_disable() in on_each_cpu() prevents stop_machine(),
264 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
265 * returns, all online cpus have queued rcu_barrier_func(),
266 * and the dead cpu(if it exist) queues rcu_migrate_callback()s.
267 *
268 * These callbacks ensure _rcu_barrier() waits for all
269 * RCU callbacks of the specified type to complete.
270 */
271 atomic_set(&rcu_migrate_type_count, 3);
272 call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
273 call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
274 call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
275 } else if (action == CPU_DOWN_PREPARE) {
276 /* Don't need to wait until next removal operation. */
277 /* rcu_migrate_head is protected by cpu_add_remove_lock */
278 wait_migrated_callbacks();
279 }
280
281 return NOTIFY_OK;
282} 164}
283 165
284void __init rcu_init(void) 166void __init rcu_init(void)