author    Paul Mackerras <paulus@samba.org>  2008-01-30 19:25:51 -0500
committer Paul Mackerras <paulus@samba.org>  2008-01-30 19:25:51 -0500
commit    bd45ac0c5daae35e7c71138172e63df5cf644cf6 (patch)
tree      5eb5a599bf6a9d7a8a34e802db932aa9e9555de4 /kernel/rcupdate.c
parent    4eece4ccf997c0e6d8fdad3d842e37b16b8d705f (diff)
parent    5bdeae46be6dfe9efa44a548bd622af325f4bdb4 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'kernel/rcupdate.c')
-rw-r--r--  kernel/rcupdate.c  576
1 file changed, 35 insertions(+), 541 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f2c1a04e9b18..760dfc233a00 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -15,7 +15,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * Copyright (C) IBM Corporation, 2001
+ * Copyright IBM Corporation, 2001
  *
  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  *	    Manfred Spraul <manfred@colorfullife.com>
@@ -35,165 +35,57 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/smp.h>
-#include <linux/rcupdate.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <asm/atomic.h>
 #include <linux/bitops.h>
-#include <linux/module.h>
 #include <linux/completion.h>
-#include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/mutex.h>
+#include <linux/module.h>
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key rcu_lock_key;
-struct lockdep_map rcu_lock_map =
-	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
-
-EXPORT_SYMBOL_GPL(rcu_lock_map);
-#endif
-
-/* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_ctrlblk = {
-	.cur = -300,
-	.completed = -300,
-	.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
-	.cpumask = CPU_MASK_NONE,
-};
-static struct rcu_ctrlblk rcu_bh_ctrlblk = {
-	.cur = -300,
-	.completed = -300,
-	.lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
-	.cpumask = CPU_MASK_NONE,
+struct rcu_synchronize {
+	struct rcu_head head;
+	struct completion completion;
 };
 
-DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
-DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
-
-/* Fake initialization required by compiler */
-static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int blimit = 10;
-static int qhimark = 10000;
-static int qlowmark = 100;
-
+static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
 
-#ifdef CONFIG_SMP
-static void force_quiescent_state(struct rcu_data *rdp,
-			struct rcu_ctrlblk *rcp)
-{
-	int cpu;
-	cpumask_t cpumask;
-	set_need_resched();
-	if (unlikely(!rcp->signaled)) {
-		rcp->signaled = 1;
-		/*
-		 * Don't send IPI to itself. With irqs disabled,
-		 * rdp->cpu is the current cpu.
-		 */
-		cpumask = rcp->cpumask;
-		cpu_clear(rdp->cpu, cpumask);
-		for_each_cpu_mask(cpu, cpumask)
-			smp_send_reschedule(cpu);
-	}
-}
-#else
-static inline void force_quiescent_state(struct rcu_data *rdp,
-			struct rcu_ctrlblk *rcp)
+/* Because of FASTCALL declaration of complete, we use this wrapper */
+static void wakeme_after_rcu(struct rcu_head *head)
 {
-	set_need_resched();
+	struct rcu_synchronize *rcu;
+
+	rcu = container_of(head, struct rcu_synchronize, head);
+	complete(&rcu->completion);
 }
-#endif
 
 /**
- * call_rcu - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
+ * synchronize_rcu - wait until a grace period has elapsed.
  *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
  * read-side critical sections have completed. RCU read-side critical
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
  */
-void fastcall call_rcu(struct rcu_head *head,
-				void (*func)(struct rcu_head *rcu))
-{
-	unsigned long flags;
-	struct rcu_data *rdp;
-
-	head->func = func;
-	head->next = NULL;
-	local_irq_save(flags);
-	rdp = &__get_cpu_var(rcu_data);
-	*rdp->nxttail = head;
-	rdp->nxttail = &head->next;
-	if (unlikely(++rdp->qlen > qhimark)) {
-		rdp->blimit = INT_MAX;
-		force_quiescent_state(rdp, &rcu_ctrlblk);
-	}
-	local_irq_restore(flags);
-}
-
-/**
- * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by rcu_read_lock() and
- * rcu_read_unlock(), * if in interrupt context or rcu_read_lock_bh()
- * and rcu_read_unlock_bh(), if in process context. These may be nested.
- */
-void fastcall call_rcu_bh(struct rcu_head *head,
-				void (*func)(struct rcu_head *rcu))
+void synchronize_rcu(void)
 {
-	unsigned long flags;
-	struct rcu_data *rdp;
-
-	head->func = func;
-	head->next = NULL;
-	local_irq_save(flags);
-	rdp = &__get_cpu_var(rcu_bh_data);
-	*rdp->nxttail = head;
-	rdp->nxttail = &head->next;
-
-	if (unlikely(++rdp->qlen > qhimark)) {
-		rdp->blimit = INT_MAX;
-		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
-	}
-
-	local_irq_restore(flags);
-}
+	struct rcu_synchronize rcu;
 
-/*
- * Return the number of RCU batches processed thus far. Useful
- * for debug and statistics.
- */
-long rcu_batches_completed(void)
-{
-	return rcu_ctrlblk.completed;
-}
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished */
+	call_rcu(&rcu.head, wakeme_after_rcu);
 
-/*
- * Return the number of RCU batches processed thus far. Useful
- * for debug and statistics.
- */
-long rcu_batches_completed_bh(void)
-{
-	return rcu_bh_ctrlblk.completed;
+	/* Wait for it */
+	wait_for_completion(&rcu.completion);
 }
+EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 static void rcu_barrier_callback(struct rcu_head *notused)
 {
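The synchronize_rcu() retained above is deliberately simple: it queues a dummy callback with call_rcu() and sleeps on a completion that wakeme_after_rcu() fires once a full grace period has elapsed. A typical updater built on top of it looks roughly like the following sketch (illustrative only, not part of this commit; struct foo, update_foo() and the global pointer are hypothetical, and the usual slab/rcupdate includes are assumed):

/* Hypothetical updater using synchronize_rcu(); illustrative only. */
struct foo {
	int data;
};

static struct foo *global_foo;	/* read under rcu_read_lock() */

static void update_foo(struct foo *new)
{
	struct foo *old = global_foo;

	rcu_assign_pointer(global_foo, new);	/* publish the new version */
	synchronize_rcu();			/* wait out all readers of 'old' */
	kfree(old);				/* no reader can still hold it */
}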
@@ -207,10 +99,8 @@ static void rcu_barrier_callback(struct rcu_head *notused)
 static void rcu_barrier_func(void *notused)
 {
 	int cpu = smp_processor_id();
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-	struct rcu_head *head;
+	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
 
-	head = &rdp->barrier;
 	atomic_inc(&rcu_barrier_cpu_count);
 	call_rcu(head, rcu_barrier_callback);
 }
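rcu_barrier_func() now draws its rcu_head from the file-local per-CPU rcu_barrier_head array added above, rather than from the classic implementation's struct rcu_data, which is what lets rcu_barrier() stay in this file while the rest of the machinery moves out. The barrier itself is typically used on module-unload paths, as in this hedged sketch (the exit function and remove_all_entries() are hypothetical):

/* Hypothetical module exit path; illustrative only. */
static void __exit example_exit(void)
{
	remove_all_entries();	/* hypothetical: frees each entry via call_rcu() */

	/*
	 * Wait until every callback queued above has actually run,
	 * so no callback can fire after the module text is gone.
	 */
	rcu_barrier();
}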
@@ -225,420 +115,24 @@ void rcu_barrier(void)
 	mutex_lock(&rcu_barrier_mutex);
 	init_completion(&rcu_barrier_completion);
 	atomic_set(&rcu_barrier_cpu_count, 0);
+	/*
+	 * The queueing of callbacks in all CPUs must be atomic with
+	 * respect to RCU, otherwise one CPU may queue a callback,
+	 * wait for a grace period, decrement barrier count and call
+	 * complete(), while other CPUs have not yet queued anything.
+	 * So, we need to make sure that grace periods cannot complete
+	 * until all the callbacks are queued.
+	 */
+	rcu_read_lock();
 	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	rcu_read_unlock();
 	wait_for_completion(&rcu_barrier_completion);
 	mutex_unlock(&rcu_barrier_mutex);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
-/*
- * Invoke the completed RCU callbacks. They are expected to be in
- * a per-cpu list.
- */
-static void rcu_do_batch(struct rcu_data *rdp)
-{
-	struct rcu_head *next, *list;
-	int count = 0;
-
-	list = rdp->donelist;
-	while (list) {
-		next = list->next;
-		prefetch(next);
-		list->func(list);
-		list = next;
-		if (++count >= rdp->blimit)
-			break;
-	}
-	rdp->donelist = list;
-
-	local_irq_disable();
-	rdp->qlen -= count;
-	local_irq_enable();
-	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
-		rdp->blimit = blimit;
-
-	if (!rdp->donelist)
-		rdp->donetail = &rdp->donelist;
-	else
-		tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
-}
-
-/*
- * Grace period handling:
- * The grace period handling consists out of two steps:
- * - A new grace period is started.
- *   This is done by rcu_start_batch. The start is not broadcasted to
- *   all cpus, they must pick this up by comparing rcp->cur with
- *   rdp->quiescbatch. All cpus are recorded in the
- *   rcu_ctrlblk.cpumask bitmap.
- * - All cpus must go through a quiescent state.
- *   Since the start of the grace period is not broadcasted, at least two
- *   calls to rcu_check_quiescent_state are required:
- *   The first call just notices that a new grace period is running. The
- *   following calls check if there was a quiescent state since the beginning
- *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
- *   the bitmap is empty, then the grace period is completed.
- *   rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
- *   period (if necessary).
- */
-/*
- * Register a new batch of callbacks, and start it up if there is currently no
- * active batch and the batch to be registered has not already occurred.
- * Caller must hold rcu_ctrlblk.lock.
- */
-static void rcu_start_batch(struct rcu_ctrlblk *rcp)
-{
-	if (rcp->next_pending &&
-			rcp->completed == rcp->cur) {
-		rcp->next_pending = 0;
-		/*
-		 * next_pending == 0 must be visible in
-		 * __rcu_process_callbacks() before it can see new value of cur.
-		 */
-		smp_wmb();
-		rcp->cur++;
-
-		/*
-		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
-		 * Barrier  Otherwise it can cause tickless idle CPUs to be
-		 * included in rcp->cpumask, which will extend graceperiods
-		 * unnecessarily.
-		 */
-		smp_mb();
-		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
-
-		rcp->signaled = 0;
-	}
-}
-
-/*
- * cpu went through a quiescent state since the beginning of the grace period.
- * Clear it from the cpu mask and complete the grace period if it was the last
- * cpu. Start another grace period if someone has further entries pending
- */
-static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
-{
-	cpu_clear(cpu, rcp->cpumask);
-	if (cpus_empty(rcp->cpumask)) {
-		/* batch completed ! */
-		rcp->completed = rcp->cur;
-		rcu_start_batch(rcp);
-	}
-}
-
-/*
- * Check if the cpu has gone through a quiescent state (say context
- * switch). If so and if it already hasn't done so in this RCU
- * quiescent cycle, then indicate that it has done so.
- */
-static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
-					struct rcu_data *rdp)
-{
-	if (rdp->quiescbatch != rcp->cur) {
-		/* start new grace period: */
-		rdp->qs_pending = 1;
-		rdp->passed_quiesc = 0;
-		rdp->quiescbatch = rcp->cur;
-		return;
-	}
-
-	/* Grace period already completed for this cpu?
-	 * qs_pending is checked instead of the actual bitmap to avoid
-	 * cacheline trashing.
-	 */
-	if (!rdp->qs_pending)
-		return;
-
-	/*
-	 * Was there a quiescent state since the beginning of the grace
-	 * period? If no, then exit and wait for the next call.
-	 */
-	if (!rdp->passed_quiesc)
-		return;
-	rdp->qs_pending = 0;
-
-	spin_lock(&rcp->lock);
-	/*
-	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
-	 * during cpu startup. Ignore the quiescent state.
-	 */
-	if (likely(rdp->quiescbatch == rcp->cur))
-		cpu_quiet(rdp->cpu, rcp);
-
-	spin_unlock(&rcp->lock);
-}
-
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
- * locking requirements, the list it's pulling from has to belong to a cpu
- * which is dead and hence not processing interrupts.
- */
-static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
-				struct rcu_head **tail)
-{
-	local_irq_disable();
-	*this_rdp->nxttail = list;
-	if (list)
-		this_rdp->nxttail = tail;
-	local_irq_enable();
-}
-
-static void __rcu_offline_cpu(struct rcu_data *this_rdp,
-				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
-{
-	/* if the cpu going offline owns the grace period
-	 * we can block indefinitely waiting for it, so flush
-	 * it here
-	 */
-	spin_lock_bh(&rcp->lock);
-	if (rcp->cur != rcp->completed)
-		cpu_quiet(rdp->cpu, rcp);
-	spin_unlock_bh(&rcp->lock);
-	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
-	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
-	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
-}
-
-static void rcu_offline_cpu(int cpu)
-{
-	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
-	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
-
-	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
-			  &per_cpu(rcu_data, cpu));
-	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
-			  &per_cpu(rcu_bh_data, cpu));
-	put_cpu_var(rcu_data);
-	put_cpu_var(rcu_bh_data);
-	tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
-}
-
-#else
-
-static void rcu_offline_cpu(int cpu)
-{
-}
-
-#endif
-
-/*
- * This does the RCU processing work from tasklet context.
- */
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
-					struct rcu_data *rdp)
-{
-	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
-		*rdp->donetail = rdp->curlist;
-		rdp->donetail = rdp->curtail;
-		rdp->curlist = NULL;
-		rdp->curtail = &rdp->curlist;
-	}
-
-	if (rdp->nxtlist && !rdp->curlist) {
-		local_irq_disable();
-		rdp->curlist = rdp->nxtlist;
-		rdp->curtail = rdp->nxttail;
-		rdp->nxtlist = NULL;
-		rdp->nxttail = &rdp->nxtlist;
-		local_irq_enable();
-
-		/*
-		 * start the next batch of callbacks
-		 */
-
-		/* determine batch number */
-		rdp->batch = rcp->cur + 1;
-		/* see the comment and corresponding wmb() in
-		 * the rcu_start_batch()
-		 */
-		smp_rmb();
-
-		if (!rcp->next_pending) {
-			/* and start it/schedule start if it's a new batch */
-			spin_lock(&rcp->lock);
-			rcp->next_pending = 1;
-			rcu_start_batch(rcp);
-			spin_unlock(&rcp->lock);
-		}
-	}
-
-	rcu_check_quiescent_state(rcp, rdp);
-	if (rdp->donelist)
-		rcu_do_batch(rdp);
-}
-
-static void rcu_process_callbacks(unsigned long unused)
-{
-	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
-	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
-}
-
-static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
-{
-	/* This cpu has pending rcu entries and the grace period
-	 * for them has completed.
-	 */
-	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
-		return 1;
-
-	/* This cpu has no pending entries, but there are new entries */
-	if (!rdp->curlist && rdp->nxtlist)
-		return 1;
-
-	/* This cpu has finished callbacks to invoke */
-	if (rdp->donelist)
-		return 1;
-
-	/* The rcu core waits for a quiescent state from the cpu */
-	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
-		return 1;
-
-	/* nothing to do */
-	return 0;
-}
-
-/*
- * Check to see if there is any immediate RCU-related work to be done
- * by the current CPU, returning 1 if so.  This function is part of the
- * RCU implementation; it is -not- an exported member of the RCU API.
- */
-int rcu_pending(int cpu)
-{
-	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
-		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
-}
-
-/*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so.  This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
- */
-int rcu_needs_cpu(int cpu)
-{
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
-
-	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
-}
-
-void rcu_check_callbacks(int cpu, int user)
-{
-	if (user ||
-	    (idle_cpu(cpu) && !in_softirq() &&
-				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
-		rcu_qsctr_inc(cpu);
-		rcu_bh_qsctr_inc(cpu);
-	} else if (!in_softirq())
-		rcu_bh_qsctr_inc(cpu);
-	tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
-}
-
-static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
-						struct rcu_data *rdp)
-{
-	memset(rdp, 0, sizeof(*rdp));
-	rdp->curtail = &rdp->curlist;
-	rdp->nxttail = &rdp->nxtlist;
-	rdp->donetail = &rdp->donelist;
-	rdp->quiescbatch = rcp->completed;
-	rdp->qs_pending = 0;
-	rdp->cpu = cpu;
-	rdp->blimit = blimit;
-}
-
-static void __cpuinit rcu_online_cpu(int cpu)
-{
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
-
-	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
-	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
-	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
-}
-
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
-{
-	long cpu = (long)hcpu;
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		rcu_online_cpu(cpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		rcu_offline_cpu(cpu);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata rcu_nb = {
-	.notifier_call	= rcu_cpu_notify,
-};
-
-/*
- * Initializes rcu mechanism.  Assumed to be called early.
- * That is before local timer(SMP) or jiffie timer (uniproc) is setup.
- * Note that rcu_qsctr and friends are implicitly
- * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
- */
 void __init rcu_init(void)
 {
-	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
-			(void *)(long)smp_processor_id());
-	/* Register notifier for non-boot CPUs */
-	register_cpu_notifier(&rcu_nb);
-}
-
-struct rcu_synchronize {
-	struct rcu_head head;
-	struct completion completion;
-};
-
-/* Because of FASTCALL declaration of complete, we use this wrapper */
-static void wakeme_after_rcu(struct rcu_head *head)
-{
-	struct rcu_synchronize *rcu;
-
-	rcu = container_of(head, struct rcu_synchronize, head);
-	complete(&rcu->completion);
+	__rcu_init();
 }
 
-/**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- *
- * If your read-side code is not protected by rcu_read_lock(), do -not-
- * use synchronize_rcu().
- */
-void synchronize_rcu(void)
-{
-	struct rcu_synchronize rcu;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished */
-	call_rcu(&rcu.head, wakeme_after_rcu);
-
-	/* Wait for it */
-	wait_for_completion(&rcu.completion);
-}
-
-module_param(blimit, int, 0);
-module_param(qhimark, int, 0);
-module_param(qlowmark, int, 0);
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-EXPORT_SYMBOL_GPL(call_rcu);
-EXPORT_SYMBOL_GPL(call_rcu_bh);
-EXPORT_SYMBOL_GPL(synchronize_rcu);
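All of the machinery removed above (callback batching, grace-period bookkeeping, CPU hotplug handling, and the call_rcu()/call_rcu_bh() entry points) moves behind the __rcu_init() interface into the underlying RCU implementation, leaving only the implementation-independent pieces in rcupdate.c. For reference, the deferred-free pattern that the removed call_rcu() documentation describes looks roughly like this (hedged sketch; struct item and its fields are hypothetical):

/* Hypothetical deferred free via call_rcu(); illustrative only. */
struct item {
	struct list_head list;
	struct rcu_head rcu;
};

static void item_free_rcu(struct rcu_head *head)
{
	struct item *it = container_of(head, struct item, rcu);

	kfree(it);	/* runs only after a full grace period */
}

static void item_del(struct item *it)
{
	list_del_rcu(&it->list);		/* unlink; readers may still walk it */
	call_rcu(&it->rcu, item_free_rcu);	/* reclaim later, without blocking */
}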