aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/rcupdate.h23
-rw-r--r--kernel/rcupdate.c16
2 files changed, 34 insertions, 5 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4d747433916b..fd276adf0fd5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -157,9 +157,9 @@ static inline int rcu_pending(int cpu)
157/** 157/**
158 * rcu_read_lock - mark the beginning of an RCU read-side critical section. 158 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
159 * 159 *
160 * When synchronize_kernel() is invoked on one CPU while other CPUs 160 * When synchronize_rcu() is invoked on one CPU while other CPUs
161 * are within RCU read-side critical sections, then the 161 * are within RCU read-side critical sections, then the
162 * synchronize_kernel() is guaranteed to block until after all the other 162 * synchronize_rcu() is guaranteed to block until after all the other
163 * CPUs exit their critical sections. Similarly, if call_rcu() is invoked 163 * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
164 * on one CPU while other CPUs are within RCU read-side critical 164 * on one CPU while other CPUs are within RCU read-side critical
165 * sections, invocation of the corresponding RCU callback is deferred 165 * sections, invocation of the corresponding RCU callback is deferred
@@ -256,6 +256,21 @@ static inline int rcu_pending(int cpu)
256 (p) = (v); \ 256 (p) = (v); \
257 }) 257 })
258 258
259/**
260 * synchronize_sched - block until all CPUs have exited any non-preemptive
261 * kernel code sequences.
262 *
263 * This means that all preempt_disable code sequences, including NMI and
264 * hardware-interrupt handlers, in progress on entry will have completed
265 * before this primitive returns. However, this does not guarantee that
 266 * softirq handlers will have completed, since in some kernels these handlers can run in process context, and can block.
267 *
268 * This primitive provides the guarantees made by the (deprecated)
269 * synchronize_kernel() API. In contrast, synchronize_rcu() only
270 * guarantees that rcu_read_lock() sections will have completed.
271 */
272#define synchronize_sched() synchronize_rcu()
273
259extern void rcu_init(void); 274extern void rcu_init(void);
260extern void rcu_check_callbacks(int cpu, int user); 275extern void rcu_check_callbacks(int cpu, int user);
261extern void rcu_restart_cpu(int cpu); 276extern void rcu_restart_cpu(int cpu);
@@ -265,7 +280,9 @@ extern void FASTCALL(call_rcu(struct rcu_head *head,
265 void (*func)(struct rcu_head *head))); 280 void (*func)(struct rcu_head *head)));
266extern void FASTCALL(call_rcu_bh(struct rcu_head *head, 281extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
267 void (*func)(struct rcu_head *head))); 282 void (*func)(struct rcu_head *head)));
268extern void synchronize_kernel(void); 283extern __deprecated_for_modules void synchronize_kernel(void);
284extern void synchronize_rcu(void);
285void synchronize_idle(void);
269 286
270#endif /* __KERNEL__ */ 287#endif /* __KERNEL__ */
271#endif /* __LINUX_RCUPDATE_H */ 288#endif /* __LINUX_RCUPDATE_H */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index ad497722f04f..f436993bd590 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -444,15 +444,18 @@ static void wakeme_after_rcu(struct rcu_head *head)
444} 444}
445 445
446/** 446/**
447 * synchronize_kernel - wait until a grace period has elapsed. 447 * synchronize_rcu - wait until a grace period has elapsed.
448 * 448 *
449 * Control will return to the caller some time after a full grace 449 * Control will return to the caller some time after a full grace
450 * period has elapsed, in other words after all currently executing RCU 450 * period has elapsed, in other words after all currently executing RCU
451 * read-side critical sections have completed. RCU read-side critical 451 * read-side critical sections have completed. RCU read-side critical
452 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), 452 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
453 * and may be nested. 453 * and may be nested.
454 *
455 * If your read-side code is not protected by rcu_read_lock(), do -not-
456 * use synchronize_rcu().
454 */ 457 */
455void synchronize_kernel(void) 458void synchronize_rcu(void)
456{ 459{
457 struct rcu_synchronize rcu; 460 struct rcu_synchronize rcu;
458 461
@@ -464,7 +467,16 @@ void synchronize_kernel(void)
464 wait_for_completion(&rcu.completion); 467 wait_for_completion(&rcu.completion);
465} 468}
466 469
470/*
471 * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
472 */
473void synchronize_kernel(void)
474{
475 synchronize_rcu();
476}
477
467module_param(maxbatch, int, 0); 478module_param(maxbatch, int, 0);
468EXPORT_SYMBOL(call_rcu); /* WARNING: GPL-only in April 2006. */ 479EXPORT_SYMBOL(call_rcu); /* WARNING: GPL-only in April 2006. */
469EXPORT_SYMBOL(call_rcu_bh); /* WARNING: GPL-only in April 2006. */ 480EXPORT_SYMBOL(call_rcu_bh); /* WARNING: GPL-only in April 2006. */
481EXPORT_SYMBOL_GPL(synchronize_rcu);
470EXPORT_SYMBOL(synchronize_kernel); /* WARNING: GPL-only in April 2006. */ 482EXPORT_SYMBOL(synchronize_kernel); /* WARNING: GPL-only in April 2006. */