Diffstat (limited to 'include/linux/rcupdate.h'):
 include/linux/rcupdate.h | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4d747433916b..fd276adf0fd5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -157,9 +157,9 @@ static inline int rcu_pending(int cpu)
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
- * When synchronize_kernel() is invoked on one CPU while other CPUs
+ * When synchronize_rcu() is invoked on one CPU while other CPUs
  * are within RCU read-side critical sections, then the
- * synchronize_kernel() is guaranteed to block until after all the other
+ * synchronize_rcu() is guaranteed to block until after all the other
  * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
  * on one CPU while other CPUs are within RCU read-side critical
  * sections, invocation of the corresponding RCU callback is deferred
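
For context, a minimal sketch of the reader/updater pattern this comment describes. The struct foo, gp, read_foo_a() and update_foo() names are illustrative only and are not part of this patch; updaters are assumed to be serialized by some external lock.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int a;
};

static struct foo *gp;			/* RCU-protected pointer (hypothetical) */

/* Reader: synchronize_rcu() in the updater waits for sections like this. */
static int read_foo_a(void)
{
	struct foo *p;
	int ret = 0;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		ret = p->a;
	rcu_read_unlock();
	return ret;
}

/* Updater: publish the new version, wait for pre-existing readers, free. */
static void update_foo(struct foo *new)
{
	struct foo *old = gp;		/* updaters serialized externally */

	rcu_assign_pointer(gp, new);
	synchronize_rcu();
	kfree(old);
}
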
@@ -256,6 +256,21 @@ static inline int rcu_pending(int cpu)
 		(p) = (v); \
 })
 
+/**
+ * synchronize_sched - block until all CPUs have exited any non-preemptive
+ * kernel code sequences.
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns.  However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels they can run in process context and block.
+ *
+ * This primitive provides the guarantees made by the (deprecated)
+ * synchronize_kernel() API.  In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ */
+#define synchronize_sched() synchronize_rcu()
+
 extern void rcu_init(void);
 extern void rcu_check_callbacks(int cpu, int user);
 extern void rcu_restart_cpu(int cpu);
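
As a rough illustration of how synchronize_sched() readers differ from rcu_read_lock() readers, here is a sketch of a reader that relies only on preempt_disable(), paired with an updater that uses synchronize_sched(). The struct cfg, cur_cfg and function names are hypothetical.

#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
	int threshold;
};

static struct cfg *cur_cfg;		/* hypothetical shared pointer */

/* Reader: the preempt_disable() region is the "critical section" here;
 * no rcu_read_lock() is involved. */
static int get_threshold(void)
{
	struct cfg *c;
	int t = 0;

	preempt_disable();
	c = rcu_dereference(cur_cfg);
	if (c)
		t = c->threshold;
	preempt_enable();
	return t;
}

/* Updater: after synchronize_sched() returns, every preempt-disabled
 * region (and hard-irq/NMI handler) that was running at the time of the
 * call has finished, so the old cfg can no longer be in use. */
static void set_cfg(struct cfg *new)
{
	struct cfg *old = cur_cfg;	/* updaters serialized externally */

	rcu_assign_pointer(cur_cfg, new);
	synchronize_sched();
	kfree(old);
}

Since this patch defines synchronize_sched() as synchronize_rcu(), the two grace periods coincide in this implementation; the separate name documents which read-side discipline the updater relies on.
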
@@ -265,7 +280,9 @@ extern void FASTCALL(call_rcu(struct rcu_head *head,
 		void (*func)(struct rcu_head *head)));
 extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
 		void (*func)(struct rcu_head *head)));
-extern void synchronize_kernel(void);
+extern __deprecated_for_modules void synchronize_kernel(void);
+extern void synchronize_rcu(void);
+void synchronize_idle(void);
 
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
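
Finally, for updaters that cannot block, the call_rcu() interface declared above defers reclamation instead of waiting in place. A hedged sketch, with struct item, item_free_rcu() and remove_item() as made-up names:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	int key;
	struct rcu_head rcu;		/* embedded callback handle */
};

/* Invoked once a grace period has elapsed. */
static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

/* Caller has already unlinked "it" from the RCU-protected structure. */
static void remove_item(struct item *it)
{
	call_rcu(&it->rcu, item_free_rcu);
}

Per the comments added by this patch, code that used the deprecated synchronize_kernel() is expected to switch to synchronize_rcu() when its readers use rcu_read_lock(), to synchronize_sched() when they rely on preempt_disable() or run in irq/NMI context, or to call_rcu()/call_rcu_bh() when blocking is not an option.
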