Diffstat (limited to 'include/linux/rcupdate.h')
 -rw-r--r--  include/linux/rcupdate.h | 106
 1 file changed, 65 insertions(+), 41 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d231aa17b1d7..a4a819ffb2d1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -47,14 +47,12 @@
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
-#ifdef CONFIG_RCU_TORTURE_TEST
-extern int rcutorture_runnable; /* for sysctl */
-#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
 
 enum rcutorture_type {
 	RCU_FLAVOR,
 	RCU_BH_FLAVOR,
 	RCU_SCHED_FLAVOR,
+	RCU_TASKS_FLAVOR,
 	SRCU_FLAVOR,
 	INVALID_RCU_FLAVOR
 };
@@ -197,6 +195,28 @@ void call_rcu_sched(struct rcu_head *head,
 
 void synchronize_sched(void);
 
+/**
+ * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_tasks() assumes
+ * that the read-side critical sections end at a voluntary context
+ * switch (not a preemption!), entry into idle, or transition to usermode
+ * execution. As such, there are no read-side primitives analogous to
+ * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
+ * to determine that all tasks have passed through a safe state, not so
+ * much for data-structure synchronization.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+void synchronize_rcu_tasks(void);
+void rcu_barrier_tasks(void);
+
 #ifdef CONFIG_PREEMPT_RCU
 
 void __rcu_read_lock(void);
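The new API is easiest to see in use. Below is a minimal, hypothetical sketch (struct my_tramp, unlink_my_tramp(), and my_tramp_free_cb() are invented names; only call_rcu_tasks() itself comes from this patch): it defers freeing a code trampoline until every task has passed through a voluntary context switch, idle, or usermode execution, so no task can still be executing inside it.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical structure whose code tasks may still be running through. */
struct my_tramp {
	struct rcu_head rh;	/* queued onto the RCU-tasks callback list */
	void *code;		/* dynamically generated code */
};

/*
 * Invoked only after a task-based grace period, i.e., once no task
 * can still be executing inside tp->code.
 */
static void my_tramp_free_cb(struct rcu_head *rhp)
{
	struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);

	kfree(tp);
}

static void my_tramp_remove(struct my_tramp *tp)
{
	unlink_my_tramp(tp);	/* hypothetical: make it unreachable first */
	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
}

Callers that can block may instead use synchronize_rcu_tasks() to wait for the grace period inline, or rcu_barrier_tasks() to wait for all previously queued callbacks to be invoked.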
@@ -238,8 +258,8 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 void rcu_init(void);
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
+void rcu_sched_qs(void);
+void rcu_bh_qs(void);
 void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 void rcu_idle_enter(void);
@@ -269,6 +289,14 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 			       struct task_struct *next) { }
 #endif /* CONFIG_RCU_USER_QS */
 
+#ifdef CONFIG_RCU_NOCB_CPU
+void rcu_init_nohz(void);
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+static inline void rcu_init_nohz(void)
+{
+}
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
 /**
  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
  * @a: Code that RCU needs to pay attention to.
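The surrounding context here is the existing RCU_NONIDLE() macro, used from the idle loop where RCU is not otherwise watching. A minimal hypothetical use, with my_idle_trace() an invented stand-in for a tracepoint containing RCU read-side accesses:

#include <linux/rcupdate.h>

static void my_idle_step(void)
{
	/* Tell RCU to watch just for the duration of the argument. */
	RCU_NONIDLE(my_idle_trace());
}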
@@ -294,6 +322,36 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 		rcu_irq_exit(); \
 	} while (0)
 
+/*
+ * Note a voluntary context switch for RCU-tasks benefit. This is a
+ * macro rather than an inline function to avoid #include hell.
+ */
+#ifdef CONFIG_TASKS_RCU
+#define TASKS_RCU(x) x
+extern struct srcu_struct tasks_rcu_exit_srcu;
+#define rcu_note_voluntary_context_switch(t) \
+	do { \
+		if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
+			ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+	} while (0)
+#else /* #ifdef CONFIG_TASKS_RCU */
+#define TASKS_RCU(x) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
+/**
+ * cond_resched_rcu_qs - Report potential quiescent states to RCU
+ *
+ * This macro resembles cond_resched(), except that it is defined to
+ * report potential quiescent states to RCU-tasks even if the cond_resched()
+ * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ */
+#define cond_resched_rcu_qs() \
+do { \
+	rcu_note_voluntary_context_switch(current); \
+	cond_resched(); \
+} while (0)
+
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
 bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
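A hypothetical caller of cond_resched_rcu_qs() (MY_TABLE_SIZE and my_scan_slot() are invented names): a long-running loop invokes it once per iteration so that RCU-tasks observes a quiescent state even on kernels where cond_resched() compiles to nothing.

#include <linux/rcupdate.h>
#include <linux/sched.h>

#define MY_TABLE_SIZE 4096		/* hypothetical */

static void my_scan_table(void)
{
	int i;

	for (i = 0; i < MY_TABLE_SIZE; i++) {
		my_scan_slot(i);	/* hypothetical per-slot work */
		cond_resched_rcu_qs();	/* voluntary QS for RCU-tasks */
	}
}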
@@ -349,7 +407,7 @@ bool rcu_lockdep_current_cpu_online(void);
 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
 static inline bool rcu_lockdep_current_cpu_online(void)
 {
-	return 1;
+	return true;
 }
 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
 
@@ -371,41 +429,7 @@ extern struct lockdep_map rcu_sched_lock_map;
 extern struct lockdep_map rcu_callback_map;
 int debug_lockdep_rcu_enabled(void);
 
-/**
- * rcu_read_lock_held() - might we be in RCU read-side critical section?
- *
- * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
- * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
- * this assumes we are in an RCU read-side critical section unless it can
- * prove otherwise. This is useful for debug checks in functions that
- * require that they be called within an RCU read-side critical section.
- *
- * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
- * and while lockdep is disabled.
- *
- * Note that rcu_read_lock() and the matching rcu_read_unlock() must
- * occur in the same context, for example, it is illegal to invoke
- * rcu_read_unlock() in process context if the matching rcu_read_lock()
- * was invoked from within an irq handler.
- *
- * Note that rcu_read_lock() is disallowed if the CPU is either idle or
- * offline from an RCU perspective, so check for those as well.
- */
-static inline int rcu_read_lock_held(void)
-{
-	if (!debug_lockdep_rcu_enabled())
-		return 1;
-	if (!rcu_is_watching())
-		return 0;
-	if (!rcu_lockdep_current_cpu_online())
-		return 0;
-	return lock_is_held(&rcu_lock_map);
-}
-
-/*
- * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
- * hell.
- */
+int rcu_read_lock_held(void);
 int rcu_read_lock_bh_held(void);
 
 /**
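The now out-of-line rcu_read_lock_held() is typically consumed by lockdep-based checks such as rcu_dereference_check(). A hypothetical sketch (struct my_data, my_global_ptr, and my_lock are invented names):

#include <linux/rcupdate.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct my_data __rcu *my_global_ptr;
static DEFINE_SPINLOCK(my_lock);

static struct my_data *my_get(void)
{
	/*
	 * Under CONFIG_PROVE_RCU, lockdep complains unless one of
	 * the stated conditions holds at the point of dereference.
	 */
	return rcu_dereference_check(my_global_ptr,
				     rcu_read_lock_held() ||
				     lockdep_is_held(&my_lock));
}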