aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/rcupdate.h
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-09-16 13:10:44 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-09-16 13:10:44 -0400
commit96b4672703ed4538c7fc25de36df4415a0ee237c (patch)
treee5bb8f4c3eb41c5741a7b232cff8e502f6509fc3 /include/linux/rcupdate.h
parente98d06dd6cd791b5138b0fc6c14a9c0b4d1f2e72 (diff)
parenta53dd6a65668850493cce94395c1b88a015eb338 (diff)
Merge branch 'rcu-tasks.2014.09.10a' into HEAD
rcu-tasks.2014.09.10a: Add RCU-tasks flavor of RCU.
Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--include/linux/rcupdate.h57
1 files changed, 55 insertions, 2 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 334ff89aada0..5cafd60c1ee4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -55,6 +55,7 @@ enum rcutorture_type {
55 RCU_FLAVOR, 55 RCU_FLAVOR,
56 RCU_BH_FLAVOR, 56 RCU_BH_FLAVOR,
57 RCU_SCHED_FLAVOR, 57 RCU_SCHED_FLAVOR,
58 RCU_TASKS_FLAVOR,
58 SRCU_FLAVOR, 59 SRCU_FLAVOR,
59 INVALID_RCU_FLAVOR 60 INVALID_RCU_FLAVOR
60}; 61};
@@ -197,6 +198,28 @@ void call_rcu_sched(struct rcu_head *head,
197 198
198void synchronize_sched(void); 199void synchronize_sched(void);
199 200
201/**
202 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
203 * @head: structure to be used for queueing the RCU updates.
204 * @func: actual callback function to be invoked after the grace period
205 *
206 * The callback function will be invoked some time after a full grace
207 * period elapses, in other words after all currently executing RCU
208 * read-side critical sections have completed. call_rcu_tasks() assumes
209 * that the read-side critical sections end at a voluntary context
210 * switch (not a preemption!), entry into idle, or transition to usermode
211 * execution. As such, there are no read-side primitives analogous to
212 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
213 * to determine that all tasks have passed through a safe state, not so
214 * much for data-structure synchronization.
215 *
216 * See the description of call_rcu() for more detailed information on
217 * memory ordering guarantees.
218 */
219void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
220void synchronize_rcu_tasks(void);
221void rcu_barrier_tasks(void);
222
200#ifdef CONFIG_PREEMPT_RCU 223#ifdef CONFIG_PREEMPT_RCU
201 224
202void __rcu_read_lock(void); 225void __rcu_read_lock(void);
@@ -238,8 +261,8 @@ static inline int rcu_preempt_depth(void)
238 261
239/* Internal to kernel */ 262/* Internal to kernel */
240void rcu_init(void); 263void rcu_init(void);
241void rcu_sched_qs(int cpu); 264void rcu_sched_qs(void);
242void rcu_bh_qs(int cpu); 265void rcu_bh_qs(void);
243void rcu_check_callbacks(int cpu, int user); 266void rcu_check_callbacks(int cpu, int user);
244struct notifier_block; 267struct notifier_block;
245void rcu_idle_enter(void); 268void rcu_idle_enter(void);
@@ -302,6 +325,36 @@ static inline void rcu_init_nohz(void)
302 rcu_irq_exit(); \ 325 rcu_irq_exit(); \
303 } while (0) 326 } while (0)
304 327
328/*
329 * Note a voluntary context switch for RCU-tasks benefit. This is a
330 * macro rather than an inline function to avoid #include hell.
331 */
332#ifdef CONFIG_TASKS_RCU
333#define TASKS_RCU(x) x
334extern struct srcu_struct tasks_rcu_exit_srcu;
335#define rcu_note_voluntary_context_switch(t) \
336 do { \
337 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
338 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
339 } while (0)
340#else /* #ifdef CONFIG_TASKS_RCU */
341#define TASKS_RCU(x) do { } while (0)
342#define rcu_note_voluntary_context_switch(t) do { } while (0)
343#endif /* #else #ifdef CONFIG_TASKS_RCU */
344
345/**
346 * cond_resched_rcu_qs - Report potential quiescent states to RCU
347 *
348 * This macro resembles cond_resched(), except that it is defined to
349 * report potential quiescent states to RCU-tasks even if the cond_resched()
350 * machinery were to be shut off, as some advocate for PREEMPT kernels.
351 */
352#define cond_resched_rcu_qs() \
353do { \
354 rcu_note_voluntary_context_switch(current); \
355 cond_resched(); \
356} while (0)
357
305#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) 358#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
306bool __rcu_is_watching(void); 359bool __rcu_is_watching(void);
307#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 360#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */