author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-06-27 16:42:20 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-09-07 19:27:19 -0400
commit	8315f42295d2667a7f942f154b73a86fd7cb2227 (patch)
tree	67057935dada3305e0dab95f546359b40cc29b96 /include
parent	11ed7f934cb807f26da09547b5946c2e534d1dac (diff)
rcu: Add call_rcu_tasks()
This commit adds a new RCU-tasks flavor of RCU, which provides call_rcu_tasks(). This RCU flavor's quiescent states are voluntary context switch (not preemption!) and userspace execution (not the idle loop -- use some sort of schedule_on_each_cpu() if you need to handle the idle tasks). Note that unlike other RCU flavors, these quiescent states occur in tasks, not necessarily CPUs. Includes fixes from Steven Rostedt.

This RCU flavor is assumed to have very infrequent latency-tolerant updaters. This assumption permits significant simplifications, including a single global callback list protected by a single global lock, along with a single task-private linked list containing all tasks that have not yet passed through a quiescent state. If experience shows this assumption to be incorrect, the required additional complexity will be added.

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
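Typical usage mirrors the other call_rcu() variants: the updater first makes an object unreachable to new tasks, then queues a callback with call_rcu_tasks(), and the callback performs the actual reclamation once every non-idle task has passed through a voluntary context switch or usermode execution. A minimal sketch under that assumption, using made-up names (struct my_tramp, my_tramp_free(), my_tramp_retire()) that are not part of this patch:

/* Hypothetical illustration only -- not part of this commit. */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_tramp {
	void *text;		/* code that some tasks may still be executing */
	struct rcu_head rh;	/* handed to call_rcu_tasks() */
};

static void my_tramp_free(struct rcu_head *rhp)
{
	struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);

	kfree(tp);		/* safe: no task remains inside tp->text */
}

static void my_tramp_retire(struct my_tramp *tp)
{
	/* First unhook tp->text so no new tasks can enter it, then: */
	call_rcu_tasks(&tp->rh, my_tramp_free);
}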
Diffstat (limited to 'include')
-rw-r--r--	include/linux/init_task.h	9
-rw-r--r--	include/linux/rcupdate.h	36
-rw-r--r--	include/linux/sched.h	23
3 files changed, 57 insertions, 11 deletions
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2bb4c4f3531a..dffd9258ee60 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -117,6 +117,14 @@ extern struct group_info init_groups;
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
+#ifdef CONFIG_TASKS_RCU
+#define INIT_TASK_RCU_TASKS(tsk)					\
+	.rcu_tasks_holdout = false,					\
+	.rcu_tasks_holdout_list =					\
+		LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list),
+#else
+#define INIT_TASK_RCU_TASKS(tsk)
+#endif
 
 extern struct cred init_cred;
 
@@ -224,6 +232,7 @@ extern struct task_group root_task_group;
 	INIT_FTRACE_GRAPH						\
 	INIT_TRACE_RECURSION						\
 	INIT_TASK_RCU_PREEMPT(tsk)					\
+	INIT_TASK_RCU_TASKS(tsk)					\
 	INIT_CPUSET_SEQ(tsk)						\
 	INIT_RT_MUTEXES(tsk)						\
 	INIT_VTIME(tsk)							\
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d231aa17b1d7..3432063f4c87 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -197,6 +197,26 @@ void call_rcu_sched(struct rcu_head *head,
 
 void synchronize_sched(void);
 
+/**
+ * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed.  call_rcu_tasks() assumes
+ * that the read-side critical sections end at a voluntary context
+ * switch (not a preemption!), entry into idle, or transition to usermode
+ * execution.  As such, there are no read-side primitives analogous to
+ * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
+ * to determine that all tasks have passed through a safe state, not so
+ * much for data-structure synchronization.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+
 #ifdef CONFIG_PREEMPT_RCU
 
 void __rcu_read_lock(void);
@@ -294,6 +314,22 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 		rcu_irq_exit(); \
 	} while (0)
 
+/*
+ * Note a voluntary context switch for RCU-tasks benefit.  This is a
+ * macro rather than an inline function to avoid #include hell.
+ */
+#ifdef CONFIG_TASKS_RCU
+#define rcu_note_voluntary_context_switch(t) \
+	do { \
+		preempt_disable(); /* Exclude synchronize_sched(); */ \
+		if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
+			ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+		preempt_enable(); \
+	} while (0)
+#else /* #ifdef CONFIG_TASKS_RCU */
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
 bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
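The reader side of this contract is the rcu_note_voluntary_context_switch() macro added above: voluntary-scheduling paths are expected to invoke it so the grace-period machinery can clear the task's holdout flag. The hook sites themselves are not part of this patch; the following is a purely hypothetical sketch of what such a call site could look like, assuming <linux/sched.h> is available for current and cond_resched():

/* Hypothetical illustration only -- how a voluntary-scheduling path
 * might report an RCU-tasks quiescent state.
 */
static inline void my_voluntary_resched(void)
{
	rcu_note_voluntary_context_switch(current);
	cond_resched();
}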
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5c2c885ee52b..eaacac4ae77d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1270,6 +1270,11 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+	unsigned long rcu_tasks_nvcsw;
+	bool rcu_tasks_holdout;
+	struct list_head rcu_tasks_holdout_list;
+#endif /* #ifdef CONFIG_TASKS_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -2000,28 +2005,24 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
 				     unsigned int mask);
 
 #ifdef CONFIG_PREEMPT_RCU
-
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
+#ifdef CONFIG_PREEMPT_RCU
 	p->rcu_read_lock_nesting = 0;
 	p->rcu_read_unlock_special = 0;
-#ifdef CONFIG_TREE_PREEMPT_RCU
 	p->rcu_blocked_node = NULL;
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 	INIT_LIST_HEAD(&p->rcu_node_entry);
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+	p->rcu_tasks_holdout = false;
+	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
+#endif /* #ifdef CONFIG_TASKS_RCU */
 }
 
-#else
-
-static inline void rcu_copy_process(struct task_struct *p)
-{
-}
-
-#endif
-
 static inline void tsk_restore_flags(struct task_struct *task,
 				     unsigned long orig_flags, unsigned long flags)
 {