aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/sched.h
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-06-27 16:42:20 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-09-07 19:27:19 -0400
commit8315f42295d2667a7f942f154b73a86fd7cb2227 (patch)
tree67057935dada3305e0dab95f546359b40cc29b96 /include/linux/sched.h
parent11ed7f934cb807f26da09547b5946c2e534d1dac (diff)
rcu: Add call_rcu_tasks()
This commit adds a new RCU-tasks flavor of RCU, which provides call_rcu_tasks(). This RCU flavor's quiescent states are voluntary context switch (not preemption!) and userspace execution (not the idle loop -- use some sort of schedule_on_each_cpu() if you need to handle the idle tasks). Note that unlike other RCU flavors, these quiescent states occur in tasks, not necessarily CPUs. Includes fixes from Steven Rostedt. This RCU flavor is assumed to have very infrequent latency-tolerant updaters. This assumption permits significant simplifications, including a single global callback list protected by a single global lock, along with a single task-private linked list containing all tasks that have not yet passed through a quiescent state. If experience shows this assumption to be incorrect, the required additional complexity will be added. Suggested-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--include/linux/sched.h23
1 file changed, 12 insertions, 11 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5c2c885ee52b..eaacac4ae77d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1270,6 +1270,11 @@ struct task_struct {
1270#ifdef CONFIG_TREE_PREEMPT_RCU 1270#ifdef CONFIG_TREE_PREEMPT_RCU
1271 struct rcu_node *rcu_blocked_node; 1271 struct rcu_node *rcu_blocked_node;
1272#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1272#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1273#ifdef CONFIG_TASKS_RCU
1274 unsigned long rcu_tasks_nvcsw;
1275 bool rcu_tasks_holdout;
1276 struct list_head rcu_tasks_holdout_list;
1277#endif /* #ifdef CONFIG_TASKS_RCU */
1273 1278
1274#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1279#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1275 struct sched_info sched_info; 1280 struct sched_info sched_info;
@@ -2000,28 +2005,24 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
2000 unsigned int mask); 2005 unsigned int mask);
2001 2006
2002#ifdef CONFIG_PREEMPT_RCU 2007#ifdef CONFIG_PREEMPT_RCU
2003
2004#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ 2008#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
2005#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ 2009#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
2010#endif /* #ifdef CONFIG_PREEMPT_RCU */
2006 2011
2007static inline void rcu_copy_process(struct task_struct *p) 2012static inline void rcu_copy_process(struct task_struct *p)
2008{ 2013{
2014#ifdef CONFIG_PREEMPT_RCU
2009 p->rcu_read_lock_nesting = 0; 2015 p->rcu_read_lock_nesting = 0;
2010 p->rcu_read_unlock_special = 0; 2016 p->rcu_read_unlock_special = 0;
2011#ifdef CONFIG_TREE_PREEMPT_RCU
2012 p->rcu_blocked_node = NULL; 2017 p->rcu_blocked_node = NULL;
2013#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2014 INIT_LIST_HEAD(&p->rcu_node_entry); 2018 INIT_LIST_HEAD(&p->rcu_node_entry);
2019#endif /* #ifdef CONFIG_PREEMPT_RCU */
2020#ifdef CONFIG_TASKS_RCU
2021 p->rcu_tasks_holdout = false;
2022 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2023#endif /* #ifdef CONFIG_TASKS_RCU */
2015} 2024}
2016 2025
2017#else
2018
2019static inline void rcu_copy_process(struct task_struct *p)
2020{
2021}
2022
2023#endif
2024
2025static inline void tsk_restore_flags(struct task_struct *task, 2026static inline void tsk_restore_flags(struct task_struct *task,
2026 unsigned long orig_flags, unsigned long flags) 2027 unsigned long orig_flags, unsigned long flags)
2027{ 2028{