author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-08-14 19:01:53 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-09-07 19:27:34 -0400
commit		1d082fd061884a587c490c4fc8a2056ce1e47624 (patch)
tree		a8f715a674a036b1b7500a16ea11381d40659e42 /include/linux/sched.h
parent		4ff475ed4cf61a7f56bbfbc424147189d0022b38 (diff)
rcu: Remove local_irq_disable() in rcu_preempt_note_context_switch()
The rcu_preempt_note_context_switch() function is on a scheduling fast path, so it would be good to avoid disabling irqs. The reason that irqs are disabled is to synchronize process-level and irq-handler access to the task_struct ->rcu_read_unlock_special bitmask. This commit therefore makes ->rcu_read_unlock_special instead be a union of bools with a short allowing single-access checks in RCU's __rcu_read_unlock(). This results in the process-level and irq-handler accesses being simple loads and stores, so that irqs need no longer be disabled. This commit therefore removes the irq disabling from rcu_preempt_note_context_switch().

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
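As a stand-alone illustration of the pattern the commit message describes (a minimal user-space sketch, not the kernel's actual code), the union lets each flag be written with an ordinary byte store, while a single load of the .s member tests both flags at once. This assumes the common ABI where sizeof(bool) == 1, so both flags fit inside the short:

/*
 * Minimal sketch of the union-of-bools-with-a-short pattern.
 * Not kernel code; field names mirror the ones added by this commit.
 */
#include <stdbool.h>
#include <stdio.h>

union rcu_special {
	struct {
		bool blocked;	/* task blocked within an RCU read-side section */
		bool need_qs;	/* RCU core needs a quiescent state from this task */
	} b;
	short s;		/* covers both bools in a single access */
};

int main(void)
{
	union rcu_special special = { .s = 0 };

	/* e.g. context-switch path: a plain store, no irq disabling needed */
	special.b.blocked = true;

	/* e.g. read-unlock fast path: one load checks both flags */
	if (special.s != 0)
		printf("special handling needed (blocked=%d, need_qs=%d)\n",
		       special.b.blocked, special.b.need_qs);

	return 0;
}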
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	16
1 file changed, 9 insertions, 7 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ec8b34722bcc..42888d715fb1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1212,6 +1212,13 @@ struct sched_dl_entity {
 	struct hrtimer dl_timer;
 };
 
+union rcu_special {
+	struct {
+		bool blocked;
+		bool need_qs;
+	} b;
+	short s;
+};
 struct rcu_node;
 
 enum perf_event_task_context {
@@ -1264,7 +1271,7 @@ struct task_struct {
 
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
-	char rcu_read_unlock_special;
+	union rcu_special rcu_read_unlock_special;
 	struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 #ifdef CONFIG_TREE_PREEMPT_RCU
@@ -2005,16 +2012,11 @@ extern void task_clear_jobctl_trapping(struct task_struct *task);
 extern void task_clear_jobctl_pending(struct task_struct *task,
 				      unsigned int mask);
 
-#ifdef CONFIG_PREEMPT_RCU
-#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-
 static inline void rcu_copy_process(struct task_struct *p)
 {
 #ifdef CONFIG_PREEMPT_RCU
 	p->rcu_read_lock_nesting = 0;
-	p->rcu_read_unlock_special = 0;
+	p->rcu_read_unlock_special.s = 0;
 	p->rcu_blocked_node = NULL;
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 #endif /* #ifdef CONFIG_PREEMPT_RCU */