aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
authorPaul Gortmaker <paul.gortmaker@windriver.com>2016-02-19 03:46:41 -0500
committerThomas Gleixner <tglx@linutronix.de>2016-02-25 05:27:16 -0500
commitabedf8e2419fb873d919dd74de2e84b510259339 (patch)
treef3844e71c29bef8dbb9031171c26574cb9087769 /kernel/rcu/tree.c
parent065bb78c5b09df54d1c32e03227deb777ddff57b (diff)
rcu: Use simple wait queues where possible in rcutree
As of commit dae6e64d2bcfd ("rcu: Introduce proper blocking to no-CBs
kthreads GP waits") the RCU subsystem started making use of wait queues.

Here we convert all additions of RCU wait queues to use simple wait
queues, since they don't need the extra overhead of the full wait queue
features.

Originally this was done for RT kernels[1], since we would get things
like...

  BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
  in_atomic(): 1, irqs_disabled(): 1, pid: 8, name: rcu_preempt
  Pid: 8, comm: rcu_preempt Not tainted
  Call Trace:
   [<ffffffff8106c8d0>] __might_sleep+0xd0/0xf0
   [<ffffffff817d77b4>] rt_spin_lock+0x24/0x50
   [<ffffffff8106fcf6>] __wake_up+0x36/0x70
   [<ffffffff810c4542>] rcu_gp_kthread+0x4d2/0x680
   [<ffffffff8105f910>] ? __init_waitqueue_head+0x50/0x50
   [<ffffffff810c4070>] ? rcu_gp_fqs+0x80/0x80
   [<ffffffff8105eabb>] kthread+0xdb/0xe0
   [<ffffffff8106b912>] ? finish_task_switch+0x52/0x100
   [<ffffffff817e0754>] kernel_thread_helper+0x4/0x10
   [<ffffffff8105e9e0>] ? __init_kthread_worker+0x60/0x60
   [<ffffffff817e0750>] ? gs_change+0xb/0xb

...and hence simple wait queues were deployed on RT out of necessity
(as simple wait uses a raw lock), but mainline might as well take
advantage of the more streamlined support as well.

[1] This is a carry forward of work from v3.10-rt; the original
conversion was by Thomas on an earlier -rt version, and Sebastian
extended it to additional post-3.10 added RCU waiters; here I've added
a commit log and unified the RCU changes into one, and uprev'd it to
match mainline RCU.

Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: linux-rt-users@vger.kernel.org
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1455871601-27484-6-git-send-email-wagi@monom.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--kernel/rcu/tree.c22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8e8c6ec1d30f..9fd5b628a88d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1634,7 +1634,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1634 !READ_ONCE(rsp->gp_flags) || 1634 !READ_ONCE(rsp->gp_flags) ||
1635 !rsp->gp_kthread) 1635 !rsp->gp_kthread)
1636 return; 1636 return;
1637 wake_up(&rsp->gp_wq); 1637 swake_up(&rsp->gp_wq);
1638} 1638}
1639 1639
1640/* 1640/*
@@ -2009,7 +2009,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
2009 int nocb = 0; 2009 int nocb = 0;
2010 struct rcu_data *rdp; 2010 struct rcu_data *rdp;
2011 struct rcu_node *rnp = rcu_get_root(rsp); 2011 struct rcu_node *rnp = rcu_get_root(rsp);
2012 wait_queue_head_t *sq; 2012 struct swait_queue_head *sq;
2013 2013
2014 WRITE_ONCE(rsp->gp_activity, jiffies); 2014 WRITE_ONCE(rsp->gp_activity, jiffies);
2015 raw_spin_lock_irq_rcu_node(rnp); 2015 raw_spin_lock_irq_rcu_node(rnp);
@@ -2094,7 +2094,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
2094 READ_ONCE(rsp->gpnum), 2094 READ_ONCE(rsp->gpnum),
2095 TPS("reqwait")); 2095 TPS("reqwait"));
2096 rsp->gp_state = RCU_GP_WAIT_GPS; 2096 rsp->gp_state = RCU_GP_WAIT_GPS;
2097 wait_event_interruptible(rsp->gp_wq, 2097 swait_event_interruptible(rsp->gp_wq,
2098 READ_ONCE(rsp->gp_flags) & 2098 READ_ONCE(rsp->gp_flags) &
2099 RCU_GP_FLAG_INIT); 2099 RCU_GP_FLAG_INIT);
2100 rsp->gp_state = RCU_GP_DONE_GPS; 2100 rsp->gp_state = RCU_GP_DONE_GPS;
@@ -2124,7 +2124,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
2124 READ_ONCE(rsp->gpnum), 2124 READ_ONCE(rsp->gpnum),
2125 TPS("fqswait")); 2125 TPS("fqswait"));
2126 rsp->gp_state = RCU_GP_WAIT_FQS; 2126 rsp->gp_state = RCU_GP_WAIT_FQS;
2127 ret = wait_event_interruptible_timeout(rsp->gp_wq, 2127 ret = swait_event_interruptible_timeout(rsp->gp_wq,
2128 rcu_gp_fqs_check_wake(rsp, &gf), j); 2128 rcu_gp_fqs_check_wake(rsp, &gf), j);
2129 rsp->gp_state = RCU_GP_DOING_FQS; 2129 rsp->gp_state = RCU_GP_DOING_FQS;
2130 /* Locking provides needed memory barriers. */ 2130 /* Locking provides needed memory barriers. */
@@ -2248,7 +2248,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
2248 WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); 2248 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
2249 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); 2249 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2250 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); 2250 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2251 rcu_gp_kthread_wake(rsp); 2251 swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
2252} 2252}
2253 2253
2254/* 2254/*
@@ -2902,7 +2902,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
2902 } 2902 }
2903 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); 2903 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2904 raw_spin_unlock_irqrestore(&rnp_old->lock, flags); 2904 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2905 rcu_gp_kthread_wake(rsp); 2905 swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
2906} 2906}
2907 2907
2908/* 2908/*
@@ -3531,7 +3531,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
3531 raw_spin_unlock_irqrestore(&rnp->lock, flags); 3531 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3532 if (wake) { 3532 if (wake) {
3533 smp_mb(); /* EGP done before wake_up(). */ 3533 smp_mb(); /* EGP done before wake_up(). */
3534 wake_up(&rsp->expedited_wq); 3534 swake_up(&rsp->expedited_wq);
3535 } 3535 }
3536 break; 3536 break;
3537 } 3537 }
@@ -3782,7 +3782,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
3782 jiffies_start = jiffies; 3782 jiffies_start = jiffies;
3783 3783
3784 for (;;) { 3784 for (;;) {
3785 ret = wait_event_interruptible_timeout( 3785 ret = swait_event_timeout(
3786 rsp->expedited_wq, 3786 rsp->expedited_wq,
3787 sync_rcu_preempt_exp_done(rnp_root), 3787 sync_rcu_preempt_exp_done(rnp_root),
3788 jiffies_stall); 3788 jiffies_stall);
@@ -3790,7 +3790,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
3790 return; 3790 return;
3791 if (ret < 0) { 3791 if (ret < 0) {
3792 /* Hit a signal, disable CPU stall warnings. */ 3792 /* Hit a signal, disable CPU stall warnings. */
3793 wait_event(rsp->expedited_wq, 3793 swait_event(rsp->expedited_wq,
3794 sync_rcu_preempt_exp_done(rnp_root)); 3794 sync_rcu_preempt_exp_done(rnp_root));
3795 return; 3795 return;
3796 } 3796 }
@@ -4484,8 +4484,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
4484 } 4484 }
4485 } 4485 }
4486 4486
4487 init_waitqueue_head(&rsp->gp_wq); 4487 init_swait_queue_head(&rsp->gp_wq);
4488 init_waitqueue_head(&rsp->expedited_wq); 4488 init_swait_queue_head(&rsp->expedited_wq);
4489 rnp = rsp->level[rcu_num_lvls - 1]; 4489 rnp = rsp->level[rcu_num_lvls - 1];
4490 for_each_possible_cpu(i) { 4490 for_each_possible_cpu(i) {
4491 while (i > rnp->grphi) 4491 while (i > rnp->grphi)