aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcu
diff options
context:
space:
mode:
authorPaul Gortmaker <paul.gortmaker@windriver.com>2016-02-19 03:46:41 -0500
committerThomas Gleixner <tglx@linutronix.de>2016-02-25 05:27:16 -0500
commitabedf8e2419fb873d919dd74de2e84b510259339 (patch)
treef3844e71c29bef8dbb9031171c26574cb9087769 /kernel/rcu
parent065bb78c5b09df54d1c32e03227deb777ddff57b (diff)
rcu: Use simple wait queues where possible in rcutree
As of commit dae6e64d2bcfd ("rcu: Introduce proper blocking to no-CBs kthreads GP waits") the RCU subsystem started making use of wait queues. Here we convert all additions of RCU wait queues to use simple wait queues, since they don't need the extra overhead of the full wait queue features. Originally this was done for RT kernels[1], since we would get things like... BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 in_atomic(): 1, irqs_disabled(): 1, pid: 8, name: rcu_preempt Pid: 8, comm: rcu_preempt Not tainted Call Trace: [<ffffffff8106c8d0>] __might_sleep+0xd0/0xf0 [<ffffffff817d77b4>] rt_spin_lock+0x24/0x50 [<ffffffff8106fcf6>] __wake_up+0x36/0x70 [<ffffffff810c4542>] rcu_gp_kthread+0x4d2/0x680 [<ffffffff8105f910>] ? __init_waitqueue_head+0x50/0x50 [<ffffffff810c4070>] ? rcu_gp_fqs+0x80/0x80 [<ffffffff8105eabb>] kthread+0xdb/0xe0 [<ffffffff8106b912>] ? finish_task_switch+0x52/0x100 [<ffffffff817e0754>] kernel_thread_helper+0x4/0x10 [<ffffffff8105e9e0>] ? __init_kthread_worker+0x60/0x60 [<ffffffff817e0750>] ? gs_change+0xb/0xb ...and hence simple wait queues were deployed on RT out of necessity (as simple wait uses a raw lock), but mainline might as well take advantage of the more streamlined support as well. [1] This is a carry forward of work from v3.10-rt; the original conversion was by Thomas on an earlier -rt version, and Sebastian extended it to additional post-3.10 added RCU waiters; here I've added a commit log and unified the RCU changes into one, and uprev'd it to match mainline RCU. Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: linux-rt-users@vger.kernel.org Cc: Boqun Feng <boqun.feng@gmail.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Paul Gortmaker <paul.gortmaker@windriver.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: "Paul E. 
McKenney" <paulmck@linux.vnet.ibm.com> Link: http://lkml.kernel.org/r/1455871601-27484-6-git-send-email-wagi@monom.org Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/rcu')
-rw-r--r--kernel/rcu/tree.c22
-rw-r--r--kernel/rcu/tree.h13
-rw-r--r--kernel/rcu/tree_plugin.h26
3 files changed, 31 insertions, 30 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8e8c6ec1d30f..9fd5b628a88d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1634,7 +1634,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1634 !READ_ONCE(rsp->gp_flags) || 1634 !READ_ONCE(rsp->gp_flags) ||
1635 !rsp->gp_kthread) 1635 !rsp->gp_kthread)
1636 return; 1636 return;
1637 wake_up(&rsp->gp_wq); 1637 swake_up(&rsp->gp_wq);
1638} 1638}
1639 1639
1640/* 1640/*
@@ -2009,7 +2009,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
2009 int nocb = 0; 2009 int nocb = 0;
2010 struct rcu_data *rdp; 2010 struct rcu_data *rdp;
2011 struct rcu_node *rnp = rcu_get_root(rsp); 2011 struct rcu_node *rnp = rcu_get_root(rsp);
2012 wait_queue_head_t *sq; 2012 struct swait_queue_head *sq;
2013 2013
2014 WRITE_ONCE(rsp->gp_activity, jiffies); 2014 WRITE_ONCE(rsp->gp_activity, jiffies);
2015 raw_spin_lock_irq_rcu_node(rnp); 2015 raw_spin_lock_irq_rcu_node(rnp);
@@ -2094,7 +2094,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
2094 READ_ONCE(rsp->gpnum), 2094 READ_ONCE(rsp->gpnum),
2095 TPS("reqwait")); 2095 TPS("reqwait"));
2096 rsp->gp_state = RCU_GP_WAIT_GPS; 2096 rsp->gp_state = RCU_GP_WAIT_GPS;
2097 wait_event_interruptible(rsp->gp_wq, 2097 swait_event_interruptible(rsp->gp_wq,
2098 READ_ONCE(rsp->gp_flags) & 2098 READ_ONCE(rsp->gp_flags) &
2099 RCU_GP_FLAG_INIT); 2099 RCU_GP_FLAG_INIT);
2100 rsp->gp_state = RCU_GP_DONE_GPS; 2100 rsp->gp_state = RCU_GP_DONE_GPS;
@@ -2124,7 +2124,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
2124 READ_ONCE(rsp->gpnum), 2124 READ_ONCE(rsp->gpnum),
2125 TPS("fqswait")); 2125 TPS("fqswait"));
2126 rsp->gp_state = RCU_GP_WAIT_FQS; 2126 rsp->gp_state = RCU_GP_WAIT_FQS;
2127 ret = wait_event_interruptible_timeout(rsp->gp_wq, 2127 ret = swait_event_interruptible_timeout(rsp->gp_wq,
2128 rcu_gp_fqs_check_wake(rsp, &gf), j); 2128 rcu_gp_fqs_check_wake(rsp, &gf), j);
2129 rsp->gp_state = RCU_GP_DOING_FQS; 2129 rsp->gp_state = RCU_GP_DOING_FQS;
2130 /* Locking provides needed memory barriers. */ 2130 /* Locking provides needed memory barriers. */
@@ -2248,7 +2248,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
2248 WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); 2248 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
2249 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); 2249 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2250 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); 2250 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2251 rcu_gp_kthread_wake(rsp); 2251 swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
2252} 2252}
2253 2253
2254/* 2254/*
@@ -2902,7 +2902,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
2902 } 2902 }
2903 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); 2903 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2904 raw_spin_unlock_irqrestore(&rnp_old->lock, flags); 2904 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2905 rcu_gp_kthread_wake(rsp); 2905 swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
2906} 2906}
2907 2907
2908/* 2908/*
@@ -3531,7 +3531,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
3531 raw_spin_unlock_irqrestore(&rnp->lock, flags); 3531 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3532 if (wake) { 3532 if (wake) {
3533 smp_mb(); /* EGP done before wake_up(). */ 3533 smp_mb(); /* EGP done before wake_up(). */
3534 wake_up(&rsp->expedited_wq); 3534 swake_up(&rsp->expedited_wq);
3535 } 3535 }
3536 break; 3536 break;
3537 } 3537 }
@@ -3782,7 +3782,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
3782 jiffies_start = jiffies; 3782 jiffies_start = jiffies;
3783 3783
3784 for (;;) { 3784 for (;;) {
3785 ret = wait_event_interruptible_timeout( 3785 ret = swait_event_timeout(
3786 rsp->expedited_wq, 3786 rsp->expedited_wq,
3787 sync_rcu_preempt_exp_done(rnp_root), 3787 sync_rcu_preempt_exp_done(rnp_root),
3788 jiffies_stall); 3788 jiffies_stall);
@@ -3790,7 +3790,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
3790 return; 3790 return;
3791 if (ret < 0) { 3791 if (ret < 0) {
3792 /* Hit a signal, disable CPU stall warnings. */ 3792 /* Hit a signal, disable CPU stall warnings. */
3793 wait_event(rsp->expedited_wq, 3793 swait_event(rsp->expedited_wq,
3794 sync_rcu_preempt_exp_done(rnp_root)); 3794 sync_rcu_preempt_exp_done(rnp_root));
3795 return; 3795 return;
3796 } 3796 }
@@ -4484,8 +4484,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
4484 } 4484 }
4485 } 4485 }
4486 4486
4487 init_waitqueue_head(&rsp->gp_wq); 4487 init_swait_queue_head(&rsp->gp_wq);
4488 init_waitqueue_head(&rsp->expedited_wq); 4488 init_swait_queue_head(&rsp->expedited_wq);
4489 rnp = rsp->level[rcu_num_lvls - 1]; 4489 rnp = rsp->level[rcu_num_lvls - 1];
4490 for_each_possible_cpu(i) { 4490 for_each_possible_cpu(i) {
4491 while (i > rnp->grphi) 4491 while (i > rnp->grphi)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 10dedfbef09d..bbd235d0e71f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -27,6 +27,7 @@
27#include <linux/threads.h> 27#include <linux/threads.h>
28#include <linux/cpumask.h> 28#include <linux/cpumask.h>
29#include <linux/seqlock.h> 29#include <linux/seqlock.h>
30#include <linux/swait.h>
30#include <linux/stop_machine.h> 31#include <linux/stop_machine.h>
31 32
32/* 33/*
@@ -243,7 +244,7 @@ struct rcu_node {
243 /* Refused to boost: not sure why, though. */ 244 /* Refused to boost: not sure why, though. */
244 /* This can happen due to race conditions. */ 245 /* This can happen due to race conditions. */
245#ifdef CONFIG_RCU_NOCB_CPU 246#ifdef CONFIG_RCU_NOCB_CPU
246 wait_queue_head_t nocb_gp_wq[2]; 247 struct swait_queue_head nocb_gp_wq[2];
247 /* Place for rcu_nocb_kthread() to wait GP. */ 248 /* Place for rcu_nocb_kthread() to wait GP. */
248#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 249#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
249 int need_future_gp[2]; 250 int need_future_gp[2];
@@ -399,7 +400,7 @@ struct rcu_data {
399 atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */ 400 atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
400 struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ 401 struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
401 struct rcu_head **nocb_follower_tail; 402 struct rcu_head **nocb_follower_tail;
402 wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ 403 struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
403 struct task_struct *nocb_kthread; 404 struct task_struct *nocb_kthread;
404 int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ 405 int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
405 406
@@ -478,7 +479,7 @@ struct rcu_state {
478 unsigned long gpnum; /* Current gp number. */ 479 unsigned long gpnum; /* Current gp number. */
479 unsigned long completed; /* # of last completed gp. */ 480 unsigned long completed; /* # of last completed gp. */
480 struct task_struct *gp_kthread; /* Task for grace periods. */ 481 struct task_struct *gp_kthread; /* Task for grace periods. */
481 wait_queue_head_t gp_wq; /* Where GP task waits. */ 482 struct swait_queue_head gp_wq; /* Where GP task waits. */
482 short gp_flags; /* Commands for GP task. */ 483 short gp_flags; /* Commands for GP task. */
483 short gp_state; /* GP kthread sleep state. */ 484 short gp_state; /* GP kthread sleep state. */
484 485
@@ -506,7 +507,7 @@ struct rcu_state {
506 unsigned long expedited_sequence; /* Take a ticket. */ 507 unsigned long expedited_sequence; /* Take a ticket. */
507 atomic_long_t expedited_normal; /* # fallbacks to normal. */ 508 atomic_long_t expedited_normal; /* # fallbacks to normal. */
508 atomic_t expedited_need_qs; /* # CPUs left to check in. */ 509 atomic_t expedited_need_qs; /* # CPUs left to check in. */
509 wait_queue_head_t expedited_wq; /* Wait for check-ins. */ 510 struct swait_queue_head expedited_wq; /* Wait for check-ins. */
510 int ncpus_snap; /* # CPUs seen last time. */ 511 int ncpus_snap; /* # CPUs seen last time. */
511 512
512 unsigned long jiffies_force_qs; /* Time at which to invoke */ 513 unsigned long jiffies_force_qs; /* Time at which to invoke */
@@ -621,8 +622,8 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp);
621static void increment_cpu_stall_ticks(void); 622static void increment_cpu_stall_ticks(void);
622static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu); 623static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
623static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq); 624static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
624static wait_queue_head_t *rcu_nocb_gp_get(struct rcu_node *rnp); 625static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
625static void rcu_nocb_gp_cleanup(wait_queue_head_t *sq); 626static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
626static void rcu_init_one_nocb(struct rcu_node *rnp); 627static void rcu_init_one_nocb(struct rcu_node *rnp);
627static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, 628static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
628 bool lazy, unsigned long flags); 629 bool lazy, unsigned long flags);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 631aff61abe9..080bd202d360 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1811,9 +1811,9 @@ early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
1811 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended 1811 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
1812 * grace period. 1812 * grace period.
1813 */ 1813 */
1814static void rcu_nocb_gp_cleanup(wait_queue_head_t *sq) 1814static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
1815{ 1815{
1816 wake_up_all(sq); 1816 swake_up_all(sq);
1817} 1817}
1818 1818
1819/* 1819/*
@@ -1829,15 +1829,15 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
1829 rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq; 1829 rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
1830} 1830}
1831 1831
1832static wait_queue_head_t *rcu_nocb_gp_get(struct rcu_node *rnp) 1832static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
1833{ 1833{
1834 return &rnp->nocb_gp_wq[rnp->completed & 0x1]; 1834 return &rnp->nocb_gp_wq[rnp->completed & 0x1];
1835} 1835}
1836 1836
1837static void rcu_init_one_nocb(struct rcu_node *rnp) 1837static void rcu_init_one_nocb(struct rcu_node *rnp)
1838{ 1838{
1839 init_waitqueue_head(&rnp->nocb_gp_wq[0]); 1839 init_swait_queue_head(&rnp->nocb_gp_wq[0]);
1840 init_waitqueue_head(&rnp->nocb_gp_wq[1]); 1840 init_swait_queue_head(&rnp->nocb_gp_wq[1]);
1841} 1841}
1842 1842
1843#ifndef CONFIG_RCU_NOCB_CPU_ALL 1843#ifndef CONFIG_RCU_NOCB_CPU_ALL
@@ -1862,7 +1862,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
1862 if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) { 1862 if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
1863 /* Prior smp_mb__after_atomic() orders against prior enqueue. */ 1863 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
1864 WRITE_ONCE(rdp_leader->nocb_leader_sleep, false); 1864 WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
1865 wake_up(&rdp_leader->nocb_wq); 1865 swake_up(&rdp_leader->nocb_wq);
1866 } 1866 }
1867} 1867}
1868 1868
@@ -2074,7 +2074,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2074 */ 2074 */
2075 trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); 2075 trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
2076 for (;;) { 2076 for (;;) {
2077 wait_event_interruptible( 2077 swait_event_interruptible(
2078 rnp->nocb_gp_wq[c & 0x1], 2078 rnp->nocb_gp_wq[c & 0x1],
2079 (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c))); 2079 (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
2080 if (likely(d)) 2080 if (likely(d))
@@ -2102,7 +2102,7 @@ wait_again:
2102 /* Wait for callbacks to appear. */ 2102 /* Wait for callbacks to appear. */
2103 if (!rcu_nocb_poll) { 2103 if (!rcu_nocb_poll) {
2104 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); 2104 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
2105 wait_event_interruptible(my_rdp->nocb_wq, 2105 swait_event_interruptible(my_rdp->nocb_wq,
2106 !READ_ONCE(my_rdp->nocb_leader_sleep)); 2106 !READ_ONCE(my_rdp->nocb_leader_sleep));
2107 /* Memory barrier handled by smp_mb() calls below and repoll. */ 2107 /* Memory barrier handled by smp_mb() calls below and repoll. */
2108 } else if (firsttime) { 2108 } else if (firsttime) {
@@ -2177,7 +2177,7 @@ wait_again:
2177 * List was empty, wake up the follower. 2177 * List was empty, wake up the follower.
2178 * Memory barriers supplied by atomic_long_add(). 2178 * Memory barriers supplied by atomic_long_add().
2179 */ 2179 */
2180 wake_up(&rdp->nocb_wq); 2180 swake_up(&rdp->nocb_wq);
2181 } 2181 }
2182 } 2182 }
2183 2183
@@ -2198,7 +2198,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
2198 if (!rcu_nocb_poll) { 2198 if (!rcu_nocb_poll) {
2199 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2199 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2200 "FollowerSleep"); 2200 "FollowerSleep");
2201 wait_event_interruptible(rdp->nocb_wq, 2201 swait_event_interruptible(rdp->nocb_wq,
2202 READ_ONCE(rdp->nocb_follower_head)); 2202 READ_ONCE(rdp->nocb_follower_head));
2203 } else if (firsttime) { 2203 } else if (firsttime) {
2204 /* Don't drown trace log with "Poll"! */ 2204 /* Don't drown trace log with "Poll"! */
@@ -2357,7 +2357,7 @@ void __init rcu_init_nohz(void)
2357static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) 2357static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2358{ 2358{
2359 rdp->nocb_tail = &rdp->nocb_head; 2359 rdp->nocb_tail = &rdp->nocb_head;
2360 init_waitqueue_head(&rdp->nocb_wq); 2360 init_swait_queue_head(&rdp->nocb_wq);
2361 rdp->nocb_follower_tail = &rdp->nocb_follower_head; 2361 rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2362} 2362}
2363 2363
@@ -2507,7 +2507,7 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2507 return false; 2507 return false;
2508} 2508}
2509 2509
2510static void rcu_nocb_gp_cleanup(wait_queue_head_t *sq) 2510static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
2511{ 2511{
2512} 2512}
2513 2513
@@ -2515,7 +2515,7 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2515{ 2515{
2516} 2516}
2517 2517
2518static wait_queue_head_t *rcu_nocb_gp_get(struct rcu_node *rnp) 2518static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
2519{ 2519{
2520 return NULL; 2520 return NULL;
2521} 2521}