aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-07-01 14:26:57 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-09-07 19:27:20 -0400
commitbde6c3aa993066acb0d6ce32ecabe03b9d5df92d (patch)
tree09ec78a9cfa751ce36288c32c4e5112f34af8582 /kernel/rcu/tree.c
parent8315f42295d2667a7f942f154b73a86fd7cb2227 (diff)
rcu: Provide cond_resched_rcu_qs() to force quiescent states in long loops
RCU-tasks requires the occasional voluntary context switch from CPU-bound in-kernel tasks. In some cases, this requires instrumenting cond_resched(). However, there is some reluctance to countenance unconditionally instrumenting cond_resched() (see http://lwn.net/Articles/603252/), so this commit creates a separate cond_resched_rcu_qs() that may be used in place of cond_resched() in locations prone to long-duration in-kernel looping. This commit currently instruments only RCU-tasks. Future possibilities include also instrumenting RCU, RCU-bh, and RCU-sched in order to reduce IPI usage. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--kernel/rcu/tree.c12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8ad91d1e317d..e23dad0661e2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1647,7 +1647,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
1647 rnp->level, rnp->grplo, 1647 rnp->level, rnp->grplo,
1648 rnp->grphi, rnp->qsmask); 1648 rnp->grphi, rnp->qsmask);
1649 raw_spin_unlock_irq(&rnp->lock); 1649 raw_spin_unlock_irq(&rnp->lock);
1650 cond_resched(); 1650 cond_resched_rcu_qs();
1651 } 1651 }
1652 1652
1653 mutex_unlock(&rsp->onoff_mutex); 1653 mutex_unlock(&rsp->onoff_mutex);
@@ -1736,7 +1736,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
1736 /* smp_mb() provided by prior unlock-lock pair. */ 1736 /* smp_mb() provided by prior unlock-lock pair. */
1737 nocb += rcu_future_gp_cleanup(rsp, rnp); 1737 nocb += rcu_future_gp_cleanup(rsp, rnp);
1738 raw_spin_unlock_irq(&rnp->lock); 1738 raw_spin_unlock_irq(&rnp->lock);
1739 cond_resched(); 1739 cond_resched_rcu_qs();
1740 } 1740 }
1741 rnp = rcu_get_root(rsp); 1741 rnp = rcu_get_root(rsp);
1742 raw_spin_lock_irq(&rnp->lock); 1742 raw_spin_lock_irq(&rnp->lock);
@@ -1785,7 +1785,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
1785 /* Locking provides needed memory barrier. */ 1785 /* Locking provides needed memory barrier. */
1786 if (rcu_gp_init(rsp)) 1786 if (rcu_gp_init(rsp))
1787 break; 1787 break;
1788 cond_resched(); 1788 cond_resched_rcu_qs();
1789 flush_signals(current); 1789 flush_signals(current);
1790 trace_rcu_grace_period(rsp->name, 1790 trace_rcu_grace_period(rsp->name,
1791 ACCESS_ONCE(rsp->gpnum), 1791 ACCESS_ONCE(rsp->gpnum),
@@ -1828,10 +1828,10 @@ static int __noreturn rcu_gp_kthread(void *arg)
1828 trace_rcu_grace_period(rsp->name, 1828 trace_rcu_grace_period(rsp->name,
1829 ACCESS_ONCE(rsp->gpnum), 1829 ACCESS_ONCE(rsp->gpnum),
1830 TPS("fqsend")); 1830 TPS("fqsend"));
1831 cond_resched(); 1831 cond_resched_rcu_qs();
1832 } else { 1832 } else {
1833 /* Deal with stray signal. */ 1833 /* Deal with stray signal. */
1834 cond_resched(); 1834 cond_resched_rcu_qs();
1835 flush_signals(current); 1835 flush_signals(current);
1836 trace_rcu_grace_period(rsp->name, 1836 trace_rcu_grace_period(rsp->name,
1837 ACCESS_ONCE(rsp->gpnum), 1837 ACCESS_ONCE(rsp->gpnum),
@@ -2434,7 +2434,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
2434 struct rcu_node *rnp; 2434 struct rcu_node *rnp;
2435 2435
2436 rcu_for_each_leaf_node(rsp, rnp) { 2436 rcu_for_each_leaf_node(rsp, rnp) {
2437 cond_resched(); 2437 cond_resched_rcu_qs();
2438 mask = 0; 2438 mask = 0;
2439 raw_spin_lock_irqsave(&rnp->lock, flags); 2439 raw_spin_lock_irqsave(&rnp->lock, flags);
2440 smp_mb__after_unlock_lock(); 2440 smp_mb__after_unlock_lock();