aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-03-02 19:35:27 -0500
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-05-15 13:27:29 -0400
commitcee4393989333795ae04dc9f3b83a578afe3fca6 (patch)
tree9b3afa8f132956c603f83a1fb098f8d30a81d7c1 /kernel/rcu/tree.c
parent6fba2b3767ea6e3e1204855031492415cc4dce4f (diff)
rcu: Rename cond_resched_rcu_qs() to cond_resched_tasks_rcu_qs()
Commit e31d28b6ab8f ("trace: Eliminate cond_resched_rcu_qs() in favor of cond_resched()") substituted cond_resched() for the earlier call to cond_resched_rcu_qs(). However, the new-age cond_resched() does not do anything to help RCU-tasks grace periods because (1) RCU-tasks is only enabled when CONFIG_PREEMPT=y and (2) cond_resched() is a complete no-op when preemption is enabled. This situation results in hangs when running the trace benchmarks. A number of potential fixes were discussed on LKML (https://lkml.kernel.org/r/20180224151240.0d63a059@vmware.local.home), including making cond_resched() not be a no-op; making cond_resched() not be a no-op, but only when running tracing benchmarks; reverting the aforementioned commit (which works because cond_resched_rcu_qs() does provide an RCU-tasks quiescent state); and adding a call to the scheduler/RCU rcu_note_voluntary_context_switch() function. All were deemed unsatisfactory, either due to added cond_resched() overhead or due to magic functions inviting cargo culting. This commit renames cond_resched_rcu_qs() to cond_resched_tasks_rcu_qs(), which provides a clear hint as to what this function is doing and why and where it should be used, and then replaces the call to cond_resched() with cond_resched_tasks_rcu_qs() in the trace benchmark's benchmark_event_kthread() function. Reported-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Tested-by: Nicholas Piggin <npiggin@gmail.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r-- kernel/rcu/tree.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2a734692a581..c4db0e20b035 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1234,10 +1234,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	}
 
 	/*
-	 * Has this CPU encountered a cond_resched_rcu_qs() since the
-	 * beginning of the grace period?  For this to be the case,
-	 * the CPU has to have noticed the current grace period.  This
-	 * might not be the case for nohz_full CPUs looping in the kernel.
+	 * Has this CPU encountered a cond_resched() since the beginning
+	 * of the grace period?  For this to be the case, the CPU has to
+	 * have noticed the current grace period.  This might not be the
+	 * case for nohz_full CPUs looping in the kernel.
 	 */
 	jtsq = jiffies_till_sched_qs;
 	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
@@ -2049,7 +2049,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock_irq_rcu_node(rnp);
-		cond_resched_rcu_qs();
+		cond_resched_tasks_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
 
@@ -2151,7 +2151,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		sq = rcu_nocb_gp_get(rnp);
 		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
-		cond_resched_rcu_qs();
+		cond_resched_tasks_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 		rcu_gp_slow(rsp, gp_cleanup_delay);
 	}
@@ -2202,7 +2202,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			/* Locking provides needed memory barrier. */
 			if (rcu_gp_init(rsp))
 				break;
-			cond_resched_rcu_qs();
+			cond_resched_tasks_rcu_qs();
 			WRITE_ONCE(rsp->gp_activity, jiffies);
 			WARN_ON(signal_pending(current));
 			trace_rcu_grace_period(rsp->name,
@@ -2247,7 +2247,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 				trace_rcu_grace_period(rsp->name,
 						       READ_ONCE(rsp->gpnum),
 						       TPS("fqsend"));
-				cond_resched_rcu_qs();
+				cond_resched_tasks_rcu_qs();
 				WRITE_ONCE(rsp->gp_activity, jiffies);
 				ret = 0; /* Force full wait till next FQS. */
 				j = jiffies_till_next_fqs;
@@ -2260,7 +2260,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 				}
 			} else {
 				/* Deal with stray signal. */
-				cond_resched_rcu_qs();
+				cond_resched_tasks_rcu_qs();
 				WRITE_ONCE(rsp->gp_activity, jiffies);
 				WARN_ON(signal_pending(current));
 				trace_rcu_grace_period(rsp->name,
@@ -2782,7 +2782,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
 	struct rcu_node *rnp;
 
 	rcu_for_each_leaf_node(rsp, rnp) {
-		cond_resched_rcu_qs();
+		cond_resched_tasks_rcu_qs();
 		mask = 0;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->qsmask == 0) {