author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-04-11 18:50:41 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-04-21 08:59:27 -0400
commit	bcbfdd01dce5556a952fae84ef16fd0f12525e7b (patch)
tree	d674b23e7a573c6e5234acb5e914fc60e581594a /kernel/rcu/tree.c
parent	0497b489b8255054f113fd31faeb72f6dbc50a68 (diff)
rcu: Make non-preemptive schedule be Tasks RCU quiescent state
Currently, a call to schedule() acts as a Tasks RCU quiescent state only if a context switch actually takes place. However, just the call to schedule() guarantees that the calling task has moved off of whatever tracing trampoline it might previously have been on. This commit therefore plumbs schedule()'s "preempt" parameter into rcu_note_context_switch(), which then records the Tasks RCU quiescent state, but only if this call to schedule() was -not- due to a preemption.

To avoid adding overhead to the common-case context-switch path, this commit hides the rcu_note_context_switch() check under an existing non-common-case check.

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
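For reference, the caller-side plumbing lives in kernel/sched/core.c and include/linux/rcupdate.h, neither of which is part of this file's diff. The fragment below is a minimal sketch of that call path based on the description above rather than quoted from the commit; the scheduler's locking and task-selection logic is elided, and rcu_note_voluntary_context_switch_lite() is assumed to reduce, under CONFIG_TASKS_RCU, to clearing current->rcu_tasks_holdout.

	/* Sketch only: how schedule()'s preempt flag reaches RCU. */
	static void __sched notrace __schedule(bool preempt)
	{
		/* ... runqueue locking and bookkeeping elided ... */
		rcu_note_context_switch(preempt);	/* Tasks RCU QS recorded only when !preempt */
		/* ... pick the next task and context_switch() to it ... */
	}

	asmlinkage __visible void __sched schedule(void)
	{
		/* ... */
		__schedule(false);	/* voluntary call: not a preemption */
		/* ... */
	}

	/* Preemption paths such as preempt_schedule() instead end up in __schedule(true). */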
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	22
1 file changed, 21 insertions, 1 deletion
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3c23435d2083..891d97109e09 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -458,7 +458,7 @@ static void rcu_momentary_dyntick_idle(void)
  * and requires special handling for preemptible RCU.
  * The caller must have disabled interrupts.
  */
-void rcu_note_context_switch(void)
+void rcu_note_context_switch(bool preempt)
 {
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
 	trace_rcu_utilization(TPS("Start context switch"));
@@ -471,6 +471,8 @@ void rcu_note_context_switch(void)
 	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
 		rcu_momentary_dyntick_idle();
 	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
+	if (!preempt)
+		rcu_note_voluntary_context_switch_lite(current);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -1149,6 +1151,24 @@ bool notrace rcu_is_watching(void)
 }
 EXPORT_SYMBOL_GPL(rcu_is_watching);
 
+/*
+ * If a holdout task is actually running, request an urgent quiescent
+ * state from its CPU. This is unsynchronized, so migrations can cause
+ * the request to go to the wrong CPU. Which is OK, all that will happen
+ * is that the CPU's next context switch will be a bit slower and next
+ * time around this task will generate another request.
+ */
+void rcu_request_urgent_qs_task(struct task_struct *t)
+{
+	int cpu;
+
+	barrier();
+	cpu = task_cpu(t);
+	if (!task_curr(t))
+		return; /* This task is not running on that CPU. */
+	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
+}
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
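The new rcu_request_urgent_qs_task() helper is not called from this file; its comment points at the Tasks RCU holdout-task scan as the intended user. A hedged sketch of such a caller follows; the function name and the surrounding list walk are illustrative assumptions, not code from this commit.

	/* Illustrative only: nudge one holdout task from the Tasks RCU grace-period kthread. */
	static void nudge_holdout_task(struct task_struct *t)
	{
		if (!READ_ONCE(t->rcu_tasks_holdout))
			return;			/* Task already passed a quiescent state. */
		rcu_request_urgent_qs_task(t);	/* If t is running, ask its CPU for an urgent QS. */
	}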