Diffstat (limited to 'kernel/rcu/tree.c')
 kernel/rcu/tree.c | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3c23435d2083..891d97109e09 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -458,7 +458,7 @@ static void rcu_momentary_dyntick_idle(void)
  * and requires special handling for preemptible RCU.
  * The caller must have disabled interrupts.
  */
-void rcu_note_context_switch(void)
+void rcu_note_context_switch(bool preempt)
 {
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
 	trace_rcu_utilization(TPS("Start context switch"));
@@ -471,6 +471,8 @@ void rcu_note_context_switch(void)
 	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
 		rcu_momentary_dyntick_idle();
 	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
+	if (!preempt)
+		rcu_note_voluntary_context_switch_lite(current);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -1149,6 +1151,24 @@ bool notrace rcu_is_watching(void)
 }
 EXPORT_SYMBOL_GPL(rcu_is_watching);
 
+/*
+ * If a holdout task is actually running, request an urgent quiescent
+ * state from its CPU. This is unsynchronized, so migrations can cause
+ * the request to go to the wrong CPU. Which is OK, all that will happen
+ * is that the CPU's next context switch will be a bit slower and next
+ * time around this task will generate another request.
+ */
+void rcu_request_urgent_qs_task(struct task_struct *t)
+{
+	int cpu;
+
+	barrier();
+	cpu = task_cpu(t);
+	if (!task_curr(t))
+		return; /* This task is not running on that CPU. */
+	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
+}
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
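
Note on the new rcu_request_urgent_qs_task(): the CPU lookup is deliberately unsynchronized. As the patch comment says, a stale task_cpu() value only means the flag lands on the wrong CPU, which costs that CPU a slightly slower context switch, and the next scan of the holdout task simply generates another request. The stand-alone sketch below (not part of the patch) models that pattern in user space with C11 atomics and POSIX threads; every identifier in it (urgent_qs, request_urgent_qs, cpu_thread, NR_CPUS) is invented for the illustration, with memory_order_release standing in for smp_store_release() and one spinning thread per "CPU" standing in for the context-switch path.

/*
 * Not the kernel code: a user-space model of the "poke a possibly stale
 * CPU" pattern, using C11 atomics and pthreads. All identifiers are
 * invented for the illustration.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_bool urgent_qs[NR_CPUS];	/* models rcu_dynticks.rcu_urgent_qs */
static atomic_int holdout_cpu = 1;	/* models task_cpu(t); may go stale */
static atomic_bool qs_reported;

/* Models rcu_request_urgent_qs_task(): unsynchronized snapshot of the CPU. */
static void request_urgent_qs(void)
{
	int cpu = atomic_load_explicit(&holdout_cpu, memory_order_relaxed);

	/* Release store plays the role of smp_store_release(). */
	atomic_store_explicit(&urgent_qs[cpu], true, memory_order_release);
}

/* Models the context-switch path on one CPU noticing the request. */
static void *cpu_thread(void *arg)
{
	int cpu = (int)(long)arg;

	while (!atomic_load_explicit(&urgent_qs[cpu], memory_order_acquire))
		;	/* spin: "no context switch has happened yet" */
	atomic_store_explicit(&urgent_qs[cpu], false, memory_order_relaxed);
	atomic_store_explicit(&qs_reported, true, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_CPUS];

	for (long cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_create(&tid[cpu], NULL, cpu_thread, (void *)cpu);

	request_urgent_qs();	/* may race with the "task" migrating CPUs */

	while (!atomic_load_explicit(&qs_reported, memory_order_acquire))
		;		/* wait until some CPU reports the QS */
	printf("urgent quiescent state reported\n");
	return 0;		/* exiting main() tears down the other spinners */
}

Build with something like "cc -std=c11 -pthread model.c". The release store pairs with the acquire load, so whichever CPU thread observes the flag also observes everything the requester did before setting it; that is the same ordering guarantee the kernel version gets from smp_store_release().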