aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2017-04-11 18:50:41 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2017-04-21 08:59:27 -0400
commitbcbfdd01dce5556a952fae84ef16fd0f12525e7b (patch)
treed674b23e7a573c6e5234acb5e914fc60e581594a
parent0497b489b8255054f113fd31faeb72f6dbc50a68 (diff)
rcu: Make non-preemptive schedule be Tasks RCU quiescent state
Currently, a call to schedule() acts as a Tasks RCU quiescent state only if a context switch actually takes place. However, just the call to schedule() guarantees that the calling task has moved off of whatever tracing trampoline that it might have been on previously. This commit therefore plumbs schedule()'s "preempt" parameter into rcu_note_context_switch(), which then records the Tasks RCU quiescent state, but only if this call to schedule() was -not- due to a preemption. To avoid adding overhead to the common-case context-switch path, this commit hides the rcu_note_context_switch() check under an existing non-common-case check. Suggested-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--include/linux/rcupdate.h11
-rw-r--r--include/linux/rcutiny.h13
-rw-r--r--include/linux/rcutree.h5
-rw-r--r--kernel/rcu/tree.c22
-rw-r--r--kernel/rcu/update.c1
-rw-r--r--kernel/sched/core.c2
6 files changed, 43 insertions, 11 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e6146d0074f8..f531b29207da 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -363,15 +363,20 @@ static inline void rcu_init_nohz(void)
363#ifdef CONFIG_TASKS_RCU 363#ifdef CONFIG_TASKS_RCU
364#define TASKS_RCU(x) x 364#define TASKS_RCU(x) x
365extern struct srcu_struct tasks_rcu_exit_srcu; 365extern struct srcu_struct tasks_rcu_exit_srcu;
366#define rcu_note_voluntary_context_switch(t) \ 366#define rcu_note_voluntary_context_switch_lite(t) \
367 do { \ 367 do { \
368 rcu_all_qs(); \
369 if (READ_ONCE((t)->rcu_tasks_holdout)) \ 368 if (READ_ONCE((t)->rcu_tasks_holdout)) \
370 WRITE_ONCE((t)->rcu_tasks_holdout, false); \ 369 WRITE_ONCE((t)->rcu_tasks_holdout, false); \
371 } while (0) 370 } while (0)
371#define rcu_note_voluntary_context_switch(t) \
372 do { \
373 rcu_all_qs(); \
374 rcu_note_voluntary_context_switch_lite(t); \
375 } while (0)
372#else /* #ifdef CONFIG_TASKS_RCU */ 376#else /* #ifdef CONFIG_TASKS_RCU */
373#define TASKS_RCU(x) do { } while (0) 377#define TASKS_RCU(x) do { } while (0)
374#define rcu_note_voluntary_context_switch(t) rcu_all_qs() 378#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
379#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
375#endif /* #else #ifdef CONFIG_TASKS_RCU */ 380#endif /* #else #ifdef CONFIG_TASKS_RCU */
376 381
377/** 382/**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 5219be250f00..74d9c3a1feee 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -92,10 +92,11 @@ static inline void kfree_call_rcu(struct rcu_head *head,
92 call_rcu(head, func); 92 call_rcu(head, func);
93} 93}
94 94
95static inline void rcu_note_context_switch(void) 95#define rcu_note_context_switch(preempt) \
96{ 96 do { \
97 rcu_sched_qs(); 97 rcu_sched_qs(); \
98} 98 rcu_note_voluntary_context_switch_lite(current); \
99 } while (0)
99 100
100/* 101/*
101 * Take advantage of the fact that there is only one CPU, which 102 * Take advantage of the fact that there is only one CPU, which
@@ -242,6 +243,10 @@ static inline bool rcu_is_watching(void)
242 243
243#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 244#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
244 245
246static inline void rcu_request_urgent_qs_task(struct task_struct *t)
247{
248}
249
245static inline void rcu_all_qs(void) 250static inline void rcu_all_qs(void)
246{ 251{
247 barrier(); /* Avoid RCU read-side critical sections leaking across. */ 252 barrier(); /* Avoid RCU read-side critical sections leaking across. */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 63a4e4cf40a5..0bacb6b2af69 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,7 +30,7 @@
30#ifndef __LINUX_RCUTREE_H 30#ifndef __LINUX_RCUTREE_H
31#define __LINUX_RCUTREE_H 31#define __LINUX_RCUTREE_H
32 32
33void rcu_note_context_switch(void); 33void rcu_note_context_switch(bool preempt);
34int rcu_needs_cpu(u64 basem, u64 *nextevt); 34int rcu_needs_cpu(u64 basem, u64 *nextevt);
35void rcu_cpu_stall_reset(void); 35void rcu_cpu_stall_reset(void);
36 36
@@ -41,7 +41,7 @@ void rcu_cpu_stall_reset(void);
41 */ 41 */
42static inline void rcu_virt_note_context_switch(int cpu) 42static inline void rcu_virt_note_context_switch(int cpu)
43{ 43{
44 rcu_note_context_switch(); 44 rcu_note_context_switch(false);
45} 45}
46 46
47void synchronize_rcu_bh(void); 47void synchronize_rcu_bh(void);
@@ -108,6 +108,7 @@ void rcu_scheduler_starting(void);
108extern int rcu_scheduler_active __read_mostly; 108extern int rcu_scheduler_active __read_mostly;
109 109
110bool rcu_is_watching(void); 110bool rcu_is_watching(void);
111void rcu_request_urgent_qs_task(struct task_struct *t);
111 112
112void rcu_all_qs(void); 113void rcu_all_qs(void);
113 114
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3c23435d2083..891d97109e09 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -458,7 +458,7 @@ static void rcu_momentary_dyntick_idle(void)
458 * and requires special handling for preemptible RCU. 458 * and requires special handling for preemptible RCU.
459 * The caller must have disabled interrupts. 459 * The caller must have disabled interrupts.
460 */ 460 */
461void rcu_note_context_switch(void) 461void rcu_note_context_switch(bool preempt)
462{ 462{
463 barrier(); /* Avoid RCU read-side critical sections leaking down. */ 463 barrier(); /* Avoid RCU read-side critical sections leaking down. */
464 trace_rcu_utilization(TPS("Start context switch")); 464 trace_rcu_utilization(TPS("Start context switch"));
@@ -471,6 +471,8 @@ void rcu_note_context_switch(void)
471 if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) 471 if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
472 rcu_momentary_dyntick_idle(); 472 rcu_momentary_dyntick_idle();
473 this_cpu_inc(rcu_dynticks.rcu_qs_ctr); 473 this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
474 if (!preempt)
475 rcu_note_voluntary_context_switch_lite(current);
474out: 476out:
475 trace_rcu_utilization(TPS("End context switch")); 477 trace_rcu_utilization(TPS("End context switch"));
476 barrier(); /* Avoid RCU read-side critical sections leaking up. */ 478 barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -1149,6 +1151,24 @@ bool notrace rcu_is_watching(void)
1149} 1151}
1150EXPORT_SYMBOL_GPL(rcu_is_watching); 1152EXPORT_SYMBOL_GPL(rcu_is_watching);
1151 1153
1154/*
1155 * If a holdout task is actually running, request an urgent quiescent
1156 * state from its CPU. This is unsynchronized, so migrations can cause
1157 * the request to go to the wrong CPU. Which is OK, all that will happen
1158 * is that the CPU's next context switch will be a bit slower and next
1159 * time around this task will generate another request.
1160 */
1161void rcu_request_urgent_qs_task(struct task_struct *t)
1162{
1163 int cpu;
1164
1165 barrier();
1166 cpu = task_cpu(t);
1167 if (!task_curr(t))
1168 return; /* This task is not running on that CPU. */
1169 smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
1170}
1171
1152#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 1172#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1153 1173
1154/* 1174/*
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index c5df0d756900..273e869ca21d 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -665,6 +665,7 @@ static void check_holdout_task(struct task_struct *t,
665 put_task_struct(t); 665 put_task_struct(t);
666 return; 666 return;
667 } 667 }
668 rcu_request_urgent_qs_task(t);
668 if (!needreport) 669 if (!needreport)
669 return; 670 return;
670 if (*firstreport) { 671 if (*firstreport) {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3b31fc05a0f1..2adf7b6c04e7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3378,7 +3378,7 @@ static void __sched notrace __schedule(bool preempt)
3378 hrtick_clear(rq); 3378 hrtick_clear(rq);
3379 3379
3380 local_irq_disable(); 3380 local_irq_disable();
3381 rcu_note_context_switch(); 3381 rcu_note_context_switch(preempt);
3382 3382
3383 /* 3383 /*
3384 * Make sure that signal_pending_state()->signal_pending() below 3384 * Make sure that signal_pending_state()->signal_pending() below