author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-05-14 16:52:27 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-07-12 18:39:15 -0400
commit		6f56f714db067056c80f5d71510118f82872e34c
tree		831ac61392c65e088ed1ae8856b5f42de30fcd8d
parent		a7538352da722fae5cc95ae6656ea2013f5b8b21
rcu: Improve RCU-tasks naming and comments
The naming and comments associated with some RCU-tasks code make the
faulty assumption that context switches due to cond_resched() are
voluntary.  As several people pointed out, this is not the case.  This
commit therefore updates function names and comments to better reflect
current reality.

Reported-by: Byungchul Park <byungchul.park@lge.com>
Reported-by: Joel Fernandes <joel@joelfernandes.org>
Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
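As a rough illustration of the quiescent-state reporting being renamed
here (everything in this sketch other than cond_resched_tasks_rcu_qs()
is invented for the example, not part of the patch):

/* Hypothetical long-running kernel loop.  Each pass reports a
 * Tasks-RCU quiescent state and offers to reschedule.  Note that the
 * context switch cond_resched() may trigger is *not* voluntary, which
 * is why the old "voluntary context switch" naming was misleading.
 */
static void example_scan(struct item *items, int nitems)
{
	int i;

	for (i = 0; i < nitems; i++) {
		process_item(&items[i]);	/* invented per-item work */
		cond_resched_tasks_rcu_qs();	/* QS report + maybe resched */
	}
}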
-rw-r--r--	include/linux/rcupdate.h	| 12
-rw-r--r--	include/linux/rcutiny.h		|  2
-rw-r--r--	kernel/rcu/tree.c		|  2
-rw-r--r--	kernel/rcu/update.c		| 27
4 files changed, 22 insertions(+), 21 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index dacc90358b33..75e5b393cf44 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -158,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
 } while (0)
 
 /*
- * Note a voluntary context switch for RCU-tasks benefit.  This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks's benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
 	do { \
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
 			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -170,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
 #define rcu_note_voluntary_context_switch(t) \
 	do { \
 		rcu_all_qs(); \
-		rcu_note_voluntary_context_switch_lite(t); \
+		rcu_tasks_qs(t); \
 	} while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t)	do { } while (0)
+#define rcu_tasks_qs(t)	do { } while (0)
 #define rcu_note_voluntary_context_switch(t)		rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
@@ -194,7 +194,7 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	rcu_note_voluntary_context_switch_lite(current); \
+	rcu_tasks_qs(current); \
 	cond_resched(); \
 } while (0)
 
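For intuition, here is a stand-alone user-space model of the holdout
handshake that rcu_tasks_qs() implements.  This is an assumption-laden
sketch, not kernel code; READ_ONCE()/WRITE_ONCE() are approximated with
C11 relaxed atomics:

#include <stdatomic.h>
#include <stdbool.h>

struct task_model {
	atomic_bool rcu_tasks_holdout;	/* set by the grace-period kthread */
};

/* Model of rcu_tasks_qs(t): test before clearing, so the common case
 * (flag already clear) never dirties the shared cache line. */
static void tasks_qs(struct task_model *t)
{
	if (atomic_load_explicit(&t->rcu_tasks_holdout,
				 memory_order_relaxed))
		atomic_store_explicit(&t->rcu_tasks_holdout, false,
				      memory_order_relaxed);
}

The grace period ends once every task marked as a holdout has cleared
its flag at one of these quiescent points.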
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7b3c82e8a625..8d9a0ea8f0b5 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 #define rcu_note_context_switch(preempt) \
 	do { \
 		rcu_sched_qs(); \
-		rcu_note_voluntary_context_switch_lite(current); \
+		rcu_tasks_qs(current); \
 	} while (0)
 
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 6f2922168216..ccc061acf887 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -457,7 +457,7 @@ void rcu_note_context_switch(bool preempt)
 		rcu_momentary_dyntick_idle();
 	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
 	if (!preempt)
-		rcu_note_voluntary_context_switch_lite(current);
+		rcu_tasks_qs(current);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 4c230a60ece4..5783bdf86e5a 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -507,14 +507,15 @@ early_initcall(check_cpu_stall_init);
 #ifdef CONFIG_TASKS_RCU
 
 /*
- * Simple variant of RCU whose quiescent states are voluntary context switch,
- * user-space execution, and idle.  As such, grace periods can take one good
- * long time.  There are no read-side primitives similar to rcu_read_lock()
- * and rcu_read_unlock() because this implementation is intended to get
- * the system into a safe state for some of the manipulations involved in
- * tracing and the like.  Finally, this implementation does not support
- * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
- * per-CPU callback lists will be needed.
+ * Simple variant of RCU whose quiescent states are voluntary context
+ * switch, cond_resched_rcu_qs(), user-space execution, and idle.
+ * As such, grace periods can take one good long time.  There are no
+ * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
+ * because this implementation is intended to get the system into a safe
+ * state for some of the manipulations involved in tracing and the like.
+ * Finally, this implementation does not support high call_rcu_tasks()
+ * rates from multiple CPUs.  If this is required, per-CPU callback lists
+ * will be needed.
  */
 
 /* Global list of callbacks and associated lock. */
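To make the intended tracing-style use concrete, a hypothetical caller
might look as follows.  Only call_rcu_tasks(), struct rcu_head,
container_of(), and kfree() are real kernel APIs here; the trampoline
type and helpers are invented for the sketch:

/* Hypothetical trampoline teardown using Tasks RCU. */
struct trampoline {
	struct rcu_head rh;
	void *code;
};

static void trampoline_free_cb(struct rcu_head *rhp)
{
	struct trampoline *tp = container_of(rhp, struct trampoline, rh);

	free_executable_memory(tp->code);	/* invented helper */
	kfree(tp);
}

static void trampoline_unregister(struct trampoline *tp)
{
	unhook_trampoline(tp);			/* invented helper */
	/* Safe to free only after every task has passed through a
	 * voluntary context switch, idle, or usermode execution. */
	call_rcu_tasks(&tp->rh, trampoline_free_cb);
}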
@@ -542,11 +543,11 @@ static struct task_struct *rcu_tasks_kthread_ptr;
  * period elapses, in other words after all currently executing RCU
  * read-side critical sections have completed. call_rcu_tasks() assumes
  * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution.  As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
+ * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
+ * or transition to usermode execution.  As such, there are no read-side
+ * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
+ * this primitive is intended to determine that all tasks have passed
+ * through a safe state, not so much for data-strcuture synchronization.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
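A synchronous variant of the same hypothetical teardown would use
synchronize_rcu_tasks() from this same API family (helpers again
invented, as above):

static void trampoline_unregister_sync(struct trampoline *tp)
{
	unhook_trampoline(tp);		/* invented helper */
	synchronize_rcu_tasks();	/* wait for every task to pass a QS */
	free_executable_memory(tp->code);	/* invented helper */
	kfree(tp);
}

Since, per the comment above, Tasks-RCU grace periods can take one good
long time, the asynchronous call_rcu_tasks() form is generally the
better fit for anything that cannot afford to block that long.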