author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-07-01 15:22:23 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-09-07 19:27:21 -0400
commit    53c6d4edf874d3cbc031a53738c6cba9277faea5
tree      0f0fc1c5ade60c7243c7d5756694d21a9cc0df8a
parent    bde6c3aa993066acb0d6ce32ecabe03b9d5df92d
rcu: Add synchronous grace-period waiting for RCU-tasks
It turns out to be easier to add the synchronous grace-period waiting
functions to RCU-tasks than to work around their absence in rcutorture,
so this commit adds them.  The key point is that the existence of
call_rcu_tasks() means that rcutorture needs an rcu_barrier_tasks().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
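For context, the synchronous primitive is layered on the asynchronous one:
wait_rcu_gp() (already used by synchronize_rcu() and friends) queues a
callback through the supplied call_rcu-style function and blocks on a
completion until that callback fires.  Below is a minimal sketch of that
pattern, not the verbatim kernel source; struct rcu_synchronize and
wakeme_after_rcu() mirror helpers in kernel/rcu/update.c of this era, while
wait_tasks_gp_sketch() is a name invented here for illustration.

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/* Grace-period callback: wake up the task blocked in the sketch below. */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

/* Invented name: block until an RCU-tasks grace period has elapsed. */
static void wait_tasks_gp_sketch(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	call_rcu_tasks(&rcu.head, wakeme_after_rcu);	/* Queue the callback. */
	wait_for_completion(&rcu.completion);		/* Wait for it to run. */
}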
 include/linux/rcupdate.h |  2 ++
 kernel/rcu/update.c      | 55 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+), 0 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 473350462d04..640152fedcde 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -216,6 +216,8 @@ void synchronize_sched(void);
  * memory ordering guarantees.
  */
 void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+void synchronize_rcu_tasks(void);
+void rcu_barrier_tasks(void);
 
 #ifdef CONFIG_PREEMPT_RCU
 
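These declarations pair the existing asynchronous call_rcu_tasks() with
synchronous counterparts.  As the documentation added below in
kernel/rcu/update.c notes, the intended callers are tracing-style users that
patch function entry code.  A hypothetical example of that pattern follows;
replace_trampoline() and active_trampoline are invented for illustration
and are not part of this patch (ACCESS_ONCE() was the publication idiom of
this kernel era):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical tracer state: the trampoline that tasks may be executing. */
static void *active_trampoline;

static void replace_trampoline(void *new_tramp)
{
	void *old_tramp = active_trampoline;

	ACCESS_ONCE(active_trampoline) = new_tramp;

	/*
	 * A preempted task may still be executing old_tramp, and such
	 * tasks have no explicit read-side marker.  Wait until every
	 * task has voluntarily scheduled, gone idle, or entered
	 * userspace, after which old_tramp cannot be in use.
	 */
	synchronize_rcu_tasks();
	kfree(old_tramp);
}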
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 19b3dacb0753..5fd1ddbfcc55 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -381,6 +381,61 @@ void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
 }
 EXPORT_SYMBOL_GPL(call_rcu_tasks);
 
+/**
+ * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-tasks
+ * grace period has elapsed, in other words after all currently
+ * executing rcu-tasks read-side critical sections have elapsed. These
+ * read-side critical sections are delimited by calls to schedule(),
+ * cond_resched_rcu_qs(), idle execution, userspace execution, calls
+ * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
+ *
+ * This is a very specialized primitive, intended only for a few uses in
+ * tracing and other situations requiring manipulation of function
+ * preambles and profiling hooks.  The synchronize_rcu_tasks() function
+ * is not (yet) intended for heavy use from multiple CPUs.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since the
+ * end of its last RCU-tasks read-side critical section whose beginning
+ * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
+ * having an RCU-tasks read-side critical section that extends beyond
+ * the return from synchronize_rcu_tasks() is guaranteed to have executed
+ * a full memory barrier after the beginning of synchronize_rcu_tasks()
+ * and before the beginning of that RCU-tasks read-side critical section.
+ * Note that these guarantees include CPUs that are offline, idle, or
+ * executing in user mode, as well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
+ * (but again only if the system has more than one CPU).
+ */
+void synchronize_rcu_tasks(void)
+{
+	/* Complain if the scheduler has not started.  */
+	rcu_lockdep_assert(!rcu_scheduler_active,
+			   "synchronize_rcu_tasks called too soon");
+
+	/* Wait for the grace period. */
+	wait_rcu_gp(call_rcu_tasks);
+}
+
+/**
+ * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
+ *
+ * Although the current implementation is guaranteed to wait, it is not
+ * obligated to, for example, if there are no pending callbacks.
+ */
+void rcu_barrier_tasks(void)
+{
+	/* There is only one callback queue, so this is easy. ;-) */
+	synchronize_rcu_tasks();
+}
+
 /* See if the current task has stopped holding out, remove from list if so. */
 static void check_holdout_task(struct task_struct *t)
 {
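The one-line rcu_barrier_tasks() works because RCU-tasks keeps a single
global callback list that is processed in order: the callback queued by
synchronize_rcu_tasks() cannot be invoked until every callback queued
before it has been invoked, so waiting for one grace period also waits out
all earlier call_rcu_tasks() callbacks.  The classic consumer of such a
barrier is module unload, sketched below; struct foo, foo_reclaim(), and
the module hooks are invented for illustration and are not part of this
patch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object whose reclamation is deferred via call_rcu_tasks(). */
struct foo {
	struct rcu_head rh;
	int payload;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rh));
}

static void foo_retire(struct foo *fp)
{
	/* Defer the free until all tasks have passed a quiescent state. */
	call_rcu_tasks(&fp->rh, foo_reclaim);
}

static void __exit foo_exit(void)
{
	/*
	 * foo_reclaim() lives in this module's text, so every pending
	 * callback must finish before the module text can be freed.
	 */
	rcu_barrier_tasks();
}
module_exit(foo_exit);
MODULE_LICENSE("GPL");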