aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/rcu/tree.c42
-rw-r--r--kernel/rcu/tree_plugin.h33
-rw-r--r--kernel/rcu/update.c20
3 files changed, 86 insertions, 9 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 121c1436a7f3..5ebc830297c1 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3223,8 +3223,24 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
3223 local_irq_restore(flags); 3223 local_irq_restore(flags);
3224} 3224}
3225 3225
3226/* 3226/**
3227 * Queue an RCU-sched callback for invocation after a grace period. 3227 * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
3228 * @head: structure to be used for queueing the RCU updates.
3229 * @func: actual callback function to be invoked after the grace period
3230 *
3231 * The callback function will be invoked some time after a full grace
3232 * period elapses, in other words after all currently executing RCU
3233 * read-side critical sections have completed. call_rcu_sched() assumes
3234 * that the read-side critical sections end on enabling of preemption
3235 * or on voluntary preemption.
3236 * RCU read-side critical sections are delimited by:
3237 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
3238 * - anything that disables preemption.
3239 *
3240 * These may be nested.
3241 *
3242 * See the description of call_rcu() for more detailed information on
3243 * memory ordering guarantees.
3228 */ 3244 */
3229void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) 3245void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
3230{ 3246{
@@ -3232,8 +3248,26 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
3232} 3248}
3233EXPORT_SYMBOL_GPL(call_rcu_sched); 3249EXPORT_SYMBOL_GPL(call_rcu_sched);
3234 3250
3235/* 3251/**
3236 * Queue an RCU callback for invocation after a quicker grace period. 3252 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
3253 * @head: structure to be used for queueing the RCU updates.
3254 * @func: actual callback function to be invoked after the grace period
3255 *
3256 * The callback function will be invoked some time after a full grace
3257 * period elapses, in other words after all currently executing RCU
3258 * read-side critical sections have completed. call_rcu_bh() assumes
3259 * that the read-side critical sections end on completion of a softirq
3260 * handler. This means that read-side critical sections in process
3261 * context must not be interrupted by softirqs. This interface is to be
3262 * used when most of the read-side critical sections are in softirq context.
3263 * RCU read-side critical sections are delimited by:
3264 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
3265 * OR
3266 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
3267 * These may be nested.
3268 *
3269 * See the description of call_rcu() for more detailed information on
3270 * memory ordering guarantees.
3237 */ 3271 */
3238void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) 3272void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
3239{ 3273{
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 573fbe9640a0..116cf8339826 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -675,8 +675,37 @@ static void rcu_preempt_do_callbacks(void)
675 675
676#endif /* #ifdef CONFIG_RCU_BOOST */ 676#endif /* #ifdef CONFIG_RCU_BOOST */
677 677
678/* 678/**
679 * Queue a preemptible-RCU callback for invocation after a grace period. 679 * call_rcu() - Queue an RCU callback for invocation after a grace period.
680 * @head: structure to be used for queueing the RCU updates.
681 * @func: actual callback function to be invoked after the grace period
682 *
683 * The callback function will be invoked some time after a full grace
684 * period elapses, in other words after all pre-existing RCU read-side
685 * critical sections have completed. However, the callback function
686 * might well execute concurrently with RCU read-side critical sections
687 * that started after call_rcu() was invoked. RCU read-side critical
688 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
689 * and may be nested.
690 *
691 * Note that all CPUs must agree that the grace period extended beyond
692 * all pre-existing RCU read-side critical sections. On systems with more
693 * than one CPU, this means that when "func()" is invoked, each CPU is
694 * guaranteed to have executed a full memory barrier since the end of its
695 * last RCU read-side critical section whose beginning preceded the call
696 * to call_rcu(). It also means that each CPU executing an RCU read-side
697 * critical section that continues beyond the start of "func()" must have
698 * executed a memory barrier after the call_rcu() but before the beginning
699 * of that RCU read-side critical section. Note that these guarantees
700 * include CPUs that are offline, idle, or executing in user mode, as
701 * well as CPUs that are executing in the kernel.
702 *
703 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
704 * resulting RCU callback function "func()", then both CPU A and CPU B are
705 * guaranteed to execute a full memory barrier during the time interval
706 * between the call to call_rcu() and the invocation of "func()" -- even
707 * if CPU A and CPU B are the same CPU (but again only if the system has
708 * more than one CPU).
680 */ 709 */
681void call_rcu(struct rcu_head *head, rcu_callback_t func) 710void call_rcu(struct rcu_head *head, rcu_callback_t func)
682{ 711{
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 123a9c4b5055..84dec2c8ad1b 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -576,9 +576,23 @@ module_param(rcu_task_stall_timeout, int, 0644);
576static void rcu_spawn_tasks_kthread(void); 576static void rcu_spawn_tasks_kthread(void);
577static struct task_struct *rcu_tasks_kthread_ptr; 577static struct task_struct *rcu_tasks_kthread_ptr;
578 578
579/* 579/**
580 * Post an RCU-tasks callback. First call must be from process context 580 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
581 * after the scheduler is fully operational. 581 * @rhp: structure to be used for queueing the RCU updates.
582 * @func: actual callback function to be invoked after the grace period
583 *
584 * The callback function will be invoked some time after a full grace
585 * period elapses, in other words after all currently executing RCU
586 * read-side critical sections have completed. call_rcu_tasks() assumes
587 * that the read-side critical sections end at a voluntary context
588 * switch (not a preemption!), entry into idle, or transition to usermode
589 * execution. As such, there are no read-side primitives analogous to
590 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
591 * to determine that all tasks have passed through a safe state, not so
592 * much for data-structure synchronization.
593 *
594 * See the description of call_rcu() for more detailed information on
595 * memory ordering guarantees.
582 */ 596 */
583void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) 597void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
584{ 598{