Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c         22
-rw-r--r--  kernel/rcutree_plugin.h  20
-rw-r--r--  kernel/srcu.c            27
3 files changed, 47 insertions, 22 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 708469a06860..df0e3c1bb68e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1961,15 +1961,21 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
 	return 0;
 }
 
-/*
- * Wait for an rcu-sched grace period to elapse, but use "big hammer"
- * approach to force grace period to end quickly. This consumes
- * significant time on all CPUs, and is thus not recommended for
- * any sort of common-case code.
+/**
+ * synchronize_sched_expedited - Brute-force RCU-sched grace period
+ *
+ * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly. This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * so is thus not recommended for any sort of common-case code. In fact,
+ * if you are using synchronize_sched_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_sched() instead.
  *
- * Note that it is illegal to call this function while holding any
- * lock that is acquired by a CPU-hotplug notifier. Failing to
- * observe this restriction will result in deadlock.
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier. Failing to observe
+ * these restriction will result in deadlock.
  *
  * This implementation can be thought of as an application of ticket
  * locking to RCU, with sync_sched_expedited_started and
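
The "batch your updates" advice in the new comment can be illustrated with a minimal, hypothetical sketch. The obj type and the clear_table_*() helpers below are invented for the example and are not part of this patch; only the RCU primitives are real kernel APIs, and readers are assumed to traverse table[] under rcu_read_lock_sched().

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	int data;
};

/* Discouraged: one expedited grace period per slot, hammering all CPUs n times. */
static void clear_table_looped(struct obj __rcu **table, int n)
{
	struct obj *old;
	int i;

	for (i = 0; i < n; i++) {
		old = rcu_dereference_protected(table[i], 1);
		RCU_INIT_POINTER(table[i], NULL);
		synchronize_sched_expedited();
		kfree(old);
	}
}

/* Preferred: publish all updates first, then wait for a single grace period. */
static void clear_table_batched(struct obj __rcu **table, struct obj **old, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		old[i] = rcu_dereference_protected(table[i], 1);
		RCU_INIT_POINTER(table[i], NULL);
	}
	synchronize_sched();
	for (i = 0; i < n; i++)
		kfree(old[i]);
}

The batched version pays for a single grace period no matter how many slots are cleared, which is exactly the restructuring the comment asks for.
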
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 07f880445d8d..f7ceadf4986e 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -835,10 +835,22 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 	rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
 }
 
-/*
- * Wait for an rcu-preempt grace period, but expedite it. The basic idea
- * is to invoke synchronize_sched_expedited() to push all the tasks to
- * the ->blkd_tasks lists and wait for this list to drain.
+/**
+ * synchronize_rcu_expedited - Brute-force RCU grace period
+ *
+ * Wait for an RCU-preempt grace period, but expedite it. The basic
+ * idea is to invoke synchronize_sched_expedited() to push all the tasks to
+ * the ->blkd_tasks lists and wait for this list to drain. This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * so is thus not recommended for any sort of common-case code.
+ * In fact, if you are using synchronize_rcu_expedited() in a loop,
+ * please restructure your code to batch your updates, and then Use a
+ * single synchronize_rcu() instead.
+ *
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier. Failing to observe
+ * these restriction will result in deadlock.
 */
 void synchronize_rcu_expedited(void)
 {
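
The CPU-hotplug wording added here can be made concrete with a hypothetical sketch, not part of this patch: my_lock, my_cpu_callback() and my_update() are made up for illustration, and the forbidden calls are shown commented out.

#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>

static DEFINE_MUTEX(my_lock);

/* A CPU-hotplug notifier callback that takes my_lock. */
static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	mutex_lock(&my_lock);
	/* ... adjust per-CPU state ... */
	mutex_unlock(&my_lock);

	/* ILLEGAL: expedited grace period from within a CPU-hotplug notifier. */
	/* synchronize_rcu_expedited(); */

	return NOTIFY_OK;
}

static void my_update(void)
{
	mutex_lock(&my_lock);
	/*
	 * ILLEGAL: my_lock is also acquired by my_cpu_callback(), so an
	 * expedited grace period here can deadlock against CPU hotplug.
	 */
	/* synchronize_rcu_expedited(); */
	mutex_unlock(&my_lock);

	/* Legal: wait for the expedited grace period after dropping the lock. */
	synchronize_rcu_expedited();
}
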
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 3f99fa0e8ed3..ba35f3a4a1f4 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -286,19 +286,26 @@ void synchronize_srcu(struct srcu_struct *sp)
 EXPORT_SYMBOL_GPL(synchronize_srcu);
 
 /**
- * synchronize_srcu_expedited - like synchronize_srcu, but less patient
+ * synchronize_srcu_expedited - Brute-force SRCU grace period
  * @sp: srcu_struct with which to synchronize.
  *
- * Flip the completed counter, and wait for the old count to drain to zero.
- * As with classic RCU, the updater must use some separate means of
- * synchronizing concurrent updates. Can block; must be called from
- * process context.
+ * Wait for an SRCU grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly. This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * so is thus not recommended for any sort of common-case code. In fact,
+ * if you are using synchronize_srcu_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_srcu() instead.
  *
- * Note that it is illegal to call synchronize_srcu_expedited()
- * from the corresponding SRCU read-side critical section; doing so
- * will result in deadlock. However, it is perfectly legal to call
- * synchronize_srcu_expedited() on one srcu_struct from some other
- * srcu_struct's read-side critical section.
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier. Failing to observe
+ * these restriction will result in deadlock. It is also illegal to call
+ * synchronize_srcu_expedited() from the corresponding SRCU read-side
+ * critical section; doing so will result in deadlock. However, it is
+ * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct
+ * from some other srcu_struct's read-side critical section, as long as
+ * the resulting graph of srcu_structs is acyclic.
  */
 void synchronize_srcu_expedited(struct srcu_struct *sp)
 {
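
The SRCU self-deadlock rule spelled out in the new comment can likewise be sketched hypothetically; ssa, ssb and srcu_nesting_example() are invented for the example and assumed to be initialized elsewhere with init_srcu_struct(). Only the SRCU primitives are real kernel APIs.

#include <linux/srcu.h>

static struct srcu_struct ssa;
static struct srcu_struct ssb;

static void srcu_nesting_example(void)
{
	int idx;

	idx = srcu_read_lock(&ssa);

	/* ILLEGAL: would wait out the very reader we are inside of. */
	/* synchronize_srcu_expedited(&ssa); */

	/*
	 * Legal: synchronizing a different srcu_struct is fine, as long
	 * as no ssb reader in turn waits on ssa, keeping the graph of
	 * srcu_structs acyclic.
	 */
	synchronize_srcu_expedited(&ssb);

	srcu_read_unlock(&ssa, idx);
}

Blocking inside an SRCU read-side critical section is permitted, which is why the cross-srcu_struct call above is legal; the only hard requirement is that the wait-for graph between srcu_structs never forms a cycle.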