aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-01-31 17:00:41 -0500
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-02-21 12:06:08 -0500
commit236fefafe5d3d34b78ed2ccf5510909716112326 (patch)
tree9ff5ec129e59667341aa07120720b6e4a5ced4d9
parent2036d94a7b61ca5032ce90f2bda06afec0fe713e (diff)
rcu: Call out dangers of expedited RCU primitives
The expedited RCU primitives can be quite useful, but they have some high costs as well. This commit updates and creates docbook comments calling out the costs, and updates the RCU documentation as well. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--Documentation/RCU/checklist.txt14
-rw-r--r--include/linux/rcutree.h16
-rw-r--r--kernel/rcutree.c22
-rw-r--r--kernel/rcutree_plugin.h20
-rw-r--r--kernel/srcu.c27
5 files changed, 77 insertions, 22 deletions
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index bff2d8be1e18..5c8d74968090 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -180,6 +180,20 @@ over a rather long period of time, but improvements are always welcome!
180 operations that would not normally be undertaken while a real-time 180 operations that would not normally be undertaken while a real-time
181 workload is running. 181 workload is running.
182 182
183 In particular, if you find yourself invoking one of the expedited
184 primitives repeatedly in a loop, please do everyone a favor:
185 Restructure your code so that it batches the updates, allowing
186 a single non-expedited primitive to cover the entire batch.
187 This will very likely be faster than the loop containing the
188 expedited primitive, and will be much, much easier on the rest
189 of the system, especially for real-time workloads running on
190 the rest of the system.
191
192 In addition, it is illegal to call the expedited forms from
193 a CPU-hotplug notifier, or while holding a lock that is acquired
194 by a CPU-hotplug notifier. Failing to observe this restriction
195 will result in deadlock.
196
1837. If the updater uses call_rcu() or synchronize_rcu(), then the 1977. If the updater uses call_rcu() or synchronize_rcu(), then the
184 corresponding readers must use rcu_read_lock() and 198 corresponding readers must use rcu_read_lock() and
185 rcu_read_unlock(). If the updater uses call_rcu_bh() or 199 rcu_read_unlock(). If the updater uses call_rcu_bh() or
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 73892483fd05..e8ee5dd0854c 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -63,6 +63,22 @@ extern void synchronize_rcu_expedited(void);
63 63
64void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 64void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
65 65
66/**
67 * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
68 *
69 * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
70 * approach to force the grace period to end quickly. This consumes
71 * significant time on all CPUs and is unfriendly to real-time workloads,
72 * so is thus not recommended for any sort of common-case code. In fact,
73 * if you are using synchronize_rcu_bh_expedited() in a loop, please
74 * restructure your code to batch your updates, and then use a single
75 * synchronize_rcu_bh() instead.
76 *
77 * Note that it is illegal to call this function while holding any lock
78 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
79 * to call this function from a CPU-hotplug notifier. Failing to observe
80 * these restrictions will result in deadlock.
81 */
66static inline void synchronize_rcu_bh_expedited(void) 82static inline void synchronize_rcu_bh_expedited(void)
67{ 83{
68 synchronize_sched_expedited(); 84 synchronize_sched_expedited();
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 708469a06860..df0e3c1bb68e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1961,15 +1961,21 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
1961 return 0; 1961 return 0;
1962} 1962}
1963 1963
1964/* 1964/**
1965 * Wait for an rcu-sched grace period to elapse, but use "big hammer" 1965 * synchronize_sched_expedited - Brute-force RCU-sched grace period
1966 * approach to force grace period to end quickly. This consumes 1966 *
1967 * significant time on all CPUs, and is thus not recommended for 1967 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
1968 * any sort of common-case code. 1968 * approach to force the grace period to end quickly. This consumes
1969 * significant time on all CPUs and is unfriendly to real-time workloads,
1970 * so is thus not recommended for any sort of common-case code. In fact,
1971 * if you are using synchronize_sched_expedited() in a loop, please
1972 * restructure your code to batch your updates, and then use a single
1973 * synchronize_sched() instead.
1969 * 1974 *
1970 * Note that it is illegal to call this function while holding any 1975 * Note that it is illegal to call this function while holding any lock
1971 * lock that is acquired by a CPU-hotplug notifier. Failing to 1976 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
1972 * observe this restriction will result in deadlock. 1977 * to call this function from a CPU-hotplug notifier. Failing to observe
1978 * these restrictions will result in deadlock.
1973 * 1979 *
1974 * This implementation can be thought of as an application of ticket 1980 * This implementation can be thought of as an application of ticket
1975 * locking to RCU, with sync_sched_expedited_started and 1981 * locking to RCU, with sync_sched_expedited_started and
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 07f880445d8d..f7ceadf4986e 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -835,10 +835,22 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
835 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */ 835 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
836} 836}
837 837
838/* 838/**
839 * Wait for an rcu-preempt grace period, but expedite it. The basic idea 839 * synchronize_rcu_expedited - Brute-force RCU grace period
840 * is to invoke synchronize_sched_expedited() to push all the tasks to 840 *
841 * the ->blkd_tasks lists and wait for this list to drain. 841 * Wait for an RCU-preempt grace period, but expedite it. The basic
842 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
843 * the ->blkd_tasks lists and wait for this list to drain. This consumes
844 * significant time on all CPUs and is unfriendly to real-time workloads,
845 * so is thus not recommended for any sort of common-case code.
846 * In fact, if you are using synchronize_rcu_expedited() in a loop,
847 * please restructure your code to batch your updates, and then use a
848 * single synchronize_rcu() instead.
849 *
850 * Note that it is illegal to call this function while holding any lock
851 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
852 * to call this function from a CPU-hotplug notifier. Failing to observe
853 * these restrictions will result in deadlock.
842 */ 854 */
843void synchronize_rcu_expedited(void) 855void synchronize_rcu_expedited(void)
844{ 856{
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 3f99fa0e8ed3..ba35f3a4a1f4 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -286,19 +286,26 @@ void synchronize_srcu(struct srcu_struct *sp)
286EXPORT_SYMBOL_GPL(synchronize_srcu); 286EXPORT_SYMBOL_GPL(synchronize_srcu);
287 287
288/** 288/**
289 * synchronize_srcu_expedited - like synchronize_srcu, but less patient 289 * synchronize_srcu_expedited - Brute-force SRCU grace period
290 * @sp: srcu_struct with which to synchronize. 290 * @sp: srcu_struct with which to synchronize.
291 * 291 *
292 * Flip the completed counter, and wait for the old count to drain to zero. 292 * Wait for an SRCU grace period to elapse, but use a "big hammer"
293 * As with classic RCU, the updater must use some separate means of 293 * approach to force the grace period to end quickly. This consumes
294 * synchronizing concurrent updates. Can block; must be called from 294 * significant time on all CPUs and is unfriendly to real-time workloads,
295 * process context. 295 * so is thus not recommended for any sort of common-case code. In fact,
296 * if you are using synchronize_srcu_expedited() in a loop, please
297 * restructure your code to batch your updates, and then use a single
298 * synchronize_srcu() instead.
296 * 299 *
297 * Note that it is illegal to call synchronize_srcu_expedited() 300 * Note that it is illegal to call this function while holding any lock
298 * from the corresponding SRCU read-side critical section; doing so 301 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
299 * will result in deadlock. However, it is perfectly legal to call 302 * to call this function from a CPU-hotplug notifier. Failing to observe
300 * synchronize_srcu_expedited() on one srcu_struct from some other 303 * these restrictions will result in deadlock. It is also illegal to call
301 * srcu_struct's read-side critical section. 304 * synchronize_srcu_expedited() from the corresponding SRCU read-side
305 * critical section; doing so will result in deadlock. However, it is
306 * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct
307 * from some other srcu_struct's read-side critical section, as long as
308 * the resulting graph of srcu_structs is acyclic.
302 */ 309 */
303void synchronize_srcu_expedited(struct srcu_struct *sp) 310void synchronize_srcu_expedited(struct srcu_struct *sp)
304{ 311{