diff options
Diffstat (limited to 'kernel/srcu.c')
-rw-r--r-- | kernel/srcu.c | 33 |
1 file changed, 23 insertions, 10 deletions
diff --git a/kernel/srcu.c b/kernel/srcu.c index 0febf61e1aa3..ba35f3a4a1f4 100644 --- a/kernel/srcu.c +++ b/kernel/srcu.c | |||
@@ -172,6 +172,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void)) | |||
172 | { | 172 | { |
173 | int idx; | 173 | int idx; |
174 | 174 | ||
175 | rcu_lockdep_assert(!lock_is_held(&sp->dep_map) && | ||
176 | !lock_is_held(&rcu_bh_lock_map) && | ||
177 | !lock_is_held(&rcu_lock_map) && | ||
178 | !lock_is_held(&rcu_sched_lock_map), | ||
179 | "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section"); | ||
180 | |||
175 | idx = sp->completed; | 181 | idx = sp->completed; |
176 | mutex_lock(&sp->mutex); | 182 | mutex_lock(&sp->mutex); |
177 | 183 | ||
@@ -280,19 +286,26 @@ void synchronize_srcu(struct srcu_struct *sp) | |||
280 | EXPORT_SYMBOL_GPL(synchronize_srcu); | 286 | EXPORT_SYMBOL_GPL(synchronize_srcu); |
281 | 287 | ||
282 | /** | 288 | /** |
283 | * synchronize_srcu_expedited - like synchronize_srcu, but less patient | 289 | * synchronize_srcu_expedited - Brute-force SRCU grace period |
284 | * @sp: srcu_struct with which to synchronize. | 290 | * @sp: srcu_struct with which to synchronize. |
285 | * | 291 | * |
286 | * Flip the completed counter, and wait for the old count to drain to zero. | 292 | * Wait for an SRCU grace period to elapse, but use a "big hammer" |
287 | * As with classic RCU, the updater must use some separate means of | 293 | * approach to force the grace period to end quickly. This consumes |
288 | * synchronizing concurrent updates. Can block; must be called from | 294 | * significant time on all CPUs and is unfriendly to real-time workloads, |
289 | * process context. | 295 | * and is thus not recommended for any sort of common-case code. In fact, |
296 | * if you are using synchronize_srcu_expedited() in a loop, please | ||
297 | * restructure your code to batch your updates, and then use a single | ||
298 | * synchronize_srcu() instead. | ||
290 | * | 299 | * |
291 | * Note that it is illegal to call synchronize_srcu_expedited() | 300 | * Note that it is illegal to call this function while holding any lock |
292 | * from the corresponding SRCU read-side critical section; doing so | 301 | * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal |
293 | * will result in deadlock. However, it is perfectly legal to call | 302 | * to call this function from a CPU-hotplug notifier. Failing to observe |
294 | * synchronize_srcu_expedited() on one srcu_struct from some other | 303 | * these restrictions will result in deadlock. It is also illegal to call |
295 | * srcu_struct's read-side critical section. | 304 | * synchronize_srcu_expedited() from the corresponding SRCU read-side |
305 | * critical section; doing so will result in deadlock. However, it is | ||
306 | * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct | ||
307 | * from some other srcu_struct's read-side critical section, as long as | ||
308 | * the resulting graph of srcu_structs is acyclic. | ||
296 | */ | 309 | */ |
297 | void synchronize_srcu_expedited(struct srcu_struct *sp) | 310 | void synchronize_srcu_expedited(struct srcu_struct *sp) |
298 | { | 311 | { |