author		Ingo Molnar <mingo@kernel.org>	2013-02-13 03:45:59 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-02-13 03:45:59 -0500
commit		ac0e32024b8f40987b3db7d2defdc6b5153ba354 (patch)
tree		9712b0a5f7c3b5d90f2625af369774cdb1334e6f
parent		0351096eb0584ba927a00a37e18be7af135250c3 (diff)
parent		7a6b55e7108b3476d13ee9501ec69dbe1605d774 (diff)
Merge branch 'rcu/srcu' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull SRCU changes from Paul E. McKenney.
" These include debugging aids, updates that move towards the goal
of permitting srcu_read_lock() and srcu_read_unlock() to be used
from idle and offline CPUs, and a few small fixes. "
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	include/linux/srcu.h	26
-rw-r--r--	kernel/srcu.c		37
2 files changed, 19 insertions(+), 44 deletions(-)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 6eb691b08358..04f4121a23ae 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -151,30 +151,14 @@ void srcu_barrier(struct srcu_struct *sp);
  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
  *
- * Note that if the CPU is in the idle loop from an RCU point of view
- * (ie: that we are in the section between rcu_idle_enter() and
- * rcu_idle_exit()) then srcu_read_lock_held() returns false even if
- * the CPU did an srcu_read_lock(). The reason for this is that RCU
- * ignores CPUs that are in such a section, considering these as in
- * extended quiescent state, so such a CPU is effectively never in an
- * RCU read-side critical section regardless of what RCU primitives it
- * invokes. This state of affairs is required --- we need to keep an
- * RCU-free window in idle where the CPU may possibly enter into low
- * power mode. This way we can notice an extended quiescent state to
- * other CPUs that started a grace period. Otherwise we would delay any
- * grace period as long as we run in the idle task.
- *
- * Similarly, we avoid claiming an SRCU read lock held if the current
- * CPU is offline.
+ * Note that SRCU is based on its own state machine and does not rely
+ * on normal RCU; it can be called from a CPU which, from an RCU point
+ * of view, is in the idle loop or offline.
  */
 static inline int srcu_read_lock_held(struct srcu_struct *sp)
 {
         if (!debug_lockdep_rcu_enabled())
                 return 1;
-        if (rcu_is_cpu_idle())
-                return 0;
-        if (!rcu_lockdep_current_cpu_online())
-                return 0;
         return lock_is_held(&sp->dep_map);
 }
 
@@ -236,8 +220,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
         int retval = __srcu_read_lock(sp);
 
         rcu_lock_acquire(&(sp)->dep_map);
-        rcu_lockdep_assert(!rcu_is_cpu_idle(),
-                           "srcu_read_lock() used illegally while idle");
         return retval;
 }
 
@@ -251,8 +233,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
         __releases(sp)
 {
-        rcu_lockdep_assert(!rcu_is_cpu_idle(),
-                           "srcu_read_unlock() used illegally while idle");
         rcu_lock_release(&(sp)->dep_map);
         __srcu_read_unlock(sp, idx);
 }
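
As background, a minimal sketch of the read-side pattern these header changes
affect; the srcu_struct, pointer, and struct names here are made up for
illustration. With the idle/offline checks gone, this pattern is intended to
work even on a CPU that RCU considers idle or offline:

#include <linux/srcu.h>

struct foo { int val; };

static struct srcu_struct my_srcu;      /* init_srcu_struct(&my_srcu) at setup */
static struct foo __rcu *shared_ptr;    /* hypothetical shared data */

static int reader(void)
{
        struct foo *p;
        int idx, val = -1;

        idx = srcu_read_lock(&my_srcu);         /* returns the index in use */
        p = srcu_dereference(shared_ptr, &my_srcu);
        if (p)
                val = p->val;
        srcu_read_unlock(&my_srcu, idx);        /* must pass that index back */
        return val;
}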
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 2b859828cdc3..01d5ccb8bfe3 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -282,12 +282,8 @@ static int srcu_readers_active(struct srcu_struct *sp)
  */
 void cleanup_srcu_struct(struct srcu_struct *sp)
 {
-        int sum;
-
-        sum = srcu_readers_active(sp);
-        WARN_ON(sum); /* Leakage unless caller handles error. */
-        if (sum != 0)
-                return;
+        if (WARN_ON(srcu_readers_active(sp)))
+                return; /* Leakage unless caller handles error. */
         free_percpu(sp->per_cpu_ref);
         sp->per_cpu_ref = NULL;
 }
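
A hedged sketch of the teardown ordering this warning enforces, reusing the
hypothetical names from the reader sketch above: unpublish the data and wait
out all pre-existing readers before calling cleanup_srcu_struct():

static void teardown(void)
{
        RCU_INIT_POINTER(shared_ptr, NULL);     /* new readers see no data */
        synchronize_srcu(&my_srcu);             /* wait for pre-existing readers */
        cleanup_srcu_struct(&my_srcu);          /* WARNs if readers remain */
}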
@@ -302,9 +298,8 @@ int __srcu_read_lock(struct srcu_struct *sp)
 {
         int idx;
 
+        idx = ACCESS_ONCE(sp->completed) & 0x1;
         preempt_disable();
-        idx = rcu_dereference_index_check(sp->completed,
-                                          rcu_read_lock_sched_held()) & 0x1;
         ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
         smp_mb(); /* B */ /* Avoid leaking the critical section. */
         ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
@@ -321,10 +316,8 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
-        preempt_disable();
         smp_mb(); /* C */ /* Avoid leaking the critical section. */
-        ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
-        preempt_enable();
+        this_cpu_dec(sp->per_cpu_ref->c[idx]);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
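Dropping the preempt_disable()/preempt_enable() pair is safe here because
this_cpu_dec() performs the whole read-modify-write as one preemption-safe
per-CPU operation (a single instruction on x86). A toy example of the same
pattern, using a hypothetical counter rather than the SRCU code itself:

DEFINE_PER_CPU(unsigned long, my_events);

static void note_event(void)
{
        /* No preempt_disable() needed: the read-modify-write cannot
         * be split across CPUs by preemption. */
        this_cpu_inc(my_events);
}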
@@ -423,6 +416,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
                            !lock_is_held(&rcu_sched_lock_map),
                            "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
 
+        might_sleep();
         init_completion(&rcu.completion);
 
         head->next = NULL;
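
The added might_sleep() makes misuse from atomic context show up
deterministically under CONFIG_DEBUG_ATOMIC_SLEEP, even on runs where the
grace period happens to complete without blocking. A hypothetical offender
it would now flag:

static DEFINE_SPINLOCK(some_lock);      /* hypothetical */

static void bad_update(void)
{
        spin_lock(&some_lock);
        synchronize_srcu(&my_srcu);     /* BUG: may sleep while atomic */
        spin_unlock(&some_lock);
}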
@@ -455,10 +449,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
  * @sp: srcu_struct with which to synchronize.
  *
- * Flip the completed counter, and wait for the old count to drain to zero.
- * As with classic RCU, the updater must use some separate means of
- * synchronizing concurrent updates. Can block; must be called from
- * process context.
+ * Wait for the counts of both indexes to drain to zero. To avoid
+ * possible starvation of synchronize_srcu(), it first waits for the
+ * count of index ((->completed & 1) ^ 1) to drain to zero, then flips
+ * ->completed and waits for the count of the other index.
+ *
+ * Can block; must be called from process context.
  *
  * Note that it is illegal to call synchronize_srcu() from the corresponding
  * SRCU read-side critical section; doing so will result in deadlock.
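
A minimal updater sketch matching this documentation, again using the
hypothetical names from the reader sketch; the mutex stands in for whatever
separate means the updater uses to serialize concurrent updates:

static DEFINE_MUTEX(update_lock);       /* hypothetical update-side lock */

static void update_shared(struct foo *newp)
{
        struct foo *old;

        mutex_lock(&update_lock);
        old = rcu_dereference_protected(shared_ptr,
                                        lockdep_is_held(&update_lock));
        rcu_assign_pointer(shared_ptr, newp);   /* publish the new version */
        mutex_unlock(&update_lock);

        synchronize_srcu(&my_srcu);     /* both indexes drained */
        kfree(old);                     /* no reader can still reference it */
}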
@@ -480,12 +476,11 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
  * Wait for an SRCU grace period to elapse, but be more aggressive about
  * spinning rather than blocking when waiting.
  *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier. It is also illegal to call
- * synchronize_srcu_expedited() from the corresponding SRCU read-side
- * critical section; doing so will result in deadlock. However, it is
- * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct
- * from some other srcu_struct's read-side critical section, as long as
+ * Note that it is also illegal to call synchronize_srcu_expedited()
+ * from the corresponding SRCU read-side critical section;
+ * doing so will result in deadlock. However, it is perfectly legal
+ * to call synchronize_srcu_expedited() on one srcu_struct from some
+ * other srcu_struct's read-side critical section, as long as
  * the resulting graph of srcu_structs is acyclic.
  */
 void synchronize_srcu_expedited(struct srcu_struct *sp)
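
Finally, a sketch of the cross-srcu_struct case the comment permits, using
two hypothetical domains; this stays deadlock-free as long as no cycle of
such dependencies exists among the srcu_structs:

static struct srcu_struct srcu_a, srcu_b;       /* hypothetical domains */

static void cross_domain_sync(void)
{
        int idx;

        idx = srcu_read_lock(&srcu_a);
        synchronize_srcu_expedited(&srcu_b);    /* legal: different srcu_struct */
        srcu_read_unlock(&srcu_a, idx);
}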