diff options
Diffstat (limited to 'kernel/srcu.c')
| -rw-r--r-- | kernel/srcu.c | 37 |
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/kernel/srcu.c b/kernel/srcu.c index 2b859828cdc3..01d5ccb8bfe3 100644 --- a/kernel/srcu.c +++ b/kernel/srcu.c | |||
| @@ -282,12 +282,8 @@ static int srcu_readers_active(struct srcu_struct *sp) | |||
| 282 | */ | 282 | */ |
| 283 | void cleanup_srcu_struct(struct srcu_struct *sp) | 283 | void cleanup_srcu_struct(struct srcu_struct *sp) |
| 284 | { | 284 | { |
| 285 | int sum; | 285 | if (WARN_ON(srcu_readers_active(sp))) |
| 286 | 286 | return; /* Leakage unless caller handles error. */ | |
| 287 | sum = srcu_readers_active(sp); | ||
| 288 | WARN_ON(sum); /* Leakage unless caller handles error. */ | ||
| 289 | if (sum != 0) | ||
| 290 | return; | ||
| 291 | free_percpu(sp->per_cpu_ref); | 287 | free_percpu(sp->per_cpu_ref); |
| 292 | sp->per_cpu_ref = NULL; | 288 | sp->per_cpu_ref = NULL; |
| 293 | } | 289 | } |
| @@ -302,9 +298,8 @@ int __srcu_read_lock(struct srcu_struct *sp) | |||
| 302 | { | 298 | { |
| 303 | int idx; | 299 | int idx; |
| 304 | 300 | ||
| 301 | idx = ACCESS_ONCE(sp->completed) & 0x1; | ||
| 305 | preempt_disable(); | 302 | preempt_disable(); |
| 306 | idx = rcu_dereference_index_check(sp->completed, | ||
| 307 | rcu_read_lock_sched_held()) & 0x1; | ||
| 308 | ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1; | 303 | ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1; |
| 309 | smp_mb(); /* B */ /* Avoid leaking the critical section. */ | 304 | smp_mb(); /* B */ /* Avoid leaking the critical section. */ |
| 310 | ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1; | 305 | ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1; |
| @@ -321,10 +316,8 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock); | |||
| 321 | */ | 316 | */ |
| 322 | void __srcu_read_unlock(struct srcu_struct *sp, int idx) | 317 | void __srcu_read_unlock(struct srcu_struct *sp, int idx) |
| 323 | { | 318 | { |
| 324 | preempt_disable(); | ||
| 325 | smp_mb(); /* C */ /* Avoid leaking the critical section. */ | 319 | smp_mb(); /* C */ /* Avoid leaking the critical section. */ |
| 326 | ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1; | 320 | this_cpu_dec(sp->per_cpu_ref->c[idx]); |
| 327 | preempt_enable(); | ||
| 328 | } | 321 | } |
| 329 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); | 322 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); |
| 330 | 323 | ||
| @@ -423,6 +416,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) | |||
| 423 | !lock_is_held(&rcu_sched_lock_map), | 416 | !lock_is_held(&rcu_sched_lock_map), |
| 424 | "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section"); | 417 | "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section"); |
| 425 | 418 | ||
| 419 | might_sleep(); | ||
| 426 | init_completion(&rcu.completion); | 420 | init_completion(&rcu.completion); |
| 427 | 421 | ||
| 428 | head->next = NULL; | 422 | head->next = NULL; |
| @@ -455,10 +449,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) | |||
| 455 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion | 449 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion |
| 456 | * @sp: srcu_struct with which to synchronize. | 450 | * @sp: srcu_struct with which to synchronize. |
| 457 | * | 451 | * |
| 458 | * Flip the completed counter, and wait for the old count to drain to zero. | 452 | * Wait for the count to drain to zero of both indexes. To avoid the |
| 459 | * As with classic RCU, the updater must use some separate means of | 453 | * possible starvation of synchronize_srcu(), it waits for the count of |
| 460 | * synchronizing concurrent updates. Can block; must be called from | 454 | * the index=((->completed & 1) ^ 1) to drain to zero at first, |
| 461 | * process context. | 455 | * and then flip the completed and wait for the count of the other index. |
| 456 | * | ||
| 457 | * Can block; must be called from process context. | ||
| 462 | * | 458 | * |
| 463 | * Note that it is illegal to call synchronize_srcu() from the corresponding | 459 | * Note that it is illegal to call synchronize_srcu() from the corresponding |
| 464 | * SRCU read-side critical section; doing so will result in deadlock. | 460 | * SRCU read-side critical section; doing so will result in deadlock. |
| @@ -480,12 +476,11 @@ EXPORT_SYMBOL_GPL(synchronize_srcu); | |||
| 480 | * Wait for an SRCU grace period to elapse, but be more aggressive about | 476 | * Wait for an SRCU grace period to elapse, but be more aggressive about |
| 481 | * spinning rather than blocking when waiting. | 477 | * spinning rather than blocking when waiting. |
| 482 | * | 478 | * |
| 483 | * Note that it is illegal to call this function while holding any lock | 479 | * Note that it is also illegal to call synchronize_srcu_expedited() |
| 484 | * that is acquired by a CPU-hotplug notifier. It is also illegal to call | 480 | * from the corresponding SRCU read-side critical section; |
| 485 | * synchronize_srcu_expedited() from the corresponding SRCU read-side | 481 | * doing so will result in deadlock. However, it is perfectly legal |
| 486 | * critical section; doing so will result in deadlock. However, it is | 482 | * to call synchronize_srcu_expedited() on one srcu_struct from some |
| 487 | * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct | 483 | * other srcu_struct's read-side critical section, as long as |
| 488 | * from some other srcu_struct's read-side critical section, as long as | ||
| 489 | * the resulting graph of srcu_structs is acyclic. | 484 | * the resulting graph of srcu_structs is acyclic. |
| 490 | */ | 485 | */ |
| 491 | void synchronize_srcu_expedited(struct srcu_struct *sp) | 486 | void synchronize_srcu_expedited(struct srcu_struct *sp) |
