|  |  |  |
|---|---|---|
| author | Ingo Molnar <mingo@kernel.org> | 2017-08-21 03:45:19 -0400 |
| committer | Ingo Molnar <mingo@kernel.org> | 2017-08-21 03:45:19 -0400 |
| commit | 94edf6f3c20c9c8ee301bde04150a91bab4bf32c (patch) |  |
| tree | 4a2af658258cf42fde24c1224e44c3e6a18c2792 /include/linux/spinlock.h |  |
| parent | d5da6457bfadf64ff78f1816ae8329dbbba19513 (diff) |  |
| parent | 656e7c0c0a2e8d899f87fd7f081ea7a711146604 (diff) |  |
Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul E. McKenney:
- Removal of spin_unlock_wait()
- SRCU updates
- Torture-test updates
- Documentation updates
- Miscellaneous fixes
- CPU-hotplug fixes
- Miscellaneous non-RCU fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/spinlock.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/spinlock.h | 31 |

1 file changed, 0 insertions(+), 31 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d9510e8522d4..ef018a6e4985 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,12 +130,6 @@ do { \
 #define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
-/**
- * raw_spin_unlock_wait - wait until the spinlock gets unlocked
- * @lock: the spinlock in question.
- */
-#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
-
 #ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock)	__acquires(lock);
 #define do_raw_spin_lock_flags(lock, flags)	do_raw_spin_lock(lock)
@@ -369,31 +363,6 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-/**
- * spin_unlock_wait - Interpose between successive critical sections
- * @lock: the spinlock whose critical sections are to be interposed.
- *
- * Semantically this is equivalent to a spin_lock() immediately
- * followed by a spin_unlock().  However, most architectures have
- * more efficient implementations in which the spin_unlock_wait()
- * cannot block concurrent lock acquisition, and in some cases
- * where spin_unlock_wait() does not write to the lock variable.
- * Nevertheless, spin_unlock_wait() can have high overhead, so if
- * you feel the need to use it, please check to see if there is
- * a better way to get your job done.
- *
- * The ordering guarantees provided by spin_unlock_wait() are:
- *
- * 1.  All accesses preceding the spin_unlock_wait() happen before
- *     any accesses in later critical sections for this same lock.
- * 2.  All accesses following the spin_unlock_wait() happen after
- *     any accesses in earlier critical sections for this same lock.
- */
-static __always_inline void spin_unlock_wait(spinlock_t *lock)
-{
-	raw_spin_unlock_wait(&lock->rlock);
-}
-
 static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
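The kernel-doc removed above states that spin_unlock_wait() is semantically equivalent to a spin_lock() immediately followed by a spin_unlock(). As a minimal sketch of what that equivalence means for a former caller, the fragment below shows the documented replacement pattern; the names exit_lock, do_exit_cleanup(), and wait_for_exiters_then_cleanup() are made up for illustration and are not part of this commit.

```c
#include <linux/spinlock.h>

/* Illustrative only: none of these names come from this commit. */
static DEFINE_SPINLOCK(exit_lock);

static void do_exit_cleanup(void)
{
	/* placeholder for whatever teardown the caller performs */
}

static void wait_for_exiters_then_cleanup(void)
{
	/*
	 * Before this removal, a caller might have written:
	 *
	 *	spin_unlock_wait(&exit_lock);
	 *
	 * Per the removed kernel-doc, an empty critical section is the
	 * semantic equivalent: it orders this code after any earlier
	 * critical section on exit_lock and before any later one.
	 */
	spin_lock(&exit_lock);
	spin_unlock(&exit_lock);

	do_exit_cleanup();
}
```

The removed comment also cautions that spin_unlock_wait() could have high overhead; callers converted to the lock/unlock pair above should likewise check whether a lighter-weight ordering scheme suits their use case.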
