Diffstat (limited to 'include/asm-generic/qspinlock.h')
 include/asm-generic/qspinlock.h | 53 +++++++++++++++++++------------------------------------
 1 file changed, 17 insertions(+), 36 deletions(-)
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 6bd05700d8c9..05f05f17a7c2 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -22,37 +22,33 @@
 #include <asm-generic/qspinlock_types.h>
 
 /**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
+/**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
 	/*
-	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
-	 * issuing the unordered store that sets _Q_LOCKED_VAL.
-	 *
-	 * See both smp_cond_acquire() sites for more detail.
-	 *
-	 * This however means that in code like:
-	 *
-	 *   spin_lock(A)		spin_lock(B)
-	 *   spin_unlock_wait(B)	spin_is_locked(A)
-	 *   do_something()		do_something()
-	 *
-	 * Both CPUs can end up running do_something() because the store
-	 * setting _Q_LOCKED_VAL will pass through the loads in
-	 * spin_unlock_wait() and/or spin_is_locked().
+	 * See queued_spin_unlock_wait().
 	 *
-	 * Avoid this by issuing a full memory barrier between the spin_lock()
-	 * and the loads in spin_unlock_wait() and spin_is_locked().
-	 *
-	 * Note that regular mutual exclusion doesn't care about this
-	 * delayed store.
+	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+	 * isn't immediately observable.
 	 */
-	smp_mb();
-	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+	return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-	/* See queued_spin_is_locked() */
-	smp_mb();
-	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-		cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
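
Note: the comment deleted from queued_spin_is_locked() describes the classic store-buffering hazard: each spin_lock() sets _Q_LOCKED_VAL with an unordered store, so the other CPU's plain load in spin_unlock_wait()/spin_is_locked() can still observe "unlocked", and both CPUs proceed. The patch drops the smp_mb() from the header and moves that ordering responsibility into an out-of-line queued_spin_unlock_wait(). The following is a minimal userspace C11 sketch of that pattern, not kernel code; lock_a/lock_b, cpu0/cpu1, and r0/r1 are illustrative stand-ins, and a single run will only rarely exhibit the reordering in practice.

/*
 * Store-buffering sketch: the relaxed stores model the unordered store
 * of _Q_LOCKED_VAL inside spin_lock(); the relaxed loads model the
 * loads in spin_unlock_wait() / spin_is_locked().
 * Build with: cc -pthread sb.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int lock_a, lock_b;
static int r0, r1;

static void *cpu0(void *unused)
{
	atomic_store_explicit(&lock_a, 1, memory_order_relaxed);  /* "spin_lock(A)" */
	r0 = atomic_load_explicit(&lock_b, memory_order_relaxed); /* "spin_unlock_wait(B)" */
	return NULL;
}

static void *cpu1(void *unused)
{
	atomic_store_explicit(&lock_b, 1, memory_order_relaxed);  /* "spin_lock(B)" */
	r1 = atomic_load_explicit(&lock_a, memory_order_relaxed); /* "spin_is_locked(A)" */
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, cpu0, NULL);
	pthread_create(&t1, NULL, cpu1, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);

	/*
	 * Without a full barrier between each store and the other
	 * thread's load, r0 == 0 && r1 == 0 is a permitted outcome:
	 * both sides see the other lock as free and both would run
	 * their do_something(). The old header ruled this out with
	 * smp_mb() before the load.
	 */
	printf("r0=%d r1=%d\n", r0, r1);
	return 0;
}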
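The new #ifndef guards follow the usual asm-generic override convention: an architecture that wants its own queued_spin_is_locked() or queued_spin_unlock_wait() defines the matching macro before the generic header is included, and the generic version is skipped. A hypothetical sketch of such an override; the arch name, file path, and the choice to keep a full barrier are illustrative assumptions, not part of this patch:

/* arch/foo/include/asm/qspinlock.h -- hypothetical arch override */
#ifndef _ASM_FOO_QSPINLOCK_H
#define _ASM_FOO_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/*
 * Defining the macro before including <asm-generic/qspinlock.h>
 * makes the generic header's #ifndef guard skip its fallback.
 */
#define queued_spin_unlock_wait queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);

#define queued_spin_is_locked queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	smp_mb();	/* this arch keeps the full barrier; purely illustrative */
	return atomic_read(&lock->val);
}

#include <asm-generic/qspinlock.h>

#endif /* _ASM_FOO_QSPINLOCK_H */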
