 kernel/locking/qspinlock_paravirt.h | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index df19ae4debd0..489a87884337 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -287,20 +287,21 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
 	struct pv_node *node;
-	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	u8 locked;
 
 	/*
 	 * We must not unlock if SLOW, because in that case we must first
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	if (likely(lockval == _Q_LOCKED_VAL))
+	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	if (likely(locked == _Q_LOCKED_VAL))
 		return;
 
-	if (unlikely(lockval != _Q_SLOW_VAL)) {
-		if (debug_locks_silent)
-			return;
-		WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+	if (unlikely(locked != _Q_SLOW_VAL)) {
+		WARN(!debug_locks_silent,
+		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
+		     (unsigned long)lock, atomic_read(&lock->val));
 		return;
 	}
 
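The rework is functionally equivalent: the cmpxchg() moves below the comment that documents it, its result is renamed from lockval to locked, and the separate "if (debug_locks_silent) return;" check collapses into the WARN() condition, since WARN() only reports when its first argument is non-zero. Only the message format changes, printing the lock address as 0x%lx instead of %p. Below is a minimal userspace sketch of the resulting control flow; mock_pv_unlock() and the simplified WARN() macro are hypothetical stand-ins, with C11 atomics in place of the kernel's cmpxchg():

#include <stdio.h>
#include <stdatomic.h>

#define _Q_LOCKED_VAL	1U	/* lock held, fast path */
#define _Q_SLOW_VAL	3U	/* waiter hashed; unhash before unlocking */

/* Simplified stand-in for the kernel's WARN(): report only when the
 * condition is non-zero. */
#define WARN(cond, fmt, ...)					\
	do {							\
		if (cond)					\
			fprintf(stderr, fmt, ##__VA_ARGS__);	\
	} while (0)

static int debug_locks_silent;	/* when set, debug reports are suppressed */

/* Illustrative unlock path; atomic_compare_exchange_strong() writes the
 * old value back through `locked` on failure, mirroring the return
 * value of the kernel's cmpxchg(). */
static void mock_pv_unlock(atomic_uint *lock)
{
	unsigned int locked = _Q_LOCKED_VAL;

	/* Fast path: the lock was simply held and is now released. */
	if (atomic_compare_exchange_strong(lock, &locked, 0U))
		return;

	/* Neither LOCKED nor SLOW: the lock word is corrupted. One
	 * WARN() call replaces the old check-then-WARN(1, ...) pair. */
	if (locked != _Q_SLOW_VAL) {
		WARN(!debug_locks_silent,
		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
		     (unsigned long)lock, locked);
		return;
	}

	/* Slow path: the real code unhashes the node and kicks the
	 * waiting vCPU here. */
}

int main(void)
{
	atomic_uint held = _Q_LOCKED_VAL;
	atomic_uint bad  = 0xffU;

	mock_pv_unlock(&held);	/* silent: fast path */
	mock_pv_unlock(&bad);	/* warns: corrupted value */
	return 0;
}

Built with gcc -std=gnu11, the first call releases the lock silently and the second prints one corruption report; setting debug_locks_silent suppresses the report without a separate branch, which is the point of folding the check into the WARN() condition.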
