 arch/arm64/include/asm/spinlock.h | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index d5c894253e73..e875a5a551d7 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -30,20 +30,39 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	unsigned int tmp;
 	arch_spinlock_t lockval;
+	u32 owner;
 
 	/*
 	 * Ensure prior spin_lock operations to other locks have completed
 	 * on this CPU before we test whether "lock" is locked.
 	 */
 	smp_mb();
+	owner = READ_ONCE(lock->owner) << 16;
 
 	asm volatile(
 "	sevl\n"
 "1:	wfe\n"
 "2:	ldaxr	%w0, %2\n"
+	/* Is the lock free? */
 "	eor	%w1, %w0, %w0, ror #16\n"
-"	cbnz	%w1, 1b\n"
-	/* Serialise against any concurrent lockers */
+"	cbz	%w1, 3f\n"
+	/* Lock taken -- has there been a subsequent unlock->lock transition? */
+"	eor	%w1, %w3, %w0, lsl #16\n"
+"	cbz	%w1, 1b\n"
+	/*
+	 * The owner has been updated, so there was an unlock->lock
+	 * transition that we missed. That means we can rely on the
+	 * store-release of the unlock operation paired with the
+	 * load-acquire of the lock operation to publish any of our
+	 * previous stores to the new lock owner and therefore don't
+	 * need to bother with the writeback below.
+	 */
+"	b	4f\n"
+"3:\n"
+	/*
+	 * Serialise against any concurrent lockers by writing back the
+	 * unlocked lock value
+	 */
 	ARM64_LSE_ATOMIC_INSN(
 	/* LL/SC */
 "	stxr	%w1, %w0, %2\n"
@@ -53,9 +72,11 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 "	mov	%w1, %w0\n"
 "	cas	%w0, %w0, %2\n"
 "	eor	%w1, %w1, %w0\n")
+	/* Somebody else wrote to the lock, GOTO 10 and reload the value */
 "	cbnz	%w1, 2b\n"
+"4:"
 	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-	:
+	: "r" (owner)
 	: "memory");
 }
 
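For readers who don't speak AArch64 assembly fluently, here is a rough C rendering of the wait loop the patch builds. It is a sketch only: ticket_lock_t and ticket_unlock_wait_sketch are hypothetical userspace stand-ins (not the kernel API), GCC/Clang __atomic builtins replace READ_ONCE()/smp_mb(), and the exclusive-monitor writeback (STXR, or CAS with LSE) and the WFE-based waiting of the real asm are reduced to comments.

#include <stdint.h>

/*
 * Hypothetical stand-in for the kernel's arch_spinlock_t: on
 * little-endian arm64 the low halfword is the owner (ticket being
 * served) and the high halfword is next (ticket to hand out).
 */
typedef struct {
	uint16_t owner;
	uint16_t next;
} ticket_lock_t;

static void ticket_unlock_wait_sketch(ticket_lock_t *lock)
{
	/* smp_mb(): order our prior lock operations before the test. */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	/* owner = READ_ONCE(lock->owner): snapshot the current owner. */
	uint16_t owner = __atomic_load_n(&lock->owner, __ATOMIC_RELAXED);

	for (;;) {
		ticket_lock_t v;

		/* ldaxr: load-acquire the whole lock word. */
		__atomic_load(lock, &v, __ATOMIC_ACQUIRE);

		/*
		 * "eor %w1, %w0, %w0, ror #16": the lock is free when
		 * owner == next. (The real asm then writes the value
		 * straight back with STXR/CAS to serialise against any
		 * concurrent lockers before returning.)
		 */
		if (v.owner == v.next)
			break;

		/*
		 * "eor %w1, %w3, %w0, lsl #16": the owner has changed,
		 * so an unlock->lock handover already happened; its
		 * release/acquire pairing publishes our earlier stores
		 * to the new owner, and we can stop waiting.
		 */
		if (v.owner != owner)
			break;

		/* The real code parks on WFE here instead of spinning. */
	}
}

The owner snapshot is what buys forward progress: under a steady stream of lockers the loop may never observe the lock free, but the owner halfword must advance on the first unlock->lock handover, and as the patch's comment notes, that store-release/load-acquire pairing is already enough to order the waiter's prior stores before the new critical section.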
