 arch/ia64/include/asm/spinlock.h | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 4fa502739d64..239ecdc9516d 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -75,6 +75,20 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
+static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	int *p = (int *)&lock->lock, ticket;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
+		cpu_relax();
+	}
+}
+
 static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
@@ -123,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
-		cpu_relax();
+	__ticket_spin_unlock_wait(lock);
 }
 
 #define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
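
For readers unfamiliar with the ia64 ticket-lock layout, the sketch below restates, in portable C, the quiescence test that the new __ticket_spin_unlock_wait() loops on. The TICKET_SHIFT and TICKET_MASK values used here (17 and a 15-bit mask) are assumptions for illustration only and are not defined in this hunk, and a plain volatile re-read stands in for the ia64_invala()/ld4.c.nc load sequence the patch uses to poll the lock word.

/*
 * Sketch only: a portable restatement of the check in
 * __ticket_spin_unlock_wait() above.  The field-layout constants are
 * assumed, and a plain volatile read replaces the ia64 ld4.c.nc
 * check load used in the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define TICKET_SHIFT	17			/* assumed offset of the "now serving" field */
#define TICKET_MASK	((1u << 15) - 1)	/* assumed 15-bit ticket counters */

/* The lock is free when the serving and next-ticket fields are equal. */
static int ticket_lock_is_free(uint32_t word)
{
	return !(((word >> TICKET_SHIFT) ^ word) & TICKET_MASK);
}

/* Spin until the current holder, if any, releases the lock. */
static void ticket_unlock_wait(const volatile uint32_t *lock_word)
{
	while (!ticket_lock_is_free(*lock_word))
		;	/* the patch calls cpu_relax() here */
}

int main(void)
{
	/* serving == next == 1, so the lock is free and the wait returns at once. */
	volatile uint32_t word = (1u << TICKET_SHIFT) | 1u;

	ticket_unlock_wait(&word);
	printf("lock word 0x%08x is free: %d\n", (unsigned)word, ticket_lock_is_free(word));
	return 0;
}

Compiled with any C compiler, the example exits immediately because the two fields already match; apart from the Itanium-specific load machinery, the loop added by the patch waits on the same comparison.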
