Diffstat (limited to 'arch/ia64/include/asm/spinlock.h')
-rw-r--r--  arch/ia64/include/asm/spinlock.h | 21
1 file changed, 0 insertions(+), 21 deletions(-)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index ca9e76149a4a..df2c121164b8 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -76,22 +76,6 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	int *p = (int *)&lock->lock, ticket;
-
-	ia64_invala();
-
-	for (;;) {
-		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
-		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
-			return;
-		cpu_relax();
-	}
-
-	smp_acquire__after_ctrl_dep();
-}
-
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
@@ -143,11 +127,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 	arch_spin_lock(lock);
 }
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	__ticket_spin_unlock_wait(lock);
-}
-
 #define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
 #define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
 
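Note: the removed __ticket_spin_unlock_wait() polled the lock word until the now-serving field caught up with the next-ticket field, i.e. until the lock was observed free, and arch_spin_unlock_wait() was a thin wrapper around it; with the wrapper gone, both can be deleted together. As a hedged illustration (a standalone userspace sketch, not the kernel code; the constants below mirror the TICKET_SHIFT/TICKET_MASK definitions earlier in this header and are assumptions here), the "lock is free" predicate that loop spun on looks like this:

	/*
	 * Illustrative sketch only, not the kernel implementation.
	 * Assumes the ia64 ticket encoding used by this header:
	 * next-ticket in the low bits, now-serving starting at TICKET_SHIFT.
	 */
	#include <stdio.h>

	#define TICKET_SHIFT	17
	#define TICKET_BITS	15
	#define TICKET_MASK	((1 << TICKET_BITS) - 1)

	/*
	 * Free when now-serving == next-ticket, which is exactly the
	 * !(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK) test
	 * in the removed code above.
	 */
	static int ticket_is_unlocked(unsigned int word)
	{
		return !(((word >> TICKET_SHIFT) ^ word) & TICKET_MASK);
	}

	int main(void)
	{
		unsigned int free_word = (5u << TICKET_SHIFT) | 5u; /* serving 5, next 5 */
		unsigned int held_word = (5u << TICKET_SHIFT) | 6u; /* serving 5, next 6 */

		printf("free: %d\n", ticket_is_unlocked(free_word)); /* prints 1 */
		printf("held: %d\n", ticket_is_unlocked(held_word)); /* prints 0 */
		return 0;
	}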