author     Tony Luck <tony.luck@intel.com>    2009-10-12 12:51:41 -0400
committer  Tony Luck <tony.luck@intel.com>    2009-10-13 17:28:31 -0400
commit     1502f08edc040b6ba4b986454416564088995e79 (patch)
tree       c6bc0d0c01d9bbc5503b86db71ca977d1b89e0f1 /arch/ia64/include
parent     9d40ee200a527ce08ab8c793ba8ae3e242edbb0e (diff)
[IA64] SMT friendly version of spin_unlock_wait()

We can be kinder to SMT systems in spin_unlock_wait.

Signed-off-by: Tony Luck <tony.luck@intel.com>
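For readers outside the ia64 tree: the patch below replaces the generic poll loop in __raw_spin_unlock_wait() with a ticket-aware wait built on ia64's speculative check-load (ld4.c.nc) after an ALAT invalidate (ia64_invala()), which is gentler on an SMT sibling than hammering the lock word with ordinary loads. The sketch below shows how the ticket test in the new loop decodes the lock word; the TICKET_SHIFT/TICKET_MASK values and the helper names are assumptions mirroring the 4-byte ia64 ticket layout of this era, not part of the patch itself:

    #include <stdio.h>

    /* Assumed layout (restated here for illustration, not copied from
     * the patch context): "now serving" in the low 15 bits, "next
     * ticket" starting at bit 17. */
    #define TICKET_SHIFT 17
    #define TICKET_MASK  ((1 << 15) - 1)

    /* Same test as the patch's loop body: the lock is free when the
     * serving field equals the next-ticket field. */
    static int ticket_lock_is_free(int word)
    {
            return !(((word >> TICKET_SHIFT) ^ word) & TICKET_MASK);
    }

    int main(void)
    {
            int free_word = (5 << TICKET_SHIFT) | 5;  /* serving == next */
            int held_word = (6 << TICKET_SHIFT) | 5;  /* a holder/waiter ahead */

            printf("%d %d\n", ticket_lock_is_free(free_word),
                   ticket_lock_is_free(held_word));   /* prints "1 0" */
            return 0;
    }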
Diffstat (limited to 'arch/ia64/include')
-rw-r--r--  arch/ia64/include/asm/spinlock.h | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 4fa502739d64..239ecdc9516d 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -75,6 +75,20 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
+static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	int *p = (int *)&lock->lock, ticket;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
+		cpu_relax();
+	}
+}
+
 static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
@@ -123,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
-		cpu_relax();
+	__ticket_spin_unlock_wait(lock);
 }
 
 #define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
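A design note on the second hunk: the old __raw_spin_unlock_wait() spun on __raw_spin_is_locked() with cpu_relax(), issuing a real load of the lock word on every iteration, which on an SMT core steals memory-pipeline resources from the sibling thread. As I read the patch, the new loop instead primes the ALAT and lets ld4.c.nc skip the actual load while the cached value is still valid, only re-loading after another CPU writes the lock line. A hypothetical portable rendering of the same wait, without that trick (names and constants are illustrative, not kernel API):

    #include <stdatomic.h>

    #define TICKET_SHIFT 17
    #define TICKET_MASK  ((1 << 15) - 1)

    /* Without ld4.c.nc, every iteration issues a real load of the lock
     * word -- exactly the SMT-unfriendly behaviour the patch avoids. */
    static void ticket_spin_unlock_wait_portable(const atomic_int *lock_word)
    {
            for (;;) {
                    int t = atomic_load_explicit(lock_word, memory_order_acquire);

                    if (!(((t >> TICKET_SHIFT) ^ t) & TICKET_MASK))
                            return;  /* serving == next: lock observed free */
                    /* a cpu_relax()-style pause would go here */
            }
    }

The portable loop cannot express "re-check only when the line changes", so it pays a load per iteration; that difference is the whole point of the ia64-specific version.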