about summary refs log tree commit diff stats
path: root/arch/x86/include/asm/spinlock.h
diff options
context:
space:
mode:
author: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> 2010-07-13 17:07:45 -0400
committer: H. Peter Anvin <hpa@linux.intel.com> 2011-08-29 16:46:07 -0400
commit2994488fe5bb721de1ded53af1a2fc41f47f6ddc (patch)
tree9629e17795bbe0e6e12b31ebf3b433fcea51668a /arch/x86/include/asm/spinlock.h
parentc576a3ea905c25d50339503e0e5c7fef724e0147 (diff)
x86, ticketlock: Convert __ticket_spin_lock to use xadd()
Convert the two variants of __ticket_spin_lock() to use xadd(), which has the effect of making them identical, so remove the duplicate function.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/include/asm/spinlock.h')
-rw-r--r-- arch/x86/include/asm/spinlock.h | 35
1 file changed, 5 insertions(+), 30 deletions(-)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 5240cdefa683..b69e0b473de6 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -54,26 +54,22 @@
54 * save some instructions and make the code more elegant. There really isn't 54 * save some instructions and make the code more elegant. There really isn't
55 * much between them in performance though, especially as locks are out of line. 55 * much between them in performance though, especially as locks are out of line.
56 */ 56 */
57#if (NR_CPUS < 256)
58static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) 57static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
59{ 58{
60 register union { 59 register struct __raw_tickets inc = { .tail = 1 };
61 struct __raw_tickets tickets;
62 unsigned short slock;
63 } inc = { .slock = 1 << TICKET_SHIFT };
64 60
65 asm volatile (LOCK_PREFIX "xaddw %w0, %1\n" 61 inc = xadd(&lock->tickets, inc);
66 : "+Q" (inc), "+m" (lock->slock) : : "memory", "cc");
67 62
68 for (;;) { 63 for (;;) {
69 if (inc.tickets.head == inc.tickets.tail) 64 if (inc.head == inc.tail)
70 break; 65 break;
71 cpu_relax(); 66 cpu_relax();
72 inc.tickets.head = ACCESS_ONCE(lock->tickets.head); 67 inc.head = ACCESS_ONCE(lock->tickets.head);
73 } 68 }
74 barrier(); /* make sure nothing creeps before the lock is taken */ 69 barrier(); /* make sure nothing creeps before the lock is taken */
75} 70}
76 71
72#if (NR_CPUS < 256)
77static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) 73static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
78{ 74{
79 unsigned int tmp, new; 75 unsigned int tmp, new;
@@ -101,27 +97,6 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
101 : "memory", "cc"); 97 : "memory", "cc");
102} 98}
103#else 99#else
104static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
105{
106 unsigned inc = 1 << TICKET_SHIFT;
107 __ticket_t tmp;
108
109 asm volatile(LOCK_PREFIX "xaddl %0, %1\n\t"
110 : "+r" (inc), "+m" (lock->slock)
111 : : "memory", "cc");
112
113 tmp = inc;
114 inc >>= TICKET_SHIFT;
115
116 for (;;) {
117 if ((__ticket_t)inc == tmp)
118 break;
119 cpu_relax();
120 tmp = ACCESS_ONCE(lock->tickets.head);
121 }
122 barrier(); /* make sure nothing creeps before the lock is taken */
123}
124
125static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) 100static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
126{ 101{
127 unsigned tmp; 102 unsigned tmp;