author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2010-07-13 18:14:26 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2011-08-29 16:46:34 -0400
commit		229855d6f3b40d01a903120c433d75e483a0b06d (patch)
tree		c11667543b5c5953c71e986a8a98a2c4600dbdad /arch/x86/include/asm/spinlock.h
parent		2994488fe5bb721de1ded53af1a2fc41f47f6ddc (diff)
x86, ticketlock: Make __ticket_spin_trylock common
Make trylock code common regardless of ticket size.
(Also, rename arch_spinlock.slock to head_tail.)
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/include/asm/spinlock.h')
-rw-r--r--	arch/x86/include/asm/spinlock.h	51
1 file changed, 12 insertions, 39 deletions
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index b69e0b473de6..f5695eeb83ff 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -69,60 +69,33 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
-#if (NR_CPUS < 256)
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	unsigned int tmp, new;
-
-	asm volatile("movzwl %2, %0\n\t"
-		     "cmpb %h0,%b0\n\t"
-		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
-		     "jne 1f\n\t"
-		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
-		     "1:"
-		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
-		     :
-		     : "memory", "cc");
+	arch_spinlock_t old, new;
+
+	old.tickets = ACCESS_ONCE(lock->tickets);
+	if (old.tickets.head != old.tickets.tail)
+		return 0;
 
-	return tmp;
+	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+
+	/* cmpxchg is a full barrier, so nothing can move before it */
+	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
+#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->slock)
+		     : "+m" (lock->head_tail)
 		     :
 		     : "memory", "cc");
 }
 #else
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned tmp;
-	unsigned new;
-
-	asm volatile("movl %2,%0\n\t"
-		     "movl %0,%1\n\t"
-		     "roll $16, %0\n\t"
-		     "cmpl %0,%1\n\t"
-		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
-		     "jne 1f\n\t"
-		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
-		     "1:"
-		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
-		     :
-		     : "memory", "cc");
-
-	return tmp;
-}
-
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->slock)
+		     : "+m" (lock->head_tail)
 		     :
 		     : "memory", "cc");
 }
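
For readers following the logic: the sketch below re-creates the post-patch trylock/unlock scheme as a stand-alone user-space C program, with C11 atomics standing in for the kernel's ACCESS_ONCE() and cmpxchg(). The names (ticketlock, ticket_trylock, ticket_unlock), the shift-based head/tail decoding (which mirrors what the kernel's struct layout yields on little-endian x86 with 8-bit tickets, the NR_CPUS < 256 case), and the simplified unlock are this sketch's assumptions, not part of the patch.

/*
 * Illustrative sketch of the ticket-lock trylock scheme the patch
 * switches to.  C11 atomics stand in for the kernel's ACCESS_ONCE()
 * and cmpxchg(); names and layout here are assumptions for the demo.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define TICKET_SHIFT 8			/* 8-bit tickets: tail in the high byte */

typedef struct {
	_Atomic uint16_t head_tail;	/* head (low byte) and tail (high byte) packed */
} ticketlock;

static int ticket_trylock(ticketlock *lock)
{
	/* Snapshot head and tail in one read, like ACCESS_ONCE(lock->tickets). */
	uint16_t old = atomic_load_explicit(&lock->head_tail, memory_order_relaxed);
	uint8_t head = old & 0xff;
	uint8_t tail = old >> TICKET_SHIFT;

	/* An unserved ticket is outstanding: the lock is held, give up. */
	if (head != tail)
		return 0;

	/* Claim the next ticket by bumping tail, leaving head alone. */
	uint16_t new = old + (1 << TICKET_SHIFT);

	/* One CAS both re-checks the snapshot and publishes the claim;
	 * like the kernel's cmpxchg(), it acts as a full barrier. */
	return atomic_compare_exchange_strong(&lock->head_tail, &old, new);
}

static void ticket_unlock(ticketlock *lock)
{
	/* The kernel uses "incb" on head only; this whole-word add is a
	 * simplification that would carry into tail if head wrapped. */
	atomic_fetch_add_explicit(&lock->head_tail, 1, memory_order_release);
}

int main(void)
{
	ticketlock lock = { 0 };

	printf("trylock while free:   %d\n", ticket_trylock(&lock));	/* 1 */
	printf("trylock while held:   %d\n", ticket_trylock(&lock));	/* 0 */
	ticket_unlock(&lock);
	printf("trylock after unlock: %d\n", ticket_trylock(&lock));	/* 1 */
	return 0;
}

Because head and tail are compared and swapped as one word, the same C logic serves both ticket sizes; only TICKET_SHIFT and the word width differ, which is what lets the patch drop the separate NR_CPUS >= 256 asm version of trylock.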