Diffstat (limited to 'include/asm-x86/spinlock.h')
-rw-r--r--    include/asm-x86/spinlock.h    65
1 file changed, 64 insertions(+), 1 deletion(-)
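The hunk below adds an xchg-based byte lock behind CONFIG_PARAVIRT. As a rough, hedged illustration of the same idea outside the kernel, the following user-space sketch replaces the patch's inline asm with GCC atomic builtins; the byte_lock type and the byte_lock_* function names are invented for this example and are not part of the patch.

    #include <stdint.h>
    #include <sched.h>                      /* sched_yield(), standing in for rep;nop */

    struct byte_lock {
            int8_t lock;                    /* 0 = free, 1 = held */
            int8_t spinners;                /* waiter count, for a "contended" query */
    };

    static inline void byte_lock_acquire(struct byte_lock *bl)
    {
            /* atomic exchange plays the role of xchgb in the patch */
            while (__atomic_exchange_n(&bl->lock, 1, __ATOMIC_ACQUIRE) != 0) {
                    __atomic_fetch_add(&bl->spinners, 1, __ATOMIC_RELAXED);
                    while (__atomic_load_n(&bl->lock, __ATOMIC_RELAXED))
                            sched_yield();  /* the kernel spins with rep;nop instead */
                    __atomic_fetch_sub(&bl->spinners, 1, __ATOMIC_RELAXED);
            }
    }

    static inline int byte_lock_is_contended(struct byte_lock *bl)
    {
            return __atomic_load_n(&bl->spinners, __ATOMIC_RELAXED) != 0;
    }

    static inline void byte_lock_release(struct byte_lock *bl)
    {
            /* release store, analogous to smp_wmb() followed by a plain store */
            __atomic_store_n(&bl->lock, 0, __ATOMIC_RELEASE);
    }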
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 9726144cdaba..4f9a9861799a 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -184,7 +184,70 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-#ifndef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT
+/*
+ * Define virtualization-friendly old-style lock byte lock, for use in
+ * pv_lock_ops if desired.
+ *
+ * This differs from the pre-2.6.24 spinlock by always using xchgb
+ * rather than decb to take the lock; this allows it to use a
+ * zero-initialized lock structure.  It also maintains a 1-byte
+ * contention counter, so that we can implement
+ * __byte_spin_is_contended.
+ */
+struct __byte_spinlock {
+	s8 lock;
+	s8 spinners;
+};
+
+static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	return bl->lock != 0;
+}
+
+static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	return bl->spinners != 0;
+}
+
+static inline void __byte_spin_lock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	s8 val = 1;
+
+	asm("1: xchgb %1, %0\n"
+	    "   test %1,%1\n"
+	    "   jz 3f\n"
+	    "   " LOCK_PREFIX "incb %2\n"
+	    "2: rep;nop\n"
+	    "   cmpb $1, %0\n"
+	    "   je 2b\n"
+	    "   " LOCK_PREFIX "decb %2\n"
+	    "   jmp 1b\n"
+	    "3:"
+	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners) : : "memory");
+}
+
+static inline int __byte_spin_trylock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	u8 old = 1;
+
+	asm("xchgb %1,%0"
+	    : "+m" (bl->lock), "+q" (old) : : "memory");
+
+	return old == 0;
+}
+
+static inline void __byte_spin_unlock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	smp_wmb();
+	bl->lock = 0;
+}
+#else  /* !CONFIG_PARAVIRT */
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);