path: root/include
author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-07-07 15:07:51 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-16 05:15:53 -0400
commit		8efcbab674de2bee45a2e4cdf97de16b8e609ac8 (patch)
tree		1b45a9446e471bfdba21e7982c87aef47d2f8313 /include
parent		74d4affde8feb8d5bdebf7fba8e90e4eae3b7b1d (diff)
paravirt: introduce a "lock-byte" spinlock implementation
Implement a version of the old spinlock algorithm, in which everyone spins waiting on a lock byte. In order to be compatible with the ticket lock's use of a zero initializer, this uses the convention of '0' for unlocked and '1' for locked.

This algorithm is much better than ticket locks in a virtual environment, because it doesn't interact badly with the vcpu scheduler. If there are multiple vcpus spinning on a lock and the lock is released, the next vcpu to be scheduled will take the lock, rather than cycling around until the next ticketed vcpu gets it.

To use this, you must call paravirt_use_bytelocks() very early, before any spinlocks have been taken.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Lameter <clameter@linux-foundation.org>
Cc: Petr Tesarik <ptesarik@suse.cz>
Cc: Virtualization <virtualization@lists.linux-foundation.org>
Cc: Xen devel <xen-devel@lists.xensource.com>
Cc: Thomas Friebel <thomas.friebel@amd.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
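To illustrate the algorithm described above, here is a minimal sketch in portable C (using GCC atomic builtins) of what the inline-asm byte lock added to asm-x86/spinlock.h below does. The struct and function names in this sketch are illustrative only and are not part of the patch:

struct byte_spinlock_sketch {
	signed char lock;	/* 0 = unlocked, 1 = locked */
	signed char spinners;	/* how many cpus are spinning on the lock */
};

static inline void byte_spin_lock_sketch(struct byte_spinlock_sketch *bl)
{
	/* atomically exchange 1 into the lock byte; getting 0 back means we own it */
	while (__sync_lock_test_and_set(&bl->lock, 1) != 0) {
		__sync_fetch_and_add(&bl->spinners, 1);	/* note the contention */
		while (*(volatile signed char *)&bl->lock)
			__builtin_ia32_pause();		/* rep;nop, as in the asm below */
		__sync_fetch_and_sub(&bl->spinners, 1);
	}
}

static inline void byte_spin_unlock_sketch(struct byte_spinlock_sketch *bl)
{
	/* release: store 0, so whichever vcpu is scheduled next can take the lock */
	__sync_lock_release(&bl->lock);
}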
Diffstat (limited to 'include')
-rw-r--r--	include/asm-x86/paravirt.h |  2
-rw-r--r--	include/asm-x86/spinlock.h | 65
2 files changed, 66 insertions(+), 1 deletion(-)
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index feb6bb66c5e2..65ed02cdbbd7 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -1385,6 +1385,8 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 void _paravirt_nop(void);
 #define paravirt_nop	((void *)_paravirt_nop)
 
+void paravirt_use_bytelocks(void);
+
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
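For context, paravirt_use_bytelocks() is only declared in this include-limited view; its definition lives on the arch/x86 side of the patch. Presumably it does little more than repoint the pv_lock_ops entries at the byte-lock functions added to spinlock.h below; the following is a rough sketch under that assumption (the field and function names come from the patch, but the body is an approximation, not the verbatim change):

void __init paravirt_use_bytelocks(void)
{
#ifdef CONFIG_SMP
	/* switch every pv spinlock hook over to the byte-lock variants */
	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
	pv_lock_ops.spin_lock = __byte_spin_lock;
	pv_lock_ops.spin_trylock = __byte_spin_trylock;
	pv_lock_ops.spin_unlock = __byte_spin_unlock;
#endif
}

A guest that wants these locks would then call paravirt_use_bytelocks() from its early setup code, before the first spinlock is taken, as the commit message requires.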
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 9726144cdaba..4f9a9861799a 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -184,7 +184,70 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-#ifndef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT
+/*
+ * Define virtualization-friendly old-style lock byte lock, for use in
+ * pv_lock_ops if desired.
+ *
+ * This differs from the pre-2.6.24 spinlock by always using xchgb
+ * rather than decb to take the lock; this allows it to use a
+ * zero-initialized lock structure.  It also maintains a 1-byte
+ * contention counter, so that we can implement
+ * __byte_spin_is_contended.
+ */
+struct __byte_spinlock {
+	s8 lock;
+	s8 spinners;
+};
+
+static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	return bl->lock != 0;
+}
+
+static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	return bl->spinners != 0;
+}
+
+static inline void __byte_spin_lock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	s8 val = 1;
+
+	asm("1: xchgb %1, %0\n"
+	    "   test %1,%1\n"
+	    "   jz 3f\n"
+	    "   " LOCK_PREFIX "incb %2\n"
+	    "2: rep;nop\n"
+	    "   cmpb $1, %0\n"
+	    "   je 2b\n"
+	    "   " LOCK_PREFIX "decb %2\n"
+	    "   jmp 1b\n"
+	    "3:"
+	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
+}
+
+static inline int __byte_spin_trylock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	u8 old = 1;
+
+	asm("xchgb %1,%0"
+	    : "+m" (bl->lock), "+q" (old) : : "memory");
+
+	return old == 0;
+}
+
+static inline void __byte_spin_unlock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	smp_wmb();
+	bl->lock = 0;
+}
+#else  /* !CONFIG_PARAVIRT */
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);