author     Peter Zijlstra (Intel) <peterz@infradead.org>    2015-04-24 14:56:36 -0400
committer  Ingo Molnar <mingo@kernel.org>                   2015-05-08 06:36:58 -0400
commit     2aa79af64263190eec610422b07f60e99a7d230a
tree       423641cd1ae46f066739d62d743f39c7eb58c5c8
parent     2c83e8e9492dc823be1d96d4c5ef75d16d3866a0
locking/qspinlock: Revert to test-and-set on hypervisors
When we detect a hypervisor (!paravirt, see qspinlock paravirt support
patches), revert to a simple test-and-set lock to avoid the horrors
of queue preemption.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-8-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/x86/include/asm/qspinlock.h | 14 ++++++++++++++
 include/asm-generic/qspinlock.h  |  7 +++++++
 kernel/locking/qspinlock.c       |  3 +++
 3 files changed, 24 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index e2aee8273664..f079b7020e3f 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_QSPINLOCK_H
 #define _ASM_X86_QSPINLOCK_H
 
+#include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 
 #define	queued_spin_unlock queued_spin_unlock
@@ -15,6 +16,19 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 	smp_store_release((u8 *)lock, 0);
 }
 
+#define virt_queued_spin_lock virt_queued_spin_lock
+
+static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		return false;
+
+	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+		cpu_relax();
+
+	return true;
+}
+
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */
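The x86 override above returns false on bare metal, so the queued slowpath runs as usual; when X86_FEATURE_HYPERVISOR is set it instead spins on a 0 -> _Q_LOCKED_VAL cmpxchg, i.e. a plain test-and-set lock. Below is a minimal userspace sketch of that test-and-set behaviour, using C11 atomics in place of the kernel's atomic_cmpxchg()/cpu_relax(); the tas_lock names are invented for this illustration and are not part of the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct tas_lock {
	atomic_uint val;	/* 0 = unlocked, 1 = locked */
};

static void tas_lock_acquire(struct tas_lock *lock)
{
	unsigned int expected;

	/* Spin until the 0 -> 1 transition succeeds, mirroring the
	 * atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) loop above. */
	do {
		expected = 0;
	} while (!atomic_compare_exchange_weak(&lock->val, &expected, 1));
}

static void tas_lock_release(struct tas_lock *lock)
{
	/* Release store pairs with the acquire in the lock loop,
	 * like smp_store_release() in queued_spin_unlock(). */
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}

int main(void)
{
	struct tas_lock lock = { .val = 0 };

	tas_lock_acquire(&lock);
	printf("locked:   val = %u\n", atomic_load(&lock.val));
	tas_lock_release(&lock);
	printf("unlocked: val = %u\n", atomic_load(&lock.val));
	return 0;
}

The point of the fallback is that a spinning waiter never joins an MCS queue, so a preempted vCPU cannot block every waiter queued behind it.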
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 569abcd47a9a..83bfb87f5bf1 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,6 +111,13 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
 		cpu_relax();
 }
 
+#ifndef virt_queued_spin_lock
+static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+	return false;
+}
+#endif
+
 /*
  * Initializier
  */
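The generic header only supplies this always-false stub when no architecture override exists; the x86 header disables it by defining a virt_queued_spin_lock macro before asm-generic/qspinlock.h is included. A standalone sketch of that #define/#ifndef override pattern, with an invented name (virt_trylock) purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* "Arch" side: define the function and a same-named macro first ... */
#define virt_trylock virt_trylock
static inline bool virt_trylock(void)
{
	return true;			/* arch-specific behaviour */
}

/* ... so the generic fallback below compiles out, exactly like the
 * #ifndef virt_queued_spin_lock stub in asm-generic/qspinlock.h. */
#ifndef virt_trylock
static inline bool virt_trylock(void)
{
	return false;			/* default: never take the shortcut */
}
#endif

int main(void)
{
	printf("virt_trylock() = %d\n", virt_trylock());	/* prints 1 */
	return 0;
}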
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 033872113ebb..fd31a474145d 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -249,6 +249,9 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+	if (virt_queued_spin_lock(lock))
+		return;
+
 	/*
 	 * wait for in-progress pending->locked hand-overs
 	 *
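The hook sits at the very top of the slowpath: if virt_queued_spin_lock() took the lock via the test-and-set loop, the function returns before any pending-bit or MCS queueing work is done. Schematically (placeholder names, not kernel functions):

#include <stdbool.h>

static bool take_lock_virt(void)
{
	/* Stands in for virt_queued_spin_lock(): true only when running
	 * on a hypervisor and the test-and-set loop acquired the lock. */
	return false;
}

static void queued_slowpath_body(void)
{
	/* Stands in for the pending-bit and MCS queueing logic. */
}

static void lock_slowpath(void)
{
	if (take_lock_virt())
		return;		/* lock already held; skip all queueing */

	queued_slowpath_body();
}

int main(void)
{
	lock_slowpath();
	return 0;
}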