author    Peter Zijlstra <peterz@infradead.org>    2015-09-04 11:25:23 -0400
committer Ingo Molnar <mingo@kernel.org>           2015-09-11 01:49:42 -0400
commit    43b3f02899f74ae9914a39547cc5492156f0027a (patch)
tree      0db5018c95f8d243b974f42abda1c0c618abeee5
parent    edcd591c77a48da753456f92daf8bb50fe9bac93 (diff)
locking/qspinlock/x86: Fix performance regression under unaccelerated VMs
Dave ran into horrible performance on a VM without PARAVIRT_SPINLOCKS
set, and Linus noted that the test-and-set implementation was
suboptimal: one should spin on the variable with a load, not an RMW.

While there, remove 'queued' from the name, as the lock isn't queued
at all, but a simple test-and-set.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reported-by: Dave Chinner <david@fromorbit.com>
Tested-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hp.com>
Cc: stable@vger.kernel.org # v4.2+
Link: http://lkml.kernel.org/r/20150904152523.GR18673@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
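For illustration, a minimal userspace sketch of the two spin-wait
strategies using C11 atomics. This is not the kernel code (which uses
atomic_read()/atomic_cmpxchg() and cpu_relax()); the type and function
names here are made up for the example:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Illustrative only -- not the kernel's struct qspinlock. */
	struct tas_lock { atomic_int val; };

	/*
	 * Before the fix: every spin iteration is a cmpxchg. The RMW
	 * needs the cache line in exclusive state, so contending CPUs
	 * bounce the line back and forth while the lock is held.
	 */
	static void tas_lock_rmw_spin(struct tas_lock *l)
	{
		int expected = 0;
		while (!atomic_compare_exchange_weak(&l->val, &expected, 1))
			expected = 0;	/* cmpxchg wrote the observed value back */
	}

	/*
	 * After the fix (test-and-test-and-set): spin with plain loads,
	 * which let the line stay shared across all waiters, and only
	 * retry the RMW once the lock has been observed free.
	 */
	static void tas_lock_load_spin(struct tas_lock *l)
	{
		int expected;
		do {
			while (atomic_load(&l->val) != 0)
				;	/* the kernel calls cpu_relax() here */
			expected = 0;
		} while (!atomic_compare_exchange_weak(&l->val, &expected, 1));
	}

Unlock is a plain atomic_store(&l->val, 0) in both variants; the only
difference is how the waiters spin while the lock is held.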
-rw-r--r--  arch/x86/include/asm/qspinlock.h  16
-rw-r--r--  include/asm-generic/qspinlock.h    4
-rw-r--r--  kernel/locking/qspinlock.c         2
3 files changed, 15 insertions, 7 deletions
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 9d51fae1cba3..8dde3bdc4a05 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -39,15 +39,23 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-#define virt_queued_spin_lock virt_queued_spin_lock
+#define virt_spin_lock virt_spin_lock
 
-static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+static inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 		return false;
 
-	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
-		cpu_relax();
+	/*
+	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+	 * back to a Test-and-Set spinlock, because fair locks have
+	 * horrible lock 'holder' preemption issues.
+	 */
+
+	do {
+		while (atomic_read(&lock->val) != 0)
+			cpu_relax();
+	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 
 	return true;
 }
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 83bfb87f5bf1..e2aadbc7151f 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
 		cpu_relax();
 }
 
-#ifndef virt_queued_spin_lock
-static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	return false;
 }
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 337c8818541d..87e9ce6a63c5 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	if (pv_enabled())
 		goto queue;
 
-	if (virt_queued_spin_lock(lock))
+	if (virt_spin_lock(lock))
 		return;
 
 	/*