author     Waiman Long <Waiman.Long@hp.com>     2015-04-24 14:56:39 -0400
committer  Ingo Molnar <mingo@kernel.org>       2015-05-08 06:37:17 -0400
commit     bf0c7c34adc286bec3a5a38c00c773ba1b2d0396
tree       fb54e64d6b8418e673918b1c1d702b3cb4a505d7
parent     f233f7f1581e78fd9b4023f2e7d8c1ed89020cc9
locking/pvqspinlock, x86: Enable PV qspinlock for KVM
This patch adds the necessary KVM specific code to allow KVM to support
the CPU halting and kicking operations needed by the queue spinlock PV
code.

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-11-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/kernel/kvm.c	43
-rw-r--r--	kernel/Kconfig.locks	2
2 files changed, 44 insertions(+), 1 deletion(-)
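
For context, the two hooks this patch fills in for KVM, pv_lock_ops.wait()
(kvm_wait) and pv_lock_ops.kick() (kvm_kick_cpu), are consumed by the PV
queued-spinlock slowpath. The sketch below only illustrates that halt/kick
handshake; it is NOT the kernel's __pv_queued_spin_lock_slowpath.
struct pv_waiter, PV_SPIN_THRESHOLD, pv_wait_for_lock() and
pv_unlock_and_kick() are names made up here for illustration; only the two
pv_lock_ops callbacks come from the patch itself.

/*
 * Simplified sketch of the halt/kick protocol (not the kernel's
 * __pv_queued_spin_lock_slowpath).  struct pv_waiter, PV_SPIN_THRESHOLD,
 * pv_wait_for_lock() and pv_unlock_and_kick() are illustrative names;
 * only pv_lock_ops.wait()/kick() are wired up by this patch.
 */
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>

#define PV_SPIN_THRESHOLD	(1 << 15)	/* arbitrary spin bound */

struct pv_waiter {
	u8	halted;		/* 1 while the waiter intends to halt */
	int	cpu;		/* CPU the unlocker has to kick */
};

static void pv_wait_for_lock(atomic_t *lockval, struct pv_waiter *w)
{
	int loop;

	for (;;) {
		/* Spin first; most critical sections are short. */
		for (loop = 0; loop < PV_SPIN_THRESHOLD; loop++) {
			if (!atomic_read(lockval)) {
				WRITE_ONCE(w->halted, 0);
				return;
			}
			cpu_relax();
		}

		/*
		 * Announce that we are about to halt, then let the
		 * hypervisor halt this vCPU.  kvm_wait() re-checks
		 * *ptr == val with interrupts disabled, so a kick that
		 * already cleared ->halted makes it return immediately
		 * instead of sleeping with no wakeup coming.
		 */
		WRITE_ONCE(w->halted, 1);
		pv_lock_ops.wait(&w->halted, 1);	/* kvm_wait() on KVM */
	}
}

static void pv_unlock_and_kick(atomic_t *lockval, struct pv_waiter *w)
{
	atomic_set(lockval, 0);			/* release the lock */
	/* Clear the halt flag first so a not-yet-halted waiter skips halt. */
	if (cmpxchg(&w->halted, 1, 0) == 1)
		pv_lock_ops.kick(w->cpu);	/* kvm_kick_cpu() on KVM */
}

The cmpxchg() in the unlocker and kvm_wait()'s re-check of *ptr under
disabled interrupts serve the same purpose: a kick that races with a waiter
about to halt must not be lost, otherwise the vCPU could sleep indefinitely.
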
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 9435620062df..6c21d931bd24 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -584,6 +584,39 @@ static void kvm_kick_cpu(int cpu)
 	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 }
 
+
+#ifdef CONFIG_QUEUED_SPINLOCK
+
+#include <asm/qspinlock.h>
+
+static void kvm_wait(u8 *ptr, u8 val)
+{
+	unsigned long flags;
+
+	if (in_nmi())
+		return;
+
+	local_irq_save(flags);
+
+	if (READ_ONCE(*ptr) != val)
+		goto out;
+
+	/*
+	 * halt until it's our turn and kicked. Note that we do safe halt
+	 * for irq enabled case to avoid hang when lock info is overwritten
+	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
+	 */
+	if (arch_irqs_disabled_flags(flags))
+		halt();
+	else
+		safe_halt();
+
+out:
+	local_irq_restore(flags);
+}
+
+#else /* !CONFIG_QUEUED_SPINLOCK */
+
 enum kvm_contention_stat {
 	TAKEN_SLOW,
 	TAKEN_SLOW_PICKUP,
@@ -817,6 +850,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
 	}
 }
 
+#endif /* !CONFIG_QUEUED_SPINLOCK */
+
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
@@ -828,8 +863,16 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
+#ifdef CONFIG_QUEUED_SPINLOCK
+	__pv_init_lock_hash();
+	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_lock_ops.wait = kvm_wait;
+	pv_lock_ops.kick = kvm_kick_cpu;
+#else /* !CONFIG_QUEUED_SPINLOCK */
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
 	pv_lock_ops.unlock_kick = kvm_unlock_kick;
+#endif
 }
 
 static __init int kvm_spinlock_init_jump(void)
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 95fdad866a98..4379eef9334d 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -240,7 +240,7 @@ config ARCH_USE_QUEUED_SPINLOCK
 
 config QUEUED_SPINLOCK
 	def_bool y if ARCH_USE_QUEUED_SPINLOCK
-	depends on SMP && !PARAVIRT_SPINLOCKS
+	depends on SMP && (!PARAVIRT_SPINLOCKS || !XEN)
 
 config ARCH_USE_QUEUE_RWLOCK
 	bool