about summary refs log tree commit diff stats
path: root/arch/x86/kernel/kvm.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/kvm.c')
-rw-r--r--  arch/x86/kernel/kvm.c | 43
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 9435620062df..6c21d931bd24 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -584,6 +584,39 @@ static void kvm_kick_cpu(int cpu)
584 kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid); 584 kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
585} 585}
586 586
587
588#ifdef CONFIG_QUEUED_SPINLOCK
589
590#include <asm/qspinlock.h>
591
592static void kvm_wait(u8 *ptr, u8 val)
593{
594 unsigned long flags;
595
596 if (in_nmi())
597 return;
598
599 local_irq_save(flags);
600
601 if (READ_ONCE(*ptr) != val)
602 goto out;
603
604 /*
605 * halt until it's our turn and kicked. Note that we do safe halt
606 * for irq enabled case to avoid hang when lock info is overwritten
607 * in irq spinlock slowpath and no spurious interrupt occur to save us.
608 */
609 if (arch_irqs_disabled_flags(flags))
610 halt();
611 else
612 safe_halt();
613
614out:
615 local_irq_restore(flags);
616}
617
618#else /* !CONFIG_QUEUED_SPINLOCK */
619
587enum kvm_contention_stat { 620enum kvm_contention_stat {
588 TAKEN_SLOW, 621 TAKEN_SLOW,
589 TAKEN_SLOW_PICKUP, 622 TAKEN_SLOW_PICKUP,
@@ -817,6 +850,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
817 } 850 }
818} 851}
819 852
853#endif /* !CONFIG_QUEUED_SPINLOCK */
854
820/* 855/*
821 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present. 856 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
822 */ 857 */
@@ -828,8 +863,16 @@ void __init kvm_spinlock_init(void)
828 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) 863 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
829 return; 864 return;
830 865
866#ifdef CONFIG_QUEUED_SPINLOCK
867 __pv_init_lock_hash();
868 pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
869 pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
870 pv_lock_ops.wait = kvm_wait;
871 pv_lock_ops.kick = kvm_kick_cpu;
872#else /* !CONFIG_QUEUED_SPINLOCK */
831 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning); 873 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
832 pv_lock_ops.unlock_kick = kvm_unlock_kick; 874 pv_lock_ops.unlock_kick = kvm_unlock_kick;
875#endif
833} 876}
834 877
835static __init int kvm_spinlock_init_jump(void) 878static __init int kvm_spinlock_init_jump(void)