aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt5
-rw-r--r--arch/x86/hyperv/Makefile4
-rw-r--r--arch/x86/hyperv/hv_spinlock.c88
-rw-r--r--arch/x86/include/asm/mshyperv.h2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c14
5 files changed, 113 insertions, 0 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 9871e649ffef..4cdf6a673592 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1385,6 +1385,11 @@
1385 hvc_iucv_allow= [S390] Comma-separated list of z/VM user IDs. 1385 hvc_iucv_allow= [S390] Comma-separated list of z/VM user IDs.
1386 If specified, z/VM IUCV HVC accepts connections 1386 If specified, z/VM IUCV HVC accepts connections
1387 from listed z/VM user IDs only. 1387 from listed z/VM user IDs only.
1388
1389 hv_nopvspin [X86,HYPER_V] Disables the paravirt spinlock optimizations
1390 which allow the hypervisor to 'idle' the
1391 guest on lock contention.
1392
1388 keep_bootcon [KNL] 1393 keep_bootcon [KNL]
1389 Do not unregister boot console at start. This is only 1394 Do not unregister boot console at start. This is only
1390 useful for debugging when something happens in the window 1395 useful for debugging when something happens in the window
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index b21ee65c4101..1c11f9420a82 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1,2 +1,6 @@
obj-y			:= hv_init.o mmu.o nested.o
obj-$(CONFIG_X86_64)	+= hv_apic.o

# The Hyper-V PV spinlock code is built only on 64-bit kernels (the
# CONFIG_PARAVIRT_SPINLOCKS symbol alone is not 64-bit specific).
ifdef CONFIG_X86_64
obj-$(CONFIG_PARAVIRT_SPINLOCKS)	+= hv_spinlock.o
endif
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
new file mode 100644
index 000000000000..a861b0456b1a
--- /dev/null
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -0,0 +1,88 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Hyper-V specific spinlock code.
5 *
6 * Copyright (C) 2018, Intel, Inc.
7 *
8 * Author : Yi Sun <yi.y.sun@intel.com>
9 */
10
11#define pr_fmt(fmt) "Hyper-V: " fmt
12
13#include <linux/spinlock.h>
14
15#include <asm/mshyperv.h>
16#include <asm/paravirt.h>
17#include <asm/apic.h>
18
19static bool __initdata hv_pvspin = true;
20
/*
 * pv qspinlock "kick" hook: send an IPI to @cpu to terminate the 'idle'
 * state it entered via the HV_X64_MSR_GUEST_IDLE read in hv_qlock_wait().
 */
static void hv_qlock_kick(int cpu)
{
	apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
}
25
/*
 * pv qspinlock "wait" hook: park the vCPU while the lock byte @byte still
 * holds @val, by reading the Hyper-V guest-idle MSR.  Woken by the IPI
 * sent from hv_qlock_kick().
 */
static void hv_qlock_wait(u8 *byte, u8 val)
{
	unsigned long msr_val;
	unsigned long flags;

	/* In NMI context just return and let the caller spin instead. */
	if (in_nmi())
		return;

	/*
	 * Reading HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that the
	 * vCPU can be put into 'idle' state. This 'idle' state is
	 * terminated by an IPI, usually from hv_qlock_kick(), even if
	 * interrupts are disabled on the vCPU.
	 *
	 * To prevent a race against the unlock path it is required to
	 * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE
	 * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between
	 * the lock value check and the rdmsrl() then the vCPU might be put
	 * into 'idle' state by the hypervisor and kept in that state for
	 * an unspecified amount of time.
	 */
	local_irq_save(flags);
	/*
	 * Only issue the rdmsrl() when the lock state has not changed.
	 */
	if (READ_ONCE(*byte) == val)
		rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
	local_irq_restore(flags);
}
55
/*
 * pv "vcpu_is_preempted" hook.  Hyper-V provides no way to query whether
 * a vCPU has been preempted, so pessimistically report "not preempted".
 */
bool hv_vcpu_is_preempted(int vcpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);
64
65void __init hv_init_spinlocks(void)
66{
67 if (!hv_pvspin || !apic ||
68 !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
69 !(ms_hyperv.features & HV_X64_MSR_GUEST_IDLE_AVAILABLE)) {
70 pr_info("PV spinlocks disabled\n");
71 return;
72 }
73 pr_info("PV spinlocks enabled\n");
74
75 __pv_init_lock_hash();
76 pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
77 pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
78 pv_ops.lock.wait = hv_qlock_wait;
79 pv_ops.lock.kick = hv_qlock_kick;
80 pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
81}
82
/*
 * "hv_nopvspin" early parameter: opt out of the PV spinlock optimization.
 * The argument string is ignored; the parameter's mere presence clears
 * hv_pvspin before hv_init_spinlocks() runs.
 */
static __init int hv_parse_nopvspin(char *arg)
{
	hv_pvspin = false;
	return 0;
}
early_param("hv_nopvspin", hv_parse_nopvspin);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index f37704497d8f..0d6271cce198 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -351,6 +351,8 @@ int hyperv_flush_guest_mapping(u64 as);
351 351
352#ifdef CONFIG_X86_64 352#ifdef CONFIG_X86_64
353void hv_apic_init(void); 353void hv_apic_init(void);
354void __init hv_init_spinlocks(void);
355bool hv_vcpu_is_preempted(int vcpu);
354#else 356#else
355static inline void hv_apic_init(void) {} 357static inline void hv_apic_init(void) {}
356#endif 358#endif
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index ad12733f6058..1c72f3819eb1 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -199,6 +199,16 @@ static unsigned long hv_get_tsc_khz(void)
199 return freq / 1000; 199 return freq / 1000;
200} 200}
201 201
#if defined(CONFIG_SMP) && IS_ENABLED(CONFIG_HYPERV)
/*
 * Boot-CPU preparation hook installed into smp_ops by
 * ms_hyperv_init_platform(): run the native preparation first, then set
 * up the Hyper-V PV spinlock hooks (64-bit + PARAVIRT_SPINLOCKS only,
 * matching the build condition in arch/x86/hyperv/Makefile).
 */
static void __init hv_smp_prepare_boot_cpu(void)
{
	native_smp_prepare_boot_cpu();
#if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_SPINLOCKS)
	hv_init_spinlocks();
#endif
}
#endif
211
202static void __init ms_hyperv_init_platform(void) 212static void __init ms_hyperv_init_platform(void)
203{ 213{
204 int hv_host_info_eax; 214 int hv_host_info_eax;
@@ -303,6 +313,10 @@ static void __init ms_hyperv_init_platform(void)
303 if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE) 313 if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE)
304 alloc_intr_gate(HYPERV_STIMER0_VECTOR, 314 alloc_intr_gate(HYPERV_STIMER0_VECTOR,
305 hv_stimer0_callback_vector); 315 hv_stimer0_callback_vector);
316
317# ifdef CONFIG_SMP
318 smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
319# endif
306#endif 320#endif
307} 321}
308 322