 Documentation/virtual/kvm/api.txt      |  9
 Documentation/virtual/kvm/arm/psci.txt | 30
 MAINTAINERS                            |  4
 arch/arm/include/asm/kvm_host.h        |  3
 arch/arm/include/uapi/asm/kvm.h        |  6
 arch/arm/kvm/guest.c                   | 13
 arch/arm64/include/asm/kvm_host.h      |  3
 arch/arm64/include/uapi/asm/kvm.h      |  6
 arch/arm64/kvm/guest.c                 | 14
 arch/arm64/kvm/sys_regs.c              |  6
 arch/x86/kvm/vmx.c                     | 14
 arch/x86/kvm/x86.h                     |  7
 include/kvm/arm_psci.h                 | 16
 include/uapi/linux/kvm.h               |  7
 virt/kvm/arm/arm.c                     | 15
 virt/kvm/arm/psci.c                    | 60
 virt/kvm/arm/vgic/vgic.c               |  8
 17 files changed, 189 insertions(+), 32 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 1c7958b57fe9..758bf403a169 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1960,6 +1960,9 @@ ARM 32-bit VFP control registers have the following id bit patterns:
 ARM 64-bit FP registers have the following id bit patterns:
   0x4030 0000 0012 0 <regno:12>
 
+ARM firmware pseudo-registers have the following bit pattern:
+  0x4030 0000 0014 <regno:16>
+
 
 arm64 registers are mapped using the lower 32 bits. The upper 16 of
 that is the register group type, or coprocessor number:
@@ -1976,6 +1979,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
 arm64 system registers have the following id bit patterns:
   0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
 
+arm64 firmware pseudo-registers have the following bit pattern:
+  0x6030 0000 0014 <regno:16>
+
 
 MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
 the register group type:
@@ -2510,7 +2516,8 @@ Possible features:
 	  and execute guest code when KVM_RUN is called.
 	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
 	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
-	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
+	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
+	  backward compatible with v0.2) for the CPU.
 	  Depends on KVM_CAP_ARM_PSCI_0_2.
 	- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
 	  Depends on KVM_CAP_ARM_PMU_V3.
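
The two firmware pseudo-register patterns above are ordinary ONE_REG ids: the arm64 variant is KVM_REG_ARM64 | KVM_REG_SIZE_U64 | the new 0x0014 coprocessor group | regno. A minimal standalone sketch (not part of the patch) that reproduces the encoding; the constant values mirror the uapi definitions added later in this series:

#include <stdio.h>
#include <stdint.h>

/* These values mirror KVM_REG_ARM64, KVM_REG_SIZE_U64 and the new
 * KVM_REG_ARM_FW group (0x0014 << KVM_REG_ARM_COPROC_SHIFT, shift 16). */
#define REG_ARM64     0x6000000000000000ULL
#define REG_SIZE_U64  0x0030000000000000ULL
#define REG_ARM_FW    ((uint64_t)0x0014 << 16)

static uint64_t arm64_fw_reg(uint16_t regno)
{
	return REG_ARM64 | REG_SIZE_U64 | REG_ARM_FW | regno;
}

int main(void)
{
	/* regno 0 is KVM_REG_ARM_PSCI_VERSION; prints 0x6030000000140000 */
	printf("0x%016llx\n", (unsigned long long)arm64_fw_reg(0));
	return 0;
}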
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
new file mode 100644
index 000000000000..aafdab887b04
--- /dev/null
+++ b/Documentation/virtual/kvm/arm/psci.txt
@@ -0,0 +1,30 @@
+KVM implements the PSCI (Power State Coordination Interface)
+specification in order to provide services such as CPU on/off, reset
+and power-off to the guest.
+
+The PSCI specification is regularly updated to provide new features,
+and KVM implements these updates if they make sense from a virtualization
+point of view.
+
+This means that a guest booted on two different versions of KVM can
+observe two different "firmware" revisions. This could cause issues if
+a given guest is tied to a particular PSCI revision (unlikely), or if
+a migration causes a different PSCI version to be exposed out of the
+blue to an unsuspecting guest.
+
+In order to remedy this situation, KVM exposes a set of "firmware
+pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
+interface. These registers can be saved/restored by userspace, and set
+to a convenient value if required.
+
+The following register is defined:
+
+* KVM_REG_ARM_PSCI_VERSION:
+
+  - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
+    (and thus has already been initialized)
+  - Returns the current PSCI version on GET_ONE_REG (defaulting to the
+    highest PSCI version implemented by KVM and compatible with v0.2)
+  - Allows any PSCI version implemented by KVM and compatible with
+    v0.2 to be set with SET_ONE_REG
+  - Affects the whole VM (even if the register view is per-vcpu)
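
As a rough userspace sketch (not part of the patch), saving and restoring this register across a migration is an ordinary ONE_REG access; it assumes uapi headers that already carry the KVM_REG_ARM_PSCI_VERSION definition added by this series:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* struct kvm_one_reg, KVM_GET/SET_ONE_REG */

/* Read the PSCI version currently exposed to the guest (source side). */
static int get_psci_version(int vcpu_fd, uint64_t *version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (uintptr_t)version,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

/* Pin the VM to a previously saved version (destination side). */
static int set_psci_version(int vcpu_fd, uint64_t version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (uintptr_t)&version,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Per the semantics listed above, the value written back applies to the whole VM even though the access goes through a single vcpu fd.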
diff --git a/MAINTAINERS b/MAINTAINERS
index 3e41e12a88a4..db2bc3fe0ba4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7744,7 +7744,7 @@ F: arch/x86/include/asm/svm.h
 F:	arch/x86/kvm/svm.c
 
 KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
-M:	Christoffer Dall <christoffer.dall@linaro.org>
+M:	Christoffer Dall <christoffer.dall@arm.com>
 M:	Marc Zyngier <marc.zyngier@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.cs.columbia.edu
@@ -7758,7 +7758,7 @@ F: virt/kvm/arm/
 F:	include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
-M:	Christoffer Dall <christoffer.dall@linaro.org>
+M:	Christoffer Dall <christoffer.dall@arm.com>
 M:	Marc Zyngier <marc.zyngier@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.cs.columbia.edu
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c6a749568dd6..c7c28c885a19 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -77,6 +77,9 @@ struct kvm_arch {
 	/* Interrupt controller */
 	struct vgic_dist vgic;
 	int max_vcpus;
+
+	/* Mandated version of PSCI */
+	u32 psci_version;
 };
 
 #define KVM_NR_MEM_OBJS     40
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 2ba95d6fe852..caae4843cb70 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -195,6 +195,12 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_VFP_FPINST		0x1009
 #define KVM_REG_ARM_VFP_FPINST2	0x100A
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM | KVM_REG_SIZE_U64 | \
+					 KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 1e0784ebbfd6..a18f33edc471 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <kvm/arm_psci.h>
 #include <asm/cputype.h>
 #include <linux/uaccess.h>
 #include <asm/kvm.h>
@@ -176,6 +177,7 @@ static unsigned long num_core_regs(void)
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
 	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+		+ kvm_arm_get_fw_num_regs(vcpu)
 		+ NUM_TIMER_REGS;
 }
 
@@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 		uindices++;
 	}
 
+	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+	if (ret)
+		return ret;
+	uindices += kvm_arm_get_fw_num_regs(vcpu);
+
 	ret = copy_timer_indices(vcpu, uindices);
 	if (ret)
 		return ret;
@@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return get_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_get_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return get_timer_reg(vcpu, reg);
 
@@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return set_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_set_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return set_timer_reg(vcpu, reg);
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ab46bc70add6..469de8acd06f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -75,6 +75,9 @@ struct kvm_arch {
 
 	/* Interrupt controller */
 	struct vgic_dist vgic;
+
+	/* Mandated version of PSCI */
+	u32 psci_version;
 };
 
 #define KVM_NR_MEM_OBJS     40
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 9abbf3044654..04b3256f8e6d 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -206,6 +206,12 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+					 KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 959e50d2588c..56a0260ceb11 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <kvm/arm_psci.h>
 #include <asm/cputype.h>
 #include <linux/uaccess.h>
 #include <asm/kvm.h>
@@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
 	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
-		+ NUM_TIMER_REGS;
+		+ kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
 }
 
 /**
@@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 		uindices++;
 	}
 
+	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+	if (ret)
+		return ret;
+	uindices += kvm_arm_get_fw_num_regs(vcpu);
+
 	ret = copy_timer_indices(vcpu, uindices);
 	if (ret)
 		return ret;
@@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return get_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_get_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return get_timer_reg(vcpu, reg);
 
@@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return set_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_set_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return set_timer_reg(vcpu, reg);
 
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 806b0b126a64..6e3b969391fd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -996,14 +996,12 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
 
 	if (id == SYS_ID_AA64PFR0_EL1) {
 		if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
-			pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n",
-				    task_pid_nr(current));
+			kvm_debug("SVE unsupported for guests, suppressing\n");
 
 		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
 	} else if (id == SYS_ID_AA64MMFR1_EL1) {
 		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
-			pr_err_once("kvm [%i]: LORegions unsupported for guests, suppressing\n",
-				    task_pid_nr(current));
+			kvm_debug("LORegions unsupported for guests, suppressing\n");
 
 		val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
 	}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aa66ccd6ed6c..c7668806163f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4544,12 +4544,6 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
 }
 
-static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
-{
-	if (enable_ept)
-		vmx_flush_tlb(vcpu, true);
-}
-
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -9278,7 +9272,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	} else {
 		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-		vmx_flush_tlb_ept_only(vcpu);
+		vmx_flush_tlb(vcpu, true);
 	}
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
 
@@ -9306,7 +9300,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
 	    !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
 			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
-		vmx_flush_tlb_ept_only(vcpu);
+		vmx_flush_tlb(vcpu, true);
 	}
 }
 
@@ -11220,7 +11214,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 		}
 	} else if (nested_cpu_has2(vmcs12,
 				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
-		vmx_flush_tlb_ept_only(vcpu);
+		vmx_flush_tlb(vcpu, true);
 	}
 
 	/*
@@ -12073,7 +12067,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	} else if (!nested_cpu_has_ept(vmcs12) &&
 		   nested_cpu_has2(vmcs12,
 				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
-		vmx_flush_tlb_ept_only(vcpu);
+		vmx_flush_tlb(vcpu, true);
 	}
 
 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7d35ce672989..c9492f764902 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -302,13 +302,6 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
 	    __rem;						\
 	 })
 
-#define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
-#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
-#define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
-#define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
-                                              KVM_X86_DISABLE_EXITS_HTL | \
-                                              KVM_X86_DISABLE_EXITS_PAUSE)
-
 static inline bool kvm_mwait_in_guest(struct kvm *kvm)
 {
 	return kvm->arch.mwait_in_guest;
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index e518e4e3dfb5..4b1548129fa2 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -37,10 +37,15 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
 	 * Our PSCI implementation stays the same across versions from
 	 * v0.2 onward, only adding the few mandatory functions (such
 	 * as FEATURES with 1.0) that are required by newer
-	 * revisions. It is thus safe to return the latest.
+	 * revisions. It is thus safe to return the latest, unless
+	 * userspace has instructed us otherwise.
 	 */
-	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+		if (vcpu->kvm->arch.psci_version)
+			return vcpu->kvm->arch.psci_version;
+
 		return KVM_ARM_PSCI_LATEST;
+	}
 
 	return KVM_ARM_PSCI_0_1;
 }
@@ -48,4 +53,11 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
 
 int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
 
+struct kvm_one_reg;
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
 #endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 1065006c9bf5..b02c41e53d56 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -676,6 +676,13 @@ struct kvm_ioeventfd {
 	__u8  pad[36];
 };
 
+#define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
+#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
+#define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
+#define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
+                                              KVM_X86_DISABLE_EXITS_HTL | \
+                                              KVM_X86_DISABLE_EXITS_PAUSE)
+
 /* for KVM_ENABLE_CAP */
 struct kvm_enable_cap {
 	/* in */
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index dba629c5f8ac..a4c1b76240df 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
+static DEFINE_RWLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
 {
 	phys_addr_t pgd_phys;
 	u64 vmid;
+	bool new_gen;
 
-	if (!need_new_vmid_gen(kvm))
+	read_lock(&kvm_vmid_lock);
+	new_gen = need_new_vmid_gen(kvm);
+	read_unlock(&kvm_vmid_lock);
+
+	if (!new_gen)
 		return;
 
-	spin_lock(&kvm_vmid_lock);
+	write_lock(&kvm_vmid_lock);
 
 	/*
 	 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
 	 * use the same vmid.
 	 */
 	if (!need_new_vmid_gen(kvm)) {
-		spin_unlock(&kvm_vmid_lock);
+		write_unlock(&kvm_vmid_lock);
 		return;
 	}
 
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
-	spin_unlock(&kvm_vmid_lock);
+	write_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 6919352cbf15..c4762bef13c6 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -18,6 +18,7 @@
 #include <linux/arm-smccc.h>
 #include <linux/preempt.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 #include <linux/wait.h>
 
 #include <asm/cputype.h>
@@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 	smccc_set_retval(vcpu, val, 0, 0, 0);
 	return 1;
 }
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
+{
+	return 1;		/* PSCI version */
+}
+
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
+		return -EFAULT;
+
+	return 0;
+}
+
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		u64 val;
+
+		val = kvm_psci_version(vcpu, vcpu->kvm);
+		if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		bool wants_02;
+		u64 val;
+
+		if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+
+		switch (val) {
+		case KVM_ARM_PSCI_0_1:
+			if (wants_02)
+				return -EINVAL;
+			vcpu->kvm->arch.psci_version = val;
+			return 0;
+		case KVM_ARM_PSCI_0_2:
+		case KVM_ARM_PSCI_1_0:
+			if (!wants_02)
+				return -EINVAL;
+			vcpu->kvm->arch.psci_version = val;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index bd125563b15b..702936cbe173 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -600,6 +600,7 @@ retry:
 
 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
+		bool target_vcpu_needs_kick = false;
 
 		spin_lock(&irq->irq_lock);
 
@@ -670,11 +671,18 @@ retry:
 			list_del(&irq->ap_list);
 			irq->vcpu = target_vcpu;
 			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
+			target_vcpu_needs_kick = true;
 		}
 
 		spin_unlock(&irq->irq_lock);
 		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
 		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+
+		if (target_vcpu_needs_kick) {
+			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
+			kvm_vcpu_kick(target_vcpu);
+		}
+
 		goto retry;
 	}
 