author	Eugene Korenevsky <ekorenevsky@gmail.com>	2014-12-11 00:53:27 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-01-08 16:45:15 -0500
commit	e9ac033e6b6970c7061725fc6824b3933eb5a0e7 (patch)
tree	d505687fd801995fdd1632b2336d53598007f30f
parent	ff651cb613b4cc8aa2e4284525948872b4d77d66 (diff)
KVM: nVMX: Improve nested msr switch checking
This patch improves the checks required by the Intel Software Developer Manual:
- SMM MSRs are not allowed.
- microcode MSRs are not allowed.
- x2apic MSRs are checked only when the LAPIC is in x2apic mode.
- MSR switch areas must be aligned to 16 bytes.
- the addresses of the first and last bytes of an MSR switch area must not set any bits
  beyond the processor's physical-address width.
The patch also adds warning messages for failures during MSR switching; these are
useful when debugging VMMs running under nVMX.
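
As a rough standalone sketch of the alignment and address-width constraints listed
above (hypothetical helper name in plain C, not the patch's kernel code; it assumes
struct vmx_msr_entry is 16 bytes):

	#include <stdint.h>

	/* Sketch only: an MSR switch area of 'count' 16-byte entries starting at
	 * guest-physical address 'addr' is acceptable if it is 16-byte aligned and
	 * neither its first nor its last byte sets bits at or above 'maxphyaddr'.
	 */
	static int msr_switch_area_valid(uint64_t addr, uint64_t count, int maxphyaddr)
	{
		uint64_t last;

		if (count == 0)
			return 1;		/* nothing to switch, nothing to check */
		if (addr & 0xf)
			return 0;		/* area must be 16-byte aligned */
		last = addr + count * 16 - 1;	/* address of the area's last byte */
		if ((addr >> maxphyaddr) || (last >> maxphyaddr))
			return 0;		/* a byte lies beyond the physical-address width */
		return 1;
	}

The patch itself performs the equivalent test with IS_ALIGNED() and the vCPU's
cpuid_maxphyaddr() in nested_vmx_check_msr_switch() below.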
Signed-off-by: Eugene Korenevsky <ekorenevsky@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/include/uapi/asm/msr-index.h	  3
-rw-r--r--	arch/x86/kvm/vmx.c			128
2 files changed, 117 insertions, 14 deletions
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index c8aa65d56027..d0050f25ea80 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -356,6 +356,9 @@
 #define MSR_IA32_UCODE_WRITE		0x00000079
 #define MSR_IA32_UCODE_REV		0x0000008b
 
+#define MSR_IA32_SMM_MONITOR_CTL	0x0000009b
+#define MSR_IA32_SMBASE			0x0000009e
+
 #define MSR_IA32_PERF_STATUS		0x00000198
 #define MSR_IA32_PERF_CTL		0x00000199
 #define MSR_AMD_PSTATE_DEF_BASE		0xc0010064
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9137d2ba26a2..70bdcf946f95 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8293,18 +8293,80 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
 			ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
 }
 
-static inline int nested_vmx_msr_check_common(struct vmx_msr_entry *e)
+static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
+				       unsigned long count_field,
+				       unsigned long addr_field,
+				       int maxphyaddr)
 {
-	if (e->index >> 8 == 0x8 || e->reserved != 0)
+	u64 count, addr;
+
+	if (vmcs12_read_any(vcpu, count_field, &count) ||
+	    vmcs12_read_any(vcpu, addr_field, &addr)) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	if (count == 0)
+		return 0;
+	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
+	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
+		pr_warn_ratelimited(
+			"nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
+			addr_field, maxphyaddr, count, addr);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
+						struct vmcs12 *vmcs12)
+{
+	int maxphyaddr;
+
+	if (vmcs12->vm_exit_msr_load_count == 0 &&
+	    vmcs12->vm_exit_msr_store_count == 0 &&
+	    vmcs12->vm_entry_msr_load_count == 0)
+		return 0; /* Fast path */
+	maxphyaddr = cpuid_maxphyaddr(vcpu);
+	if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
+					VM_EXIT_MSR_LOAD_ADDR, maxphyaddr) ||
+	    nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
+					VM_EXIT_MSR_STORE_ADDR, maxphyaddr) ||
+	    nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
+					VM_ENTRY_MSR_LOAD_ADDR, maxphyaddr))
+		return -EINVAL;
+	return 0;
+}
+
+static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
+				       struct vmx_msr_entry *e)
+{
+	/* x2APIC MSR accesses are not allowed */
+	if (apic_x2apic_mode(vcpu->arch.apic) && e->index >> 8 == 0x8)
+		return -EINVAL;
+	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
+	    e->index == MSR_IA32_UCODE_REV)
+		return -EINVAL;
+	if (e->reserved != 0)
 		return -EINVAL;
 	return 0;
 }
 
-static inline int nested_vmx_load_msr_check(struct vmx_msr_entry *e)
+static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
+				     struct vmx_msr_entry *e)
 {
 	if (e->index == MSR_FS_BASE ||
 	    e->index == MSR_GS_BASE ||
-	    nested_vmx_msr_check_common(e))
+	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
+	    nested_vmx_msr_check_common(vcpu, e))
+		return -EINVAL;
+	return 0;
+}
+
+static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
+				      struct vmx_msr_entry *e)
+{
+	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
+	    nested_vmx_msr_check_common(vcpu, e))
 		return -EINVAL;
 	return 0;
 }
@@ -8321,13 +8383,27 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 
 	msr.host_initiated = false;
 	for (i = 0; i < count; i++) {
-		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e), &e, sizeof(e));
-		if (nested_vmx_load_msr_check(&e))
+		if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
+				   &e, sizeof(e))) {
+			pr_warn_ratelimited(
+				"%s cannot read MSR entry (%u, 0x%08llx)\n",
+				__func__, i, gpa + i * sizeof(e));
 			goto fail;
+		}
+		if (nested_vmx_load_msr_check(vcpu, &e)) {
+			pr_warn_ratelimited(
+				"%s check failed (%u, 0x%x, 0x%x)\n",
+				__func__, i, e.index, e.reserved);
+			goto fail;
+		}
 		msr.index = e.index;
 		msr.data = e.value;
-		if (kvm_set_msr(vcpu, &msr))
+		if (kvm_set_msr(vcpu, &msr)) {
+			pr_warn_ratelimited(
+				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+				__func__, i, e.index, e.value);
 			goto fail;
+		}
 	}
 	return 0;
 fail:
@@ -8340,16 +8416,35 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	struct vmx_msr_entry e;
 
 	for (i = 0; i < count; i++) {
-		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
-			       &e, 2 * sizeof(u32));
-		if (nested_vmx_msr_check_common(&e))
+		if (kvm_read_guest(vcpu->kvm,
+				   gpa + i * sizeof(e),
+				   &e, 2 * sizeof(u32))) {
+			pr_warn_ratelimited(
+				"%s cannot read MSR entry (%u, 0x%08llx)\n",
+				__func__, i, gpa + i * sizeof(e));
 			return -EINVAL;
-		if (kvm_get_msr(vcpu, e.index, &e.value))
+		}
+		if (nested_vmx_store_msr_check(vcpu, &e)) {
+			pr_warn_ratelimited(
+				"%s check failed (%u, 0x%x, 0x%x)\n",
+				__func__, i, e.index, e.reserved);
 			return -EINVAL;
-		kvm_write_guest(vcpu->kvm,
-				gpa + i * sizeof(e) +
+		}
+		if (kvm_get_msr(vcpu, e.index, &e.value)) {
+			pr_warn_ratelimited(
+				"%s cannot read MSR (%u, 0x%x)\n",
+				__func__, i, e.index);
+			return -EINVAL;
+		}
+		if (kvm_write_guest(vcpu->kvm,
+				    gpa + i * sizeof(e) +
 				    offsetof(struct vmx_msr_entry, value),
-				&e.value, sizeof(e.value));
+				    &e.value, sizeof(e.value))) {
+			pr_warn_ratelimited(
+				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+				__func__, i, e.index, e.value);
+			return -EINVAL;
+		}
 	}
 	return 0;
 }
@@ -8698,6 +8793,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
+	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
+		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+		return 1;
+	}
+
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
 				nested_vmx_true_procbased_ctls_low,
 				nested_vmx_procbased_ctls_high) ||