about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorJan Kiszka <jan.kiszka@siemens.com>2014-01-04 12:47:22 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2014-01-17 04:22:16 -0500
commitcae501397a25dc1e88375925c5e93a264d4a55ba (patch)
tree9baa938d4cbd923f8e5b230002068ec5d5a18ede /arch
parent542060ea79c861e100411a5a44df747b56a693df (diff)
KVM: nVMX: Clean up handling of VMX-related MSRs
This simplifies the code and also stops issuing warnings about writing to unhandled MSRs when VMX is disabled or the Feature Control MSR is locked - we do handle them all according to the spec.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h1
-rw-r--r--arch/x86/kvm/vmx.c79
2 files changed, 24 insertions(+), 56 deletions(-)
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 37813b5ddc37..2e4a42d31cfe 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -527,6 +527,7 @@
 #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
 #define MSR_IA32_VMX_TRUE_EXIT_CTLS      0x0000048f
 #define MSR_IA32_VMX_TRUE_ENTRY_CTLS     0x00000490
+#define MSR_IA32_VMX_VMFUNC              0x00000491
 
 /* VMX_BASIC bits and bitmasks */
 #define VMX_BASIC_VMCS_SIZE_SHIFT	32
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e539c45eb669..fc4a255d5426 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2361,32 +2361,10 @@ static inline u64 vmx_control_msr(u32 low, u32 high)
 	return low | ((u64)high << 32);
 }
 
-/*
- * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
- * also let it use VMX-specific MSRs.
- * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
- * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
- * like all other MSRs).
- */
+/* Returns 0 on success, non-0 otherwise. */
 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
-	if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
-	    msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
-		/*
-		 * According to the spec, processors which do not support VMX
-		 * should throw a #GP(0) when VMX capability MSRs are read.
-		 */
-		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-		return 1;
-	}
-
 	switch (msr_index) {
-	case MSR_IA32_FEATURE_CONTROL:
-		if (nested_vmx_allowed(vcpu)) {
-			*pdata = to_vmx(vcpu)->nested.msr_ia32_feature_control;
-			break;
-		}
-		return 0;
 	case MSR_IA32_VMX_BASIC:
 		/*
 		 * This MSR reports some information about VMX support. We
@@ -2453,38 +2431,9 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 		*pdata = nested_vmx_ept_caps;
 		break;
 	default:
-		return 0;
-	}
-
-	return 1;
-}
-
-static void vmx_leave_nested(struct kvm_vcpu *vcpu);
-
-static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
-{
-	u32 msr_index = msr_info->index;
-	u64 data = msr_info->data;
-	bool host_initialized = msr_info->host_initiated;
-
-	if (!nested_vmx_allowed(vcpu))
-		return 0;
-
-	if (msr_index == MSR_IA32_FEATURE_CONTROL) {
-		if (!host_initialized &&
-		    to_vmx(vcpu)->nested.msr_ia32_feature_control
-		    & FEATURE_CONTROL_LOCKED)
-			return 0;
-		to_vmx(vcpu)->nested.msr_ia32_feature_control = data;
-		if (host_initialized && data == 0)
-			vmx_leave_nested(vcpu);
-		return 1;
-	}
-
-	/*
-	 * No need to treat VMX capability MSRs specially: If we don't handle
-	 * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
-	 */
-	return 0;
+		return 1;
 	}
 
 	return 0;
 }
 
@@ -2530,13 +2479,20 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 	case MSR_IA32_SYSENTER_ESP:
 		data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
+	case MSR_IA32_FEATURE_CONTROL:
+		if (!nested_vmx_allowed(vcpu))
+			return 1;
+		data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
+		break;
+	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+		if (!nested_vmx_allowed(vcpu))
+			return 1;
+		return vmx_get_vmx_msr(vcpu, msr_index, pdata);
 	case MSR_TSC_AUX:
 		if (!to_vmx(vcpu)->rdtscp_enabled)
 			return 1;
 		/* Otherwise falls through */
 	default:
-		if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
-			return 0;
 		msr = find_msr_entry(to_vmx(vcpu), msr_index);
 		if (msr) {
 			data = msr->data;
@@ -2549,6 +2505,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 	return 0;
 }
 
+static void vmx_leave_nested(struct kvm_vcpu *vcpu);
+
 /*
  * Writes msr value into into the appropriate "register".
  * Returns 0 on success, non-0 otherwise.
@@ -2603,6 +2561,17 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_TSC_ADJUST:
 		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
+	case MSR_IA32_FEATURE_CONTROL:
+		if (!nested_vmx_allowed(vcpu) ||
+		    (to_vmx(vcpu)->nested.msr_ia32_feature_control &
+		     FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
+			return 1;
+		vmx->nested.msr_ia32_feature_control = data;
+		if (msr_info->host_initiated && data == 0)
+			vmx_leave_nested(vcpu);
+		break;
+	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+		return 1; /* they are read-only */
 	case MSR_TSC_AUX:
 		if (!vmx->rdtscp_enabled)
 			return 1;
@@ -2611,8 +2580,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		/* Otherwise falls through */
 	default:
-		if (vmx_set_vmx_msr(vcpu, msr_info))
-			break;
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
 			msr->data = data;