 arch/x86/kvm/vmx.c | 39 +++++++++++++++++++++++++++++++++----
 1 file changed, 35 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a17bbb862f91..ea9b2e938ed1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1137,6 +1137,11 @@ static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
 	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
 }
 
+static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
+{
+	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
+}
+
 static inline bool is_exception(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -2430,6 +2435,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	vmx->nested.nested_vmx_secondary_ctls_high &=
 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+		SECONDARY_EXEC_APIC_REGISTER_VIRT |
 		SECONDARY_EXEC_WBINVD_EXITING |
 		SECONDARY_EXEC_XSAVES;
 
@@ -7434,6 +7440,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_APIC_ACCESS:
 		return nested_cpu_has2(vmcs12,
 			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+	case EXIT_REASON_APIC_WRITE:
+		/* apic_write should exit unconditionally. */
+		return 1;
 	case EXIT_REASON_EPT_VIOLATION:
 		/*
 		 * L0 always deals with the EPT violation. If nested EPT is
@@ -8593,6 +8602,7 @@ static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
 static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 					       struct vmcs12 *vmcs12)
 {
+	int msr;
 	struct page *page;
 	unsigned long *msr_bitmap;
 
@@ -8612,16 +8622,35 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 	}
 
 	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
+		if (nested_cpu_has_apic_reg_virt(vmcs12))
+			for (msr = 0x800; msr <= 0x8ff; msr++)
+				nested_vmx_disable_intercept_for_msr(
+					msr_bitmap,
+					vmx_msr_bitmap_nested,
+					msr, MSR_TYPE_R);
 		/* TPR is allowed */
 		nested_vmx_disable_intercept_for_msr(msr_bitmap,
 				vmx_msr_bitmap_nested,
 				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
 				MSR_TYPE_R | MSR_TYPE_W);
-	} else
+	} else {
+		/*
+		 * Enable reading intercept of all the x2apic
+		 * MSRs. We should not rely on vmcs12 to do any
+		 * optimizations here, it may have been modified
+		 * by L1.
+		 */
+		for (msr = 0x800; msr <= 0x8ff; msr++)
+			__vmx_enable_intercept_for_msr(
+				vmx_msr_bitmap_nested,
+				msr,
+				MSR_TYPE_R);
+
 		__vmx_enable_intercept_for_msr(
 				vmx_msr_bitmap_nested,
 				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
-				MSR_TYPE_R | MSR_TYPE_W);
+				MSR_TYPE_W);
+	}
 	kunmap(page);
 	nested_release_page_clean(page);
 
@@ -8631,14 +8660,16 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 				       struct vmcs12 *vmcs12)
 {
-	if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
+	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+	    !nested_cpu_has_apic_reg_virt(vmcs12))
 		return 0;
 
 	/*
 	 * If virtualize x2apic mode is enabled,
 	 * virtualize apic access must be disabled.
 	 */
-	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+	if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
 		return -EINVAL;
 
 	/* tpr shadow is needed by all apicv features. */
