Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/vmx.c | 47 +++++++++++++++++++++++++++++++++++++++++------
1 file changed, 41 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bfe3022e60f1..881d266eb3ae 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3134,9 +3134,17 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_unrestricted_guest())
 		enable_unrestricted_guest = 0;
 
-	if (!cpu_has_vmx_flexpriority())
+	if (!cpu_has_vmx_flexpriority()) {
 		flexpriority_enabled = 0;
 
+		/*
+		 * set_apic_access_page_addr() is used to reload apic access
+		 * page upon invalidation. No need to do anything if the
+		 * processor does not have the APIC_ACCESS_ADDR VMCS field.
+		 */
+		kvm_x86_ops->set_apic_access_page_addr = NULL;
+	}
+
 	if (!cpu_has_vmx_tpr_shadow())
 		kvm_x86_ops->update_cr8_intercept = NULL;
 
@@ -4557,9 +4565,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write32(TPR_THRESHOLD, 0);
 	}
 
-	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
-		vmcs_write64(APIC_ACCESS_ADDR,
-			     page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
+	kvm_vcpu_reload_apic_access_page(vcpu);
 
 	if (vmx_vm_has_apicv(vcpu->kvm))
 		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
@@ -7198,6 +7204,29 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	vmx_set_msr_bitmap(vcpu);
 }
 
+static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * Currently we do not handle the nested case where L2 has an
+	 * APIC access page of its own; that page is still pinned.
+	 * Hence, we skip the case where the VCPU is in guest mode _and_
+	 * L1 prepared an APIC access page for L2.
+	 *
+	 * For the case where L1 and L2 share the same APIC access page
+	 * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear
+	 * in the vmcs12), this function will only update either the vmcs01
+	 * or the vmcs02. If the former, the vmcs02 will be updated by
+	 * prepare_vmcs02. If the latter, the vmcs01 will be updated in
+	 * the next L2->L1 exit.
+	 */
+	if (!is_guest_mode(vcpu) ||
+	    !nested_cpu_has2(vmx->nested.current_vmcs12,
+			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+		vmcs_write64(APIC_ACCESS_ADDR, hpa);
+}
+
 static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
 {
 	u16 status;
@@ -8140,8 +8169,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	} else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
 		exec_control |=
 			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-		vmcs_write64(APIC_ACCESS_ADDR,
-			page_to_phys(vcpu->kvm->arch.apic_access_page));
+		kvm_vcpu_reload_apic_access_page(vcpu);
 	}
 
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
@@ -8950,6 +8978,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	}
 
 	/*
+	 * We are now running in L2, mmu_notifier will force to reload the
+	 * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1.
+	 */
+	kvm_vcpu_reload_apic_access_page(vcpu);
+
+	/*
 	 * Exiting from L2 to L1, we're now back to L1 which thinks it just
 	 * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
 	 * success or failure flag accordingly.
@@ -9074,6 +9108,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
 	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
+	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
 	.vm_has_apicv = vmx_vm_has_apicv,
 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
 	.hwapic_irr_update = vmx_hwapic_irr_update,
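
For context, the .set_apic_access_page_addr hook added above is meant to be driven from the generic x86 side when the APIC access page is migrated and its host physical address changes. That caller is not part of this diff; the sketch below only illustrates how such a reload path might look, assuming it is triggered by an MMU-notifier-driven request (the function name and call site here are assumptions, not taken from this patch):

	/*
	 * Illustrative only: a possible generic-x86 reload path that would
	 * invoke the new hook. Not part of this diff.
	 */
	static void vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
	{
		struct page *page;

		/* The hook is NULL when the CPU lacks APIC_ACCESS_ADDR. */
		if (!kvm_x86_ops->set_apic_access_page_addr)
			return;

		/* Look up the current backing page of the APIC base GFN. */
		page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
		if (is_error_page(page))
			return;

		/* Rewrite APIC_ACCESS_ADDR in the VMCS with the new hpa. */
		kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));

		/*
		 * Do not pin the page: if it is migrated again, the MMU
		 * notifier can simply request another reload.
		 */
		put_page(page);
	}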