 arch/x86/kvm/vmx.c | 61 ++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 32 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4655d6dd6759..62670b2f6d48 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -397,6 +397,7 @@ struct loaded_vmcs {
 	int cpu;
 	bool launched;
 	bool nmi_known_unmasked;
+	bool hv_timer_armed;
 	/* Support for vnmi-less CPUs */
 	int soft_vnmi_blocked;
 	ktime_t entry_time;
@@ -10595,24 +10596,38 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host, false);
 }
 
-static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
+{
+	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
+	if (!vmx->loaded_vmcs->hv_timer_armed)
+		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+			      PIN_BASED_VMX_PREEMPTION_TIMER);
+	vmx->loaded_vmcs->hv_timer_armed = true;
+}
+
+static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 tscl;
 	u32 delta_tsc;
 
-	if (vmx->hv_deadline_tsc == -1)
-		return;
+	if (vmx->hv_deadline_tsc != -1) {
+		tscl = rdtsc();
+		if (vmx->hv_deadline_tsc > tscl)
+			/* set_hv_timer ensures the delta fits in 32-bits */
+			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
+					  cpu_preemption_timer_multi);
+		else
+			delta_tsc = 0;
 
-	tscl = rdtsc();
-	if (vmx->hv_deadline_tsc > tscl)
-		/* sure to be 32 bit only because checked on set_hv_timer */
-		delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
-				cpu_preemption_timer_multi);
-	else
-		delta_tsc = 0;
+		vmx_arm_hv_timer(vmx, delta_tsc);
+		return;
+	}
 
-	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
+	if (vmx->loaded_vmcs->hv_timer_armed)
+		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+				PIN_BASED_VMX_PREEMPTION_TIMER);
+	vmx->loaded_vmcs->hv_timer_armed = false;
 }
 
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -10672,7 +10687,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	atomic_switch_perf_msrs(vmx);
 
-	vmx_arm_hv_timer(vcpu);
+	vmx_update_hv_timer(vcpu);
 
 	/*
 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -12078,11 +12093,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
 	exec_control = vmcs12->pin_based_vm_exec_control;
 
-	/* Preemption timer setting is only taken from vmcs01.  */
-	exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+	/* Preemption timer setting is computed directly in vmx_vcpu_run.  */
 	exec_control |= vmcs_config.pin_based_exec_ctrl;
-	if (vmx->hv_deadline_tsc == -1)
-		exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+	exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+	vmx->loaded_vmcs->hv_timer_armed = false;
 
 	/* Posted interrupts setting is only taken from vmcs12.  */
 	if (nested_cpu_has_posted_intr(vmcs12)) {
@@ -13255,12 +13269,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
-	if (vmx->hv_deadline_tsc == -1)
-		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
-				PIN_BASED_VMX_PREEMPTION_TIMER);
-	else
-		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
-			      PIN_BASED_VMX_PREEMPTION_TIMER);
+
 	if (kvm_has_tsc_control)
 		decache_tsc_multiplier(vmx);
 
@@ -13464,18 +13473,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
 		return -ERANGE;
 
 	vmx->hv_deadline_tsc = tscl + delta_tsc;
-	vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
-			PIN_BASED_VMX_PREEMPTION_TIMER);
-
 	return delta_tsc == 0;
 }
 
 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	vmx->hv_deadline_tsc = -1;
-	vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
-			PIN_BASED_VMX_PREEMPTION_TIMER);
+	to_vmx(vcpu)->hv_deadline_tsc = -1;
 }
 #endif
 