Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	18	++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d1b5cd4d34c..20316c67b824 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -106,6 +106,8 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
 static u32 tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
+static bool backwards_tsc_observed = false;
+
 #define KVM_NR_SHARED_MSRS 16
 
 struct kvm_shared_msrs_global {
@@ -280,7 +282,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-asmlinkage void kvm_spurious_fault(void)
+asmlinkage __visible void kvm_spurious_fault(void)
 {
 	/* Fault while not rebooting.  We want the trace. */
 	BUG();
@@ -652,6 +654,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
 		return 1;
 
+	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
+		return 1;
+
 	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
 		return 1;
 
@@ -680,6 +685,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 		kvm_mmu_reset_context(vcpu);
 
+	if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
+		update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
+
 	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
 		kvm_update_cpuid(vcpu);
 
@@ -1117,7 +1125,6 @@ static inline u64 get_kernel_ns(void)
 {
 	struct timespec ts;
 
-	WARN_ON(preemptible());
 	ktime_get_ts(&ts);
 	monotonic_to_bootbased(&ts);
 	return timespec_to_ns(&ts);
@@ -1481,7 +1488,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
 					&ka->master_kernel_ns,
 					&ka->master_cycle_now);
 
-	ka->use_master_clock = host_tsc_clocksource & vcpus_matched;
+	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
+				&& !backwards_tsc_observed;
 
 	if (ka->use_master_clock)
 		atomic_set(&kvm_guest_has_master_clock, 1);
@@ -4164,7 +4172,8 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 		| (write ? PFERR_WRITE_MASK : 0);
 
 	if (vcpu_match_mmio_gva(vcpu, gva)
-	    && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
+	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
+				 vcpu->arch.access, access)) {
 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
 			(gva & (PAGE_SIZE - 1));
 		trace_vcpu_match_mmio(gva, *gpa, write, false);
@@ -6939,6 +6948,7 @@ int kvm_arch_hardware_enable(void *garbage)
 	 */
 	if (backwards_tsc) {
 		u64 delta_cyc = max_tsc - local_tsc;
+		backwards_tsc_observed = true;
 		list_for_each_entry(kvm, &vm_list, vm_list) {
 			kvm_for_each_vcpu(i, vcpu, kvm) {
 				vcpu->arch.tsc_offset_adjustment += delta_cyc;