Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--   arch/x86/kvm/x86.c | 64
1 file changed, 36 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 12f33e662382..c805cf494154 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -161,6 +161,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "halt_exits", VCPU_STAT(halt_exits) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
+	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "hypercalls", VCPU_STAT(hypercalls) },
 	{ "request_irq", VCPU_STAT(request_irq_exits) },
@@ -2002,22 +2003,8 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.pv_time_enabled = false;
 }
 
-static void accumulate_steal_time(struct kvm_vcpu *vcpu)
-{
-	u64 delta;
-
-	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
-		return;
-
-	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
-	vcpu->arch.st.last_steal = current->sched_info.run_delay;
-	vcpu->arch.st.accum_steal = delta;
-}
-
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
-	accumulate_steal_time(vcpu);
-
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
@@ -2025,9 +2012,26 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
 		return;
 
-	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
-	vcpu->arch.st.steal.version += 2;
-	vcpu->arch.st.accum_steal = 0;
+	if (vcpu->arch.st.steal.version & 1)
+		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
+
+	vcpu->arch.st.steal.version += 1;
+
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+
+	smp_wmb();
+
+	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+		vcpu->arch.st.last_steal;
+	vcpu->arch.st.last_steal = current->sched_info.run_delay;
+
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+
+	smp_wmb();
+
+	vcpu->arch.st.steal.version += 1;
 
 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
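(The hunk above drops the old accum_steal bookkeeping in favour of a seqlock-style protocol: the version field is forced even on first use, bumped to an odd value and flushed to guest memory before the steal counter is updated, then bumped back to even and flushed again, with write barriers between the steps. A minimal sketch of a matching guest-side reader, assuming a mapped struct kvm_steal_time; the helper name is illustrative and not part of this patch.)

static u64 example_read_steal(struct kvm_steal_time *st)
{
	u32 version;
	u64 steal;

	do {
		version = READ_ONCE(st->version);	/* odd means a host update is in flight */
		smp_rmb();				/* pairs with the host's smp_wmb() */
		steal = READ_ONCE(st->steal);
		smp_rmb();
	} while ((version & 1) || version != READ_ONCE(st->version));

	return steal;
}

(The double host-side write plus barriers is what lets a reader looping like this never observe a torn or half-updated steal value.)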
@@ -7752,6 +7756,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm_page_track_init(kvm);
 	kvm_mmu_init_vm(kvm);
 
+	if (kvm_x86_ops->vm_init)
+		return kvm_x86_ops->vm_init(kvm);
+
 	return 0;
 }
 
@@ -7873,6 +7880,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
 		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
 	}
+	if (kvm_x86_ops->vm_destroy)
+		kvm_x86_ops->vm_destroy(kvm);
 	kvm_iommu_unmap_guest(kvm);
 	kfree(kvm->arch.vpic);
 	kfree(kvm->arch.vioapic);
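(These two hunks give vendor modules optional per-VM construction and teardown hooks, apparently for the AVIC support this diff also touches. A hedged sketch of how a vendor module might wire them up; the example_* names are placeholders, not the real SVM implementation.)

static int example_vm_init(struct kvm *kvm)
{
	/* allocate per-VM vendor state; a nonzero return fails VM creation */
	return 0;
}

static void example_vm_destroy(struct kvm *kvm)
{
	/* release whatever example_vm_init() set up */
}

static struct kvm_x86_ops example_x86_ops = {
	/* other callbacks omitted for brevity */
	.vm_init	= example_vm_init,
	.vm_destroy	= example_vm_destroy,
};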
@@ -8355,19 +8364,21 @@ bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
 
+bool kvm_arch_has_irq_bypass(void)
+{
+	return kvm_x86_ops->update_pi_irte != NULL;
+}
+
 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
 				      struct irq_bypass_producer *prod)
 {
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(cons, struct kvm_kernel_irqfd, consumer);
 
-	if (kvm_x86_ops->update_pi_irte) {
-		irqfd->producer = prod;
-		return kvm_x86_ops->update_pi_irte(irqfd->kvm,
-				prod->irq, irqfd->gsi, 1);
-	}
+	irqfd->producer = prod;
 
-	return -EINVAL;
+	return kvm_x86_ops->update_pi_irte(irqfd->kvm,
+					   prod->irq, irqfd->gsi, 1);
 }
 
 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
@@ -8377,11 +8388,6 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(cons, struct kvm_kernel_irqfd, consumer);
 
-	if (!kvm_x86_ops->update_pi_irte) {
-		WARN_ON(irqfd->producer != NULL);
-		return;
-	}
-
 	WARN_ON(irqfd->producer != prod);
 	irqfd->producer = NULL;
 
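(The producer callbacks can drop their update_pi_irte NULL checks because the new kvm_arch_has_irq_bypass() is meant to be consulted before the bypass consumer is registered at all. Schematically, the generic irqfd setup path would look roughly like the abbreviated sketch below; only the gating pattern is the point, the surrounding code is not reproduced from this patch.)

	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}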
@@ -8429,3 +8435,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);