author		Linus Torvalds <torvalds@linux-foundation.org>	2016-05-19 14:27:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-05-19 14:27:09 -0400
commit		7beaa24ba49717419e24d1f6321e8b3c265a719c (patch)
tree		a5c5433d3c7bfc4c23e67174463ccf519c8406f0 /arch/x86/kvm/x86.c
parent		07b75260ebc2c789724c594d7eaf0194fa47b3be (diff)
parent		9842df62004f366b9fed2423e24df10542ee0dc5 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "Small release overall.

  x86:
   - miscellaneous fixes
   - AVIC support (local APIC virtualization, AMD version)

  s390:
   - polling for interrupts after a VCPU goes to halted state is now
     enabled for s390
   - use hardware provided information about facility bits that do not
     need any hypervisor activity, and other fixes for cpu models and
     facilities
   - improve perf output
   - floating interrupt controller improvements.

  MIPS:
   - miscellaneous fixes

  PPC:
   - bugfixes only

  ARM:
   - 16K page size support
   - generic firmware probing layer for timer and GIC

  Christoffer Dall (KVM-ARM maintainer) says:
    "There are a few changes in this pull request touching things
     outside KVM, but they should all carry the necessary acks and it
     made the merge process much easier to do it this way."

  though actually the irqchip maintainers' acks didn't make it into the
  patches.  Marc Zyngier, who is both irqchip and KVM-ARM maintainer,
  later acked at http://mid.gmane.org/573351D1.4060303@arm.com
  ('more formally and for documentation purposes')"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (82 commits)
  KVM: MTRR: remove MSR 0x2f8
  KVM: x86: make hwapic_isr_update and hwapic_irr_update look the same
  svm: Manage vcpu load/unload when enable AVIC
  svm: Do not intercept CR8 when enable AVIC
  svm: Do not expose x2APIC when enable AVIC
  KVM: x86: Introducing kvm_x86_ops.apicv_post_state_restore
  svm: Add VMEXIT handlers for AVIC
  svm: Add interrupt injection via AVIC
  KVM: x86: Detect and Initialize AVIC support
  svm: Introduce new AVIC VMCB registers
  KVM: split kvm_vcpu_wake_up from kvm_vcpu_kick
  KVM: x86: Introducing kvm_x86_ops VCPU blocking/unblocking hooks
  KVM: x86: Introducing kvm_x86_ops VM init/destroy hooks
  KVM: x86: Rename kvm_apic_get_reg to kvm_lapic_get_reg
  KVM: x86: Misc LAPIC changes to expose helper functions
  KVM: shrink halt polling even more for invalid wakeups
  KVM: s390: set halt polling to 80 microseconds
  KVM: halt_polling: provide a way to qualify wakeups during poll
  KVM: PPC: Book3S HV: Re-enable XICS fast path for irqfd-generated interrupts
  kvm: Conditionally register IRQ bypass consumer
  ...
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	64
1 file changed, 36 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 12f33e662382..c805cf494154 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -161,6 +161,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
161 { "halt_exits", VCPU_STAT(halt_exits) }, 161 { "halt_exits", VCPU_STAT(halt_exits) },
162 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, 162 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
163 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, 163 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
164 { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
164 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 165 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
165 { "hypercalls", VCPU_STAT(hypercalls) }, 166 { "hypercalls", VCPU_STAT(hypercalls) },
166 { "request_irq", VCPU_STAT(request_irq_exits) }, 167 { "request_irq", VCPU_STAT(request_irq_exits) },
@@ -2002,22 +2003,8 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.pv_time_enabled = false;
 }
 
-static void accumulate_steal_time(struct kvm_vcpu *vcpu)
-{
-	u64 delta;
-
-	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
-		return;
-
-	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
-	vcpu->arch.st.last_steal = current->sched_info.run_delay;
-	vcpu->arch.st.accum_steal = delta;
-}
-
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
-	accumulate_steal_time(vcpu);
-
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
@@ -2025,9 +2012,26 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
 		return;
 
-	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
-	vcpu->arch.st.steal.version += 2;
-	vcpu->arch.st.accum_steal = 0;
+	if (vcpu->arch.st.steal.version & 1)
+		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
+
+	vcpu->arch.st.steal.version += 1;
+
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+
+	smp_wmb();
+
+	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+		vcpu->arch.st.last_steal;
+	vcpu->arch.st.last_steal = current->sched_info.run_delay;
+
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+
+	smp_wmb();
+
+	vcpu->arch.st.steal.version += 1;
 
 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
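Note on the hunk above: the rewritten record_steal_time() turns the steal-time record into a seqcount-style protocol. The version field is bumped to an odd value (and flushed with kvm_write_guest_cached() plus smp_wmb()) before the steal counter is updated, then bumped back to an even value afterwards. A minimal sketch of a matching guest-side reader, assuming a hypothetical read_steal_time() helper over a mapped kvm_steal_time structure (illustrative only, not the in-tree guest code):

/* Illustrative guest-side reader for the version/steal pairing above;
 * read_steal_time() and the bare 'st' pointer are hypothetical helpers,
 * not the in-tree guest implementation. */
static u64 read_steal_time(struct kvm_steal_time *st)
{
	u32 version;
	u64 steal;

	do {
		version = READ_ONCE(st->version);	/* odd => host update in flight */
		rmb();					/* pairs with the host's smp_wmb() */
		steal = READ_ONCE(st->steal);
		rmb();
	} while ((version & 1) || version != READ_ONCE(st->version));

	return steal;
}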
@@ -7752,6 +7756,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm_page_track_init(kvm);
 	kvm_mmu_init_vm(kvm);
 
+	if (kvm_x86_ops->vm_init)
+		return kvm_x86_ops->vm_init(kvm);
+
 	return 0;
 }
 
@@ -7873,6 +7880,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
 		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
 	}
+	if (kvm_x86_ops->vm_destroy)
+		kvm_x86_ops->vm_destroy(kvm);
 	kvm_iommu_unmap_guest(kvm);
 	kfree(kvm->arch.vpic);
 	kfree(kvm->arch.vioapic);
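The two hunks above add optional per-VM hooks to kvm_x86_ops so a vendor module can run code at VM creation and teardown (the AVIC series in this pull uses them for its per-VM state). A hedged sketch of how a backend might wire the hooks up; the avic_vm_init()/avic_vm_destroy() names and their empty bodies here are placeholders, not the actual SVM implementation:

/* Hedged sketch: wiring the new optional hooks from a vendor module.
 * Function names and bodies are placeholders for whatever per-VM state
 * the backend (e.g. SVM's AVIC) actually needs. */
static int avic_vm_init(struct kvm *kvm)
{
	/* allocate per-VM structures, e.g. AVIC physical/logical ID tables */
	return 0;
}

static void avic_vm_destroy(struct kvm *kvm)
{
	/* release whatever avic_vm_init() allocated */
}

static struct kvm_x86_ops svm_x86_ops = {
	/* ... existing callbacks ... */
	.vm_init = avic_vm_init,
	.vm_destroy = avic_vm_destroy,
};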
@@ -8355,19 +8364,21 @@ bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
 
+bool kvm_arch_has_irq_bypass(void)
+{
+	return kvm_x86_ops->update_pi_irte != NULL;
+}
+
 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
 				     struct irq_bypass_producer *prod)
 {
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(cons, struct kvm_kernel_irqfd, consumer);
 
-	if (kvm_x86_ops->update_pi_irte) {
-		irqfd->producer = prod;
-		return kvm_x86_ops->update_pi_irte(irqfd->kvm,
-				prod->irq, irqfd->gsi, 1);
-	}
+	irqfd->producer = prod;
 
-	return -EINVAL;
+	return kvm_x86_ops->update_pi_irte(irqfd->kvm,
+					   prod->irq, irqfd->gsi, 1);
 }
 
 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
@@ -8377,11 +8388,6 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(cons, struct kvm_kernel_irqfd, consumer);
 
-	if (!kvm_x86_ops->update_pi_irte) {
-		WARN_ON(irqfd->producer != NULL);
-		return;
-	}
-
 	WARN_ON(irqfd->producer != prod);
 	irqfd->producer = NULL;
 
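With kvm_arch_has_irq_bypass() exported, the generic irqfd code can skip registering an IRQ bypass consumer when posted interrupts are unavailable, which is why the update_pi_irte NULL checks disappear from the add/del producer callbacks above. A rough sketch of that conditional registration on the consumer side, assuming the usual irq_bypass_consumer fields; treat it as an illustration rather than the exact eventfd.c change:

/* Illustrative only: conditional consumer registration, roughly what the
 * generic irqfd setup path does once kvm_arch_has_irq_bypass() exists.
 * irqfd_register_bypass() is a hypothetical helper name. */
static void irqfd_register_bypass(struct kvm_kernel_irqfd *irqfd)
{
	int ret;

	if (!kvm_arch_has_irq_bypass())
		return;		/* no posted-interrupt support: nothing to register */

	irqfd->consumer.token = (void *)irqfd->eventfd;
	irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
	irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
	irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
	irqfd->consumer.start = kvm_arch_irq_bypass_start;

	ret = irq_bypass_register_consumer(&irqfd->consumer);
	if (ret)
		pr_info("irq bypass consumer (token %p) registration fails: %d\n",
			irqfd->consumer.token, ret);
}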
@@ -8429,3 +8435,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);