author     Paolo Bonzini <pbonzini@redhat.com>   2015-10-13 15:32:50 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2015-10-13 15:32:50 -0400
commit     58f800d5ace99c49e6418cb5757d868f2746acb4
tree       5e6d8e459f282e35b8b8245a39de838a2c364d42
parent     1330a0170a48ad3788eff01aaf889203652ab4c7
parent     73917739334c6509833b0403b81d4a04a8784bdf
Merge branch 'kvm-master' into HEAD
This merge brings in a couple of important SMM fixes, which make it easier to
test the latest KVM with unrestricted_guest=0 and to test the in-progress work
on SMM support in the firmware.

Conflicts:
	arch/x86/kvm/x86.c
-rw-r--r--  arch/x86/include/asm/kvm_host.h     6
-rw-r--r--  arch/x86/kvm/vmx.c                 26
-rw-r--r--  arch/x86/kvm/x86.c                135
3 files changed, 83 insertions(+), 84 deletions(-)
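Aside from the SMM fixes themselves, most of the diff below comes from a change to the internal-memslot helpers: __x86_set_memory_region() and x86_set_memory_region() now take the slot id, guest physical address and size directly instead of a caller-built struct kvm_userspace_memory_region, and a size of 0 deletes the slot and unmaps its backing memory. A caller-side sketch, condensed from the hunks below (not a literal excerpt from any single file):

	/* Before: callers filled in a struct and passed it. */
	struct kvm_userspace_memory_region tss_mem = {
		.slot            = TSS_PRIVATE_MEMSLOT,
		.guest_phys_addr = addr,
		.memory_size     = PAGE_SIZE * 3,
		.flags           = 0,
	};
	ret = x86_set_memory_region(kvm, &tss_mem);

	/* After: pass id, gpa and size directly ... */
	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, PAGE_SIZE * 3);

	/* ... and size == 0 deletes the slot and vm_munmap()s its backing memory. */
	x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);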
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cdbdb559ecd2..53deb2750bf6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1251,10 +1251,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 int kvm_is_in_guest(void);
 
-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c5c22831aee2..8eeba6ac5914 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4227,17 +4227,13 @@ static void seg_setup(int seg)
 static int alloc_apic_access_page(struct kvm *kvm)
 {
 	struct page *page;
-	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
 	mutex_lock(&kvm->slots_lock);
 	if (kvm->arch.apic_access_page_done)
 		goto out;
-	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+				    APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
 	if (r)
 		goto out;
 
@@ -4262,17 +4258,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 {
 	/* Called with kvm->slots_lock held. */
 
-	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
 	BUG_ON(kvm->arch.ept_identity_pagetable_done);
 
-	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr =
-		kvm->arch.ept_identity_map_addr;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+				    kvm->arch.ept_identity_map_addr, PAGE_SIZE);
 
 	return r;
 }
@@ -5089,14 +5080,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
 	int ret;
-	struct kvm_userspace_memory_region tss_mem = {
-		.slot = TSS_PRIVATE_MEMSLOT,
-		.guest_phys_addr = addr,
-		.memory_size = PAGE_SIZE * 3,
-		.flags = 0,
-	};
 
-	ret = x86_set_memory_region(kvm, &tss_mem);
+	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+				    PAGE_SIZE * 3);
 	if (ret)
 		return ret;
 	kvm->arch.tss_addr = addr;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2d2c9bb0d6d6..ba1a968b829a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6531,6 +6531,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted);
+}
+
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -6539,8 +6545,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
 	for (;;) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		    !vcpu->arch.apf.halted) {
+		if (kvm_vcpu_running(vcpu)) {
 			r = vcpu_enter_guest(vcpu);
 		} else {
 			r = vcpu_block(kvm, vcpu);
@@ -7556,34 +7561,66 @@ void kvm_arch_sync_events(struct kvm *kvm)
 	kvm_free_pit(kvm);
 }
 
-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
 	int i, r;
+	u64 hva;
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *slot, old;
 
 	/* Called with kvm->slots_lock held. */
-	BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+		return -EINVAL;
+
+	slot = id_to_memslot(slots, id);
+	if (size) {
+		if (WARN_ON(slot->npages))
+			return -EEXIST;
+
+		/*
+		 * MAP_SHARED to prevent internal slot pages from being moved
+		 * by fork()/COW.
+		 */
+		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+			      MAP_SHARED | MAP_ANONYMOUS, 0);
+		if (IS_ERR((void *)hva))
+			return PTR_ERR((void *)hva);
+	} else {
+		if (!slot->npages)
+			return 0;
 
+		hva = 0;
+	}
+
+	old = *slot;
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		struct kvm_userspace_memory_region m = *mem;
+		struct kvm_userspace_memory_region m;
 
-		m.slot |= i << 16;
+		m.slot = id | (i << 16);
+		m.flags = 0;
+		m.guest_phys_addr = gpa;
+		m.userspace_addr = hva;
+		m.memory_size = size;
 		r = __kvm_set_memory_region(kvm, &m);
 		if (r < 0)
 			return r;
 	}
 
+	if (!size) {
+		r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+		WARN_ON(r < 0);
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
 	int r;
 
 	mutex_lock(&kvm->slots_lock);
-	r = __x86_set_memory_region(kvm, mem);
+	r = __x86_set_memory_region(kvm, id, gpa, size);
 	mutex_unlock(&kvm->slots_lock);
 
 	return r;
@@ -7598,16 +7635,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	 * unless the the memory map has changed due to process exit
 	 * or fd copying.
 	 */
-	struct kvm_userspace_memory_region mem;
-	memset(&mem, 0, sizeof(mem));
-	mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-	x86_set_memory_region(kvm, &mem);
-
-	mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-	x86_set_memory_region(kvm, &mem);
-
-	mem.slot = TSS_PRIVATE_MEMSLOT;
-	x86_set_memory_region(kvm, &mem);
+	x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+	x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+	x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
 	}
 	kvm_iommu_unmap_guest(kvm);
 	kfree(kvm->arch.vpic);
@@ -7710,27 +7740,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				const struct kvm_userspace_memory_region *mem,
 				enum kvm_mr_change change)
 {
-	/*
-	 * Only private memory slots need to be mapped here since
-	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
-	 */
-	if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
-		unsigned long userspace_addr;
-
-		/*
-		 * MAP_SHARED to prevent internal slot pages from being moved
-		 * by fork()/COW.
-		 */
-		userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
-					 PROT_READ | PROT_WRITE,
-					 MAP_SHARED | MAP_ANONYMOUS, 0);
-
-		if (IS_ERR((void *)userspace_addr))
-			return PTR_ERR((void *)userspace_addr);
-
-		memslot->userspace_addr = userspace_addr;
-	}
-
 	return 0;
 }
 
@@ -7792,17 +7801,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 {
 	int nr_mmu_pages = 0;
 
-	if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
-		int ret;
-
-		ret = vm_munmap(old->userspace_addr,
-				old->npages * PAGE_SIZE);
-		if (ret < 0)
-			printk(KERN_WARNING
-			       "kvm_vm_ioctl_set_memory_region: "
-			       "failed to munmap memory\n");
-	}
-
 	if (!kvm->arch.n_requested_mmu_pages)
 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
@@ -7851,19 +7849,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+
+	if (kvm_apic_has_events(vcpu))
+		return true;
+
+	if (vcpu->arch.pv.pv_unhalted)
+		return true;
+
+	if (atomic_read(&vcpu->arch.nmi_queued))
+		return true;
+
+	if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+		return true;
+
+	if (kvm_arch_interrupt_allowed(vcpu) &&
+	    kvm_cpu_has_interrupt(vcpu))
+		return true;
+
+	return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
 		kvm_x86_ops->check_nested_events(vcpu, false);
 
-	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		!vcpu->arch.apf.halted)
-		|| !list_empty_careful(&vcpu->async_pf.done)
-		|| kvm_apic_has_events(vcpu)
-		|| vcpu->arch.pv.pv_unhalted
-		|| atomic_read(&vcpu->arch.nmi_queued) ||
-		(kvm_arch_interrupt_allowed(vcpu) &&
-		 kvm_cpu_has_interrupt(vcpu));
+	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
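Reading note on the last hunk: the open-coded runnable test is split into kvm_vcpu_running() (added earlier in this diff) and the new kvm_vcpu_has_events(), and the SMM-relevant part appears to be the extra KVM_REQ_SMI check, so a halted vCPU with a pending SMI is now reported as runnable. Condensed from the hunks above:

	/* After the merge, the runnable check composes the two new helpers. */
	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);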