author     Paolo Bonzini <pbonzini@redhat.com>   2015-10-12 07:38:32 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2015-10-13 12:28:46 -0400
commit     1d8007bdee074fdffcf3539492d8a151a1fb3436
tree       282870ab57659afefbddfc32e5e81e7812889c4b
parent     d2922422c48df93f3edff7d872ee4f3191fefb08

KVM: x86: build kvm_userspace_memory_region in x86_set_memory_region

The next patch will make x86_set_memory_region fill the userspace_addr.
Since the struct is not used untouched anymore, it makes sense to build
it in x86_set_memory_region directly; it also simplifies the callers.

Reported-by: Alexandre DERUMIER <aderumier@odiso.com>
Cc: stable@vger.kernel.org
Fixes: 9da0e4d5ac969909f6b435ce28ea28135a9cbd69
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
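For orientation, here is a minimal before/after sketch of the call-site pattern this commit introduces. It is illustrative only and not part of the patch; the names and constants are taken from the vmx.c and x86.c hunks below.

    /* Before: each caller built a kvm_userspace_memory_region by hand. */
    struct kvm_userspace_memory_region tss_mem = {
    	.slot = TSS_PRIVATE_MEMSLOT,
    	.guest_phys_addr = addr,
    	.memory_size = PAGE_SIZE * 3,
    	.flags = 0,
    };
    ret = x86_set_memory_region(kvm, &tss_mem);

    /* After: the helper builds the struct internally for every address
     * space; callers pass only the slot id, guest physical base and size.
     * A size of 0 deletes the slot, which kvm_arch_destroy_vm relies on. */
    ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, PAGE_SIZE * 3);
    x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);	/* teardown */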
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  6
-rw-r--r--  arch/x86/kvm/vmx.c              | 26
-rw-r--r--  arch/x86/kvm/x86.c              | 31
3 files changed, 21 insertions(+), 42 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2beee0382088..3a36ee704c30 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1226,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 int kvm_is_in_guest(void);
 
-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 06ef4908ba61..6a8bc64566ab 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4105,17 +4105,13 @@ static void seg_setup(int seg)
 static int alloc_apic_access_page(struct kvm *kvm)
 {
 	struct page *page;
-	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
 	mutex_lock(&kvm->slots_lock);
 	if (kvm->arch.apic_access_page_done)
 		goto out;
-	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+				    APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
 	if (r)
 		goto out;
 
@@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 {
 	/* Called with kvm->slots_lock held. */
 
-	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
 	BUG_ON(kvm->arch.ept_identity_pagetable_done);
 
-	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr =
-		kvm->arch.ept_identity_map_addr;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+				    kvm->arch.ept_identity_map_addr, PAGE_SIZE);
 
 	return r;
 }
@@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
 	int ret;
-	struct kvm_userspace_memory_region tss_mem = {
-		.slot = TSS_PRIVATE_MEMSLOT,
-		.guest_phys_addr = addr,
-		.memory_size = PAGE_SIZE * 3,
-		.flags = 0,
-	};
 
-	ret = x86_set_memory_region(kvm, &tss_mem);
+	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+				    PAGE_SIZE * 3);
 	if (ret)
 		return ret;
 	kvm->arch.tss_addr = addr;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 92511d4b7236..7bf8096f013d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7474,18 +7474,21 @@ void kvm_arch_sync_events(struct kvm *kvm)
 	kvm_free_pit(kvm);
 }
 
-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
 	int i, r;
 
 	/* Called with kvm->slots_lock held. */
-	BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+		return -EINVAL;
 
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		struct kvm_userspace_memory_region m = *mem;
+		struct kvm_userspace_memory_region m;
 
-		m.slot |= i << 16;
+		m.slot = id | (i << 16);
+		m.flags = 0;
+		m.guest_phys_addr = gpa;
+		m.memory_size = size;
 		r = __kvm_set_memory_region(kvm, &m);
 		if (r < 0)
 			return r;
@@ -7495,13 +7498,12 @@ int __x86_set_memory_region(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
 	int r;
 
 	mutex_lock(&kvm->slots_lock);
-	r = __x86_set_memory_region(kvm, mem);
+	r = __x86_set_memory_region(kvm, id, gpa, size);
 	mutex_unlock(&kvm->slots_lock);
 
 	return r;
@@ -7516,16 +7518,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		 * unless the the memory map has changed due to process exit
 		 * or fd copying.
 		 */
-		struct kvm_userspace_memory_region mem;
-		memset(&mem, 0, sizeof(mem));
-		mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
-
-		mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
-
-		mem.slot = TSS_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
+		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
 	}
 	kvm_iommu_unmap_guest(kvm);
 	kfree(kvm->arch.vpic);