path: root/drivers/kvm/kvm_main.c
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--	drivers/kvm/kvm_main.c	56
1 file changed, 39 insertions(+), 17 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8f7125710d02..ac5ed00e9065 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -362,10 +362,12 @@ EXPORT_SYMBOL_GPL(fx_init);
  * space.
  *
  * Discontiguous memory is allowed, mostly for framebuffers.
+ *
+ * Must be called holding kvm->lock.
  */
-int kvm_set_memory_region(struct kvm *kvm,
-			  struct kvm_userspace_memory_region *mem,
-			  int user_alloc)
+int __kvm_set_memory_region(struct kvm *kvm,
+			    struct kvm_userspace_memory_region *mem,
+			    int user_alloc)
 {
 	int r;
 	gfn_t base_gfn;
@@ -392,8 +394,6 @@ int kvm_set_memory_region(struct kvm *kvm,
 	if (!npages)
 		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
-	mutex_lock(&kvm->lock);
-
 	new = old = *memslot;
 
 	new.base_gfn = base_gfn;
@@ -403,7 +403,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 	/* Disallow changing a memory slot's size. */
 	r = -EINVAL;
 	if (npages && old.npages && npages != old.npages)
-		goto out_unlock;
+		goto out_free;
 
 	/* Check for overlaps */
 	r = -EEXIST;
@@ -414,7 +414,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 			continue;
 		if (!((base_gfn + npages <= s->base_gfn) ||
 		      (base_gfn >= s->base_gfn + s->npages)))
-			goto out_unlock;
+			goto out_free;
 	}
 
 	/* Free page dirty bitmap if unneeded */
@@ -428,7 +428,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 		new.rmap = vmalloc(npages * sizeof(struct page *));
 
 		if (!new.rmap)
-			goto out_unlock;
+			goto out_free;
 
 		memset(new.rmap, 0, npages * sizeof(*new.rmap));
 
@@ -445,7 +445,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 			up_write(&current->mm->mmap_sem);
 
 			if (IS_ERR((void *)new.userspace_addr))
-				goto out_unlock;
+				goto out_free;
 		}
 	} else {
 		if (!old.user_alloc && old.rmap) {
@@ -468,7 +468,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
-			goto out_unlock;
+			goto out_free;
 		memset(new.dirty_bitmap, 0, dirty_bytes);
 	}
 
@@ -498,18 +498,28 @@ int kvm_set_memory_region(struct kvm *kvm,
 		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
 		kvm_flush_remote_tlbs(kvm);
 
-	mutex_unlock(&kvm->lock);
-
 	kvm_free_physmem_slot(&old, &new);
 	return 0;
 
-out_unlock:
-	mutex_unlock(&kvm->lock);
+out_free:
 	kvm_free_physmem_slot(&new, &old);
 out:
 	return r;
 
 }
+EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
+
+int kvm_set_memory_region(struct kvm *kvm,
+			  struct kvm_userspace_memory_region *mem,
+			  int user_alloc)
+{
+	int r;
+
+	mutex_lock(&kvm->lock);
+	r = __kvm_set_memory_region(kvm, mem, user_alloc);
+	mutex_unlock(&kvm->lock);
+	return r;
+}
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
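
The hunks above split the slot update into an unlocked core and a locking wrapper: __kvm_set_memory_region() now documents that it must be called with kvm->lock held, while kvm_set_memory_region() is reduced to taking the mutex, calling the core, and dropping it. Below is a minimal sketch of why the split is useful, assuming a hypothetical in-kernel caller that already holds kvm->lock for its own critical section; the function name is illustrative and not part of this patch.

/*
 * Hypothetical caller (not from this commit): with the split, code that
 * already holds kvm->lock can update a memslot directly.  Calling the
 * locking wrapper kvm_set_memory_region() here would self-deadlock on
 * the non-recursive mutex.
 */
static int example_update_slot_locked(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->lock);
	/* ... other state changes that must stay under kvm->lock ... */
	r = __kvm_set_memory_region(kvm, mem, 0);
	/* ... more work under the same lock ... */
	mutex_unlock(&kvm->lock);
	return r;
}
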
@@ -888,14 +898,21 @@ static int emulator_read_emulated(unsigned long addr,
 		memcpy(val, vcpu->mmio_data, bytes);
 		vcpu->mmio_read_completed = 0;
 		return X86EMUL_CONTINUE;
-	} else if (emulator_read_std(addr, val, bytes, vcpu)
-			== X86EMUL_CONTINUE)
-		return X86EMUL_CONTINUE;
+	}
 
 	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+
+	/* For APIC access vmexit */
+	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+		goto mmio;
+
+	if (emulator_read_std(addr, val, bytes, vcpu)
+			== X86EMUL_CONTINUE)
+		return X86EMUL_CONTINUE;
 	if (gpa == UNMAPPED_GVA)
 		return X86EMUL_PROPAGATE_FAULT;
 
+mmio:
 	/*
 	 * Is this MMIO handled locally?
 	 */
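
In the read path above, the change computes the guest physical address first and routes any access that lands in the page at APIC_DEFAULT_PHYS_BASE straight to the MMIO handling below the mmio: label, instead of letting emulator_read_std() satisfy it from guest memory. A small sketch of that check, assuming x86 where APIC_DEFAULT_PHYS_BASE is 0xfee00000; the helper name is illustrative and not in the patch.

/*
 * Illustrative helper (not from this commit): does this guest physical
 * address fall in the default local-APIC page?
 */
static inline int gpa_hits_apic_page(gpa_t gpa)
{
	return (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE;
}
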
@@ -938,9 +955,14 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
+	/* For APIC access vmexit */
+	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+		goto mmio;
+
 	if (emulator_write_phys(vcpu, gpa, val, bytes))
 		return X86EMUL_CONTINUE;
 
+mmio:
 	/*
 	 * Is this MMIO handled locally?
 	 */
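
The write path gets the mirror-image treatment: an access to the APIC page skips emulator_write_phys() and jumps to the MMIO handling, so the in-kernel device emulation sees it. The resulting dispatch order, as a simplified sketch with a stand-in for the local MMIO path (handle_as_mmio is illustrative, not a function in this file):

/*
 * Stand-in for the "Is this MMIO handled locally?" path that follows
 * the mmio: label in the real function.
 */
static int handle_as_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			  const void *val, unsigned int bytes);

static int write_dispatch_sketch(struct kvm_vcpu *vcpu, gpa_t gpa,
				 const void *val, unsigned int bytes)
{
	/* APIC-page accesses always go to the device/MMIO path */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		return handle_as_mmio(vcpu, gpa, val, bytes);

	/* Otherwise try to write guest RAM directly */
	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/* Not backed by RAM: fall back to the device/MMIO path */
	return handle_as_mmio(vcpu, gpa, val, bytes);
}
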