author      Shaohua Li <shaohua.li@intel.com>        2007-07-23 02:51:37 -0400
committer   Avi Kivity <avi@qumranet.com>            2007-10-13 04:18:20 -0400
commit      11ec2804711896546ee3c945f3786c7f9fdd175a (patch)
tree        1e26102931efb8b0b48c440887577d21170ef94a /drivers
parent      15ad71460d75fd7ca41bb248a2310f3f39b302ba (diff)
KVM: Convert vm lock to a mutex
This allows the kvm mmu to perform sleepy operations, such as memory
allocation.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
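For context, a sketch that is not part of the patch: a spinlock holder must not sleep, so only atomic allocations (GFP_ATOMIC or GFP_NOWAIT) are legal while the lock is held, whereas a mutex may be held across operations that sleep, such as a GFP_KERNEL allocation. A minimal illustration of the difference, with alloc_under_lock as a hypothetical helper:

    #include <linux/mutex.h>
    #include <linux/slab.h>

    /* Hypothetical helper, for illustration only (not part of this patch). */
    static void *alloc_under_lock(struct mutex *lock, size_t size)
    {
            void *p;

            /*
             * Under a spinlock this allocation would have to be GFP_ATOMIC,
             * because a spinlock holder may not sleep.  Under a mutex,
             * GFP_KERNEL, which may sleep to reclaim memory, is allowed.
             */
            mutex_lock(lock);
            p = kmalloc(size, GFP_KERNEL);
            mutex_unlock(lock);
            return p;
    }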
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/kvm/kvm.h       |  2
-rw-r--r--  drivers/kvm/kvm_main.c  | 69
-rw-r--r--  drivers/kvm/mmu.c       |  9
-rw-r--r--  drivers/kvm/svm.c       |  8
-rw-r--r--  drivers/kvm/vmx.c       |  8
5 files changed, 46 insertions(+), 50 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 0667183ecbed..1072c8322d4f 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -393,7 +393,7 @@ struct kvm_memory_slot {
 };
 
 struct kvm {
-        spinlock_t lock; /* protects everything except vcpus */
+        struct mutex lock; /* protects everything except vcpus */
         int naliases;
         struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
         int nmemslots;
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 6035e6d35417..7aeaaba79c54 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -363,7 +363,7 @@ static struct kvm *kvm_create_vm(void)
                 return ERR_PTR(-ENOMEM);
 
         kvm_io_bus_init(&kvm->pio_bus);
-        spin_lock_init(&kvm->lock);
+        mutex_init(&kvm->lock);
         INIT_LIST_HEAD(&kvm->active_mmu_pages);
         kvm_io_bus_init(&kvm->mmio_bus);
         spin_lock(&kvm_lock);
@@ -489,7 +489,7 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
         struct page *page;
         u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
 
-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         page = gfn_to_page(vcpu->kvm, pdpt_gfn);
         if (!page) {
                 ret = 0;
@@ -510,7 +510,7 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
         memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
 out:
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
 
         return ret;
 }
@@ -570,9 +570,9 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         kvm_arch_ops->set_cr0(vcpu, cr0);
         vcpu->cr0 = cr0;
 
-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         kvm_mmu_reset_context(vcpu);
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
         return;
 }
 EXPORT_SYMBOL_GPL(set_cr0);
@@ -611,9 +611,9 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                 return;
         }
         kvm_arch_ops->set_cr4(vcpu, cr4);
-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         kvm_mmu_reset_context(vcpu);
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr4);
 
@@ -650,7 +650,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
         }
 
         vcpu->cr3 = cr3;
-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         /*
          * Does the new cr3 value map to physical memory? (Note, we
          * catch an invalid cr3 even in real-mode, because it would
@@ -664,7 +664,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                 inject_gp(vcpu);
         else
                 vcpu->mmu.new_cr3(vcpu);
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
 
@@ -741,7 +741,7 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
         mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
 raced:
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
 
         memory_config_version = kvm->memory_config_version;
         new = old = *memslot;
@@ -770,7 +770,7 @@ raced:
          * Do memory allocations outside lock.  memory_config_version will
          * detect any races.
          */
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);
 
         /* Deallocate if slot is being removed */
         if (!npages)
@@ -809,10 +809,10 @@ raced:
                 memset(new.dirty_bitmap, 0, dirty_bytes);
         }
 
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
 
         if (memory_config_version != kvm->memory_config_version) {
-                spin_unlock(&kvm->lock);
+                mutex_unlock(&kvm->lock);
                 kvm_free_physmem_slot(&new, &old);
                 goto raced;
         }
@@ -830,13 +830,13 @@ raced:
                 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
         kvm_flush_remote_tlbs(kvm);
 
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);
 
         kvm_free_physmem_slot(&old, &new);
         return 0;
 
 out_unlock:
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);
 out_free:
         kvm_free_physmem_slot(&new, &old);
 out:
@@ -854,14 +854,14 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         int n;
         unsigned long any = 0;
 
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
 
         /*
          * Prevent changes to guest memory configuration even while the lock
          * is not taken.
          */
         ++kvm->busy;
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);
         r = -EINVAL;
         if (log->slot >= KVM_MEMORY_SLOTS)
                 goto out;
@@ -880,18 +880,18 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                 goto out;
 
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
         kvm_mmu_slot_remove_write_access(kvm, log->slot);
         kvm_flush_remote_tlbs(kvm);
         memset(memslot->dirty_bitmap, 0, n);
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);
 
         r = 0;
 
 out:
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
         --kvm->busy;
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);
         return r;
 }
 
@@ -921,7 +921,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
             < alias->target_phys_addr)
                 goto out;
 
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
 
         p = &kvm->aliases[alias->slot];
         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -935,7 +935,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 
         kvm_mmu_zap_all(kvm);
 
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);
 
         return 0;
 
@@ -1900,12 +1900,12 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
         vcpu->pio.cur_count = now;
 
         for (i = 0; i < nr_pages; ++i) {
-                spin_lock(&vcpu->kvm->lock);
+                mutex_lock(&vcpu->kvm->lock);
                 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
                 if (page)
                         get_page(page);
                 vcpu->pio.guest_pages[i] = page;
-                spin_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->lock);
                 if (!page) {
                         inject_gp(vcpu);
                         free_pio_guest_pages(vcpu);
@@ -2298,13 +2298,13 @@ static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
         gpa_t gpa;
 
         vcpu_load(vcpu);
-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
         tr->physical_address = gpa;
         tr->valid = gpa != UNMAPPED_GVA;
         tr->writeable = 1;
         tr->usermode = 0;
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
         vcpu_put(vcpu);
 
         return 0;
@@ -2426,14 +2426,14 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
         if (r < 0)
                 goto free_vcpu;
 
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
         if (kvm->vcpus[n]) {
                 r = -EEXIST;
-                spin_unlock(&kvm->lock);
+                mutex_unlock(&kvm->lock);
                 goto mmu_unload;
         }
         kvm->vcpus[n] = vcpu;
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);
 
         /* Now it's all set up, let userspace reach it */
         r = create_vcpu_fd(vcpu);
@@ -2442,9 +2442,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
         return r;
 
 unlink:
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
         kvm->vcpus[n] = NULL;
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);
 
 mmu_unload:
         vcpu_load(vcpu);
@@ -2945,8 +2945,7 @@ static void decache_vcpus_on_cpu(int cpu)
         int i;
 
         spin_lock(&kvm_lock);
-        list_for_each_entry(vm, &vm_list, vm_list) {
-                spin_lock(&vm->lock);
+        list_for_each_entry(vm, &vm_list, vm_list)
                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                         vcpu = vm->vcpus[i];
                         if (!vcpu)
@@ -2967,8 +2966,6 @@ static void decache_vcpus_on_cpu(int cpu)
                                 mutex_unlock(&vcpu->mutex);
                         }
                 }
-                spin_unlock(&vm->lock);
-        }
         spin_unlock(&kvm_lock);
 }
 
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 396c736e546b..e303b4137bfa 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -275,10 +275,9 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
         r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
         kvm_mmu_free_some_pages(vcpu);
         if (r < 0) {
-                spin_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->lock);
                 r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
-                spin_lock(&vcpu->kvm->lock);
-                kvm_mmu_free_some_pages(vcpu);
+                mutex_lock(&vcpu->kvm->lock);
         }
         return r;
 }
@@ -1069,7 +1068,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
         int r;
 
-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         r = mmu_topup_memory_caches(vcpu);
         if (r)
                 goto out;
@@ -1077,7 +1076,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
         kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
         kvm_mmu_flush_tlb(vcpu);
 out:
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
         return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_load);
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 3997bbd78fb7..9a840e08b207 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -941,21 +941,21 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         if (is_external_interrupt(exit_int_info))
                 push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
 
-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
 
         fault_address = svm->vmcb->control.exit_info_2;
         error_code = svm->vmcb->control.exit_info_1;
         r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
         if (r < 0) {
-                spin_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->lock);
                 return r;
         }
         if (!r) {
-                spin_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->lock);
                 return 1;
         }
         er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
 
         switch (er) {
         case EMULATE_DONE:
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 8c87d20f8e39..5b77d9b7b1ac 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1711,19 +1711,19 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         if (is_page_fault(intr_info)) {
                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
 
-                spin_lock(&vcpu->kvm->lock);
+                mutex_lock(&vcpu->kvm->lock);
                 r = kvm_mmu_page_fault(vcpu, cr2, error_code);
                 if (r < 0) {
-                        spin_unlock(&vcpu->kvm->lock);
+                        mutex_unlock(&vcpu->kvm->lock);
                         return r;
                 }
                 if (!r) {
-                        spin_unlock(&vcpu->kvm->lock);
+                        mutex_unlock(&vcpu->kvm->lock);
                         return 1;
                 }
 
                 er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
-                spin_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->lock);
 
                 switch (er) {
                 case EMULATE_DONE: