author     Shaohua Li <shaohua.li@intel.com>    2007-07-23 02:51:37 -0400
committer  Avi Kivity <avi@qumranet.com>        2007-10-13 04:18:20 -0400
commit     11ec2804711896546ee3c945f3786c7f9fdd175a (patch)
tree       1e26102931efb8b0b48c440887577d21170ef94a /drivers/kvm/kvm_main.c
parent     15ad71460d75fd7ca41bb248a2310f3f39b302ba (diff)
KVM: Convert vm lock to a mutex
This allows the KVM MMU to perform sleeping operations, such as memory
allocation.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
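For readers unfamiliar with the distinction: code that holds a spinlock must not sleep, so any allocation done under the old `spinlock_t` vm lock had to be non-blocking (GFP_ATOMIC or pre-allocated); a mutex, by contrast, may be held across blocking calls such as GFP_KERNEL allocations. The sketch below is not part of this patch; it is a hypothetical illustration of the pattern the mutex conversion enables, using a stand-in `demo_vm` structure rather than the real `struct kvm`.

```c
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical stand-in for a kvm-like object guarded by the vm lock. */
struct demo_vm {
	struct mutex lock;	/* was: spinlock_t lock; initialize with mutex_init() */
	void *shadow_table;
};

/*
 * Under a spinlock, kzalloc(..., GFP_KERNEL) would be illegal because it
 * may sleep; the caller would have to fall back to GFP_ATOMIC or allocate
 * up front.  Under a mutex, the MMU path may simply block until memory
 * becomes available.
 */
static int demo_mmu_alloc(struct demo_vm *vm, size_t size)
{
	int ret = 0;

	mutex_lock(&vm->lock);				/* may sleep: process context */
	vm->shadow_table = kzalloc(size, GFP_KERNEL);	/* may also sleep */
	if (!vm->shadow_table)
		ret = -ENOMEM;
	mutex_unlock(&vm->lock);

	return ret;
}
```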
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--   drivers/kvm/kvm_main.c   69
1 file changed, 33 insertions(+), 36 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 6035e6d35417..7aeaaba79c54 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -363,7 +363,7 @@ static struct kvm *kvm_create_vm(void)
 		return ERR_PTR(-ENOMEM);
 
 	kvm_io_bus_init(&kvm->pio_bus);
-	spin_lock_init(&kvm->lock);
+	mutex_init(&kvm->lock);
 	INIT_LIST_HEAD(&kvm->active_mmu_pages);
 	kvm_io_bus_init(&kvm->mmio_bus);
 	spin_lock(&kvm_lock);
@@ -489,7 +489,7 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	struct page *page;
 	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
 
-	spin_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->lock);
 	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
 	if (!page) {
 		ret = 0;
@@ -510,7 +510,7 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
 out:
-	spin_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->lock);
 
 	return ret;
 }
@@ -570,9 +570,9 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	kvm_arch_ops->set_cr0(vcpu, cr0);
 	vcpu->cr0 = cr0;
 
-	spin_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->lock);
 	kvm_mmu_reset_context(vcpu);
-	spin_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->lock);
 	return;
 }
 EXPORT_SYMBOL_GPL(set_cr0);
@@ -611,9 +611,9 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		return;
 	}
 	kvm_arch_ops->set_cr4(vcpu, cr4);
-	spin_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->lock);
 	kvm_mmu_reset_context(vcpu);
-	spin_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr4);
 
@@ -650,7 +650,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	}
 
 	vcpu->cr3 = cr3;
-	spin_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->lock);
 	/*
 	 * Does the new cr3 value map to physical memory? (Note, we
 	 * catch an invalid cr3 even in real-mode, because it would
@@ -664,7 +664,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		inject_gp(vcpu);
 	else
 		vcpu->mmu.new_cr3(vcpu);
-	spin_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
 
@@ -741,7 +741,7 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
 raced:
-	spin_lock(&kvm->lock);
+	mutex_lock(&kvm->lock);
 
 	memory_config_version = kvm->memory_config_version;
 	new = old = *memslot;
@@ -770,7 +770,7 @@ raced:
 	 * Do memory allocations outside lock. memory_config_version will
 	 * detect any races.
 	 */
-	spin_unlock(&kvm->lock);
+	mutex_unlock(&kvm->lock);
 
 	/* Deallocate if slot is being removed */
 	if (!npages)
@@ -809,10 +809,10 @@ raced:
 		memset(new.dirty_bitmap, 0, dirty_bytes);
 	}
 
-	spin_lock(&kvm->lock);
+	mutex_lock(&kvm->lock);
 
 	if (memory_config_version != kvm->memory_config_version) {
-		spin_unlock(&kvm->lock);
+		mutex_unlock(&kvm->lock);
 		kvm_free_physmem_slot(&new, &old);
 		goto raced;
 	}
@@ -830,13 +830,13 @@ raced:
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
 	kvm_flush_remote_tlbs(kvm);
 
-	spin_unlock(&kvm->lock);
+	mutex_unlock(&kvm->lock);
 
 	kvm_free_physmem_slot(&old, &new);
 	return 0;
 
 out_unlock:
-	spin_unlock(&kvm->lock);
+	mutex_unlock(&kvm->lock);
 out_free:
 	kvm_free_physmem_slot(&new, &old);
 out:
@@ -854,14 +854,14 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	int n;
 	unsigned long any = 0;
 
-	spin_lock(&kvm->lock);
+	mutex_lock(&kvm->lock);
 
 	/*
 	 * Prevent changes to guest memory configuration even while the lock
 	 * is not taken.
 	 */
 	++kvm->busy;
-	spin_unlock(&kvm->lock);
+	mutex_unlock(&kvm->lock);
 	r = -EINVAL;
 	if (log->slot >= KVM_MEMORY_SLOTS)
 		goto out;
@@ -880,18 +880,18 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
 		goto out;
 
-	spin_lock(&kvm->lock);
+	mutex_lock(&kvm->lock);
 	kvm_mmu_slot_remove_write_access(kvm, log->slot);
 	kvm_flush_remote_tlbs(kvm);
 	memset(memslot->dirty_bitmap, 0, n);
-	spin_unlock(&kvm->lock);
+	mutex_unlock(&kvm->lock);
 
 	r = 0;
 
 out:
-	spin_lock(&kvm->lock);
+	mutex_lock(&kvm->lock);
 	--kvm->busy;
-	spin_unlock(&kvm->lock);
+	mutex_unlock(&kvm->lock);
 	return r;
 }
 
@@ -921,7 +921,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 	    < alias->target_phys_addr)
 		goto out;
 
-	spin_lock(&kvm->lock);
+	mutex_lock(&kvm->lock);
 
 	p = &kvm->aliases[alias->slot];
 	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -935,7 +935,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 
 	kvm_mmu_zap_all(kvm);
 
-	spin_unlock(&kvm->lock);
+	mutex_unlock(&kvm->lock);
 
 	return 0;
 
@@ -1900,12 +1900,12 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	vcpu->pio.cur_count = now;
 
 	for (i = 0; i < nr_pages; ++i) {
-		spin_lock(&vcpu->kvm->lock);
+		mutex_lock(&vcpu->kvm->lock);
 		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
 		if (page)
 			get_page(page);
 		vcpu->pio.guest_pages[i] = page;
-		spin_unlock(&vcpu->kvm->lock);
+		mutex_unlock(&vcpu->kvm->lock);
 		if (!page) {
 			inject_gp(vcpu);
 			free_pio_guest_pages(vcpu);
@@ -2298,13 +2298,13 @@ static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 	gpa_t gpa;
 
 	vcpu_load(vcpu);
-	spin_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->lock);
 	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
 	tr->physical_address = gpa;
 	tr->valid = gpa != UNMAPPED_GVA;
 	tr->writeable = 1;
 	tr->usermode = 0;
-	spin_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->lock);
 	vcpu_put(vcpu);
 
 	return 0;
@@ -2426,14 +2426,14 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 	if (r < 0)
 		goto free_vcpu;
 
-	spin_lock(&kvm->lock);
+	mutex_lock(&kvm->lock);
 	if (kvm->vcpus[n]) {
 		r = -EEXIST;
-		spin_unlock(&kvm->lock);
+		mutex_unlock(&kvm->lock);
 		goto mmu_unload;
 	}
 	kvm->vcpus[n] = vcpu;
-	spin_unlock(&kvm->lock);
+	mutex_unlock(&kvm->lock);
 
 	/* Now it's all set up, let userspace reach it */
 	r = create_vcpu_fd(vcpu);
@@ -2442,9 +2442,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 	return r;
 
 unlink:
-	spin_lock(&kvm->lock);
+	mutex_lock(&kvm->lock);
 	kvm->vcpus[n] = NULL;
-	spin_unlock(&kvm->lock);
+	mutex_unlock(&kvm->lock);
 
 mmu_unload:
 	vcpu_load(vcpu);
@@ -2945,8 +2945,7 @@ static void decache_vcpus_on_cpu(int cpu)
 	int i;
 
 	spin_lock(&kvm_lock);
-	list_for_each_entry(vm, &vm_list, vm_list) {
-		spin_lock(&vm->lock);
+	list_for_each_entry(vm, &vm_list, vm_list)
 		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 			vcpu = vm->vcpus[i];
 			if (!vcpu)
@@ -2967,8 +2966,6 @@ static void decache_vcpus_on_cpu(int cpu)
 				mutex_unlock(&vcpu->mutex);
 			}
 		}
-		spin_unlock(&vm->lock);
-	}
 	spin_unlock(&kvm_lock);
 }
 