author		Avi Kivity <avi@qumranet.com>	2007-07-17 06:04:56 -0400
committer	Avi Kivity <avi@qumranet.com>	2007-07-20 13:16:29 -0400
commit		90cb0529dd230548a7f0d6b315997be854caea1b
tree		31aad8e119781b7df846a8f8d8522a82ff3e4b25 /drivers/kvm/kvm_main.c
parent		d55e2cb20123cdb5020ec4a2b2f1eace5038c292
KVM: Fix memory slot management functions for guest smp
The memory slot management functions were oriented around vcpu 0, where
they should be kvm-wide. This causes hangs when starting X on an smp guest.
Fix by making the functions (and the mmu paths they lead into) non-vcpu-specific.
Unfortunately this reduces the efficiency of the mmu object cache a bit; we
may have to revisit this later.
Signed-off-by: Avi Kivity <avi@qumranet.com>
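
For context, here is the shape of the change as a self-contained user-space
mock; every type and function below is an illustrative stand-in, not the real
KVM code. The slot operations stop taking a vcpu and take the kvm instance
instead, so a single pass under kvm->lock covers every vcpu, rather than
loading each populated vcpu slot in turn:

/* build: cc -pthread mock.c */
#include <stdio.h>
#include <pthread.h>

#define KVM_MAX_VCPUS 4

struct mock_vcpu {
	int online;
	int need_tlb_flush;
};

struct mock_kvm {
	pthread_mutex_t lock;
	struct mock_vcpu vcpus[KVM_MAX_VCPUS];
};

/* kvm-wide: write-protect a slot for the whole VM, no vcpu argument */
static void slot_remove_write_access(struct mock_kvm *kvm, int slot)
{
	(void)kvm;
	printf("write-protecting memory slot %d VM-wide\n", slot);
}

/* mark every live vcpu as needing a TLB flush */
static void flush_remote_tlbs(struct mock_kvm *kvm)
{
	for (int i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i].online)
			kvm->vcpus[i].need_tlb_flush = 1;
}

int main(void)
{
	struct mock_kvm kvm = { .lock = PTHREAD_MUTEX_INITIALIZER };

	kvm.vcpus[0].online = 1;
	kvm.vcpus[1].online = 1;

	/* the new pattern: one pass under the VM lock covers every vcpu */
	pthread_mutex_lock(&kvm.lock);
	slot_remove_write_access(&kvm, 0);
	flush_remote_tlbs(&kvm);
	pthread_mutex_unlock(&kvm.lock);

	for (int i = 0; i < KVM_MAX_VCPUS; ++i)
		printf("vcpu %d: need_tlb_flush=%d\n", i,
		       kvm.vcpus[i].need_tlb_flush);
	return 0;
}

The efficiency note in the message presumably refers to the per-vcpu mmu
object caches: a kvm-wide path cannot assume any particular vcpu is loaded,
so it cannot draw on that vcpu's preallocated objects.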
Diffstat (limited to 'drivers/kvm/kvm_main.c')

-rw-r--r--	drivers/kvm/kvm_main.c | 68
1 file changed, 9 insertions(+), 59 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 1b206f197c6b..05f0418f2195 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -238,23 +238,6 @@ static void vcpu_load(struct kvm_vcpu *vcpu)
 	kvm_arch_ops->vcpu_load(vcpu);
 }
 
-/*
- * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
- * if the slot is not populated.
- */
-static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
-{
-	struct kvm_vcpu *vcpu = &kvm->vcpus[slot];
-
-	mutex_lock(&vcpu->mutex);
-	if (!vcpu->vmcs) {
-		mutex_unlock(&vcpu->mutex);
-		return NULL;
-	}
-	kvm_arch_ops->vcpu_load(vcpu);
-	return vcpu;
-}
-
 static void vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_arch_ops->vcpu_put(vcpu);
@@ -663,13 +646,6 @@ void fx_init(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(fx_init);
 
-static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
-{
-	spin_lock(&vcpu->kvm->lock);
-	kvm_mmu_slot_remove_write_access(vcpu, slot);
-	spin_unlock(&vcpu->kvm->lock);
-}
-
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -792,19 +768,10 @@ raced:
 	*memslot = new;
 	++kvm->memory_config_version;
 
-	spin_unlock(&kvm->lock);
-
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		struct kvm_vcpu *vcpu;
+	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	kvm_flush_remote_tlbs(kvm);
 
-		vcpu = vcpu_load_slot(kvm, i);
-		if (!vcpu)
-			continue;
-		if (new.flags & KVM_MEM_LOG_DIRTY_PAGES)
-			do_remove_write_access(vcpu, mem->slot);
-		kvm_mmu_reset_context(vcpu);
-		vcpu_put(vcpu);
-	}
+	spin_unlock(&kvm->lock);
 
 	kvm_free_physmem_slot(&old, &new);
 	return 0;
@@ -826,7 +793,6 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	int n;
-	int cleared;
 	unsigned long any = 0;
 
 	spin_lock(&kvm->lock);
@@ -855,23 +821,11 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
 		goto out;
 
-	if (any) {
-		cleared = 0;
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			struct kvm_vcpu *vcpu;
-
-			vcpu = vcpu_load_slot(kvm, i);
-			if (!vcpu)
-				continue;
-			if (!cleared) {
-				do_remove_write_access(vcpu, log->slot);
-				memset(memslot->dirty_bitmap, 0, n);
-				cleared = 1;
-			}
-			kvm_arch_ops->tlb_flush(vcpu);
-			vcpu_put(vcpu);
-		}
-	}
+	spin_lock(&kvm->lock);
+	kvm_mmu_slot_remove_write_access(kvm, log->slot);
+	kvm_flush_remote_tlbs(kvm);
+	memset(memslot->dirty_bitmap, 0, n);
+	spin_unlock(&kvm->lock);
 
 	r = 0;
 
@@ -920,13 +874,9 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 		break;
 	kvm->naliases = n;
 
-	spin_unlock(&kvm->lock);
+	kvm_mmu_zap_all(kvm);
 
-	vcpu_load(&kvm->vcpus[0]);
-	spin_lock(&kvm->lock);
-	kvm_mmu_zap_all(&kvm->vcpus[0]);
 	spin_unlock(&kvm->lock);
-	vcpu_put(&kvm->vcpus[0]);
 
 	return 0;
 
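Both new call sites above depend on kvm_flush_remote_tlbs() from the parent
commit. As a rough, assumption-labeled sketch of the idea behind such a
remote flush (a user-space mock, not the kernel implementation): each vcpu
carries a request bitmask that it re-checks on guest entry, and vcpus
currently running guest code are kicked out so they notice the new request.

#include <stdatomic.h>
#include <stdio.h>

#define MAX_VCPUS 4
#define REQ_TLB_FLUSH 1

struct vcpu {
	atomic_int requests;  /* request bits, re-checked on guest entry */
	atomic_int in_guest;  /* nonzero while executing guest code */
};

/* stand-in for the IPI that forces a running vcpu out of guest mode */
static void kick_vcpu(int i)
{
	printf("kick vcpu %d out of guest mode\n", i);
}

static void flush_remote_tlbs(struct vcpu *vcpus, int n)
{
	for (int i = 0; i < n; ++i) {
		atomic_fetch_or(&vcpus[i].requests, REQ_TLB_FLUSH);
		if (atomic_load(&vcpus[i].in_guest))
			kick_vcpu(i);
	}
}

int main(void)
{
	struct vcpu vcpus[MAX_VCPUS] = { 0 };

	atomic_store(&vcpus[1].in_guest, 1);  /* vcpu 1 is in guest mode */
	flush_remote_tlbs(vcpus, MAX_VCPUS);

	for (int i = 0; i < MAX_VCPUS; ++i)
		printf("vcpu %d: requests=0x%x\n", i,
		       (unsigned)atomic_load(&vcpus[i].requests));
	return 0;
}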