diff options
| author | Paolo Bonzini <pbonzini@redhat.com> | 2015-05-17 11:30:37 -0400 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2015-06-05 11:26:35 -0400 |
| commit | f481b069e674378758c73761827e83ab05c46b52 (patch) | |
| tree | 5f6ee0f8820e320f127fb7a23dd3ba28d5965302 /virt | |
| parent | 8e73485c7959fd25650761eab04db1e72ea14c23 (diff) | |
KVM: implement multiple address spaces
Only two ioctls have to be modified; the address space id is
placed in the upper 16 bits of their slot id argument.
As of this patch, no architecture defines more than one
address space; x86 will be the first.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt')
| -rw-r--r-- | virt/kvm/kvm_main.c | 70 |
1 file changed, 43 insertions, 27 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 3a121cedcc77..848af90b8091 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -518,9 +518,11 @@ static struct kvm *kvm_create_vm(unsigned long type) | |||
| 518 | BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); | 518 | BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); |
| 519 | 519 | ||
| 520 | r = -ENOMEM; | 520 | r = -ENOMEM; |
| 521 | kvm->memslots = kvm_alloc_memslots(); | 521 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 522 | if (!kvm->memslots) | 522 | kvm->memslots[i] = kvm_alloc_memslots(); |
| 523 | goto out_err_no_srcu; | 523 | if (!kvm->memslots[i]) |
| 524 | goto out_err_no_srcu; | ||
| 525 | } | ||
| 524 | 526 | ||
| 525 | if (init_srcu_struct(&kvm->srcu)) | 527 | if (init_srcu_struct(&kvm->srcu)) |
| 526 | goto out_err_no_srcu; | 528 | goto out_err_no_srcu; |
| @@ -562,7 +564,8 @@ out_err_no_srcu: | |||
| 562 | out_err_no_disable: | 564 | out_err_no_disable: |
| 563 | for (i = 0; i < KVM_NR_BUSES; i++) | 565 | for (i = 0; i < KVM_NR_BUSES; i++) |
| 564 | kfree(kvm->buses[i]); | 566 | kfree(kvm->buses[i]); |
| 565 | kvm_free_memslots(kvm, kvm->memslots); | 567 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) |
| 568 | kvm_free_memslots(kvm, kvm->memslots[i]); | ||
| 566 | kvm_arch_free_vm(kvm); | 569 | kvm_arch_free_vm(kvm); |
| 567 | return ERR_PTR(r); | 570 | return ERR_PTR(r); |
| 568 | } | 571 | } |
| @@ -612,7 +615,8 @@ static void kvm_destroy_vm(struct kvm *kvm) | |||
| 612 | #endif | 615 | #endif |
| 613 | kvm_arch_destroy_vm(kvm); | 616 | kvm_arch_destroy_vm(kvm); |
| 614 | kvm_destroy_devices(kvm); | 617 | kvm_destroy_devices(kvm); |
| 615 | kvm_free_memslots(kvm, kvm->memslots); | 618 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) |
| 619 | kvm_free_memslots(kvm, kvm->memslots[i]); | ||
| 616 | cleanup_srcu_struct(&kvm->irq_srcu); | 620 | cleanup_srcu_struct(&kvm->irq_srcu); |
| 617 | cleanup_srcu_struct(&kvm->srcu); | 621 | cleanup_srcu_struct(&kvm->srcu); |
| 618 | kvm_arch_free_vm(kvm); | 622 | kvm_arch_free_vm(kvm); |
| @@ -729,9 +733,9 @@ static int check_memory_region_flags(const struct kvm_userspace_memory_region *m | |||
| 729 | } | 733 | } |
| 730 | 734 | ||
| 731 | static struct kvm_memslots *install_new_memslots(struct kvm *kvm, | 735 | static struct kvm_memslots *install_new_memslots(struct kvm *kvm, |
| 732 | struct kvm_memslots *slots) | 736 | int as_id, struct kvm_memslots *slots) |
| 733 | { | 737 | { |
| 734 | struct kvm_memslots *old_memslots = kvm_memslots(kvm); | 738 | struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); |
| 735 | 739 | ||
| 736 | /* | 740 | /* |
| 737 | * Set the low bit in the generation, which disables SPTE caching | 741 | * Set the low bit in the generation, which disables SPTE caching |
| @@ -740,7 +744,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm, | |||
| 740 | WARN_ON(old_memslots->generation & 1); | 744 | WARN_ON(old_memslots->generation & 1); |
| 741 | slots->generation = old_memslots->generation + 1; | 745 | slots->generation = old_memslots->generation + 1; |
| 742 | 746 | ||
| 743 | rcu_assign_pointer(kvm->memslots, slots); | 747 | rcu_assign_pointer(kvm->memslots[as_id], slots); |
| 744 | synchronize_srcu_expedited(&kvm->srcu); | 748 | synchronize_srcu_expedited(&kvm->srcu); |
| 745 | 749 | ||
| 746 | /* | 750 | /* |
| @@ -772,6 +776,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
| 772 | struct kvm_memory_slot *slot; | 776 | struct kvm_memory_slot *slot; |
| 773 | struct kvm_memory_slot old, new; | 777 | struct kvm_memory_slot old, new; |
| 774 | struct kvm_memslots *slots = NULL, *old_memslots; | 778 | struct kvm_memslots *slots = NULL, *old_memslots; |
| 779 | int as_id, id; | ||
| 775 | enum kvm_mr_change change; | 780 | enum kvm_mr_change change; |
| 776 | 781 | ||
| 777 | r = check_memory_region_flags(mem); | 782 | r = check_memory_region_flags(mem); |
| @@ -779,24 +784,27 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
| 779 | goto out; | 784 | goto out; |
| 780 | 785 | ||
| 781 | r = -EINVAL; | 786 | r = -EINVAL; |
| 787 | as_id = mem->slot >> 16; | ||
| 788 | id = (u16)mem->slot; | ||
| 789 | |||
| 782 | /* General sanity checks */ | 790 | /* General sanity checks */ |
| 783 | if (mem->memory_size & (PAGE_SIZE - 1)) | 791 | if (mem->memory_size & (PAGE_SIZE - 1)) |
| 784 | goto out; | 792 | goto out; |
| 785 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) | 793 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) |
| 786 | goto out; | 794 | goto out; |
| 787 | /* We can read the guest memory with __xxx_user() later on. */ | 795 | /* We can read the guest memory with __xxx_user() later on. */ |
| 788 | if ((mem->slot < KVM_USER_MEM_SLOTS) && | 796 | if ((id < KVM_USER_MEM_SLOTS) && |
| 789 | ((mem->userspace_addr & (PAGE_SIZE - 1)) || | 797 | ((mem->userspace_addr & (PAGE_SIZE - 1)) || |
| 790 | !access_ok(VERIFY_WRITE, | 798 | !access_ok(VERIFY_WRITE, |
| 791 | (void __user *)(unsigned long)mem->userspace_addr, | 799 | (void __user *)(unsigned long)mem->userspace_addr, |
| 792 | mem->memory_size))) | 800 | mem->memory_size))) |
| 793 | goto out; | 801 | goto out; |
| 794 | if (mem->slot >= KVM_MEM_SLOTS_NUM) | 802 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) |
| 795 | goto out; | 803 | goto out; |
| 796 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) | 804 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) |
| 797 | goto out; | 805 | goto out; |
| 798 | 806 | ||
| 799 | slot = id_to_memslot(kvm_memslots(kvm), mem->slot); | 807 | slot = id_to_memslot(__kvm_memslots(kvm, as_id), id); |
| 800 | base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; | 808 | base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; |
| 801 | npages = mem->memory_size >> PAGE_SHIFT; | 809 | npages = mem->memory_size >> PAGE_SHIFT; |
| 802 | 810 | ||
| @@ -805,7 +813,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
| 805 | 813 | ||
| 806 | new = old = *slot; | 814 | new = old = *slot; |
| 807 | 815 | ||
| 808 | new.id = mem->slot; | 816 | new.id = id; |
| 809 | new.base_gfn = base_gfn; | 817 | new.base_gfn = base_gfn; |
| 810 | new.npages = npages; | 818 | new.npages = npages; |
| 811 | new.flags = mem->flags; | 819 | new.flags = mem->flags; |
| @@ -840,9 +848,9 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
| 840 | if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { | 848 | if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { |
| 841 | /* Check for overlaps */ | 849 | /* Check for overlaps */ |
| 842 | r = -EEXIST; | 850 | r = -EEXIST; |
| 843 | kvm_for_each_memslot(slot, kvm_memslots(kvm)) { | 851 | kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { |
| 844 | if ((slot->id >= KVM_USER_MEM_SLOTS) || | 852 | if ((slot->id >= KVM_USER_MEM_SLOTS) || |
| 845 | (slot->id == mem->slot)) | 853 | (slot->id == id)) |
| 846 | continue; | 854 | continue; |
| 847 | if (!((base_gfn + npages <= slot->base_gfn) || | 855 | if (!((base_gfn + npages <= slot->base_gfn) || |
| 848 | (base_gfn >= slot->base_gfn + slot->npages))) | 856 | (base_gfn >= slot->base_gfn + slot->npages))) |
| @@ -871,13 +879,13 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
| 871 | slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); | 879 | slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); |
| 872 | if (!slots) | 880 | if (!slots) |
| 873 | goto out_free; | 881 | goto out_free; |
| 874 | memcpy(slots, kvm_memslots(kvm), sizeof(struct kvm_memslots)); | 882 | memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); |
| 875 | 883 | ||
| 876 | if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { | 884 | if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { |
| 877 | slot = id_to_memslot(slots, mem->slot); | 885 | slot = id_to_memslot(slots, id); |
| 878 | slot->flags |= KVM_MEMSLOT_INVALID; | 886 | slot->flags |= KVM_MEMSLOT_INVALID; |
| 879 | 887 | ||
| 880 | old_memslots = install_new_memslots(kvm, slots); | 888 | old_memslots = install_new_memslots(kvm, as_id, slots); |
| 881 | 889 | ||
| 882 | /* slot was deleted or moved, clear iommu mapping */ | 890 | /* slot was deleted or moved, clear iommu mapping */ |
| 883 | kvm_iommu_unmap_pages(kvm, &old); | 891 | kvm_iommu_unmap_pages(kvm, &old); |
| @@ -909,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
| 909 | } | 917 | } |
| 910 | 918 | ||
| 911 | update_memslots(slots, &new); | 919 | update_memslots(slots, &new); |
| 912 | old_memslots = install_new_memslots(kvm, slots); | 920 | old_memslots = install_new_memslots(kvm, as_id, slots); |
| 913 | 921 | ||
| 914 | kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); | 922 | kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); |
| 915 | 923 | ||
| @@ -956,7 +964,7 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region); | |||
| 956 | static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, | 964 | static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
| 957 | struct kvm_userspace_memory_region *mem) | 965 | struct kvm_userspace_memory_region *mem) |
| 958 | { | 966 | { |
| 959 | if (mem->slot >= KVM_USER_MEM_SLOTS) | 967 | if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) |
| 960 | return -EINVAL; | 968 | return -EINVAL; |
| 961 | 969 | ||
| 962 | return kvm_set_memory_region(kvm, mem); | 970 | return kvm_set_memory_region(kvm, mem); |
| @@ -967,16 +975,18 @@ int kvm_get_dirty_log(struct kvm *kvm, | |||
| 967 | { | 975 | { |
| 968 | struct kvm_memslots *slots; | 976 | struct kvm_memslots *slots; |
| 969 | struct kvm_memory_slot *memslot; | 977 | struct kvm_memory_slot *memslot; |
| 970 | int r, i; | 978 | int r, i, as_id, id; |
| 971 | unsigned long n; | 979 | unsigned long n; |
| 972 | unsigned long any = 0; | 980 | unsigned long any = 0; |
| 973 | 981 | ||
| 974 | r = -EINVAL; | 982 | r = -EINVAL; |
| 975 | if (log->slot >= KVM_USER_MEM_SLOTS) | 983 | as_id = log->slot >> 16; |
| 984 | id = (u16)log->slot; | ||
| 985 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) | ||
| 976 | goto out; | 986 | goto out; |
| 977 | 987 | ||
| 978 | slots = kvm_memslots(kvm); | 988 | slots = __kvm_memslots(kvm, as_id); |
| 979 | memslot = id_to_memslot(slots, log->slot); | 989 | memslot = id_to_memslot(slots, id); |
| 980 | r = -ENOENT; | 990 | r = -ENOENT; |
| 981 | if (!memslot->dirty_bitmap) | 991 | if (!memslot->dirty_bitmap) |
| 982 | goto out; | 992 | goto out; |
| @@ -1027,17 +1037,19 @@ int kvm_get_dirty_log_protect(struct kvm *kvm, | |||
| 1027 | { | 1037 | { |
| 1028 | struct kvm_memslots *slots; | 1038 | struct kvm_memslots *slots; |
| 1029 | struct kvm_memory_slot *memslot; | 1039 | struct kvm_memory_slot *memslot; |
| 1030 | int r, i; | 1040 | int r, i, as_id, id; |
| 1031 | unsigned long n; | 1041 | unsigned long n; |
| 1032 | unsigned long *dirty_bitmap; | 1042 | unsigned long *dirty_bitmap; |
| 1033 | unsigned long *dirty_bitmap_buffer; | 1043 | unsigned long *dirty_bitmap_buffer; |
| 1034 | 1044 | ||
| 1035 | r = -EINVAL; | 1045 | r = -EINVAL; |
| 1036 | if (log->slot >= KVM_USER_MEM_SLOTS) | 1046 | as_id = log->slot >> 16; |
| 1047 | id = (u16)log->slot; | ||
| 1048 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) | ||
| 1037 | goto out; | 1049 | goto out; |
| 1038 | 1050 | ||
| 1039 | slots = kvm_memslots(kvm); | 1051 | slots = __kvm_memslots(kvm, as_id); |
| 1040 | memslot = id_to_memslot(slots, log->slot); | 1052 | memslot = id_to_memslot(slots, id); |
| 1041 | 1053 | ||
| 1042 | dirty_bitmap = memslot->dirty_bitmap; | 1054 | dirty_bitmap = memslot->dirty_bitmap; |
| 1043 | r = -ENOENT; | 1055 | r = -ENOENT; |
| @@ -2620,6 +2632,10 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) | |||
| 2620 | case KVM_CAP_IRQ_ROUTING: | 2632 | case KVM_CAP_IRQ_ROUTING: |
| 2621 | return KVM_MAX_IRQ_ROUTES; | 2633 | return KVM_MAX_IRQ_ROUTES; |
| 2622 | #endif | 2634 | #endif |
| 2635 | #if KVM_ADDRESS_SPACE_NUM > 1 | ||
| 2636 | case KVM_CAP_MULTI_ADDRESS_SPACE: | ||
| 2637 | return KVM_ADDRESS_SPACE_NUM; | ||
| 2638 | #endif | ||
| 2623 | default: | 2639 | default: |
| 2624 | break; | 2640 | break; |
| 2625 | } | 2641 | } |
