 Documentation/virtual/kvm/api.txt        | 12
 arch/powerpc/include/asm/kvm_book3s_64.h |  2
 include/linux/kvm_host.h                 | 26
 include/uapi/linux/kvm.h                 |  1
 virt/kvm/kvm_main.c                      | 70
 5 files changed, 79 insertions(+), 32 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 2ddefd58b1aa..461956a0ee8e 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -254,6 +254,11 @@ since the last call to this ioctl.  Bit 0 is the first page in the
 memory slot.  Ensure the entire structure is cleared to avoid padding
 issues.
 
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specify
+the address space for which you want to return the dirty bitmap.
+They must be less than the value that KVM_CHECK_EXTENSION returns for
+the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+
 
 4.9 KVM_SET_MEMORY_ALIAS
 
@@ -924,6 +929,13 @@ slot.  When changing an existing slot, it may be moved in the guest
 physical memory space, or its flags may be modified.  It may not be
 resized.  Slots may not overlap in guest physical address space.
 
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
+specify the address space which is being modified.  They must be
+less than the value that KVM_CHECK_EXTENSION returns for the
+KVM_CAP_MULTI_ADDRESS_SPACE capability.  Slots in separate address spaces
+are unrelated; the restriction on overlapping slots only applies within
+each address space.
+
 Memory for the region is taken starting at the address denoted by the
 field userspace_addr, which must point at user addressable memory for
 the entire memory slot size.  Any object may back this memory, including
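
As an illustration of the encoding described above, here is a minimal userspace sketch of creating a memslot in a non-default address space with KVM_SET_USER_MEMORY_REGION; vm_fd, guest_mem and the id/as_id values are invented for the example:

/* Illustrative only: vm_fd, guest_mem and the id/as_id values are made up. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_region_in_as(int vm_fd, void *guest_mem)
{
        struct kvm_userspace_memory_region region;
        unsigned int as_id = 1; /* second address space, needs the capability */
        unsigned int id = 3;    /* ordinary slot number, below KVM_USER_MEM_SLOTS */

        memset(&region, 0, sizeof(region));
        region.slot = (as_id << 16) | id;       /* bits 16-31: address space, bits 0-15: slot */
        region.flags = 0;
        region.guest_phys_addr = 0x100000;      /* page aligned */
        region.memory_size = 0x200000;          /* page aligned */
        region.userspace_addr = (unsigned long)guest_mem;

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}

If KVM_CAP_MULTI_ADDRESS_SPACE is absent or reports 1, only address space 0 exists and bits 16-31 of "slot" must remain zero.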
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 3536d12eb798..2aa79c864e91 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -430,7 +430,7 @@ static inline void note_hpte_modification(struct kvm *kvm,
  */
 static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
 {
-        return rcu_dereference_raw_notrace(kvm->memslots);
+        return rcu_dereference_raw_notrace(kvm->memslots[0]);
 }
 
 extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ba1ea43998e4..9564fd78c547 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -44,6 +44,10 @@
 /* Two fragments for cross MMIO pages. */
 #define KVM_MAX_MMIO_FRAGMENTS 2
 
+#ifndef KVM_ADDRESS_SPACE_NUM
+#define KVM_ADDRESS_SPACE_NUM 1
+#endif
+
 /*
  * For the normal pfn, the highest 12 bits should be zero,
  * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
@@ -331,6 +335,13 @@ struct kvm_kernel_irq_routing_entry {
 #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
 #endif
 
+#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
+{
+        return 0;
+}
+#endif
+
 /*
  * Note:
  * memslots are not sorted by id anymore, please use id_to_memslot()
@@ -349,7 +360,7 @@ struct kvm {
         spinlock_t mmu_lock;
         struct mutex slots_lock;
         struct mm_struct *mm; /* userspace tied to this vm */
-        struct kvm_memslots *memslots;
+        struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
         struct srcu_struct srcu;
         struct srcu_struct irq_srcu;
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
@@ -464,16 +475,23 @@ void kvm_exit(void);
 void kvm_get_kvm(struct kvm *kvm);
 void kvm_put_kvm(struct kvm *kvm);
 
-static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
-        return rcu_dereference_check(kvm->memslots,
+        return rcu_dereference_check(kvm->memslots[as_id],
                         srcu_read_lock_held(&kvm->srcu)
                         || lockdep_is_held(&kvm->slots_lock));
 }
 
+static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+{
+        return __kvm_memslots(kvm, 0);
+}
+
 static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
 {
-        return kvm_memslots(vcpu->kvm);
+        int as_id = kvm_arch_vcpu_memslots_id(vcpu);
+
+        return __kvm_memslots(vcpu->kvm, as_id);
 }
 
 static inline struct kvm_memory_slot *
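
The defaults above keep single-address-space architectures unchanged. As a sketch of how an architecture could opt in (this is not part of the patch; the per-vcpu mode flag is hypothetical), its asm/kvm_host.h would override the defaults roughly like this:

/*
 * Hypothetical asm/kvm_host.h excerpt for an architecture that wants two
 * memslot sets. Illustrative only, not taken from this patch.
 */
#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

/*
 * Map the vcpu's current execution mode to an address-space index.
 * "alt_mode" is an invented flag; a macro is used here because
 * struct kvm_vcpu is not yet complete where asm/kvm_host.h is included.
 */
#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.alt_mode ? 1 : 0)

With KVM_ADDRESS_SPACE_NUM greater than 1, kvm_create_vm() below allocates one struct kvm_memslots per address space, kvm_vcpu_memslots() follows the vcpu's current mode, and KVM_CHECK_EXTENSION starts reporting KVM_CAP_MULTI_ADDRESS_SPACE.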
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index eace8babd227..5ff1038437e3 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -816,6 +816,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_HWRNG 115
 #define KVM_CAP_DISABLE_QUIRKS 116
 #define KVM_CAP_X86_SMM 117
+#define KVM_CAP_MULTI_ADDRESS_SPACE 118
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3a121cedcc77..848af90b8091 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -518,9 +518,11 @@ static struct kvm *kvm_create_vm(unsigned long type)
         BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
         r = -ENOMEM;
-        kvm->memslots = kvm_alloc_memslots();
-        if (!kvm->memslots)
-                goto out_err_no_srcu;
+        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+                kvm->memslots[i] = kvm_alloc_memslots();
+                if (!kvm->memslots[i])
+                        goto out_err_no_srcu;
+        }
 
         if (init_srcu_struct(&kvm->srcu))
                 goto out_err_no_srcu;
@@ -562,7 +564,8 @@ out_err_no_srcu:
 out_err_no_disable:
         for (i = 0; i < KVM_NR_BUSES; i++)
                 kfree(kvm->buses[i]);
-        kvm_free_memslots(kvm, kvm->memslots);
+        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+                kvm_free_memslots(kvm, kvm->memslots[i]);
         kvm_arch_free_vm(kvm);
         return ERR_PTR(r);
 }
@@ -612,7 +615,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #endif
         kvm_arch_destroy_vm(kvm);
         kvm_destroy_devices(kvm);
-        kvm_free_memslots(kvm, kvm->memslots);
+        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+                kvm_free_memslots(kvm, kvm->memslots[i]);
         cleanup_srcu_struct(&kvm->irq_srcu);
         cleanup_srcu_struct(&kvm->srcu);
         kvm_arch_free_vm(kvm);
@@ -729,9 +733,9 @@ static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
 }
 
 static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
-                struct kvm_memslots *slots)
+                int as_id, struct kvm_memslots *slots)
 {
-        struct kvm_memslots *old_memslots = kvm_memslots(kvm);
+        struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
 
         /*
          * Set the low bit in the generation, which disables SPTE caching
@@ -740,7 +744,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
         WARN_ON(old_memslots->generation & 1);
         slots->generation = old_memslots->generation + 1;
 
-        rcu_assign_pointer(kvm->memslots, slots);
+        rcu_assign_pointer(kvm->memslots[as_id], slots);
         synchronize_srcu_expedited(&kvm->srcu);
 
         /*
@@ -772,6 +776,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         struct kvm_memory_slot *slot;
         struct kvm_memory_slot old, new;
         struct kvm_memslots *slots = NULL, *old_memslots;
+        int as_id, id;
         enum kvm_mr_change change;
 
         r = check_memory_region_flags(mem);
@@ -779,24 +784,27 @@ int __kvm_set_memory_region(struct kvm *kvm,
                 goto out;
 
         r = -EINVAL;
+        as_id = mem->slot >> 16;
+        id = (u16)mem->slot;
+
         /* General sanity checks */
         if (mem->memory_size & (PAGE_SIZE - 1))
                 goto out;
         if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                 goto out;
         /* We can read the guest memory with __xxx_user() later on. */
-        if ((mem->slot < KVM_USER_MEM_SLOTS) &&
+        if ((id < KVM_USER_MEM_SLOTS) &&
             ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
              !access_ok(VERIFY_WRITE,
                         (void __user *)(unsigned long)mem->userspace_addr,
                         mem->memory_size)))
                 goto out;
-        if (mem->slot >= KVM_MEM_SLOTS_NUM)
+        if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
                 goto out;
         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                 goto out;
 
-        slot = id_to_memslot(kvm_memslots(kvm), mem->slot);
+        slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
         base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
         npages = mem->memory_size >> PAGE_SHIFT;
 
@@ -805,7 +813,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
         new = old = *slot;
 
-        new.id = mem->slot;
+        new.id = id;
         new.base_gfn = base_gfn;
         new.npages = npages;
         new.flags = mem->flags;
@@ -840,9 +848,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
         if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
                 /* Check for overlaps */
                 r = -EEXIST;
-                kvm_for_each_memslot(slot, kvm_memslots(kvm)) {
+                kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
                         if ((slot->id >= KVM_USER_MEM_SLOTS) ||
-                            (slot->id == mem->slot))
+                            (slot->id == id))
                                 continue;
                         if (!((base_gfn + npages <= slot->base_gfn) ||
                               (base_gfn >= slot->base_gfn + slot->npages)))
@@ -871,13 +879,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
         slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
         if (!slots)
                 goto out_free;
-        memcpy(slots, kvm_memslots(kvm), sizeof(struct kvm_memslots));
+        memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
 
         if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
-                slot = id_to_memslot(slots, mem->slot);
+                slot = id_to_memslot(slots, id);
                 slot->flags |= KVM_MEMSLOT_INVALID;
 
-                old_memslots = install_new_memslots(kvm, slots);
+                old_memslots = install_new_memslots(kvm, as_id, slots);
 
                 /* slot was deleted or moved, clear iommu mapping */
                 kvm_iommu_unmap_pages(kvm, &old);
@@ -909,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         }
 
         update_memslots(slots, &new);
-        old_memslots = install_new_memslots(kvm, slots);
+        old_memslots = install_new_memslots(kvm, as_id, slots);
 
         kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);
 
@@ -956,7 +964,7 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                           struct kvm_userspace_memory_region *mem)
 {
-        if (mem->slot >= KVM_USER_MEM_SLOTS)
+        if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
                 return -EINVAL;
 
         return kvm_set_memory_region(kvm, mem);
@@ -967,16 +975,18 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
         struct kvm_memslots *slots;
         struct kvm_memory_slot *memslot;
-        int r, i;
+        int r, i, as_id, id;
         unsigned long n;
         unsigned long any = 0;
 
         r = -EINVAL;
-        if (log->slot >= KVM_USER_MEM_SLOTS)
+        as_id = log->slot >> 16;
+        id = (u16)log->slot;
+        if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
                 goto out;
 
-        slots = kvm_memslots(kvm);
-        memslot = id_to_memslot(slots, log->slot);
+        slots = __kvm_memslots(kvm, as_id);
+        memslot = id_to_memslot(slots, id);
         r = -ENOENT;
         if (!memslot->dirty_bitmap)
                 goto out;
@@ -1027,17 +1037,19 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 {
         struct kvm_memslots *slots;
         struct kvm_memory_slot *memslot;
-        int r, i;
+        int r, i, as_id, id;
         unsigned long n;
         unsigned long *dirty_bitmap;
         unsigned long *dirty_bitmap_buffer;
 
         r = -EINVAL;
-        if (log->slot >= KVM_USER_MEM_SLOTS)
+        as_id = log->slot >> 16;
+        id = (u16)log->slot;
+        if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
                 goto out;
 
-        slots = kvm_memslots(kvm);
-        memslot = id_to_memslot(slots, log->slot);
+        slots = __kvm_memslots(kvm, as_id);
+        memslot = id_to_memslot(slots, id);
 
         dirty_bitmap = memslot->dirty_bitmap;
         r = -ENOENT;
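
A hedged userspace counterpart to the two dirty-log functions above, fetching the bitmap for a slot in a non-default address space; vm_fd, the ids and bitmap_size are placeholders:

/* Sketch only: vm_fd, as_id, id and bitmap_size are invented placeholders. */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int fetch_dirty_log(int vm_fd, unsigned int as_id, unsigned int id,
                           size_t bitmap_size)
{
        struct kvm_dirty_log log;
        int ret;

        memset(&log, 0, sizeof(log));           /* clear padding, as api.txt asks */
        log.slot = (as_id << 16) | id;          /* same encoding as the memslot ioctls */
        log.dirty_bitmap = calloc(1, bitmap_size);
        if (!log.dirty_bitmap)
                return -1;

        ret = ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
        /* a real caller would scan the bitmap here before freeing it */
        free(log.dirty_bitmap);
        return ret;
}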
@@ -2620,6 +2632,10 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
         case KVM_CAP_IRQ_ROUTING:
                 return KVM_MAX_IRQ_ROUTES;
 #endif
+#if KVM_ADDRESS_SPACE_NUM > 1
+        case KVM_CAP_MULTI_ADDRESS_SPACE:
+                return KVM_ADDRESS_SPACE_NUM;
+#endif
         default:
                 break;
         }
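
Userspace can query the number of supported address spaces with KVM_CHECK_EXTENSION (which this function backs) before using the slot encoding. A small sketch, assuming kvm_fd is an open /dev/kvm descriptor:

/* Sketch: kvm_fd is an open /dev/kvm file descriptor. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int nr_address_spaces(int kvm_fd)
{
        int n = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MULTI_ADDRESS_SPACE);

        /* 0 or an error means only the default address space (as_id 0) exists. */
        return n > 0 ? n : 1;
}

Valid as_id values are then 0 up to the returned count minus one, matching the api.txt requirement that bits 16-31 be less than what KVM_CHECK_EXTENSION reports.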