diff options
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/iommu.c    | 18
-rw-r--r--  virt/kvm/kvm_main.c | 36
2 files changed, 35 insertions(+), 19 deletions(-)
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 15147583abd1..bc697a66a883 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -76,10 +76,13 @@ unmap_pages: | |||
76 | static int kvm_iommu_map_memslots(struct kvm *kvm) | 76 | static int kvm_iommu_map_memslots(struct kvm *kvm) |
77 | { | 77 | { |
78 | int i, r = 0; | 78 | int i, r = 0; |
79 | struct kvm_memslots *slots; | ||
79 | 80 | ||
80 | for (i = 0; i < kvm->nmemslots; i++) { | 81 | slots = kvm->memslots; |
81 | r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn, | 82 | |
82 | kvm->memslots[i].npages); | 83 | for (i = 0; i < slots->nmemslots; i++) { |
84 | r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn, | ||
85 | slots->memslots[i].npages); | ||
83 | if (r) | 86 | if (r) |
84 | break; | 87 | break; |
85 | } | 88 | } |
@@ -210,10 +213,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm, | |||
210 | static int kvm_iommu_unmap_memslots(struct kvm *kvm) | 213 | static int kvm_iommu_unmap_memslots(struct kvm *kvm) |
211 | { | 214 | { |
212 | int i; | 215 | int i; |
216 | struct kvm_memslots *slots; | ||
217 | |||
218 | slots = kvm->memslots; | ||
213 | 219 | ||
214 | for (i = 0; i < kvm->nmemslots; i++) { | 220 | for (i = 0; i < slots->nmemslots; i++) { |
215 | kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn, | 221 | kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn, |
216 | kvm->memslots[i].npages); | 222 | slots->memslots[i].npages); |
217 | } | 223 | } |
218 | 224 | ||
219 | return 0; | 225 | return 0; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bc23b8e0609b..86dd8f3d29c9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -375,12 +375,16 @@ static struct kvm *kvm_create_vm(void) | |||
375 | INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); | 375 | INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); |
376 | #endif | 376 | #endif |
377 | 377 | ||
378 | r = -ENOMEM; | ||
379 | kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); | ||
380 | if (!kvm->memslots) | ||
381 | goto out_err; | ||
382 | |||
378 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 383 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
379 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | 384 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
380 | if (!page) { | 385 | if (!page) |
381 | r = -ENOMEM; | ||
382 | goto out_err; | 386 | goto out_err; |
383 | } | 387 | |
384 | kvm->coalesced_mmio_ring = | 388 | kvm->coalesced_mmio_ring = |
385 | (struct kvm_coalesced_mmio_ring *)page_address(page); | 389 | (struct kvm_coalesced_mmio_ring *)page_address(page); |
386 | #endif | 390 | #endif |
@@ -416,6 +420,7 @@ out: | |||
416 | out_err: | 420 | out_err: |
417 | hardware_disable_all(); | 421 | hardware_disable_all(); |
418 | out_err_nodisable: | 422 | out_err_nodisable: |
423 | kfree(kvm->memslots); | ||
419 | kfree(kvm); | 424 | kfree(kvm); |
420 | return ERR_PTR(r); | 425 | return ERR_PTR(r); |
421 | } | 426 | } |
@@ -450,9 +455,12 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free, | |||
450 | void kvm_free_physmem(struct kvm *kvm) | 455 | void kvm_free_physmem(struct kvm *kvm) |
451 | { | 456 | { |
452 | int i; | 457 | int i; |
458 | struct kvm_memslots *slots = kvm->memslots; | ||
459 | |||
460 | for (i = 0; i < slots->nmemslots; ++i) | ||
461 | kvm_free_physmem_slot(&slots->memslots[i], NULL); | ||
453 | 462 | ||
454 | for (i = 0; i < kvm->nmemslots; ++i) | 463 | kfree(kvm->memslots); |
455 | kvm_free_physmem_slot(&kvm->memslots[i], NULL); | ||
456 | } | 464 | } |
457 | 465 | ||
458 | static void kvm_destroy_vm(struct kvm *kvm) | 466 | static void kvm_destroy_vm(struct kvm *kvm) |
@@ -533,7 +541,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
533 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) | 541 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) |
534 | goto out; | 542 | goto out; |
535 | 543 | ||
536 | memslot = &kvm->memslots[mem->slot]; | 544 | memslot = &kvm->memslots->memslots[mem->slot]; |
537 | base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; | 545 | base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; |
538 | npages = mem->memory_size >> PAGE_SHIFT; | 546 | npages = mem->memory_size >> PAGE_SHIFT; |
539 | 547 | ||
@@ -554,7 +562,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
554 | /* Check for overlaps */ | 562 | /* Check for overlaps */ |
555 | r = -EEXIST; | 563 | r = -EEXIST; |
556 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { | 564 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { |
557 | struct kvm_memory_slot *s = &kvm->memslots[i]; | 565 | struct kvm_memory_slot *s = &kvm->memslots->memslots[i]; |
558 | 566 | ||
559 | if (s == memslot || !s->npages) | 567 | if (s == memslot || !s->npages) |
560 | continue; | 568 | continue; |
@@ -656,8 +664,8 @@ skip_lpage: | |||
656 | kvm_arch_flush_shadow(kvm); | 664 | kvm_arch_flush_shadow(kvm); |
657 | 665 | ||
658 | spin_lock(&kvm->mmu_lock); | 666 | spin_lock(&kvm->mmu_lock); |
659 | if (mem->slot >= kvm->nmemslots) | 667 | if (mem->slot >= kvm->memslots->nmemslots) |
660 | kvm->nmemslots = mem->slot + 1; | 668 | kvm->memslots->nmemslots = mem->slot + 1; |
661 | 669 | ||
662 | *memslot = new; | 670 | *memslot = new; |
663 | spin_unlock(&kvm->mmu_lock); | 671 | spin_unlock(&kvm->mmu_lock); |
@@ -727,7 +735,7 @@ int kvm_get_dirty_log(struct kvm *kvm, | |||
727 | if (log->slot >= KVM_MEMORY_SLOTS) | 735 | if (log->slot >= KVM_MEMORY_SLOTS) |
728 | goto out; | 736 | goto out; |
729 | 737 | ||
730 | memslot = &kvm->memslots[log->slot]; | 738 | memslot = &kvm->memslots->memslots[log->slot]; |
731 | r = -ENOENT; | 739 | r = -ENOENT; |
732 | if (!memslot->dirty_bitmap) | 740 | if (!memslot->dirty_bitmap) |
733 | goto out; | 741 | goto out; |
@@ -781,9 +789,10 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva); | |||
781 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) | 789 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) |
782 | { | 790 | { |
783 | int i; | 791 | int i; |
792 | struct kvm_memslots *slots = kvm->memslots; | ||
784 | 793 | ||
785 | for (i = 0; i < kvm->nmemslots; ++i) { | 794 | for (i = 0; i < slots->nmemslots; ++i) { |
786 | struct kvm_memory_slot *memslot = &kvm->memslots[i]; | 795 | struct kvm_memory_slot *memslot = &slots->memslots[i]; |
787 | 796 | ||
788 | if (gfn >= memslot->base_gfn | 797 | if (gfn >= memslot->base_gfn |
789 | && gfn < memslot->base_gfn + memslot->npages) | 798 | && gfn < memslot->base_gfn + memslot->npages) |
@@ -802,10 +811,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) | |||
802 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) | 811 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) |
803 | { | 812 | { |
804 | int i; | 813 | int i; |
814 | struct kvm_memslots *slots = kvm->memslots; | ||
805 | 815 | ||
806 | gfn = unalias_gfn(kvm, gfn); | 816 | gfn = unalias_gfn(kvm, gfn); |
807 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { | 817 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { |
808 | struct kvm_memory_slot *memslot = &kvm->memslots[i]; | 818 | struct kvm_memory_slot *memslot = &slots->memslots[i]; |
809 | 819 | ||
810 | if (gfn >= memslot->base_gfn | 820 | if (gfn >= memslot->base_gfn |
811 | && gfn < memslot->base_gfn + memslot->npages) | 821 | && gfn < memslot->base_gfn + memslot->npages) |