diff options

 include/linux/kvm_host.h |  3 +--
 virt/kvm/iommu.c         | 13 ++++++-------
 virt/kvm/kvm_main.c      |  2 +-
 3 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f1f78deece10..9af240387fe6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -440,8 +440,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 #define KVM_IOMMU_CACHE_COHERENCY	0x1
 
 #ifdef CONFIG_IOMMU_API
-int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
-			unsigned long npages);
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index bc697a66a883..cf567d8033db 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -32,10 +32,10 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages);
 
-int kvm_iommu_map_pages(struct kvm *kvm,
-			gfn_t base_gfn, unsigned long npages)
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-	gfn_t gfn = base_gfn;
+	gfn_t gfn = slot->base_gfn;
+	unsigned long npages = slot->npages;
 	pfn_t pfn;
 	int i, r = 0;
 	struct iommu_domain *domain = kvm->arch.iommu_domain;
@@ -54,7 +54,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
 			continue;
 
-		pfn = gfn_to_pfn(kvm, gfn);
+		pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
 		r = iommu_map_range(domain,
 				    gfn_to_gpa(gfn),
 				    pfn_to_hpa(pfn),
@@ -69,7 +69,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 	return 0;
 
 unmap_pages:
-	kvm_iommu_put_pages(kvm, base_gfn, i);
+	kvm_iommu_put_pages(kvm, slot->base_gfn, i);
 	return r;
 }
 
@@ -81,8 +81,7 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 	slots = kvm->memslots;
 
 	for (i = 0; i < slots->nmemslots; i++) {
-		r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn,
-					slots->memslots[i].npages);
+		r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
 		if (r)
 			break;
 	}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4e2321c733f7..87d296d8b270 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -684,7 +684,7 @@ skip_lpage:
 	spin_unlock(&kvm->mmu_lock);
 #ifdef CONFIG_DMAR
 	/* map the pages in iommu page table */
-	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+	r = kvm_iommu_map_pages(kvm, memslot);
 	if (r)
 		goto out;
 #endif