author     Alex Williamson <alex.williamson@redhat.com>   2012-04-11 11:51:49 -0400
committer  Marcelo Tosatti <mtosatti@redhat.com>          2012-04-11 21:55:25 -0400
commit     32f6daad4651a748a58a3ab6da0611862175722f (patch)
tree       09e2643d6df18db1ab7655fcb53ab797e9ac69c1 /virt/kvm
parent     f19a0c2c2e6add90b7d6a1b7595abebfe2e4c37a (diff)
KVM: unmap pages from the iommu when slots are removed
We've been adding new mappings, but not destroying old mappings.
This can lead to a page leak as pages are pinned using
get_user_pages, but only unpinned with put_page if they still
exist in the memslots list on vm shutdown. A memslot that is
destroyed while an iommu domain is enabled for the guest will
therefore result in an elevated page reference count that is
never cleared.
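
The reference-count imbalance described above can be modelled with a small, self-contained userspace program. This is only an illustration of the reasoning, not kernel code; the slot structure and the iommu_map_slot()/iommu_unmap_slot() helpers are hypothetical stand-ins for kvm_iommu_map_pages()/kvm_iommu_put_pages():

#include <stdio.h>

struct slot {
        int present;    /* still in the memslot list? */
        int pinned;     /* pages still hold the extra reference? */
};

static struct slot slots[2];

/* models pinning via get_user_pages() when a slot is mapped into the iommu */
static void iommu_map_slot(struct slot *s)   { s->pinned = 1; }
/* models unpinning via put_page() when a slot is unmapped */
static void iommu_unmap_slot(struct slot *s) { s->pinned = 0; }

int main(void)
{
        for (int i = 0; i < 2; i++) {
                slots[i].present = 1;
                iommu_map_slot(&slots[i]);
        }

        /* slot 1 is destroyed while the iommu domain is active; before the
         * patch nothing unmapped or unpinned it at this point */
        slots[1].present = 0;

        /* vm shutdown: only slots still in the memslot list get unpinned */
        for (int i = 0; i < 2; i++)
                if (slots[i].present)
                        iommu_unmap_slot(&slots[i]);

        for (int i = 0; i < 2; i++)
                if (slots[i].pinned)
                        printf("slot %d: references never released (leak)\n", i);
        return 0;
}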
Additionally, without this fix, the iommu is only programmed
with the first translation for a gpa. This can result in
peer-to-peer errors if a mapping is destroyed and replaced by a
new mapping at the same gpa as the iommu will still be pointing
to the original, pinned memory address.
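
The "first translation wins" behaviour comes from the map path skipping any gpa that already has an IOMMU translation, so re-creating a slot at the same gpa never updates the domain. A toy model of that check, again only a sketch with a hypothetical map_gpa() helper rather than the real mapping loop:

#include <stdio.h>

static unsigned long iommu_table[4];    /* gpa page -> host address; 0 means unmapped */

/* models the map path: installs a translation only if none exists yet */
static void map_gpa(unsigned int gpa, unsigned long host_addr)
{
        if (iommu_table[gpa] != 0)
                return;                 /* already mapped: first translation wins */
        iommu_table[gpa] = host_addr;
}

int main(void)
{
        map_gpa(0, 0x1000);     /* original slot backing gpa page 0 */

        /* the slot is deleted and a new one created at the same gpa; without
         * an unmap in between, the stale translation is silently kept */
        map_gpa(0, 0x2000);

        printf("gpa page 0 still translates to %#lx (new memory is at 0x2000)\n",
               iommu_table[0]);
        return 0;
}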
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/iommu.c     | 7
-rw-r--r--  virt/kvm/kvm_main.c  | 5
2 files changed, 9 insertions, 3 deletions
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index a457d2138f49..fec1723de9b4 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -310,6 +310,11 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
         }
 }
 
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+        kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
+}
+
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
         int idx;
@@ -320,7 +325,7 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
         slots = kvm_memslots(kvm);
 
         kvm_for_each_memslot(memslot, slots)
-                kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+                kvm_iommu_unmap_pages(kvm, memslot);
 
         srcu_read_unlock(&kvm->srcu, idx);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 42b73930a6de..9739b533ca2e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -808,12 +808,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
         if (r)
                 goto out_free;
 
-        /* map the pages in iommu page table */
+        /* map/unmap the pages in iommu page table */
         if (npages) {
                 r = kvm_iommu_map_pages(kvm, &new);
                 if (r)
                         goto out_free;
-        }
+        } else
+                kvm_iommu_unmap_pages(kvm, &old);
 
         r = -ENOMEM;
         slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
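
For reference, the else-branch added above is reached on slot deletion: userspace deletes a memslot by calling KVM_SET_USER_MEMORY_REGION with memory_size set to 0, which makes npages 0 in __kvm_set_memory_region() and now also unmaps and unpins the old slot's pages. A minimal userspace sketch of that call, assuming vm_fd is an already-created VM file descriptor and with error handling omitted:

/*
 * Sketch only: deleting a memslot from userspace. A memory_size of 0
 * tells KVM to drop the slot, the path that now reaches
 * kvm_iommu_unmap_pages(). vm_fd is assumed to be an existing VM fd.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

int delete_memslot(int vm_fd, unsigned int slot_id)
{
        struct kvm_userspace_memory_region region = {
                .slot = slot_id,
                .memory_size = 0,       /* size 0 == delete this slot */
        };

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}

With a device assigned (an active iommu domain for the guest), this deletion path is exactly the case that previously left the slot's pages pinned and mapped.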