diff options
author | Alex Williamson <alex.williamson@redhat.com> | 2013-10-30 13:02:23 -0400 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2013-10-30 14:02:13 -0400 |
commit | d96eb2c6f480769bff32054e78b964860dae4d56 (patch) | |
tree | 48245bcee8cd688c11c2974c4a0c7391660dd423 /virt/kvm/iommu.c | |
parent | ec53500fae421e07c5d035918ca454a429732ef4 (diff) |
kvm/x86: Convert iommu_flags to iommu_noncoherent
Default to operating in coherent mode. This simplifies the logic when
we switch to a model of registering and unregistering noncoherent I/O
with KVM.
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt/kvm/iommu.c')
-rw-r--r-- | virt/kvm/iommu.c | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index a3b14109049b..d32d156a423a 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -79,7 +79,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 	flags = IOMMU_READ;
 	if (!(slot->flags & KVM_MEM_READONLY))
 		flags |= IOMMU_WRITE;
-	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
+	if (!kvm->arch.iommu_noncoherent)
 		flags |= IOMMU_CACHE;

@@ -158,7 +158,8 @@ int kvm_assign_device(struct kvm *kvm,
 {
 	struct pci_dev *pdev = NULL;
 	struct iommu_domain *domain = kvm->arch.iommu_domain;
-	int r, last_flags;
+	int r;
+	bool noncoherent;

 	/* check if iommu exists and in use */
 	if (!domain)
@@ -174,15 +175,13 @@ int kvm_assign_device(struct kvm *kvm,
 		return r;
 	}

-	last_flags = kvm->arch.iommu_flags;
-	if (iommu_domain_has_cap(kvm->arch.iommu_domain,
-				 IOMMU_CAP_CACHE_COHERENCY))
-		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;
+	noncoherent = !iommu_domain_has_cap(kvm->arch.iommu_domain,
+					    IOMMU_CAP_CACHE_COHERENCY);

 	/* Check if need to update IOMMU page table for guest memory */
-	if ((last_flags ^ kvm->arch.iommu_flags) ==
-	    KVM_IOMMU_CACHE_COHERENCY) {
+	if (noncoherent != kvm->arch.iommu_noncoherent) {
 		kvm_iommu_unmap_memslots(kvm);
+		kvm->arch.iommu_noncoherent = noncoherent;
 		r = kvm_iommu_map_memslots(kvm);
 		if (r)
 			goto out_unmap;
@@ -342,6 +341,7 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
 	mutex_lock(&kvm->slots_lock);
 	kvm_iommu_unmap_memslots(kvm);
 	kvm->arch.iommu_domain = NULL;
+	kvm->arch.iommu_noncoherent = false;
 	mutex_unlock(&kvm->slots_lock);

 	iommu_domain_free(domain);