Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/iommu.c    |  6 ++----
-rw-r--r--  virt/kvm/kvm_main.c | 43 +++++++++++++++++++++++++++++++++----------
2 files changed, 35 insertions(+), 14 deletions(-)
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index e9693a29d00..4c403750360 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -73,14 +73,13 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 {
 	int i, r = 0;
 
-	down_read(&kvm->slots_lock);
 	for (i = 0; i < kvm->nmemslots; i++) {
 		r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn,
 					kvm->memslots[i].npages);
 		if (r)
 			break;
 	}
-	up_read(&kvm->slots_lock);
+
 	return r;
 }
 
@@ -190,12 +189,11 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
 	int i;
-	down_read(&kvm->slots_lock);
+
 	for (i = 0; i < kvm->nmemslots; i++) {
 		kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
 				kvm->memslots[i].npages);
 	}
-	up_read(&kvm->slots_lock);
 
 	return 0;
 }
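
With the down_read()/up_read() pair gone, both helpers now assume the caller already holds kvm->slots_lock for reading while they walk kvm->memslots. A minimal sketch of the resulting convention (the calling code below is illustrative, not part of this patch):

	down_read(&kvm->slots_lock);		/* protects kvm->memslots */
	r = kvm_iommu_map_memslots(kvm);	/* must not retake slots_lock */
	up_read(&kvm->slots_lock);

This is what makes the down_read() added in kvm_vm_ioctl_assign_device() below sufficient: the ioctl path takes the lock once, and the iommu helpers run under it.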
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3a5a08298aa..29a667ce35b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -173,7 +173,6 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 		assigned_dev->host_irq_disabled = false;
 	}
 	mutex_unlock(&assigned_dev->kvm->lock);
-	kvm_put_kvm(assigned_dev->kvm);
 }
 
 static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
@@ -181,8 +180,6 @@ static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
 	struct kvm_assigned_dev_kernel *assigned_dev =
 		(struct kvm_assigned_dev_kernel *) dev_id;
 
-	kvm_get_kvm(assigned_dev->kvm);
-
 	schedule_work(&assigned_dev->interrupt_work);
 
 	disable_irq_nosync(irq);
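
The deleted kvm_get_kvm()/kvm_put_kvm() pair was the usual pin-across-deferred-work pattern: grab a reference in the hard interrupt handler, drop it when the work item completes, so the object cannot vanish while work is queued. A generic sketch of that pattern (names illustrative):

	static irqreturn_t my_intr(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;

		kref_get(&dev->ref);		/* pin until the work runs */
		schedule_work(&dev->work);	/* work handler ends with kref_put() */
		disable_irq_nosync(irq);
		return IRQ_HANDLED;
	}

The patch drops the reference counting entirely and instead synchronizes with the work item at teardown (see cancel_work_sync() below and kvm_arch_sync_events() in kvm_destroy_vm()), which is simpler and closes the race the old scheme had around cancellation.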
@@ -213,6 +210,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 	}
 }
 
+/* This function implicitly takes kvm->lock via cancel_work_sync() */
 static void kvm_free_assigned_irq(struct kvm *kvm,
 				  struct kvm_assigned_dev_kernel *assigned_dev)
 {
@@ -228,11 +226,24 @@ static void kvm_free_assigned_irq(struct kvm *kvm,
 	if (!assigned_dev->irq_requested_type)
 		return;
 
-	if (cancel_work_sync(&assigned_dev->interrupt_work))
-		/* We had pending work. That means we will have to take
-		 * care of kvm_put_kvm.
-		 */
-		kvm_put_kvm(kvm);
+	/*
+	 * cancel_work_sync() returns true if either:
+	 * 1. the work was scheduled and then cancelled, or
+	 * 2. the work callback has already executed.
+	 *
+	 * The first case guarantees that the irq is disabled and no more
+	 * events will arrive. In the second case the irq may have been
+	 * re-enabled (e.g. for MSI), so disable it here to prevent further events.
+	 *
+	 * Note this may result in a nested disable if the interrupt type
+	 * is INTx, but that is fine since we are about to free it.
+	 *
+	 * If this function is part of VM destruction, the caller must
+	 * ensure that the kvm state is still valid here, since we may
+	 * have to wait for interrupt_work to complete.
+	 */
+	disable_irq_nosync(assigned_dev->host_irq);
+	cancel_work_sync(&assigned_dev->interrupt_work);
 
 	free_irq(assigned_dev->host_irq, (void *)assigned_dev);
 
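
The replacement sequence is the standard idiom for quiescing a work item that an interrupt handler keeps rescheduling: mask the line first so the handler can queue no new work, then let cancel_work_sync() cancel or wait out whatever is already in flight. Only then is the line safe to release (dev below stands in for the assigned_dev above):

	disable_irq_nosync(dev->host_irq);	/* no new ISR invocations */
	cancel_work_sync(&dev->work);		/* cancel or wait for pending work */
	free_irq(dev->host_irq, dev);		/* now safe to release the line */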
@@ -285,8 +296,8 @@ static int assigned_device_update_intx(struct kvm *kvm,
 
 	if (irqchip_in_kernel(kvm)) {
 		if (!msi2intx &&
-		    adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) {
-			free_irq(adev->host_irq, (void *)kvm);
+		    (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)) {
+			free_irq(adev->host_irq, (void *)adev);
 			pci_disable_msi(adev->dev);
 		}
 
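
The second change in this hunk is a straight bug fix: free_irq() must be passed the same dev_id cookie that was given to request_irq(), otherwise the kernel cannot match the handler to remove. The diff above shows the handler casts its dev_id to struct kvm_assigned_dev_kernel *, so it was registered with adev, yet the old code freed with kvm. The required pairing (devname string illustrative):

	r = request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
			"kvm_assigned_device", adev);
	...
	free_irq(adev->host_irq, adev);	/* dev_id must match request_irq() */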
@@ -455,6 +466,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	struct kvm_assigned_dev_kernel *match;
 	struct pci_dev *dev;
 
+	down_read(&kvm->slots_lock);
 	mutex_lock(&kvm->lock);
 
 	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
@@ -516,6 +528,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 
 out:
 	mutex_unlock(&kvm->lock);
+	up_read(&kvm->slots_lock);
 	return r;
 out_list_del:
 	list_del(&match->list);
@@ -527,6 +540,7 @@ out_put:
 out_free:
 	kfree(match);
 	mutex_unlock(&kvm->lock);
+	up_read(&kvm->slots_lock);
 	return r;
 }
 #endif
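
Both exit paths now undo the locks in strict reverse order of acquisition. Keeping one consistent order, slots_lock outer and kvm->lock inner, is what prevents an AB-BA deadlock against other paths that nest the same two locks:

	down_read(&kvm->slots_lock);	/* outer: memslot layout */
	mutex_lock(&kvm->lock);		/* inner: assigned-device list */
	...
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);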
@@ -789,11 +803,19 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 	return young;
 }
 
+static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
+				     struct mm_struct *mm)
+{
+	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+	kvm_arch_flush_shadow(kvm);
+}
+
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
 	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
+	.release		= kvm_mmu_notifier_release,
 };
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
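
The new .release hook fires from mmu_notifier_release() when the owning mm is torn down (e.g. at exit_mmap()), before the pages themselves are freed, so flushing the shadow page tables there guarantees no stale guest translations outlive the address space. Registration follows the normal mmu-notifier pattern; a sketch, assuming the notifier is embedded in struct kvm as kvm->mmu_notifier:

	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);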
@@ -883,6 +905,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 {
 	struct mm_struct *mm = kvm->mm;
 
+	kvm_arch_sync_events(kvm);
 	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
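
kvm_arch_sync_events() runs before the VM is unlinked and its structures freed, while kvm is still fully valid, giving architecture code one last point to wait out asynchronous work that references the VM (such as the assigned-device interrupt_work above). One possible shape of an implementation, purely as a hypothetical sketch:

	void kvm_arch_sync_events(struct kvm *kvm)
	{
		/*
		 * Hypothetical: flush any deferred work that may still
		 * reference this VM before teardown continues.
		 */
		flush_scheduled_work();
	}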