summaryrefslogtreecommitdiffstats
path: root/drivers/vfio
diff options
context:
space:
mode:
authorAlex Williamson <alex.williamson@redhat.com>2015-02-06 16:19:12 -0500
committerAlex Williamson <alex.williamson@redhat.com>2015-02-06 16:19:12 -0500
commitc5e6688752c25434d71920bc969f9fab60353c5e (patch)
tree176ad1355f26bc2542e320471ab1691400c0974c /drivers/vfio
parentbabbf1760970f141eb4021288ce0fb7196bc1a23 (diff)
vfio/type1: Add conditional rescheduling
IOMMU operations can be expensive and it's not very difficult for a user to give us a lot of work to do for a map or unmap operation. Killing a large VM with vfio assigned devices can result in soft lockups and IOMMU tracing shows that we can easily spend 80% of our time with need-resched set. A sprinkling of cond_resched() calls after map and unmap calls has a very tiny effect on performance while resulting in traces with <1% of calls overflowing into need-resched. Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Diffstat (limited to 'drivers/vfio')
-rw-r--r--drivers/vfio/vfio_iommu_type1.c8
1 files changed, 7 insertions, 1 deletions
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 35c90089478e..57d8c37a002b 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -351,8 +351,10 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
351 domain = d = list_first_entry(&iommu->domain_list, 351 domain = d = list_first_entry(&iommu->domain_list,
352 struct vfio_domain, next); 352 struct vfio_domain, next);
353 353
354 list_for_each_entry_continue(d, &iommu->domain_list, next) 354 list_for_each_entry_continue(d, &iommu->domain_list, next) {
355 iommu_unmap(d->domain, dma->iova, dma->size); 355 iommu_unmap(d->domain, dma->iova, dma->size);
356 cond_resched();
357 }
356 358
357 while (iova < end) { 359 while (iova < end) {
358 size_t unmapped, len; 360 size_t unmapped, len;
@@ -384,6 +386,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
384 unmapped >> PAGE_SHIFT, 386 unmapped >> PAGE_SHIFT,
385 dma->prot, false); 387 dma->prot, false);
386 iova += unmapped; 388 iova += unmapped;
389
390 cond_resched();
387 } 391 }
388 392
389 vfio_lock_acct(-unlocked); 393 vfio_lock_acct(-unlocked);
@@ -528,6 +532,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
528 map_try_harder(d, iova, pfn, npage, prot)) 532 map_try_harder(d, iova, pfn, npage, prot))
529 goto unwind; 533 goto unwind;
530 } 534 }
535
536 cond_resched();
531 } 537 }
532 538
533 return 0; 539 return 0;