author     Antonios Motakis <a.motakis@virtualopensystems.com>  2013-10-11 12:40:46 -0400
committer  Alex Williamson <alex.williamson@redhat.com>  2013-10-11 12:40:46 -0400
commit     d93b3ac0edb85b1c1e8fe0a065c1e5045783e2f6
tree       0b3d9b1b3c463f7d0e89f041c1783d7ea13d3a1c
parent     d0e639c9e06d44e713170031fe05fb60ebe680af
VFIO: vfio_iommu_type1: fix bug caused by break in nested loop
In vfio_iommu_type1.c there is a bug in vfio_dma_do_map, in the check that
pages are not already mapped. Since the check is done in a for loop nested
within the main loop, breaking out of it does not produce the intended
behavior: if the underlying IOMMU driver returns a non-NULL value, the error
is ignored and mapping the DMA range is attempted anyway, leading to
unpredictable behavior.

This interacts badly with the ARM SMMU driver issue fixed by the patch
submitted under the title "[PATCH 2/2] ARM: SMMU: return NULL on error in
arm_smmu_iova_to_phys". Both fixes are required in order to use the
vfio_iommu_type1 driver with an ARM SMMU.

This patch also refactors the function slightly, to make this kind of bug
less likely.

Signed-off-by: Antonios Motakis <a.motakis@virtualopensystems.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
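For illustration, here is a minimal standalone C sketch of the control-flow
pitfall and of the goto-based cleanup shape the patch adopts. This is not
kernel code: page_already_mapped() is a hypothetical stand-in for the
iommu_iova_to_phys() check, and the printf calls stand in for the actual
map/unwind work.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for the iommu_iova_to_phys() check: pretend
 * the second page of the first chunk is already mapped. */
static bool page_already_mapped(int chunk, int page)
{
	return chunk == 0 && page == 1;
}

int main(void)
{
	int ret = 0;

	/* Buggy shape: 'break' only leaves the inner verification loop,
	 * so the outer loop goes on to map the chunk despite the error. */
	for (int chunk = 0; chunk < 2; chunk++) {
		for (int page = 0; page < 4; page++) {
			if (page_already_mapped(chunk, page)) {
				ret = -1;	/* stands in for -EBUSY */
				break;		/* exits the inner loop only */
			}
		}
		printf("buggy: mapping chunk %d despite ret=%d\n", chunk, ret);
	}

	/* Fixed shape: 'goto' leaves both loops at once and funnels every
	 * error path through a single cleanup label, as the patch does
	 * with its out_unpin/out labels. */
	ret = 0;
	for (int chunk = 0; chunk < 2; chunk++) {
		for (int page = 0; page < 4; page++) {
			if (page_already_mapped(chunk, page)) {
				ret = -1;
				goto out;
			}
		}
		printf("fixed: mapping chunk %d\n", chunk);
	}
	return 0;

out:
	/* single unwind path; cf. vfio_unpin_pages() in the real driver */
	printf("fixed: unwinding, ret=%d\n", ret);
	return ret;
}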
 drivers/vfio/vfio_iommu_type1.c | 40 +++++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index a9807dea3887..4fb7a8f83c8a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -545,6 +545,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	long npage;
 	int ret = 0, prot = 0;
 	uint64_t mask;
+	struct vfio_dma *dma = NULL;
+	unsigned long pfn;
 
 	end = map->iova + map->size;
 
@@ -587,8 +589,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	}
 
 	for (iova = map->iova; iova < end; iova += size, vaddr += size) {
-		struct vfio_dma *dma = NULL;
-		unsigned long pfn;
 		long i;
 
 		/* Pin a contiguous chunk of memory */
@@ -597,16 +597,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		if (npage <= 0) {
 			WARN_ON(!npage);
 			ret = (int)npage;
-			break;
+			goto out;
 		}
 
 		/* Verify pages are not already mapped */
 		for (i = 0; i < npage; i++) {
 			if (iommu_iova_to_phys(iommu->domain,
 					       iova + (i << PAGE_SHIFT))) {
-				vfio_unpin_pages(pfn, npage, prot, true);
 				ret = -EBUSY;
-				break;
+				goto out_unpin;
 			}
 		}
 
@@ -616,8 +615,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		if (ret) {
 			if (ret != -EBUSY ||
 			    map_try_harder(iommu, iova, pfn, npage, prot)) {
-				vfio_unpin_pages(pfn, npage, prot, true);
-				break;
+				goto out_unpin;
 			}
 		}
 
@@ -672,9 +670,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		dma = kzalloc(sizeof(*dma), GFP_KERNEL);
 		if (!dma) {
 			iommu_unmap(iommu->domain, iova, size);
-			vfio_unpin_pages(pfn, npage, prot, true);
 			ret = -ENOMEM;
-			break;
+			goto out_unpin;
 		}
 
 		dma->size = size;
@@ -685,16 +682,21 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		}
 	}
 
-	if (ret) {
-		struct vfio_dma *tmp;
-		iova = map->iova;
-		size = map->size;
-		while ((tmp = vfio_find_dma(iommu, iova, size))) {
-			int r = vfio_remove_dma_overlap(iommu, iova,
-							&size, tmp);
-			if (WARN_ON(r || !size))
-				break;
-		}
+	WARN_ON(ret);
+	mutex_unlock(&iommu->lock);
+	return ret;
+
+out_unpin:
+	vfio_unpin_pages(pfn, npage, prot, true);
+
+out:
+	iova = map->iova;
+	size = map->size;
+	while ((dma = vfio_find_dma(iommu, iova, size))) {
+		int r = vfio_remove_dma_overlap(iommu, iova,
+						&size, dma);
+		if (WARN_ON(r || !size))
+			break;
 	}
 
 	mutex_unlock(&iommu->lock);