author     Alex Williamson <alex.williamson@redhat.com>   2014-05-30 13:35:54 -0400
committer  Alex Williamson <alex.williamson@redhat.com>   2014-05-30 13:35:54 -0400
commit     c8dbca165bb090f926996a572ea2b5b577b34b70
tree       b0796dcde190329789cb56c5b42ce15c3ef71cf7
parent     eb5685f0de4702e44a8b262f1acaea52ef99271a
vfio/iommu_type1: Avoid overflow
Coverity reports use of a tainted scalar as a loop boundary. For the most part, any values passed from userspace for a DMA mapping size, IOVA, or virtual address are valid, with some alignment constraints. The size is ultimately bound by how many pages the user is able to lock, the IOVA is tested by the IOMMU driver when doing a map, and the virtual address needs to pass get_user_pages.

The only problem I can find is that we expect the __u64 user values to fit within our local variables, which might not happen on 32-bit platforms. Add a test for this and return an error on overflow. Also propagate use of the type-correct local variables throughout the function.

The above also points to the 'end' variable, which can be zero if we're operating at the very top of the address space. We try to account for this, but our loop botches it. Rework the loop to use the remaining size as the loop condition rather than comparing the IOVA against 'end'.

Detected by Coverity: CID 714659

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 45
1 file changed, 18 insertions(+), 27 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 6673e7be507f..0734fbe5b651 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -524,7 +524,7 @@ unwind:
 static int vfio_dma_do_map(struct vfio_iommu *iommu,
 			   struct vfio_iommu_type1_dma_map *map)
 {
-	dma_addr_t end, iova;
+	dma_addr_t iova = map->iova;
 	unsigned long vaddr = map->vaddr;
 	size_t size = map->size;
 	long npage;
@@ -533,39 +533,30 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	struct vfio_dma *dma;
 	unsigned long pfn;
 
-	end = map->iova + map->size;
+	/* Verify that none of our __u64 fields overflow */
+	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
+		return -EINVAL;
 
 	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
 
+	WARN_ON(mask & PAGE_MASK);
+
 	/* READ/WRITE from device perspective */
 	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
 		prot |= IOMMU_WRITE;
 	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
 		prot |= IOMMU_READ;
 
-	if (!prot)
-		return -EINVAL; /* No READ/WRITE? */
-
-	if (vaddr & mask)
-		return -EINVAL;
-	if (map->iova & mask)
-		return -EINVAL;
-	if (!map->size || map->size & mask)
-		return -EINVAL;
-
-	WARN_ON(mask & PAGE_MASK);
-
-	/* Don't allow IOVA wrap */
-	if (end && end < map->iova)
+	if (!prot || !size || (size | iova | vaddr) & mask)
 		return -EINVAL;
 
-	/* Don't allow virtual address wrap */
-	if (vaddr + map->size && vaddr + map->size < vaddr)
+	/* Don't allow IOVA or virtual address wrap */
+	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
 		return -EINVAL;
 
 	mutex_lock(&iommu->lock);
 
-	if (vfio_find_dma(iommu, map->iova, map->size)) {
+	if (vfio_find_dma(iommu, iova, size)) {
 		mutex_unlock(&iommu->lock);
 		return -EEXIST;
 	}
@@ -576,17 +567,17 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		return -ENOMEM;
 	}
 
-	dma->iova = map->iova;
-	dma->vaddr = map->vaddr;
+	dma->iova = iova;
+	dma->vaddr = vaddr;
 	dma->prot = prot;
 
 	/* Insert zero-sized and grow as we map chunks of it */
 	vfio_link_dma(iommu, dma);
 
-	for (iova = map->iova; iova < end; iova += size, vaddr += size) {
+	while (size) {
 		/* Pin a contiguous chunk of memory */
-		npage = vfio_pin_pages(vaddr, (end - iova) >> PAGE_SHIFT,
-				       prot, &pfn);
+		npage = vfio_pin_pages(vaddr + dma->size,
				       size >> PAGE_SHIFT, prot, &pfn);
 		if (npage <= 0) {
 			WARN_ON(!npage);
 			ret = (int)npage;
@@ -594,14 +585,14 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		}
 
 		/* Map it! */
-		ret = vfio_iommu_map(iommu, iova, pfn, npage, prot);
+		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
 		if (ret) {
 			vfio_unpin_pages(pfn, npage, prot, true);
 			break;
 		}
 
-		size = npage << PAGE_SHIFT;
-		dma->size += size;
+		size -= npage << PAGE_SHIFT;
+		dma->size += npage << PAGE_SHIFT;
 	}
 
 	if (ret)
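
For illustration only, a minimal standalone userspace sketch of the overflow test this patch adds: copy each __u64 field into the narrower kernel-side type, then compare the copy back against the original; if the narrow type dropped high bits, reject the request with -EINVAL instead of mapping a silently truncated size. The demo_* typedefs and check_map_args() below are hypothetical stand-ins for a 32-bit size_t, dma_addr_t, and unsigned long; this is not kernel code.

/*
 * Sketch of the round-trip truncation check on an assumed 32-bit platform.
 * Build: cc -o demo demo.c
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical 32-bit stand-ins for size_t, dma_addr_t, unsigned long. */
typedef uint32_t demo_size_t;
typedef uint32_t demo_dma_addr_t;
typedef uint32_t demo_ulong_t;

static int check_map_args(uint64_t u_size, uint64_t u_iova, uint64_t u_vaddr)
{
	demo_size_t     size  = u_size;   /* may truncate on 32-bit */
	demo_dma_addr_t iova  = u_iova;   /* may truncate on 32-bit */
	demo_ulong_t    vaddr = u_vaddr;  /* may truncate on 32-bit */

	/* Same idea as the patch: reject if any field did not survive the copy. */
	if (u_size != size || u_iova != iova || u_vaddr != vaddr)
		return -1;	/* stands in for -EINVAL */
	return 0;
}

int main(void)
{
	/* 1 MiB mapping fits in 32 bits: accepted (prints 0). */
	printf("small map: %d\n", check_map_args(0x100000, 0x1000, 0x2000));
	/* 8 GiB size overflows a 32-bit size_t: rejected (prints -1). */
	printf("8 GiB map: %d\n", check_map_args(8ULL << 30, 0x1000, 0x2000));
	return 0;
}

On a 64-bit build all three locals are wide enough that the comparison never fails, which is why, per the commit message, the check only matters on 32-bit platforms.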