 drivers/vfio/vfio_iommu_type1.c | 77 ++++++++++++++++++++++++++++++++++++++++++-----------------------------------
 1 file changed, 42 insertions(+), 35 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 8a2be4e40f22..98231d10890c 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -370,6 +370,9 @@ static int vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
 	struct vfio_dma *split;
 	int ret;
 
+	if (!*size)
+		return 0;
+
 	/*
 	 * Existing dma region is completely covered, unmap all. This is
 	 * the likely case since userspace tends to map and unmap buffers
@@ -411,7 +414,9 @@ static int vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
 		dma->vaddr += overlap;
 		dma->size -= overlap;
 		vfio_insert_dma(iommu, dma);
-	}
+	} else
+		kfree(dma);
+
 	*size = overlap;
 	return 0;
 }
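
Note on the hunk above: when the low-address overlap consumes the entire region there is no survivor to reinsert, and the old code simply fell through with the already-unlinked vfio_dma still allocated; the new else branch frees it. A userspace sketch of the shrink-or-free pattern, with illustrative names (the kernel code also unpins pages and reinserts the survivor into an rbtree):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct range {
	uint64_t iova, vaddr, size;
};

/* Shrink-or-free: trim 'overlap' bytes off the front of a range
 * already unlinked from its container; free it when nothing is
 * left instead of dropping the pointer. */
static struct range *trim_front(struct range *r, uint64_t overlap)
{
	if (overlap < r->size) {
		r->iova += overlap;
		r->vaddr += overlap;
		r->size -= overlap;
		return r;	/* caller reinserts the survivor */
	}
	free(r);		/* fully consumed: this was the missed free */
	return NULL;
}

int main(void)
{
	struct range *r = malloc(sizeof(*r));

	r->iova = 0x1000; r->vaddr = 0x7f0000001000; r->size = 0x4000;
	r = trim_front(r, 0x4000);	/* consumes everything */
	printf("survivor: %p\n", (void *)r);
	return 0;
}
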
@@ -425,48 +430,41 @@ static int vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
 		if (ret)
 			return ret;
 
-		/*
-		 * We may have unmapped the entire vfio_dma if the user is
-		 * trying to unmap a sub-region of what was originally
-		 * mapped. If anything left, we can resize in place since
-		 * iova is unchanged.
-		 */
-		if (overlap < dma->size)
-			dma->size -= overlap;
-		else
-			vfio_remove_dma(iommu, dma);
-
+		dma->size -= overlap;
 		*size = overlap;
 		return 0;
 	}
 
 	/* Split existing */
+	split = kzalloc(sizeof(*split), GFP_KERNEL);
+	if (!split)
+		return -ENOMEM;
+
 	offset = start - dma->iova;
 
 	ret = vfio_unmap_unpin(iommu, dma, start, size);
 	if (ret)
 		return ret;
 
-	WARN_ON(!*size);
+	if (!*size) {
+		kfree(split);
+		return -EINVAL;
+	}
+
 	tmp = dma->size;
 
-	/*
-	 * Resize the lower vfio_dma in place, insert new for remaining
-	 * upper segment.
-	 */
+	/* Resize the lower vfio_dma in place, before the below insert */
 	dma->size = offset;
 
-	if (offset + *size < tmp) {
-		split = kzalloc(sizeof(*split), GFP_KERNEL);
-		if (!split)
-			return -ENOMEM;
-
+	/* Insert new for remainder, assuming it didn't all get unmapped */
+	if (likely(offset + *size < tmp)) {
 		split->size = tmp - offset - *size;
 		split->iova = dma->iova + offset + *size;
 		split->vaddr = dma->vaddr + offset + *size;
 		split->prot = dma->prot;
 		vfio_insert_dma(iommu, split);
-	}
+	} else
+		kfree(split);
 
 	return 0;
 }
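
Two fixes land in the split path above: `split` is now allocated before vfio_unmap_unpin(), so an allocation failure is reported before any pages have been unpinned, and a zero-size unpin result becomes a hard -EINVAL instead of a WARN_ON that pressed on with stale state. For orientation, a self-contained model of the four-way case analysis this function performs; the struct and names are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct vfio_dma: an iova-keyed range. */
struct range {
	uint64_t iova, size;
};

/* Mirror the four-way case analysis in vfio_remove_dma_overlap(). */
static const char *classify(const struct range *r,
			    uint64_t start, uint64_t size)
{
	if (start <= r->iova && start + size >= r->iova + r->size)
		return "covered: unmap all, remove, free";
	if (start <= r->iova)
		return "low overlap: shift iova/vaddr, shrink, reinsert";
	if (start + size >= r->iova + r->size)
		return "high overlap: shrink in place";
	return "interior: split into lower and upper ranges";
}

int main(void)
{
	struct range r = { .iova = 0x1000, .size = 0x4000 };

	puts(classify(&r, 0x0000, 0x8000));	/* covered */
	puts(classify(&r, 0x0000, 0x2000));	/* low overlap */
	puts(classify(&r, 0x4000, 0x2000));	/* high overlap */
	puts(classify(&r, 0x2000, 0x1000));	/* interior split */
	return 0;
}
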
@@ -483,7 +481,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 
 	if (unmap->iova & mask)
 		return -EINVAL;
-	if (unmap->size & mask)
+	if (!unmap->size || unmap->size & mask)
 		return -EINVAL;
 
 	WARN_ON(mask & PAGE_MASK);
@@ -493,7 +491,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
 		size = unmap->size;
 		ret = vfio_remove_dma_overlap(iommu, unmap->iova, &size, dma);
-		if (ret)
+		if (ret || !size)
 			break;
 		unmapped += size;
 	}
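
These two changes cooperate: zero-length unmap requests are rejected up front, and the loop also stops if vfio_remove_dma_overlap() reports zero progress, since vfio_find_dma() would otherwise keep returning the same entry forever. From userspace the new check surfaces as EINVAL on a zero-size VFIO_IOMMU_UNMAP_DMA. A hedged caller example (error handling abbreviated; `container` is assumed to be a VFIO container fd already configured with the type1 IOMMU):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Unmap [iova, iova + size); size == 0 now fails with EINVAL. */
static int unmap_range(int container, __u64 iova, __u64 size)
{
	struct vfio_iommu_type1_dma_unmap unmap = {
		.argsz = sizeof(unmap),
		.iova = iova,
		.size = size,
	};

	if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap) < 0) {
		fprintf(stderr, "unmap: %s\n", strerror(errno));
		return -errno;
	}
	return 0;
}
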
@@ -635,7 +633,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		if (tmp && tmp->prot == prot &&
 		    tmp->vaddr + tmp->size == vaddr) {
 			tmp->size += size;
-
 			iova = tmp->iova;
 			size = tmp->size;
 			vaddr = tmp->vaddr;
@@ -643,19 +640,28 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		}
 	}
 
-	/* Check if we abut a region above - nothing above ~0 + 1 */
+	/*
+	 * Check if we abut a region above - nothing above ~0 + 1.
+	 * If we abut above and below, remove and free. If only
+	 * abut above, remove, modify, reinsert.
+	 */
 	if (likely(iova + size)) {
 		struct vfio_dma *tmp;
-
 		tmp = vfio_find_dma(iommu, iova + size, 1);
 		if (tmp && tmp->prot == prot &&
 		    tmp->vaddr == vaddr + size) {
 			vfio_remove_dma(iommu, tmp);
-			if (dma)
+			if (dma) {
 				dma->size += tmp->size;
-			else
+				kfree(tmp);
+			} else {
 				size += tmp->size;
-			kfree(tmp);
+				tmp->size = size;
+				tmp->iova = iova;
+				tmp->vaddr = vaddr;
+				vfio_insert_dma(iommu, tmp);
+				dma = tmp;
+			}
 		}
 	}
 
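The new comment spells out the two outcomes when the mapping abuts a region above: if a lower merge already produced `dma`, the neighbor is absorbed and freed; otherwise the neighbor's struct is reused, grown, re-keyed to the new iova/vaddr, and reinserted, so the function ends up holding a tracking struct either way without a fresh allocation. A userspace model of that decision, names illustrative (the kernel version also removes and reinserts rbtree nodes):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct range {
	uint64_t iova, vaddr, size;
};

/*
 * Model of the "abut above" outcome: 'below' is the already-merged
 * lower region (or NULL); 'above' starts exactly at iova + *size.
 * Returns the struct that tracks the whole span afterwards.
 */
static struct range *merge_above(struct range *below, struct range *above,
				 uint64_t iova, uint64_t vaddr,
				 uint64_t *size)
{
	if (below) {			/* absorb the neighbor, free it */
		below->size += above->size;
		free(above);
		return below;
	}
	*size += above->size;		/* reuse the neighbor's struct */
	above->size = *size;
	above->iova = iova;
	above->vaddr = vaddr;
	return above;
}

int main(void)
{
	struct range *above = malloc(sizeof(*above));
	uint64_t size = 0x2000;

	above->iova = 0x3000; above->vaddr = 0x13000; above->size = 0x1000;
	struct range *dma = merge_above(NULL, above, 0x1000, 0x11000, &size);
	printf("tracked: iova=%#llx size=%#llx\n",
	       (unsigned long long)dma->iova, (unsigned long long)dma->size);
	free(dma);
	return 0;
}
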
@@ -681,11 +687,10 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		iova = map->iova;
 		size = map->size;
 		while ((tmp = vfio_find_dma(iommu, iova, size))) {
-			if (vfio_remove_dma_overlap(iommu, iova, &size, tmp)) {
-				pr_warn("%s: Error rolling back failed map\n",
-					__func__);
-				break;
-			}
+			int r = vfio_remove_dma_overlap(iommu, iova,
+							&size, tmp);
+			if (WARN_ON(r || !size))
+				break;
 		}
 	}
 
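In the rollback path above, the old code broke only on a hard error, so a zero-byte removal could leave it spinning on the same entry; WARN_ON(r || !size) now both logs and guarantees the loop either makes progress or exits. A generic sketch of the progress-guarded unwind; remove_overlap() is a hypothetical stand-in for vfio_remove_dma_overlap(), not a kernel API:

#include <stdio.h>

/*
 * Hypothetical stand-in: *size is the span the caller wants gone on
 * entry and the amount actually removed on exit. This stub removes
 * at most one 4K page per call so the loop visibly makes progress.
 */
static int remove_overlap(unsigned long iova, unsigned long *size)
{
	(void)iova;
	if (*size > 0x1000)
		*size = 0x1000;
	return 0;
}

/* Progress-guarded unwind: an error or a zero-byte removal ends the
 * loop (the kernel wraps that condition in WARN_ON()). */
static void roll_back(unsigned long iova, unsigned long size)
{
	while (size) {
		unsigned long chunk = size;

		if (remove_overlap(iova, &chunk) || !chunk) {
			fprintf(stderr, "rollback stalled\n");
			break;
		}
		iova += chunk;
		size -= chunk;
	}
}

int main(void)
{
	roll_back(0x100000, 0x4000);
	return 0;
}
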
@@ -813,6 +818,8 @@ static void vfio_iommu_type1_release(void *iommu_data)
 		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
 		size_t size = dma->size;
 		vfio_remove_dma_overlap(iommu, dma->iova, &size, dma);
+		if (WARN_ON(!size))
+			break;
 	}
 
 	iommu_domain_free(iommu->domain);
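
Teardown gets the same guard: with the new early return for !*size, a zero-sized entry would never leave the rbtree and the tree walk would hand back the same node forever, so the loop WARN()s and breaks instead. A small model of the idea, using a list in place of the rbtree (names illustrative, allocation checks omitted):

#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	unsigned long size;
};

/* Release loop: a zero-sized entry can never be removed (the removal
 * helper returns immediately), so detect it and break rather than
 * spin; the kernel WARN_ON()s at this point. */
static void release_all(struct entry **head)
{
	while (*head) {
		struct entry *e = *head;

		if (!e->size) {
			fprintf(stderr, "stuck zero-sized entry\n");
			break;
		}
		*head = e->next;
		free(e);
	}
}

int main(void)
{
	struct entry *b = malloc(sizeof(*b));
	struct entry *a = malloc(sizeof(*a));

	b->next = NULL;  b->size = 0x2000;
	a->next = b;     a->size = 0x1000;
	release_all(&a);
	return 0;
}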