about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h3
-rw-r--r--include/uapi/drm/amdgpu_drm.h1
5 files changed, 124 insertions, 8 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0240f108f90e..b311b389bd5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -507,14 +507,16 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
507 * amdgpu_gem_va_update_vm -update the bo_va in its VM 507 * amdgpu_gem_va_update_vm -update the bo_va in its VM
508 * 508 *
509 * @adev: amdgpu_device pointer 509 * @adev: amdgpu_device pointer
510 * @vm: vm to update
510 * @bo_va: bo_va to update 511 * @bo_va: bo_va to update
511 * @list: validation list 512 * @list: validation list
512 * @operation: map or unmap 513 * @operation: map, unmap or clear
513 * 514 *
514 * Update the bo_va directly after setting its address. Errors are not 515 * Update the bo_va directly after setting its address. Errors are not
515 * vital here, so they are not reported back to userspace. 516 * vital here, so they are not reported back to userspace.
516 */ 517 */
517static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, 518static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
519 struct amdgpu_vm *vm,
518 struct amdgpu_bo_va *bo_va, 520 struct amdgpu_bo_va *bo_va,
519 struct list_head *list, 521 struct list_head *list,
520 uint32_t operation) 522 uint32_t operation)
@@ -529,16 +531,16 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
529 goto error; 531 goto error;
530 } 532 }
531 533
532 r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check, 534 r = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_va_check,
533 NULL); 535 NULL);
534 if (r) 536 if (r)
535 goto error; 537 goto error;
536 538
537 r = amdgpu_vm_update_page_directory(adev, bo_va->vm); 539 r = amdgpu_vm_update_page_directory(adev, vm);
538 if (r) 540 if (r)
539 goto error; 541 goto error;
540 542
541 r = amdgpu_vm_clear_freed(adev, bo_va->vm); 543 r = amdgpu_vm_clear_freed(adev, vm);
542 if (r) 544 if (r)
543 goto error; 545 goto error;
544 546
@@ -592,6 +594,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
592 switch (args->operation) { 594 switch (args->operation) {
593 case AMDGPU_VA_OP_MAP: 595 case AMDGPU_VA_OP_MAP:
594 case AMDGPU_VA_OP_UNMAP: 596 case AMDGPU_VA_OP_UNMAP:
597 case AMDGPU_VA_OP_CLEAR:
595 break; 598 break;
596 default: 599 default:
597 dev_err(&dev->pdev->dev, "unsupported operation %d\n", 600 dev_err(&dev->pdev->dev, "unsupported operation %d\n",
@@ -600,7 +603,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
600 } 603 }
601 604
602 INIT_LIST_HEAD(&list); 605 INIT_LIST_HEAD(&list);
603 if (!(args->flags & AMDGPU_VM_PAGE_PRT)) { 606 if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
607 !(args->flags & AMDGPU_VM_PAGE_PRT)) {
604 gobj = drm_gem_object_lookup(filp, args->handle); 608 gobj = drm_gem_object_lookup(filp, args->handle);
605 if (gobj == NULL) 609 if (gobj == NULL)
606 return -ENOENT; 610 return -ENOENT;
@@ -625,8 +629,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
625 r = -ENOENT; 629 r = -ENOENT;
626 goto error_backoff; 630 goto error_backoff;
627 } 631 }
628 } else { 632 } else if (args->operation != AMDGPU_VA_OP_CLEAR) {
629 bo_va = fpriv->prt_va; 633 bo_va = fpriv->prt_va;
634 } else {
635 bo_va = NULL;
630 } 636 }
631 637
632 switch (args->operation) { 638 switch (args->operation) {
@@ -644,11 +650,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
644 case AMDGPU_VA_OP_UNMAP: 650 case AMDGPU_VA_OP_UNMAP:
645 r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address); 651 r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
646 break; 652 break;
653
654 case AMDGPU_VA_OP_CLEAR:
655 r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
656 args->va_address,
657 args->map_size);
658 break;
647 default: 659 default:
648 break; 660 break;
649 } 661 }
650 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug) 662 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
651 amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation); 663 amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
664 args->operation);
652 665
653error_backoff: 666error_backoff:
654 ttm_eu_backoff_reservation(&ticket, &list); 667 ttm_eu_backoff_reservation(&ticket, &list);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 08ccb3d34b21..3e955190f013 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -188,7 +188,7 @@ TRACE_EVENT(amdgpu_vm_bo_map,
188 ), 188 ),
189 189
190 TP_fast_assign( 190 TP_fast_assign(
191 __entry->bo = bo_va->bo; 191 __entry->bo = bo_va ? bo_va->bo : NULL;
192 __entry->start = mapping->it.start; 192 __entry->start = mapping->it.start;
193 __entry->last = mapping->it.last; 193 __entry->last = mapping->it.last;
194 __entry->offset = mapping->offset; 194 __entry->offset = mapping->offset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 296e985d0b65..b67e94e25cfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1613,6 +1613,105 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1613} 1613}
1614 1614
1615/** 1615/**
1616 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1617 *
1618 * @adev: amdgpu_device pointer
1619 * @vm: VM structure to use
1620 * @saddr: start of the range
1621 * @size: size of the range
1622 *
1623 * Remove all mappings in a range, split them as appropriate.
1624 * Returns 0 for success, error for failure.
1625 */
1626int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1627 struct amdgpu_vm *vm,
1628 uint64_t saddr, uint64_t size)
1629{
1630 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1631 struct interval_tree_node *it;
1632 LIST_HEAD(removed);
1633 uint64_t eaddr;
1634
1635 eaddr = saddr + size - 1;
1636 saddr /= AMDGPU_GPU_PAGE_SIZE;
1637 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1638
1639 /* Allocate all the needed memory */
1640 before = kzalloc(sizeof(*before), GFP_KERNEL);
1641 if (!before)
1642 return -ENOMEM;
1643
1644 after = kzalloc(sizeof(*after), GFP_KERNEL);
1645 if (!after) {
1646 kfree(before);
1647 return -ENOMEM;
1648 }
1649
1650 /* Now gather all removed mappings */
1651 it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1652 while (it) {
1653 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1654 it = interval_tree_iter_next(it, saddr, eaddr);
1655
1656 /* Remember mapping split at the start */
1657 if (tmp->it.start < saddr) {
1658					before->it.start = tmp->it.start;
1659 before->it.last = saddr - 1;
1660 before->offset = tmp->offset;
1661 before->flags = tmp->flags;
1662 list_add(&before->list, &tmp->list);
1663 }
1664
1665 /* Remember mapping split at the end */
1666 if (tmp->it.last > eaddr) {
1667 after->it.start = eaddr + 1;
1668 after->it.last = tmp->it.last;
1669 after->offset = tmp->offset;
1670 after->offset += after->it.start - tmp->it.start;
1671 after->flags = tmp->flags;
1672 list_add(&after->list, &tmp->list);
1673 }
1674
1675 list_del(&tmp->list);
1676 list_add(&tmp->list, &removed);
1677 }
1678
1679 /* And free them up */
1680 list_for_each_entry_safe(tmp, next, &removed, list) {
1681 interval_tree_remove(&tmp->it, &vm->va);
1682 list_del(&tmp->list);
1683
1684 if (tmp->it.start < saddr)
1685 tmp->it.start = saddr;
1686 if (tmp->it.last > eaddr)
1687 tmp->it.last = eaddr;
1688
1689 list_add(&tmp->list, &vm->freed);
1690 trace_amdgpu_vm_bo_unmap(NULL, tmp);
1691 }
1692
1693	/* Insert partial mapping before the range */
1694 if (before->it.start != before->it.last) {
1695 interval_tree_insert(&before->it, &vm->va);
1696 if (before->flags & AMDGPU_PTE_PRT)
1697 amdgpu_vm_prt_get(adev);
1698 } else {
1699 kfree(before);
1700 }
1701
1702 /* Insert partial mapping after the range */
1703 if (after->it.start != after->it.last) {
1704 interval_tree_insert(&after->it, &vm->va);
1705 if (after->flags & AMDGPU_PTE_PRT)
1706 amdgpu_vm_prt_get(adev);
1707 } else {
1708 kfree(after);
1709 }
1710
1711 return 0;
1712}
1713
1714/**
1616 * amdgpu_vm_bo_rmv - remove a bo to a specific vm 1715 * amdgpu_vm_bo_rmv - remove a bo to a specific vm
1617 * 1716 *
1618 * @adev: amdgpu_device pointer 1717 * @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 1e5a3b2c7927..95fe47733b7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -210,6 +210,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
210int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 210int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
211 struct amdgpu_bo_va *bo_va, 211 struct amdgpu_bo_va *bo_va,
212 uint64_t addr); 212 uint64_t addr);
213int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
214 struct amdgpu_vm *vm,
215 uint64_t saddr, uint64_t size);
213void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 216void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
214 struct amdgpu_bo_va *bo_va); 217 struct amdgpu_bo_va *bo_va);
215 218
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 2c30e324cb12..199f1b46fd2c 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -350,6 +350,7 @@ struct drm_amdgpu_gem_op {
350 350
351#define AMDGPU_VA_OP_MAP 1 351#define AMDGPU_VA_OP_MAP 1
352#define AMDGPU_VA_OP_UNMAP 2 352#define AMDGPU_VA_OP_UNMAP 2
353#define AMDGPU_VA_OP_CLEAR 3
353 354
354/* Delay the page table update till the next CS */ 355/* Delay the page table update till the next CS */
355#define AMDGPU_VM_DELAY_UPDATE (1 << 0) 356#define AMDGPU_VM_DELAY_UPDATE (1 << 0)