diff options
author | Junwei Zhang <Jerry.Zhang@amd.com> | 2017-01-16 00:59:01 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-03-29 23:52:56 -0400 |
commit | b85891bd6d1bf887b3398f4c44b7a30b37f4485e (patch) | |
tree | cdc0d0f596927a2c869866bcbfd29007e5ce44d1 /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |
parent | 284710fa6c3a5fddbc0f8c6b3a07861a312c18d2 (diff) |
drm/amdgpu: IOCTL interface for PRT support v4
Till GFX8 we can only enable PRT support globally, but with the next hardware
generation we can do this on a per page basis.
Keep the interface consistent by adding PRT mappings and enable
support globally on current hardware when the first mapping is made.
v2: disable PRT support delayed and on all error paths
v3: PRT and other permissions are mutually exclusive,
PRT mappings don't need a BO.
v4: update PRT mappings during CS as well, make va_flags 64bit
Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 62 |
1 files changed, 38 insertions, 24 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 106cf83c2e6b..3c22656aa1bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -553,6 +553,12 @@ error: | |||
553 | int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | 553 | int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, |
554 | struct drm_file *filp) | 554 | struct drm_file *filp) |
555 | { | 555 | { |
556 | const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE | | ||
557 | AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | | ||
558 | AMDGPU_VM_PAGE_EXECUTABLE; | ||
559 | const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE | | ||
560 | AMDGPU_VM_PAGE_PRT; | ||
561 | |||
556 | struct drm_amdgpu_gem_va *args = data; | 562 | struct drm_amdgpu_gem_va *args = data; |
557 | struct drm_gem_object *gobj; | 563 | struct drm_gem_object *gobj; |
558 | struct amdgpu_device *adev = dev->dev_private; | 564 | struct amdgpu_device *adev = dev->dev_private; |
@@ -563,7 +569,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
563 | struct ttm_validate_buffer tv; | 569 | struct ttm_validate_buffer tv; |
564 | struct ww_acquire_ctx ticket; | 570 | struct ww_acquire_ctx ticket; |
565 | struct list_head list; | 571 | struct list_head list; |
566 | uint32_t invalid_flags, va_flags = 0; | 572 | uint64_t va_flags = 0; |
567 | int r = 0; | 573 | int r = 0; |
568 | 574 | ||
569 | if (!adev->vm_manager.enabled) | 575 | if (!adev->vm_manager.enabled) |
@@ -577,11 +583,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
577 | return -EINVAL; | 583 | return -EINVAL; |
578 | } | 584 | } |
579 | 585 | ||
580 | invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE | | 586 | if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) { |
581 | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE); | 587 | dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n", |
582 | if ((args->flags & invalid_flags)) { | 588 | args->flags); |
583 | dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n", | ||
584 | args->flags, invalid_flags); | ||
585 | return -EINVAL; | 589 | return -EINVAL; |
586 | } | 590 | } |
587 | 591 | ||
@@ -595,28 +599,34 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
595 | return -EINVAL; | 599 | return -EINVAL; |
596 | } | 600 | } |
597 | 601 | ||
598 | gobj = drm_gem_object_lookup(filp, args->handle); | ||
599 | if (gobj == NULL) | ||
600 | return -ENOENT; | ||
601 | abo = gem_to_amdgpu_bo(gobj); | ||
602 | INIT_LIST_HEAD(&list); | 602 | INIT_LIST_HEAD(&list); |
603 | tv.bo = &abo->tbo; | 603 | if (!(args->flags & AMDGPU_VM_PAGE_PRT)) { |
604 | tv.shared = false; | 604 | gobj = drm_gem_object_lookup(filp, args->handle); |
605 | list_add(&tv.head, &list); | 605 | if (gobj == NULL) |
606 | return -ENOENT; | ||
607 | abo = gem_to_amdgpu_bo(gobj); | ||
608 | tv.bo = &abo->tbo; | ||
609 | tv.shared = false; | ||
610 | list_add(&tv.head, &list); | ||
611 | } else { | ||
612 | gobj = NULL; | ||
613 | abo = NULL; | ||
614 | } | ||
606 | 615 | ||
607 | amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); | 616 | amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); |
608 | 617 | ||
609 | r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); | 618 | r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); |
610 | if (r) { | 619 | if (r) |
611 | drm_gem_object_unreference_unlocked(gobj); | 620 | goto error_unref; |
612 | return r; | ||
613 | } | ||
614 | 621 | ||
615 | bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo); | 622 | if (abo) { |
616 | if (!bo_va) { | 623 | bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo); |
617 | ttm_eu_backoff_reservation(&ticket, &list); | 624 | if (!bo_va) { |
618 | drm_gem_object_unreference_unlocked(gobj); | 625 | r = -ENOENT; |
619 | return -ENOENT; | 626 | goto error_backoff; |
627 | } | ||
628 | } else { | ||
629 | bo_va = fpriv->prt_va; | ||
620 | } | 630 | } |
621 | 631 | ||
622 | switch (args->operation) { | 632 | switch (args->operation) { |
@@ -627,6 +637,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
627 | va_flags |= AMDGPU_PTE_WRITEABLE; | 637 | va_flags |= AMDGPU_PTE_WRITEABLE; |
628 | if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE) | 638 | if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE) |
629 | va_flags |= AMDGPU_PTE_EXECUTABLE; | 639 | va_flags |= AMDGPU_PTE_EXECUTABLE; |
640 | if (args->flags & AMDGPU_VM_PAGE_PRT) | ||
641 | va_flags |= AMDGPU_PTE_PRT; | ||
630 | r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, | 642 | r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, |
631 | args->offset_in_bo, args->map_size, | 643 | args->offset_in_bo, args->map_size, |
632 | va_flags); | 644 | va_flags); |
@@ -637,11 +649,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
637 | default: | 649 | default: |
638 | break; | 650 | break; |
639 | } | 651 | } |
640 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && | 652 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug) |
641 | !amdgpu_vm_debug) | ||
642 | amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation); | 653 | amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation); |
654 | |||
655 | error_backoff: | ||
643 | ttm_eu_backoff_reservation(&ticket, &list); | 656 | ttm_eu_backoff_reservation(&ticket, &list); |
644 | 657 | ||
658 | error_unref: | ||
645 | drm_gem_object_unreference_unlocked(gobj); | 659 | drm_gem_object_unreference_unlocked(gobj); |
646 | return r; | 660 | return r; |
647 | } | 661 | } |