Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  |  115
 1 file changed, 77 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5efa21c2..68369cf1e318 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -52,6 +52,7 @@
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
+#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -81,6 +82,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
 #endif
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+MODULE_FIRMWARE(FIRMWARE_FIJI);
 
 static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -116,6 +118,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	case CHIP_TONGA:
 		fw_name = FIRMWARE_TONGA;
 		break;
+	case CHIP_FIJI:
+		fw_name = FIRMWARE_FIJI;
+		break;
 	case CHIP_CARRIZO:
 		fw_name = FIRMWARE_CARRIZO;
 		break;
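
The three hunks above wire up Fiji exactly the way Tonga and Carrizo are already handled: a firmware path constant, a MODULE_FIRMWARE() annotation so packaging tools know the module may request the blob, and a switch case selecting the path at sw_init time. The blob is then loaded through the standard request_firmware() interface; a minimal sketch of that flow using the fw_name selected above (not the literal code in this file):

	const struct firmware *fw;
	int r;

	/* asks the firmware loader for amdgpu/fiji_uvd.bin on Fiji */
	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}
	/* ... validate the UVD firmware header, copy it to VRAM ... */
	release_firmware(fw);
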
@@ -283,7 +288,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 		if (handle != 0 && adev->uvd.filp[i] == filp) {
-			struct amdgpu_fence *fence;
+			struct fence *fence;
 
 			amdgpu_uvd_note_usage(adev);
 
@@ -293,8 +298,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 				continue;
 			}
 
-			amdgpu_fence_wait(fence, false);
-			amdgpu_fence_unref(&fence);
+			fence_wait(fence, false);
+			fence_put(fence);
 
 			adev->uvd.filp[i] = NULL;
 			atomic_set(&adev->uvd.handles[i], 0);
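
The two hunks above move the handle-cleanup path from the driver-private amdgpu_fence API to the kernel's generic struct fence interface (linux/fence.h in this era, later renamed dma_fence). A minimal sketch of the generic wait-and-release idiom, with the helper name invented for illustration:

	/* Block until the fence signals, then drop our reference.
	 * fence_wait(f, false) waits uninterruptibly and returns 0 on
	 * success or a negative error; fence_put() drops the reference
	 * and may free the fence if it was the last one. */
	static void wait_and_release(struct fence *f)
	{
		if (fence_wait(f, false))
			DRM_ERROR("fence wait failed!\n");
		fence_put(f);
	}
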
@@ -375,6 +380,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 	unsigned fs_in_mb = width_in_mb * height_in_mb;
 
 	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+	unsigned min_ctx_size = 0;
 
 	image_size = width * height;
 	image_size += image_size / 2;
@@ -466,6 +472,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 
 		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
 		min_dpb_size = image_size * num_dpb_buffer;
+		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
+			       * 16 * num_dpb_buffer + 52 * 1024;
 		break;
 
 	default:
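
For scale, a worked example of the context-buffer formula added above, assuming a hypothetical 3840x2160 HEVC stream whose decode message reports six reference frames (so num_dpb_buffer = 6 + 2 = 8); the stream parameters are illustrative, not taken from this patch:

	unsigned width = 3840, height = 2160, num_dpb_buffer = 8;
	unsigned min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
	/* ((3840 + 255) / 16) = 255, ((2160 + 255) / 16) = 150, so
	 * 255 * 150 * 16 * 8 + 53248 = 4949248 bytes, roughly 4.7 MiB */
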
@@ -486,6 +494,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 
 	buf_sizes[0x1] = dpb_size;
 	buf_sizes[0x2] = image_size;
+	buf_sizes[0x4] = min_ctx_size;
 	return 0;
 }
 
@@ -504,28 +513,25 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 {
 	struct amdgpu_device *adev = ctx->parser->adev;
 	int32_t *msg, msg_type, handle;
-	struct fence *f;
 	void *ptr;
-
-	int i, r;
+	long r;
+	int i;
 
 	if (offset & 0x3F) {
 		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
 		return -EINVAL;
 	}
 
-	f = reservation_object_get_excl(bo->tbo.resv);
-	if (f) {
-		r = amdgpu_fence_wait((struct amdgpu_fence *)f, false);
-		if (r) {
-			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
-			return r;
-		}
+	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+						MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
+		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
+		return r;
 	}
 
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
-		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
 		return r;
 	}
 
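
The rewritten wait also explains why r is widened from int to long: reservation_object_wait_timeout_rcu() returns a signed long carrying the remaining timeout in jiffies on success, 0 on timeout (unreachable here, since MAX_SCHEDULE_TIMEOUT is passed), or a negative error code. A sketch of the calling convention:

	long r;

	/* wait_all = true waits on shared and exclusive fences alike;
	 * intr = false makes the wait uninterruptible */
	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;	/* propagate the error */
	/* r > 0: every fence on the BO has signaled */
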
@@ -628,6 +634,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
 			return -EINVAL;
 		}
 
+	} else if (cmd == 0x206) {
+		if ((end - start) < ctx->buf_sizes[4]) {
+			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
+				  (unsigned)(end - start),
+				  ctx->buf_sizes[4]);
+			return -EINVAL;
+		}
 	} else if ((cmd != 0x100) && (cmd != 0x204)) {
 		DRM_ERROR("invalid UVD command %X!\n", cmd);
 		return -EINVAL;
@@ -755,9 +768,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 	struct amdgpu_uvd_cs_ctx ctx = {};
 	unsigned buf_sizes[] = {
 		[0x00000000] = 2048,
-		[0x00000001] = 32 * 1024 * 1024,
-		[0x00000002] = 2048 * 1152 * 3,
+		[0x00000001] = 0xFFFFFFFF,
+		[0x00000002] = 0xFFFFFFFF,
 		[0x00000003] = 2048,
+		[0x00000004] = 0xFFFFFFFF,
 	};
 	struct amdgpu_ib *ib = &parser->ibs[ib_idx];
 	int r;
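
For context, buf_sizes is indexed by the buffer-usage id that the UVD command stream attaches to each buffer, and the parser rejects any buffer smaller than the minimum recorded for its index. A sketch of the table semantics; the slot labels are descriptive guesses based on how the entries are used elsewhere in this patch, not identifiers from the source:

	unsigned buf_sizes[] = {
		[0x00000000] = 2048,		/* UVD message buffer (fixed) */
		[0x00000001] = 0xFFFFFFFF,	/* DPB; patched in by msg_decode */
		[0x00000002] = 0xFFFFFFFF,	/* decode target; patched in by msg_decode */
		[0x00000003] = 2048,		/* feedback buffer (fixed) */
		[0x00000004] = 0xFFFFFFFF,	/* HEVC context; patched in by msg_decode */
	};
	/* 0xFFFFFFFF is a "not yet known" sentinel: amdgpu_uvd_cs_msg_decode()
	 * overwrites slots 0x1, 0x2 and 0x4 once the decode message is parsed,
	 * as seen in the earlier hunks. */
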
@@ -792,14 +806,24 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 	return 0;
 }
 
+static int amdgpu_uvd_free_job(
+	struct amdgpu_cs_parser *sched_job)
+{
+	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct fence *f = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
 	int i, r;
 
@@ -821,34 +845,49 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 	if (r)
 		goto err;
-
-	r = amdgpu_ib_get(ring, NULL, 64, &ib);
-	if (r)
-		goto err;
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib) {
+		r = -ENOMEM;
+		goto err;
+	}
+	r = amdgpu_ib_get(ring, NULL, 64, ib);
+	if (r)
+		goto err1;
 
 	addr = amdgpu_bo_gpu_offset(bo);
-	ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
-	ib.ptr[1] = addr;
-	ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
-	ib.ptr[3] = addr >> 32;
-	ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
-	ib.ptr[5] = 0;
+	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
+	ib->ptr[1] = addr;
+	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
+	ib->ptr[3] = addr >> 32;
+	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
+	ib->ptr[5] = 0;
 	for (i = 6; i < 16; ++i)
-		ib.ptr[i] = PACKET2(0);
-	ib.length_dw = 16;
+		ib->ptr[i] = PACKET2(0);
+	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_uvd_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
 	if (r)
-		goto err;
-	ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
+		goto err2;
 
-	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
+	ttm_eu_fence_buffer_objects(&ticket, &head, f);
 
-	amdgpu_ib_free(ring->adev, &ib);
+	if (fence)
+		*fence = fence_get(f);
 	amdgpu_bo_unref(&bo);
-	return 0;
+	fence_put(f);
+	if (amdgpu_enable_scheduler)
+		return 0;
 
+	amdgpu_ib_free(ring->adev, ib);
+	kfree(ib);
+	return 0;
+err2:
+	amdgpu_ib_free(ring->adev, ib);
+err1:
+	kfree(ib);
 err:
 	ttm_eu_backoff_reservation(&ticket, &head);
 	return r;
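
The reworked error handling follows the usual kernel goto-ladder pattern: each label undoes exactly what was acquired before the failing step, in reverse order (err2 frees the IB contents, err1 frees the struct, err backs off the TTM reservation). A self-contained sketch of the shape, with submit_ib() as a hypothetical stand-in for the real submission helper:

	int example_submit(struct amdgpu_ring *ring)
	{
		struct amdgpu_ib *ib;
		int r;

		ib = kzalloc(sizeof(*ib), GFP_KERNEL);
		if (!ib)
			return -ENOMEM;		/* nothing acquired yet */

		r = amdgpu_ib_get(ring, NULL, 64, ib);
		if (r)
			goto err_free;		/* only the kzalloc to undo */

		r = submit_ib(ring, ib);	/* hypothetical helper */
		if (r)
			goto err_ib;		/* undo IB get, then kzalloc */

		return 0;

	err_ib:
		amdgpu_ib_free(ring->adev, ib);
	err_free:
		kfree(ib);
		return r;
	}

On the success path, note that the real code above must not free the IB inline when the scheduler is enabled: amdgpu_uvd_free_job() releases it asynchronously once the scheduled job completes, so the inline amdgpu_ib_free()/kfree() pair is only reached with amdgpu_enable_scheduler off.
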
@@ -858,7 +897,7 @@ err:
    crash the vcpu so just try to emit a dummy create/destroy msg to
    avoid this */
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence)
+			      struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -905,7 +944,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;