Diffstat (limited to 'drivers/gpu/drm/amd')
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 121
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h |  10
 2 files changed, 64 insertions(+), 67 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d708970244eb..80b5c453f8c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -263,21 +263,20 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 			return r;
 		}
+	}
 
-		ring = &adev->uvd.inst[j].ring;
-		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq,
-					  1, NULL);
-		if (r != 0) {
-			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
-			return r;
-		}
-
-		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			atomic_set(&adev->uvd.inst[j].handles[i], 0);
-			adev->uvd.inst[j].filp[i] = NULL;
-		}
+	ring = &adev->uvd.inst[0].ring;
+	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+	if (r) {
+		DRM_ERROR("Failed setting up UVD kernel entity.\n");
+		return r;
 	}
+	for (i = 0; i < adev->uvd.max_handles; ++i) {
+		atomic_set(&adev->uvd.handles[i], 0);
+		adev->uvd.filp[i] = NULL;
+	}
+
 	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
 	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
 		adev->uvd.address_64_bit = true;
@@ -306,11 +305,12 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int i, j;
 
+	drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
+				 &adev->uvd.entity);
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);
 
-		drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
-
 		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
 				      &adev->uvd.inst[j].gpu_addr,
 				      (void **)&adev->uvd.inst[j].cpu_addr);
@@ -333,20 +333,20 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 
 	cancel_delayed_work_sync(&adev->uvd.idle_work);
 
+	/* only valid for physical mode */
+	if (adev->asic_type < CHIP_POLARIS10) {
+		for (i = 0; i < adev->uvd.max_handles; ++i)
+			if (atomic_read(&adev->uvd.handles[i]))
+				break;
+
+		if (i == adev->uvd.max_handles)
+			return 0;
+	}
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
 
-		/* only valid for physical mode */
-		if (adev->asic_type < CHIP_POLARIS10) {
-			for (i = 0; i < adev->uvd.max_handles; ++i)
-				if (atomic_read(&adev->uvd.inst[j].handles[i]))
-					break;
-
-			if (i == adev->uvd.max_handles)
-				continue;
-		}
-
 		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
 		ptr = adev->uvd.inst[j].cpu_addr;
 
@@ -398,30 +398,27 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 {
-	struct amdgpu_ring *ring;
-	int i, j, r;
-
-	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-		ring = &adev->uvd.inst[j].ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
+	int i, r;
 
-		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
-			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
-				struct dma_fence *fence;
-
-				r = amdgpu_uvd_get_destroy_msg(ring, handle,
-							       false, &fence);
-				if (r) {
-					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
-					continue;
-				}
+	for (i = 0; i < adev->uvd.max_handles; ++i) {
+		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 
-				dma_fence_wait(fence, false);
-				dma_fence_put(fence);
+		if (handle != 0 && adev->uvd.filp[i] == filp) {
+			struct dma_fence *fence;
 
-				adev->uvd.inst[j].filp[i] = NULL;
-				atomic_set(&adev->uvd.inst[j].handles[i], 0);
+			r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
+						       &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD %d!\n", r);
+				continue;
 			}
+
+			dma_fence_wait(fence, false);
+			dma_fence_put(fence);
+
+			adev->uvd.filp[i] = NULL;
+			atomic_set(&adev->uvd.handles[i], 0);
 		}
 	}
 }
@@ -692,20 +689,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 			     struct amdgpu_bo *bo, unsigned offset)
 {
 	struct amdgpu_device *adev = ctx->parser->adev;
-	uint32_t ip_instance = ctx->parser->ring->me;
 	int32_t *msg, msg_type, handle;
 	void *ptr;
 	long r;
 	int i;
 
 	if (offset & 0x3F) {
-		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
+		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
 		return -EINVAL;
 	}
 
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
-		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
+		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
 		return r;
 	}
 
@@ -715,7 +711,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	handle = msg[2];
 
 	if (handle == 0) {
-		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
+		DRM_ERROR("Invalid UVD handle!\n");
 		return -EINVAL;
 	}
 
@@ -726,18 +722,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* try to alloc a new handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
-				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
+			if (atomic_read(&adev->uvd.handles[i]) == handle) {
+				DRM_ERROR("Handle 0x%x already in use!\n",
+					  handle);
 				return -EINVAL;
 			}
 
-			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
-				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
+			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+				adev->uvd.filp[i] = ctx->parser->filp;
 				return 0;
 			}
 		}
 
-		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
+		DRM_ERROR("No more free UVD handles!\n");
 		return -ENOSPC;
 
 	case 1:
@@ -749,27 +746,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* validate the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
-				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
-					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
+			if (atomic_read(&adev->uvd.handles[i]) == handle) {
+				if (adev->uvd.filp[i] != ctx->parser->filp) {
+					DRM_ERROR("UVD handle collision detected!\n");
 					return -EINVAL;
 				}
 				return 0;
 			}
 		}
 
-		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
+		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
 		return -ENOENT;
 
 	case 2:
 		/* it's a destroy msg, free the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i)
-			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
+			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
 
 	default:
-		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
+		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
 		return -EINVAL;
 	}
 	BUG();
@@ -1071,7 +1068,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		goto err_free;
 
-	r = amdgpu_job_submit(job, &adev->uvd.inst[ring->me].entity,
+	r = amdgpu_job_submit(job, &adev->uvd.entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 	if (r)
 		goto err_free;
@@ -1273,7 +1270,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
 		 * necessarily linear. So we need to count
 		 * all non-zero handles.
 		 */
-		if (atomic_read(&adev->uvd.inst->handles[i]))
+		if (atomic_read(&adev->uvd.handles[i]))
 			used_handles++;
 	}
 
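The common thread in the amdgpu_uvd.c hunks above is the handle bookkeeping: after this change it lives once in adev->uvd.handles[] and adev->uvd.filp[] rather than per instance, while the claim and release pattern itself stays the same. As a rough, standalone illustration of that pattern only (userspace C11 atomics; MAX_HANDLES, claim_handle and release_handle are made-up names for the example, not the driver's API), a minimal sketch:

/*
 * Minimal standalone sketch (not the driver's real types) of the handle
 * bookkeeping shown in the hunks above: one fixed-size array of atomics,
 * claimed with compare-and-swap and released the same way, so no lock is
 * needed around session create/destroy.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16		/* stand-in for adev->uvd.max_handles */

static atomic_uint_fast32_t handles[MAX_HANDLES];

/* Claim a free slot for 'handle'; mirrors the atomic_cmpxchg() in case 0. */
static int claim_handle(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint_fast32_t expected = 0;

		if (atomic_load(&handles[i]) == handle)
			return -1;	/* already in use (-EINVAL in the driver) */
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
			return i;	/* claimed slot i */
	}
	return -1;			/* no free slot (-ENOSPC in the driver) */
}

/* Release every slot holding 'handle'; mirrors the destroy-message path. */
static void release_handle(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint_fast32_t expected = handle;

		atomic_compare_exchange_strong(&handles[i], &expected, 0);
	}
}

int main(void)
{
	int slot = claim_handle(0xCAFE);

	printf("claimed slot %d\n", slot);
	release_handle(0xCAFE);
	return 0;
}

The compare-and-swap claim lets the create and destroy message paths manage handles without taking a lock, which is presumably why the array can be consolidated into the shared struct without adding any extra serialization.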
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index cae3f526216b..66872286ab12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -42,12 +42,9 @@ struct amdgpu_uvd_inst {
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
 	void			*saved_bo;
-	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
-	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct amdgpu_ring	ring;
 	struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src	irq;
-	struct drm_sched_entity	entity;
 	uint32_t		srbm_soft_reset;
 };
 
@@ -56,10 +53,13 @@ struct amdgpu_uvd {
 	unsigned		fw_version;
 	unsigned		max_handles;
 	unsigned		num_enc_rings;
 	uint8_t			num_uvd_inst;
 	bool			address_64_bit;
 	bool			use_ctx_buf;
 	struct amdgpu_uvd_inst	inst[AMDGPU_MAX_UVD_INSTANCES];
+	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
+	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
+	struct drm_sched_entity	entity;
 	struct delayed_work	idle_work;
 };
 
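To make the header change easier to picture, here is a hypothetical, heavily simplified mirror of the new ownership split (illustration only, not the driver's real definitions): per-instance state keeps its ring and firmware BO bookkeeping, while session tracking and the single scheduler entity move up into the parent structure.

/*
 * Illustration only: simplified stand-ins for the structures changed in the
 * header hunks above. Field names are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_INSTANCES 2
#define MAX_HANDLES   16

struct uvd_instance {			/* roughly: amdgpu_uvd_inst after the patch */
	uint64_t gpu_addr;		/* per-instance firmware BO address */
	int ring_id;			/* stand-in for the per-instance ring */
};

struct uvd {				/* roughly: amdgpu_uvd after the patch */
	struct uvd_instance inst[MAX_INSTANCES];
	uint32_t handles[MAX_HANDLES];	/* shared: one handle namespace for all instances */
	void *filp[MAX_HANDLES];	/* shared: owner of each handle */
	int entity_ring;		/* shared: single kernel entity, backed by one ring */
};

int main(void)
{
	struct uvd uvd = { .inst = { { .ring_id = 0 }, { .ring_id = 1 } } };

	/* The one kernel entity is set up against the first instance's ring,
	 * matching the inst[0] reference in amdgpu_uvd_sw_init() above. */
	uvd.entity_ring = uvd.inst[0].ring_id;
	printf("entity backed by ring %d\n", uvd.entity_ring);
	return 0;
}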
