author		James Zhu <James.Zhu@amd.com>	2018-05-15 15:31:24 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2018-05-18 17:08:12 -0400
commit		10dd74eac4dba963bfa97f5092040aa75ff742d6 (patch)
tree		b93a33adbbe061247ae98fb024b6100ad86407d9 /drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
parent		2bb795f5ba9cd676536858a978b9df06f473af88 (diff)
drm/amdgpu/vg20: Restructure uvd.inst to support multiple instances
Vega20 has dual UVD, so multiple-instance support needs to be added for uvd.
Restructure uvd.inst into an array, replacing uvd.inst-> accesses with indexed
accesses (uvd.inst[0] for the existing single-instance paths).
Repurpose amdgpu_ring::me as the instance index, initialized to 0.
There are no logic changes here.
Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
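
For readers skimming the diff below: the change converts the singleton uvd.inst into a per-instance array and loops over adev->uvd.num_uvd_inst wherever the driver touched instance state. A minimal, self-contained sketch of that pattern follows; the struct and helper names here are simplified stand-ins for illustration, not the driver's real definitions:

	/* Sketch of the singleton-to-array restructuring; stand-in types only. */
	#include <stdlib.h>

	#define UVD_MAX_INST 2			/* Vega20: dual UVD */

	struct uvd_inst {			/* per-instance state (stand-in) */
		void *vcpu_bo;
		void *saved_bo;
	};

	struct uvd {
		unsigned num_uvd_inst;		/* 1 on single-UVD ASICs, 2 on Vega20 */
		struct uvd_inst inst[UVD_MAX_INST];	/* was: one implicit instance */
	};

	/* Before: uvd->inst->saved_bo touched instance 0 implicitly.
	 * After:  every consumer iterates and indexes explicitly. */
	static void uvd_free_saved(struct uvd *uvd)
	{
		unsigned j;

		for (j = 0; j < uvd->num_uvd_inst; ++j) {
			free(uvd->inst[j].saved_bo);	/* the kernel code uses kfree() */
			uvd->inst[j].saved_bo = NULL;
		}
	}
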
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c	229
1 file changed, 121 insertions(+), 108 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 02683a039a98..e961492d357a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
 	unsigned version_major, version_minor, family_id;
-	int i, r;
+	int i, j, r;
 
 	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
 
@@ -236,28 +236,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
-	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo,
-				    &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
-		return r;
-	}
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 
-	ring = &adev->uvd.inst->ring;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity,
-				  rq, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up UVD run queue.\n");
-		return r;
-	}
+		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
+					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
+		if (r) {
+			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
+			return r;
+		}
 
-	for (i = 0; i < adev->uvd.max_handles; ++i) {
-		atomic_set(&adev->uvd.inst->handles[i], 0);
-		adev->uvd.inst->filp[i] = NULL;
-	}
+		ring = &adev->uvd.inst[j].ring;
+		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
+					  rq, NULL);
+		if (r != 0) {
+			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
+			return r;
+		}
 
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
+			atomic_set(&adev->uvd.inst[j].handles[i], 0);
+			adev->uvd.inst[j].filp[i] = NULL;
+		}
+	}
 	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
 	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
 		adev->uvd.address_64_bit = true;
@@ -284,20 +286,22 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
-	int i;
-	kfree(adev->uvd.inst->saved_bo);
+	int i, j;
 
-	drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity);
+	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		kfree(adev->uvd.inst[j].saved_bo);
 
-	amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo,
-			      &adev->uvd.inst->gpu_addr,
-			      (void **)&adev->uvd.inst->cpu_addr);
+		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
 
-	amdgpu_ring_fini(&adev->uvd.inst->ring);
+		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
+				      &adev->uvd.inst[j].gpu_addr,
+				      (void **)&adev->uvd.inst[j].cpu_addr);
 
-	for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
-		amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
+		amdgpu_ring_fini(&adev->uvd.inst[j].ring);
 
+		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
+	}
 	release_firmware(adev->uvd.fw);
 
 	return 0;
@@ -307,32 +311,33 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
-	int i;
+	int i, j;
 
-	if (adev->uvd.inst->vcpu_bo == NULL)
-		return 0;
+	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.inst[j].vcpu_bo == NULL)
+			continue;
 
-	cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
 
-	/* only valid for physical mode */
-	if (adev->asic_type < CHIP_POLARIS10) {
-		for (i = 0; i < adev->uvd.max_handles; ++i)
-			if (atomic_read(&adev->uvd.inst->handles[i]))
-				break;
+		/* only valid for physical mode */
+		if (adev->asic_type < CHIP_POLARIS10) {
+			for (i = 0; i < adev->uvd.max_handles; ++i)
+				if (atomic_read(&adev->uvd.inst[j].handles[i]))
+					break;
 
-		if (i == adev->uvd.max_handles)
-			return 0;
-	}
-
-	size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
-	ptr = adev->uvd.inst->cpu_addr;
+			if (i == adev->uvd.max_handles)
+				continue;
+		}
 
-	adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL);
-	if (!adev->uvd.inst->saved_bo)
-		return -ENOMEM;
+		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
+		ptr = adev->uvd.inst[j].cpu_addr;
 
-	memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size);
+		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+		if (!adev->uvd.inst[j].saved_bo)
+			return -ENOMEM;
 
+		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+	}
 	return 0;
 }
 
@@ -340,59 +345,65 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
+	int i;
 
-	if (adev->uvd.inst->vcpu_bo == NULL)
-		return -EINVAL;
+	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.inst[i].vcpu_bo == NULL)
+			return -EINVAL;
 
-	size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
-	ptr = adev->uvd.inst->cpu_addr;
+		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
+		ptr = adev->uvd.inst[i].cpu_addr;
 
-	if (adev->uvd.inst->saved_bo != NULL) {
-		memcpy_toio(ptr, adev->uvd.inst->saved_bo, size);
-		kfree(adev->uvd.inst->saved_bo);
-		adev->uvd.inst->saved_bo = NULL;
-	} else {
-		const struct common_firmware_header *hdr;
-		unsigned offset;
+		if (adev->uvd.inst[i].saved_bo != NULL) {
+			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
+			kfree(adev->uvd.inst[i].saved_bo);
+			adev->uvd.inst[i].saved_bo = NULL;
+		} else {
+			const struct common_firmware_header *hdr;
+			unsigned offset;
 
-		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
-		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-			memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset,
-				    le32_to_cpu(hdr->ucode_size_bytes));
-			size -= le32_to_cpu(hdr->ucode_size_bytes);
-			ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
+					    le32_to_cpu(hdr->ucode_size_bytes));
+				size -= le32_to_cpu(hdr->ucode_size_bytes);
+				ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			}
+			memset_io(ptr, 0, size);
+			/* to restore uvd fence seq */
+			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
 		}
-		memset_io(ptr, 0, size);
-		/* to restore uvd fence seq */
-		amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring);
 	}
-
 	return 0;
 }
 
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 {
-	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
-	int i, r;
+	struct amdgpu_ring *ring;
+	int i, j, r;
 
-	for (i = 0; i < adev->uvd.max_handles; ++i) {
-		uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]);
-		if (handle != 0 && adev->uvd.inst->filp[i] == filp) {
-			struct dma_fence *fence;
-
-			r = amdgpu_uvd_get_destroy_msg(ring, handle,
-						       false, &fence);
-			if (r) {
-				DRM_ERROR("Error destroying UVD (%d)!\n", r);
-				continue;
-			}
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		ring = &adev->uvd.inst[j].ring;
 
-			dma_fence_wait(fence, false);
-			dma_fence_put(fence);
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
+			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
+			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
+				struct dma_fence *fence;
+
+				r = amdgpu_uvd_get_destroy_msg(ring, handle,
+							       false, &fence);
+				if (r) {
+					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
+					continue;
+				}
 
-			adev->uvd.inst->filp[i] = NULL;
-			atomic_set(&adev->uvd.inst->handles[i], 0);
+				dma_fence_wait(fence, false);
+				dma_fence_put(fence);
+
+				adev->uvd.inst[j].filp[i] = NULL;
+				atomic_set(&adev->uvd.inst[j].handles[i], 0);
+			}
 		}
 	}
 }
@@ -667,15 +678,16 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	void *ptr;
 	long r;
 	int i;
+	uint32_t ip_instance = ctx->parser->job->ring->me;
 
 	if (offset & 0x3F) {
-		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
 		return -EINVAL;
 	}
 
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
-		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
+		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
 		return r;
 	}
 
@@ -685,7 +697,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	handle = msg[2];
 
 	if (handle == 0) {
-		DRM_ERROR("Invalid UVD handle!\n");
+		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
 		return -EINVAL;
 	}
 
@@ -696,18 +708,18 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* try to alloc a new handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
-				DRM_ERROR("Handle 0x%x already in use!\n", handle);
+			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
 				return -EINVAL;
 			}
 
-			if (!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) {
-				adev->uvd.inst->filp[i] = ctx->parser->filp;
+			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
+				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
 				return 0;
 			}
 		}
 
-		DRM_ERROR("No more free UVD handles!\n");
+		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
 		return -ENOSPC;
 
 	case 1:
@@ -719,27 +731,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* validate the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
-				if (adev->uvd.inst->filp[i] != ctx->parser->filp) {
-					DRM_ERROR("UVD handle collision detected!\n");
+			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
+					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
 					return -EINVAL;
 				}
 				return 0;
 			}
 		}
 
-		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
 		return -ENOENT;
 
 	case 2:
 		/* it's a destroy msg, free the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i)
-			atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0);
+			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
 
 	default:
-		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
 		return -EINVAL;
 	}
 	BUG();
@@ -1043,7 +1055,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		goto err_free;
 
-	r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity,
+	r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 	if (r)
 		goto err_free;
@@ -1189,27 +1201,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence;
 	long r;
+	uint32_t ip_instance = ring->me;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
 	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
 		goto error;
 	}
 
 	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
 	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
 		goto error;
 	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
 		r = -ETIMEDOUT;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
 	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
 		r = 0;
 	}
 
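
A closing note on the last two hunks: per the commit message, amdgpu_ring::me is repurposed to hold the owning UVD instance index (initialized to 0 for single-instance parts), which is how amdgpu_uvd_send_msg() and amdgpu_uvd_ring_test_ib() recover per-instance state from nothing but a ring pointer. A minimal sketch of that lookup, again with hypothetical stand-in types rather than the driver's real structs:

	/* Sketch only: stand-in types, not the amdgpu definitions. */
	#include <assert.h>

	struct sk_inst { int state; };		/* per-instance state (stand-in) */
	struct sk_ring { unsigned me; };	/* 'me' doubles as instance index */
	struct sk_uvd {
		unsigned num_inst;
		struct sk_inst inst[2];		/* Vega20: dual UVD */
		struct sk_ring ring[2];
	};

	/* Each ring's 'me' is set to its instance index at init time... */
	static void sk_init(struct sk_uvd *uvd, unsigned num_inst)
	{
		unsigned j;

		uvd->num_inst = num_inst;
		for (j = 0; j < num_inst; ++j)
			uvd->ring[j].me = j;	/* stays 0 when num_inst == 1 */
	}

	/* ...so code holding only the ring can still find its instance. */
	static struct sk_inst *sk_ring_to_inst(struct sk_uvd *uvd,
					       struct sk_ring *ring)
	{
		assert(ring->me < uvd->num_inst);
		return &uvd->inst[ring->me];
	}
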