Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c      |   6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c        |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c  |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c        | 102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h        |  19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c          |  27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c          |  25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c          |  77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c          | 135
9 files changed, 205 insertions(+), 194 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d09fcab2398f..1070f4042cbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
| @@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | |||
| 376 | struct amdgpu_device *adev = ring->adev; | 376 | struct amdgpu_device *adev = ring->adev; |
| 377 | uint64_t index; | 377 | uint64_t index; |
| 378 | 378 | ||
| 379 | if (ring != &adev->uvd.ring) { | 379 | if (ring != &adev->uvd.inst->ring) { |
| 380 | ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; | 380 | ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; |
| 381 | ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4); | 381 | ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4); |
| 382 | } else { | 382 | } else { |
| 383 | /* put fence directly behind firmware */ | 383 | /* put fence directly behind firmware */ |
| 384 | index = ALIGN(adev->uvd.fw->size, 8); | 384 | index = ALIGN(adev->uvd.fw->size, 8); |
| 385 | ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index; | 385 | ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index; |
| 386 | ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index; | 386 | ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index; |
| 387 | } | 387 | } |
| 388 | amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); | 388 | amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); |
| 389 | amdgpu_irq_get(adev, irq_src, irq_type); | 389 | amdgpu_irq_get(adev, irq_src, irq_type); |
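The only detail in the amdgpu_fence.c hunk beyond the inst-> renaming is the special case for the UVD ring: instead of a write-back slot, its fence lives inside the UVD VCPU buffer object, directly behind the firmware image, at the next 8-byte-aligned offset. The stand-alone sketch below reproduces just that offset computation; the firmware size and base address are invented for illustration, the real values come from adev->uvd.fw->size and the instance's gpu_addr/cpu_addr.

#include <stdint.h>
#include <stdio.h>

/* Same rounding the kernel's ALIGN() macro performs for a power-of-two alignment. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	/* Hypothetical example values, not taken from real hardware. */
	uint64_t fw_size  = 192 * 1024 + 13;    /* stand-in for adev->uvd.fw->size */
	uint64_t gpu_addr = 0x8000100000ULL;    /* stand-in for inst->gpu_addr */

	uint64_t index = ALIGN_UP(fw_size, 8);  /* fence slot starts 8-byte aligned */

	printf("firmware size  : %llu bytes\n", (unsigned long long)fw_size);
	printf("fence offset   : %llu bytes\n", (unsigned long long)index);
	printf("fence gpu addr : 0x%llx\n", (unsigned long long)(gpu_addr + index));
	return 0;
}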
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index eb4785e51573..5620ed291107 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
| @@ -348,7 +348,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
| 348 | break; | 348 | break; |
| 349 | case AMDGPU_HW_IP_UVD: | 349 | case AMDGPU_HW_IP_UVD: |
| 350 | type = AMD_IP_BLOCK_TYPE_UVD; | 350 | type = AMD_IP_BLOCK_TYPE_UVD; |
| 351 | ring_mask = adev->uvd.ring.ready ? 1 : 0; | 351 | ring_mask = adev->uvd.inst->ring.ready ? 1 : 0; |
| 352 | ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; | 352 | ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; |
| 353 | ib_size_alignment = 16; | 353 | ib_size_alignment = 16; |
| 354 | break; | 354 | break; |
| @@ -362,7 +362,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
| 362 | case AMDGPU_HW_IP_UVD_ENC: | 362 | case AMDGPU_HW_IP_UVD_ENC: |
| 363 | type = AMD_IP_BLOCK_TYPE_UVD; | 363 | type = AMD_IP_BLOCK_TYPE_UVD; |
| 364 | for (i = 0; i < adev->uvd.num_enc_rings; i++) | 364 | for (i = 0; i < adev->uvd.num_enc_rings; i++) |
| 365 | ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i); | 365 | ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i); |
| 366 | ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; | 366 | ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; |
| 367 | ib_size_alignment = 1; | 367 | ib_size_alignment = 1; |
| 368 | break; | 368 | break; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 262c1267249e..2458d385e55a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
| @@ -77,13 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev, | |||
| 77 | *out_ring = &adev->sdma.instance[ring].ring; | 77 | *out_ring = &adev->sdma.instance[ring].ring; |
| 78 | break; | 78 | break; |
| 79 | case AMDGPU_HW_IP_UVD: | 79 | case AMDGPU_HW_IP_UVD: |
| 80 | *out_ring = &adev->uvd.ring; | 80 | *out_ring = &adev->uvd.inst->ring; |
| 81 | break; | 81 | break; |
| 82 | case AMDGPU_HW_IP_VCE: | 82 | case AMDGPU_HW_IP_VCE: |
| 83 | *out_ring = &adev->vce.ring[ring]; | 83 | *out_ring = &adev->vce.ring[ring]; |
| 84 | break; | 84 | break; |
| 85 | case AMDGPU_HW_IP_UVD_ENC: | 85 | case AMDGPU_HW_IP_UVD_ENC: |
| 86 | *out_ring = &adev->uvd.ring_enc[ring]; | 86 | *out_ring = &adev->uvd.inst->ring_enc[ring]; |
| 87 | break; | 87 | break; |
| 88 | case AMDGPU_HW_IP_VCN_DEC: | 88 | case AMDGPU_HW_IP_VCN_DEC: |
| 89 | *out_ring = &adev->vcn.ring_dec; | 89 | *out_ring = &adev->vcn.ring_dec; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index fd1e9cd65066..02683a039a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
| @@ -129,7 +129,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) | |||
| 129 | unsigned version_major, version_minor, family_id; | 129 | unsigned version_major, version_minor, family_id; |
| 130 | int i, r; | 130 | int i, r; |
| 131 | 131 | ||
| 132 | INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); | 132 | INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); |
| 133 | 133 | ||
| 134 | switch (adev->asic_type) { | 134 | switch (adev->asic_type) { |
| 135 | #ifdef CONFIG_DRM_AMDGPU_CIK | 135 | #ifdef CONFIG_DRM_AMDGPU_CIK |
| @@ -237,16 +237,16 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) | |||
| 237 | bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); | 237 | bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); |
| 238 | 238 | ||
| 239 | r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, | 239 | r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, |
| 240 | AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo, | 240 | AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo, |
| 241 | &adev->uvd.gpu_addr, &adev->uvd.cpu_addr); | 241 | &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr); |
| 242 | if (r) { | 242 | if (r) { |
| 243 | dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); | 243 | dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); |
| 244 | return r; | 244 | return r; |
| 245 | } | 245 | } |
| 246 | 246 | ||
| 247 | ring = &adev->uvd.ring; | 247 | ring = &adev->uvd.inst->ring; |
| 248 | rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; | 248 | rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; |
| 249 | r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity, | 249 | r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity, |
| 250 | rq, NULL); | 250 | rq, NULL); |
| 251 | if (r != 0) { | 251 | if (r != 0) { |
| 252 | DRM_ERROR("Failed setting up UVD run queue.\n"); | 252 | DRM_ERROR("Failed setting up UVD run queue.\n"); |
| @@ -254,8 +254,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) | |||
| 254 | } | 254 | } |
| 255 | 255 | ||
| 256 | for (i = 0; i < adev->uvd.max_handles; ++i) { | 256 | for (i = 0; i < adev->uvd.max_handles; ++i) { |
| 257 | atomic_set(&adev->uvd.handles[i], 0); | 257 | atomic_set(&adev->uvd.inst->handles[i], 0); |
| 258 | adev->uvd.filp[i] = NULL; | 258 | adev->uvd.inst->filp[i] = NULL; |
| 259 | } | 259 | } |
| 260 | 260 | ||
| 261 | /* from uvd v5.0 HW addressing capacity increased to 64 bits */ | 261 | /* from uvd v5.0 HW addressing capacity increased to 64 bits */ |
| @@ -285,18 +285,18 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) | |||
| 285 | int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) | 285 | int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) |
| 286 | { | 286 | { |
| 287 | int i; | 287 | int i; |
| 288 | kfree(adev->uvd.saved_bo); | 288 | kfree(adev->uvd.inst->saved_bo); |
| 289 | 289 | ||
| 290 | drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); | 290 | drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity); |
| 291 | 291 | ||
| 292 | amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo, | 292 | amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo, |
| 293 | &adev->uvd.gpu_addr, | 293 | &adev->uvd.inst->gpu_addr, |
| 294 | (void **)&adev->uvd.cpu_addr); | 294 | (void **)&adev->uvd.inst->cpu_addr); |
| 295 | 295 | ||
| 296 | amdgpu_ring_fini(&adev->uvd.ring); | 296 | amdgpu_ring_fini(&adev->uvd.inst->ring); |
| 297 | 297 | ||
| 298 | for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) | 298 | for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) |
| 299 | amdgpu_ring_fini(&adev->uvd.ring_enc[i]); | 299 | amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); |
| 300 | 300 | ||
| 301 | release_firmware(adev->uvd.fw); | 301 | release_firmware(adev->uvd.fw); |
| 302 | 302 | ||
| @@ -309,29 +309,29 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) | |||
| 309 | void *ptr; | 309 | void *ptr; |
| 310 | int i; | 310 | int i; |
| 311 | 311 | ||
| 312 | if (adev->uvd.vcpu_bo == NULL) | 312 | if (adev->uvd.inst->vcpu_bo == NULL) |
| 313 | return 0; | 313 | return 0; |
| 314 | 314 | ||
| 315 | cancel_delayed_work_sync(&adev->uvd.idle_work); | 315 | cancel_delayed_work_sync(&adev->uvd.inst->idle_work); |
| 316 | 316 | ||
| 317 | /* only valid for physical mode */ | 317 | /* only valid for physical mode */ |
| 318 | if (adev->asic_type < CHIP_POLARIS10) { | 318 | if (adev->asic_type < CHIP_POLARIS10) { |
| 319 | for (i = 0; i < adev->uvd.max_handles; ++i) | 319 | for (i = 0; i < adev->uvd.max_handles; ++i) |
| 320 | if (atomic_read(&adev->uvd.handles[i])) | 320 | if (atomic_read(&adev->uvd.inst->handles[i])) |
| 321 | break; | 321 | break; |
| 322 | 322 | ||
| 323 | if (i == adev->uvd.max_handles) | 323 | if (i == adev->uvd.max_handles) |
| 324 | return 0; | 324 | return 0; |
| 325 | } | 325 | } |
| 326 | 326 | ||
| 327 | size = amdgpu_bo_size(adev->uvd.vcpu_bo); | 327 | size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo); |
| 328 | ptr = adev->uvd.cpu_addr; | 328 | ptr = adev->uvd.inst->cpu_addr; |
| 329 | 329 | ||
| 330 | adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); | 330 | adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL); |
| 331 | if (!adev->uvd.saved_bo) | 331 | if (!adev->uvd.inst->saved_bo) |
| 332 | return -ENOMEM; | 332 | return -ENOMEM; |
| 333 | 333 | ||
| 334 | memcpy_fromio(adev->uvd.saved_bo, ptr, size); | 334 | memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size); |
| 335 | 335 | ||
| 336 | return 0; | 336 | return 0; |
| 337 | } | 337 | } |
| @@ -341,16 +341,16 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) | |||
| 341 | unsigned size; | 341 | unsigned size; |
| 342 | void *ptr; | 342 | void *ptr; |
| 343 | 343 | ||
| 344 | if (adev->uvd.vcpu_bo == NULL) | 344 | if (adev->uvd.inst->vcpu_bo == NULL) |
| 345 | return -EINVAL; | 345 | return -EINVAL; |
| 346 | 346 | ||
| 347 | size = amdgpu_bo_size(adev->uvd.vcpu_bo); | 347 | size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo); |
| 348 | ptr = adev->uvd.cpu_addr; | 348 | ptr = adev->uvd.inst->cpu_addr; |
| 349 | 349 | ||
| 350 | if (adev->uvd.saved_bo != NULL) { | 350 | if (adev->uvd.inst->saved_bo != NULL) { |
| 351 | memcpy_toio(ptr, adev->uvd.saved_bo, size); | 351 | memcpy_toio(ptr, adev->uvd.inst->saved_bo, size); |
| 352 | kfree(adev->uvd.saved_bo); | 352 | kfree(adev->uvd.inst->saved_bo); |
| 353 | adev->uvd.saved_bo = NULL; | 353 | adev->uvd.inst->saved_bo = NULL; |
| 354 | } else { | 354 | } else { |
| 355 | const struct common_firmware_header *hdr; | 355 | const struct common_firmware_header *hdr; |
| 356 | unsigned offset; | 356 | unsigned offset; |
| @@ -358,14 +358,14 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) | |||
| 358 | hdr = (const struct common_firmware_header *)adev->uvd.fw->data; | 358 | hdr = (const struct common_firmware_header *)adev->uvd.fw->data; |
| 359 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { | 359 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { |
| 360 | offset = le32_to_cpu(hdr->ucode_array_offset_bytes); | 360 | offset = le32_to_cpu(hdr->ucode_array_offset_bytes); |
| 361 | memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset, | 361 | memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset, |
| 362 | le32_to_cpu(hdr->ucode_size_bytes)); | 362 | le32_to_cpu(hdr->ucode_size_bytes)); |
| 363 | size -= le32_to_cpu(hdr->ucode_size_bytes); | 363 | size -= le32_to_cpu(hdr->ucode_size_bytes); |
| 364 | ptr += le32_to_cpu(hdr->ucode_size_bytes); | 364 | ptr += le32_to_cpu(hdr->ucode_size_bytes); |
| 365 | } | 365 | } |
| 366 | memset_io(ptr, 0, size); | 366 | memset_io(ptr, 0, size); |
| 367 | /* to restore uvd fence seq */ | 367 | /* to restore uvd fence seq */ |
| 368 | amdgpu_fence_driver_force_completion(&adev->uvd.ring); | 368 | amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring); |
| 369 | } | 369 | } |
| 370 | 370 | ||
| 371 | return 0; | 371 | return 0; |
| @@ -373,12 +373,12 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) | |||
| 373 | 373 | ||
| 374 | void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | 374 | void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) |
| 375 | { | 375 | { |
| 376 | struct amdgpu_ring *ring = &adev->uvd.ring; | 376 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 377 | int i, r; | 377 | int i, r; |
| 378 | 378 | ||
| 379 | for (i = 0; i < adev->uvd.max_handles; ++i) { | 379 | for (i = 0; i < adev->uvd.max_handles; ++i) { |
| 380 | uint32_t handle = atomic_read(&adev->uvd.handles[i]); | 380 | uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]); |
| 381 | if (handle != 0 && adev->uvd.filp[i] == filp) { | 381 | if (handle != 0 && adev->uvd.inst->filp[i] == filp) { |
| 382 | struct dma_fence *fence; | 382 | struct dma_fence *fence; |
| 383 | 383 | ||
| 384 | r = amdgpu_uvd_get_destroy_msg(ring, handle, | 384 | r = amdgpu_uvd_get_destroy_msg(ring, handle, |
| @@ -391,8 +391,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | |||
| 391 | dma_fence_wait(fence, false); | 391 | dma_fence_wait(fence, false); |
| 392 | dma_fence_put(fence); | 392 | dma_fence_put(fence); |
| 393 | 393 | ||
| 394 | adev->uvd.filp[i] = NULL; | 394 | adev->uvd.inst->filp[i] = NULL; |
| 395 | atomic_set(&adev->uvd.handles[i], 0); | 395 | atomic_set(&adev->uvd.inst->handles[i], 0); |
| 396 | } | 396 | } |
| 397 | } | 397 | } |
| 398 | } | 398 | } |
| @@ -696,13 +696,13 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, | |||
| 696 | 696 | ||
| 697 | /* try to alloc a new handle */ | 697 | /* try to alloc a new handle */ |
| 698 | for (i = 0; i < adev->uvd.max_handles; ++i) { | 698 | for (i = 0; i < adev->uvd.max_handles; ++i) { |
| 699 | if (atomic_read(&adev->uvd.handles[i]) == handle) { | 699 | if (atomic_read(&adev->uvd.inst->handles[i]) == handle) { |
| 700 | DRM_ERROR("Handle 0x%x already in use!\n", handle); | 700 | DRM_ERROR("Handle 0x%x already in use!\n", handle); |
| 701 | return -EINVAL; | 701 | return -EINVAL; |
| 702 | } | 702 | } |
| 703 | 703 | ||
| 704 | if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { | 704 | if (!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) { |
| 705 | adev->uvd.filp[i] = ctx->parser->filp; | 705 | adev->uvd.inst->filp[i] = ctx->parser->filp; |
| 706 | return 0; | 706 | return 0; |
| 707 | } | 707 | } |
| 708 | } | 708 | } |
| @@ -719,8 +719,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, | |||
| 719 | 719 | ||
| 720 | /* validate the handle */ | 720 | /* validate the handle */ |
| 721 | for (i = 0; i < adev->uvd.max_handles; ++i) { | 721 | for (i = 0; i < adev->uvd.max_handles; ++i) { |
| 722 | if (atomic_read(&adev->uvd.handles[i]) == handle) { | 722 | if (atomic_read(&adev->uvd.inst->handles[i]) == handle) { |
| 723 | if (adev->uvd.filp[i] != ctx->parser->filp) { | 723 | if (adev->uvd.inst->filp[i] != ctx->parser->filp) { |
| 724 | DRM_ERROR("UVD handle collision detected!\n"); | 724 | DRM_ERROR("UVD handle collision detected!\n"); |
| 725 | return -EINVAL; | 725 | return -EINVAL; |
| 726 | } | 726 | } |
| @@ -734,7 +734,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, | |||
| 734 | case 2: | 734 | case 2: |
| 735 | /* it's a destroy msg, free the handle */ | 735 | /* it's a destroy msg, free the handle */ |
| 736 | for (i = 0; i < adev->uvd.max_handles; ++i) | 736 | for (i = 0; i < adev->uvd.max_handles; ++i) |
| 737 | atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); | 737 | atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0); |
| 738 | amdgpu_bo_kunmap(bo); | 738 | amdgpu_bo_kunmap(bo); |
| 739 | return 0; | 739 | return 0; |
| 740 | 740 | ||
| @@ -810,7 +810,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) | |||
| 810 | } | 810 | } |
| 811 | 811 | ||
| 812 | if ((cmd == 0 || cmd == 0x3) && | 812 | if ((cmd == 0 || cmd == 0x3) && |
| 813 | (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) { | 813 | (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { |
| 814 | DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", | 814 | DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", |
| 815 | start, end); | 815 | start, end); |
| 816 | return -EINVAL; | 816 | return -EINVAL; |
| @@ -1043,7 +1043,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, | |||
| 1043 | if (r) | 1043 | if (r) |
| 1044 | goto err_free; | 1044 | goto err_free; |
| 1045 | 1045 | ||
| 1046 | r = amdgpu_job_submit(job, ring, &adev->uvd.entity, | 1046 | r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity, |
| 1047 | AMDGPU_FENCE_OWNER_UNDEFINED, &f); | 1047 | AMDGPU_FENCE_OWNER_UNDEFINED, &f); |
| 1048 | if (r) | 1048 | if (r) |
| 1049 | goto err_free; | 1049 | goto err_free; |
| @@ -1131,8 +1131,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
| 1131 | static void amdgpu_uvd_idle_work_handler(struct work_struct *work) | 1131 | static void amdgpu_uvd_idle_work_handler(struct work_struct *work) |
| 1132 | { | 1132 | { |
| 1133 | struct amdgpu_device *adev = | 1133 | struct amdgpu_device *adev = |
| 1134 | container_of(work, struct amdgpu_device, uvd.idle_work.work); | 1134 | container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); |
| 1135 | unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); | 1135 | unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring); |
| 1136 | 1136 | ||
| 1137 | if (fences == 0) { | 1137 | if (fences == 0) { |
| 1138 | if (adev->pm.dpm_enabled) { | 1138 | if (adev->pm.dpm_enabled) { |
| @@ -1146,7 +1146,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) | |||
| 1146 | AMD_CG_STATE_GATE); | 1146 | AMD_CG_STATE_GATE); |
| 1147 | } | 1147 | } |
| 1148 | } else { | 1148 | } else { |
| 1149 | schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); | 1149 | schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); |
| 1150 | } | 1150 | } |
| 1151 | } | 1151 | } |
| 1152 | 1152 | ||
| @@ -1158,7 +1158,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) | |||
| 1158 | if (amdgpu_sriov_vf(adev)) | 1158 | if (amdgpu_sriov_vf(adev)) |
| 1159 | return; | 1159 | return; |
| 1160 | 1160 | ||
| 1161 | set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); | 1161 | set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work); |
| 1162 | if (set_clocks) { | 1162 | if (set_clocks) { |
| 1163 | if (adev->pm.dpm_enabled) { | 1163 | if (adev->pm.dpm_enabled) { |
| 1164 | amdgpu_dpm_enable_uvd(adev, true); | 1164 | amdgpu_dpm_enable_uvd(adev, true); |
| @@ -1175,7 +1175,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) | |||
| 1175 | void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) | 1175 | void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) |
| 1176 | { | 1176 | { |
| 1177 | if (!amdgpu_sriov_vf(ring->adev)) | 1177 | if (!amdgpu_sriov_vf(ring->adev)) |
| 1178 | schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); | 1178 | schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); |
| 1179 | } | 1179 | } |
| 1180 | 1180 | ||
| 1181 | /** | 1181 | /** |
| @@ -1209,7 +1209,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
| 1209 | } else if (r < 0) { | 1209 | } else if (r < 0) { |
| 1210 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | 1210 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); |
| 1211 | } else { | 1211 | } else { |
| 1212 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | 1212 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); |
| 1213 | r = 0; | 1213 | r = 0; |
| 1214 | } | 1214 | } |
| 1215 | 1215 | ||
| @@ -1237,7 +1237,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev) | |||
| 1237 | * necessarily linear. So we need to count | 1237 | * necessarily linear. So we need to count |
| 1238 | * all non-zero handles. | 1238 | * all non-zero handles. |
| 1239 | */ | 1239 | */ |
| 1240 | if (atomic_read(&adev->uvd.handles[i])) | 1240 | if (atomic_read(&adev->uvd.inst->handles[i])) |
| 1241 | used_handles++; | 1241 | used_handles++; |
| 1242 | } | 1242 | } |
| 1243 | 1243 | ||
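A large part of the amdgpu_uvd.c changes above touch the session-handle table, which now lives in the instance structure. The allocation scheme itself is unchanged: a slot whose value is 0 is free, a create message claims the first free slot by atomically exchanging 0 for the new handle, and a destroy message swaps the handle back to 0. The userspace sketch below models that claim/release logic with C11 atomics; the table size is an assumption (the driver uses adev->uvd.max_handles) and the helper names are invented.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16                      /* assumed; driver uses adev->uvd.max_handles */

static atomic_uint handles[MAX_HANDLES];    /* 0 means "slot free" */

/* Claim a slot for a new session handle; returns the slot or -1 on duplicate/full. */
static int claim_handle(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		unsigned int expected = 0;

		if (atomic_load(&handles[i]) == handle)
			return -1;              /* handle already in use */
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
			return i;               /* claimed a free slot */
	}
	return -1;                              /* no free slot left */
}

/* Release a handle: wherever it is found, swap it back to 0. */
static void release_handle(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		unsigned int expected = handle;

		atomic_compare_exchange_strong(&handles[i], &expected, 0u);
	}
}

int main(void)
{
	printf("first claim   -> slot %d\n", claim_handle(0x1234));
	printf("second claim  -> %d (duplicate rejected)\n", claim_handle(0x1234));
	release_handle(0x1234);
	printf("after release -> slot %d\n", claim_handle(0x1234));
	return 0;
}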
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 32ea20b99e53..b1579fba134c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
| @@ -31,30 +31,37 @@ | |||
| 31 | #define AMDGPU_UVD_SESSION_SIZE (50*1024) | 31 | #define AMDGPU_UVD_SESSION_SIZE (50*1024) |
| 32 | #define AMDGPU_UVD_FIRMWARE_OFFSET 256 | 32 | #define AMDGPU_UVD_FIRMWARE_OFFSET 256 |
| 33 | 33 | ||
| 34 | #define AMDGPU_MAX_UVD_INSTANCES 2 | ||
| 35 | |||
| 34 | #define AMDGPU_UVD_FIRMWARE_SIZE(adev) \ | 36 | #define AMDGPU_UVD_FIRMWARE_SIZE(adev) \ |
| 35 | (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \ | 37 | (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \ |
| 36 | 8) - AMDGPU_UVD_FIRMWARE_OFFSET) | 38 | 8) - AMDGPU_UVD_FIRMWARE_OFFSET) |
| 37 | 39 | ||
| 38 | struct amdgpu_uvd { | 40 | struct amdgpu_uvd_inst { |
| 39 | struct amdgpu_bo *vcpu_bo; | 41 | struct amdgpu_bo *vcpu_bo; |
| 40 | void *cpu_addr; | 42 | void *cpu_addr; |
| 41 | uint64_t gpu_addr; | 43 | uint64_t gpu_addr; |
| 42 | unsigned fw_version; | ||
| 43 | void *saved_bo; | 44 | void *saved_bo; |
| 44 | unsigned max_handles; | ||
| 45 | atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; | 45 | atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; |
| 46 | struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; | 46 | struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; |
| 47 | struct delayed_work idle_work; | 47 | struct delayed_work idle_work; |
| 48 | const struct firmware *fw; /* UVD firmware */ | ||
| 49 | struct amdgpu_ring ring; | 48 | struct amdgpu_ring ring; |
| 50 | struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; | 49 | struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; |
| 51 | struct amdgpu_irq_src irq; | 50 | struct amdgpu_irq_src irq; |
| 52 | bool address_64_bit; | ||
| 53 | bool use_ctx_buf; | ||
| 54 | struct drm_sched_entity entity; | 51 | struct drm_sched_entity entity; |
| 55 | struct drm_sched_entity entity_enc; | 52 | struct drm_sched_entity entity_enc; |
| 56 | uint32_t srbm_soft_reset; | 53 | uint32_t srbm_soft_reset; |
| 54 | }; | ||
| 55 | |||
| 56 | struct amdgpu_uvd { | ||
| 57 | const struct firmware *fw; /* UVD firmware */ | ||
| 58 | unsigned fw_version; | ||
| 59 | unsigned max_handles; | ||
| 57 | unsigned num_enc_rings; | 60 | unsigned num_enc_rings; |
| 61 | uint8_t num_uvd_inst; | ||
| 62 | bool address_64_bit; | ||
| 63 | bool use_ctx_buf; | ||
| 64 | struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; | ||
| 58 | }; | 65 | }; |
| 59 | 66 | ||
| 60 | int amdgpu_uvd_sw_init(struct amdgpu_device *adev); | 67 | int amdgpu_uvd_sw_init(struct amdgpu_device *adev); |
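The header change above is the structural core of the series: everything that exists once per UVD hardware block (the VCPU BO and its mappings, the saved BO, the handle and filp tables, the idle work, the decode and encode rings, the IRQ source, the scheduler entities and the soft-reset state) moves into the new struct amdgpu_uvd_inst, while the firmware image and version, max_handles, num_enc_rings, the addressing flags and the new num_uvd_inst counter stay in the shared struct amdgpu_uvd together with an inst[AMDGPU_MAX_UVD_INSTANCES] array. The callers converted in this patch still address instance 0 only (inst-> is shorthand for inst[0]), and each early_init hook sets num_uvd_inst = 1, but the array is what lets later code iterate instances. A minimal sketch of that iteration pattern, with heavily simplified stand-in types, follows; uvd_report_rings() is an invented helper, not a driver function.

#include <stdio.h>

#define AMDGPU_MAX_UVD_INSTANCES 2          /* matches the new define in amdgpu_uvd.h */

/* Heavily simplified stand-ins for the kernel structures (illustration only). */
struct amdgpu_ring {
	char name[16];
	int  ready;
};

struct amdgpu_uvd_inst {                    /* per-instance state */
	struct amdgpu_ring ring;            /* decode ring */
};

struct amdgpu_uvd {                         /* shared state + instance array */
	unsigned num_uvd_inst;
	struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
};

/* Invented helper: walk every populated instance instead of hard-coding inst[0]. */
static void uvd_report_rings(const struct amdgpu_uvd *uvd)
{
	for (unsigned i = 0; i < uvd->num_uvd_inst; ++i)
		printf("UVD instance %u: ring '%s' ready=%d\n",
		       i, uvd->inst[i].ring.name, uvd->inst[i].ring.ready);
}

int main(void)
{
	struct amdgpu_uvd uvd = { .num_uvd_inst = 1 };  /* as set by the early_init hooks */

	snprintf(uvd.inst[0].ring.name, sizeof(uvd.inst[0].ring.name), "uvd");
	uvd.inst[0].ring.ready = 1;
	uvd_report_rings(&uvd);
	return 0;
}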
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 87cbb142dd0b..5f22135de77f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
| @@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) | |||
| 93 | static int uvd_v4_2_early_init(void *handle) | 93 | static int uvd_v4_2_early_init(void *handle) |
| 94 | { | 94 | { |
| 95 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 95 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 96 | adev->uvd.num_uvd_inst = 1; | ||
| 96 | 97 | ||
| 97 | uvd_v4_2_set_ring_funcs(adev); | 98 | uvd_v4_2_set_ring_funcs(adev); |
| 98 | uvd_v4_2_set_irq_funcs(adev); | 99 | uvd_v4_2_set_irq_funcs(adev); |
| @@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle) | |||
| 107 | int r; | 108 | int r; |
| 108 | 109 | ||
| 109 | /* UVD TRAP */ | 110 | /* UVD TRAP */ |
| 110 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); | 111 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); |
| 111 | if (r) | 112 | if (r) |
| 112 | return r; | 113 | return r; |
| 113 | 114 | ||
| @@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle) | |||
| 119 | if (r) | 120 | if (r) |
| 120 | return r; | 121 | return r; |
| 121 | 122 | ||
| 122 | ring = &adev->uvd.ring; | 123 | ring = &adev->uvd.inst->ring; |
| 123 | sprintf(ring->name, "uvd"); | 124 | sprintf(ring->name, "uvd"); |
| 124 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); | 125 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); |
| 125 | 126 | ||
| 126 | return r; | 127 | return r; |
| 127 | } | 128 | } |
| @@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, | |||
| 150 | static int uvd_v4_2_hw_init(void *handle) | 151 | static int uvd_v4_2_hw_init(void *handle) |
| 151 | { | 152 | { |
| 152 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 153 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 153 | struct amdgpu_ring *ring = &adev->uvd.ring; | 154 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 154 | uint32_t tmp; | 155 | uint32_t tmp; |
| 155 | int r; | 156 | int r; |
| 156 | 157 | ||
| @@ -208,7 +209,7 @@ done: | |||
| 208 | static int uvd_v4_2_hw_fini(void *handle) | 209 | static int uvd_v4_2_hw_fini(void *handle) |
| 209 | { | 210 | { |
| 210 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 211 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 211 | struct amdgpu_ring *ring = &adev->uvd.ring; | 212 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 212 | 213 | ||
| 213 | if (RREG32(mmUVD_STATUS) != 0) | 214 | if (RREG32(mmUVD_STATUS) != 0) |
| 214 | uvd_v4_2_stop(adev); | 215 | uvd_v4_2_stop(adev); |
| @@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle) | |||
| 251 | */ | 252 | */ |
| 252 | static int uvd_v4_2_start(struct amdgpu_device *adev) | 253 | static int uvd_v4_2_start(struct amdgpu_device *adev) |
| 253 | { | 254 | { |
| 254 | struct amdgpu_ring *ring = &adev->uvd.ring; | 255 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 255 | uint32_t rb_bufsz; | 256 | uint32_t rb_bufsz; |
| 256 | int i, j, r; | 257 | int i, j, r; |
| 257 | u32 tmp; | 258 | u32 tmp; |
| @@ -536,7 +537,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) | |||
| 536 | uint32_t size; | 537 | uint32_t size; |
| 537 | 538 | ||
| 538 | /* programm the VCPU memory controller bits 0-27 */ | 539 | /* programm the VCPU memory controller bits 0-27 */ |
| 539 | addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; | 540 | addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; |
| 540 | size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; | 541 | size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; |
| 541 | WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); | 542 | WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); |
| 542 | WREG32(mmUVD_VCPU_CACHE_SIZE0, size); | 543 | WREG32(mmUVD_VCPU_CACHE_SIZE0, size); |
| @@ -553,11 +554,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) | |||
| 553 | WREG32(mmUVD_VCPU_CACHE_SIZE2, size); | 554 | WREG32(mmUVD_VCPU_CACHE_SIZE2, size); |
| 554 | 555 | ||
| 555 | /* bits 28-31 */ | 556 | /* bits 28-31 */ |
| 556 | addr = (adev->uvd.gpu_addr >> 28) & 0xF; | 557 | addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF; |
| 557 | WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); | 558 | WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); |
| 558 | 559 | ||
| 559 | /* bits 32-39 */ | 560 | /* bits 32-39 */ |
| 560 | addr = (adev->uvd.gpu_addr >> 32) & 0xFF; | 561 | addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF; |
| 561 | WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); | 562 | WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); |
| 562 | 563 | ||
| 563 | WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); | 564 | WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); |
| @@ -664,7 +665,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, | |||
| 664 | struct amdgpu_iv_entry *entry) | 665 | struct amdgpu_iv_entry *entry) |
| 665 | { | 666 | { |
| 666 | DRM_DEBUG("IH: UVD TRAP\n"); | 667 | DRM_DEBUG("IH: UVD TRAP\n"); |
| 667 | amdgpu_fence_process(&adev->uvd.ring); | 668 | amdgpu_fence_process(&adev->uvd.inst->ring); |
| 668 | return 0; | 669 | return 0; |
| 669 | } | 670 | } |
| 670 | 671 | ||
| @@ -753,7 +754,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { | |||
| 753 | 754 | ||
| 754 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) | 755 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) |
| 755 | { | 756 | { |
| 756 | adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs; | 757 | adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs; |
| 757 | } | 758 | } |
| 758 | 759 | ||
| 759 | static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { | 760 | static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { |
| @@ -763,8 +764,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { | |||
| 763 | 764 | ||
| 764 | static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) | 765 | static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) |
| 765 | { | 766 | { |
| 766 | adev->uvd.irq.num_types = 1; | 767 | adev->uvd.inst->irq.num_types = 1; |
| 767 | adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; | 768 | adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs; |
| 768 | } | 769 | } |
| 769 | 770 | ||
| 770 | const struct amdgpu_ip_block_version uvd_v4_2_ip_block = | 771 | const struct amdgpu_ip_block_version uvd_v4_2_ip_block = |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 6445d55e7d5a..f5d074a887fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
| @@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) | |||
| 89 | static int uvd_v5_0_early_init(void *handle) | 89 | static int uvd_v5_0_early_init(void *handle) |
| 90 | { | 90 | { |
| 91 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 91 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 92 | adev->uvd.num_uvd_inst = 1; | ||
| 92 | 93 | ||
| 93 | uvd_v5_0_set_ring_funcs(adev); | 94 | uvd_v5_0_set_ring_funcs(adev); |
| 94 | uvd_v5_0_set_irq_funcs(adev); | 95 | uvd_v5_0_set_irq_funcs(adev); |
| @@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle) | |||
| 103 | int r; | 104 | int r; |
| 104 | 105 | ||
| 105 | /* UVD TRAP */ | 106 | /* UVD TRAP */ |
| 106 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); | 107 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); |
| 107 | if (r) | 108 | if (r) |
| 108 | return r; | 109 | return r; |
| 109 | 110 | ||
| @@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void *handle) | |||
| 115 | if (r) | 116 | if (r) |
| 116 | return r; | 117 | return r; |
| 117 | 118 | ||
| 118 | ring = &adev->uvd.ring; | 119 | ring = &adev->uvd.inst->ring; |
| 119 | sprintf(ring->name, "uvd"); | 120 | sprintf(ring->name, "uvd"); |
| 120 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); | 121 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); |
| 121 | 122 | ||
| 122 | return r; | 123 | return r; |
| 123 | } | 124 | } |
| @@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle) | |||
| 144 | static int uvd_v5_0_hw_init(void *handle) | 145 | static int uvd_v5_0_hw_init(void *handle) |
| 145 | { | 146 | { |
| 146 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 147 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 147 | struct amdgpu_ring *ring = &adev->uvd.ring; | 148 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 148 | uint32_t tmp; | 149 | uint32_t tmp; |
| 149 | int r; | 150 | int r; |
| 150 | 151 | ||
| @@ -204,7 +205,7 @@ done: | |||
| 204 | static int uvd_v5_0_hw_fini(void *handle) | 205 | static int uvd_v5_0_hw_fini(void *handle) |
| 205 | { | 206 | { |
| 206 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 207 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 207 | struct amdgpu_ring *ring = &adev->uvd.ring; | 208 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 208 | 209 | ||
| 209 | if (RREG32(mmUVD_STATUS) != 0) | 210 | if (RREG32(mmUVD_STATUS) != 0) |
| 210 | uvd_v5_0_stop(adev); | 211 | uvd_v5_0_stop(adev); |
| @@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) | |||
| 253 | 254 | ||
| 254 | /* programm memory controller bits 0-27 */ | 255 | /* programm memory controller bits 0-27 */ |
| 255 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, | 256 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
| 256 | lower_32_bits(adev->uvd.gpu_addr)); | 257 | lower_32_bits(adev->uvd.inst->gpu_addr)); |
| 257 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, | 258 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
| 258 | upper_32_bits(adev->uvd.gpu_addr)); | 259 | upper_32_bits(adev->uvd.inst->gpu_addr)); |
| 259 | 260 | ||
| 260 | offset = AMDGPU_UVD_FIRMWARE_OFFSET; | 261 | offset = AMDGPU_UVD_FIRMWARE_OFFSET; |
| 261 | size = AMDGPU_UVD_FIRMWARE_SIZE(adev); | 262 | size = AMDGPU_UVD_FIRMWARE_SIZE(adev); |
| @@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) | |||
| 287 | */ | 288 | */ |
| 288 | static int uvd_v5_0_start(struct amdgpu_device *adev) | 289 | static int uvd_v5_0_start(struct amdgpu_device *adev) |
| 289 | { | 290 | { |
| 290 | struct amdgpu_ring *ring = &adev->uvd.ring; | 291 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 291 | uint32_t rb_bufsz, tmp; | 292 | uint32_t rb_bufsz, tmp; |
| 292 | uint32_t lmi_swap_cntl; | 293 | uint32_t lmi_swap_cntl; |
| 293 | uint32_t mp_swap_cntl; | 294 | uint32_t mp_swap_cntl; |
| @@ -586,7 +587,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, | |||
| 586 | struct amdgpu_iv_entry *entry) | 587 | struct amdgpu_iv_entry *entry) |
| 587 | { | 588 | { |
| 588 | DRM_DEBUG("IH: UVD TRAP\n"); | 589 | DRM_DEBUG("IH: UVD TRAP\n"); |
| 589 | amdgpu_fence_process(&adev->uvd.ring); | 590 | amdgpu_fence_process(&adev->uvd.inst->ring); |
| 590 | return 0; | 591 | return 0; |
| 591 | } | 592 | } |
| 592 | 593 | ||
| @@ -861,7 +862,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { | |||
| 861 | 862 | ||
| 862 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) | 863 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) |
| 863 | { | 864 | { |
| 864 | adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs; | 865 | adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs; |
| 865 | } | 866 | } |
| 866 | 867 | ||
| 867 | static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { | 868 | static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { |
| @@ -871,8 +872,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { | |||
| 871 | 872 | ||
| 872 | static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) | 873 | static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) |
| 873 | { | 874 | { |
| 874 | adev->uvd.irq.num_types = 1; | 875 | adev->uvd.inst->irq.num_types = 1; |
| 875 | adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; | 876 | adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs; |
| 876 | } | 877 | } |
| 877 | 878 | ||
| 878 | const struct amdgpu_ip_block_version uvd_v5_0_ip_block = | 879 | const struct amdgpu_ip_block_version uvd_v5_0_ip_block = |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ca6ab56357b5..dc391693d7ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
| @@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring) | |||
| 91 | { | 91 | { |
| 92 | struct amdgpu_device *adev = ring->adev; | 92 | struct amdgpu_device *adev = ring->adev; |
| 93 | 93 | ||
| 94 | if (ring == &adev->uvd.ring_enc[0]) | 94 | if (ring == &adev->uvd.inst->ring_enc[0]) |
| 95 | return RREG32(mmUVD_RB_RPTR); | 95 | return RREG32(mmUVD_RB_RPTR); |
| 96 | else | 96 | else |
| 97 | return RREG32(mmUVD_RB_RPTR2); | 97 | return RREG32(mmUVD_RB_RPTR2); |
| @@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring) | |||
| 121 | { | 121 | { |
| 122 | struct amdgpu_device *adev = ring->adev; | 122 | struct amdgpu_device *adev = ring->adev; |
| 123 | 123 | ||
| 124 | if (ring == &adev->uvd.ring_enc[0]) | 124 | if (ring == &adev->uvd.inst->ring_enc[0]) |
| 125 | return RREG32(mmUVD_RB_WPTR); | 125 | return RREG32(mmUVD_RB_WPTR); |
| 126 | else | 126 | else |
| 127 | return RREG32(mmUVD_RB_WPTR2); | 127 | return RREG32(mmUVD_RB_WPTR2); |
| @@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring) | |||
| 152 | { | 152 | { |
| 153 | struct amdgpu_device *adev = ring->adev; | 153 | struct amdgpu_device *adev = ring->adev; |
| 154 | 154 | ||
| 155 | if (ring == &adev->uvd.ring_enc[0]) | 155 | if (ring == &adev->uvd.inst->ring_enc[0]) |
| 156 | WREG32(mmUVD_RB_WPTR, | 156 | WREG32(mmUVD_RB_WPTR, |
| 157 | lower_32_bits(ring->wptr)); | 157 | lower_32_bits(ring->wptr)); |
| 158 | else | 158 | else |
| @@ -375,6 +375,7 @@ error: | |||
| 375 | static int uvd_v6_0_early_init(void *handle) | 375 | static int uvd_v6_0_early_init(void *handle) |
| 376 | { | 376 | { |
| 377 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 377 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 378 | adev->uvd.num_uvd_inst = 1; | ||
| 378 | 379 | ||
| 379 | if (!(adev->flags & AMD_IS_APU) && | 380 | if (!(adev->flags & AMD_IS_APU) && |
| 380 | (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK)) | 381 | (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK)) |
| @@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle) | |||
| 399 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 400 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 400 | 401 | ||
| 401 | /* UVD TRAP */ | 402 | /* UVD TRAP */ |
| 402 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); | 403 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); |
| 403 | if (r) | 404 | if (r) |
| 404 | return r; | 405 | return r; |
| 405 | 406 | ||
| 406 | /* UVD ENC TRAP */ | 407 | /* UVD ENC TRAP */ |
| 407 | if (uvd_v6_0_enc_support(adev)) { | 408 | if (uvd_v6_0_enc_support(adev)) { |
| 408 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | 409 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
| 409 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq); | 410 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq); |
| 410 | if (r) | 411 | if (r) |
| 411 | return r; | 412 | return r; |
| 412 | } | 413 | } |
| @@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void *handle) | |||
| 418 | 419 | ||
| 419 | if (!uvd_v6_0_enc_support(adev)) { | 420 | if (!uvd_v6_0_enc_support(adev)) { |
| 420 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) | 421 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) |
| 421 | adev->uvd.ring_enc[i].funcs = NULL; | 422 | adev->uvd.inst->ring_enc[i].funcs = NULL; |
| 422 | 423 | ||
| 423 | adev->uvd.irq.num_types = 1; | 424 | adev->uvd.inst->irq.num_types = 1; |
| 424 | adev->uvd.num_enc_rings = 0; | 425 | adev->uvd.num_enc_rings = 0; |
| 425 | 426 | ||
| 426 | DRM_INFO("UVD ENC is disabled\n"); | 427 | DRM_INFO("UVD ENC is disabled\n"); |
| 427 | } else { | 428 | } else { |
| 428 | struct drm_sched_rq *rq; | 429 | struct drm_sched_rq *rq; |
| 429 | ring = &adev->uvd.ring_enc[0]; | 430 | ring = &adev->uvd.inst->ring_enc[0]; |
| 430 | rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; | 431 | rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; |
| 431 | r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, | 432 | r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc, |
| 432 | rq, NULL); | 433 | rq, NULL); |
| 433 | if (r) { | 434 | if (r) { |
| 434 | DRM_ERROR("Failed setting up UVD ENC run queue.\n"); | 435 | DRM_ERROR("Failed setting up UVD ENC run queue.\n"); |
| @@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle) | |||
| 440 | if (r) | 441 | if (r) |
| 441 | return r; | 442 | return r; |
| 442 | 443 | ||
| 443 | ring = &adev->uvd.ring; | 444 | ring = &adev->uvd.inst->ring; |
| 444 | sprintf(ring->name, "uvd"); | 445 | sprintf(ring->name, "uvd"); |
| 445 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); | 446 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); |
| 446 | if (r) | 447 | if (r) |
| 447 | return r; | 448 | return r; |
| 448 | 449 | ||
| 449 | if (uvd_v6_0_enc_support(adev)) { | 450 | if (uvd_v6_0_enc_support(adev)) { |
| 450 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | 451 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
| 451 | ring = &adev->uvd.ring_enc[i]; | 452 | ring = &adev->uvd.inst->ring_enc[i]; |
| 452 | sprintf(ring->name, "uvd_enc%d", i); | 453 | sprintf(ring->name, "uvd_enc%d", i); |
| 453 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); | 454 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); |
| 454 | if (r) | 455 | if (r) |
| 455 | return r; | 456 | return r; |
| 456 | } | 457 | } |
| @@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle) | |||
| 469 | return r; | 470 | return r; |
| 470 | 471 | ||
| 471 | if (uvd_v6_0_enc_support(adev)) { | 472 | if (uvd_v6_0_enc_support(adev)) { |
| 472 | drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); | 473 | drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); |
| 473 | 474 | ||
| 474 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) | 475 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) |
| 475 | amdgpu_ring_fini(&adev->uvd.ring_enc[i]); | 476 | amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); |
| 476 | } | 477 | } |
| 477 | 478 | ||
| 478 | return amdgpu_uvd_sw_fini(adev); | 479 | return amdgpu_uvd_sw_fini(adev); |
| @@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle) | |||
| 488 | static int uvd_v6_0_hw_init(void *handle) | 489 | static int uvd_v6_0_hw_init(void *handle) |
| 489 | { | 490 | { |
| 490 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 491 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 491 | struct amdgpu_ring *ring = &adev->uvd.ring; | 492 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 492 | uint32_t tmp; | 493 | uint32_t tmp; |
| 493 | int i, r; | 494 | int i, r; |
| 494 | 495 | ||
| @@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle) | |||
| 532 | 533 | ||
| 533 | if (uvd_v6_0_enc_support(adev)) { | 534 | if (uvd_v6_0_enc_support(adev)) { |
| 534 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | 535 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
| 535 | ring = &adev->uvd.ring_enc[i]; | 536 | ring = &adev->uvd.inst->ring_enc[i]; |
| 536 | ring->ready = true; | 537 | ring->ready = true; |
| 537 | r = amdgpu_ring_test_ring(ring); | 538 | r = amdgpu_ring_test_ring(ring); |
| 538 | if (r) { | 539 | if (r) { |
| @@ -563,7 +564,7 @@ done: | |||
| 563 | static int uvd_v6_0_hw_fini(void *handle) | 564 | static int uvd_v6_0_hw_fini(void *handle) |
| 564 | { | 565 | { |
| 565 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 566 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 566 | struct amdgpu_ring *ring = &adev->uvd.ring; | 567 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 567 | 568 | ||
| 568 | if (RREG32(mmUVD_STATUS) != 0) | 569 | if (RREG32(mmUVD_STATUS) != 0) |
| 569 | uvd_v6_0_stop(adev); | 570 | uvd_v6_0_stop(adev); |
| @@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) | |||
| 611 | 612 | ||
| 612 | /* programm memory controller bits 0-27 */ | 613 | /* programm memory controller bits 0-27 */ |
| 613 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, | 614 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
| 614 | lower_32_bits(adev->uvd.gpu_addr)); | 615 | lower_32_bits(adev->uvd.inst->gpu_addr)); |
| 615 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, | 616 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
| 616 | upper_32_bits(adev->uvd.gpu_addr)); | 617 | upper_32_bits(adev->uvd.inst->gpu_addr)); |
| 617 | 618 | ||
| 618 | offset = AMDGPU_UVD_FIRMWARE_OFFSET; | 619 | offset = AMDGPU_UVD_FIRMWARE_OFFSET; |
| 619 | size = AMDGPU_UVD_FIRMWARE_SIZE(adev); | 620 | size = AMDGPU_UVD_FIRMWARE_SIZE(adev); |
| @@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, | |||
| 726 | */ | 727 | */ |
| 727 | static int uvd_v6_0_start(struct amdgpu_device *adev) | 728 | static int uvd_v6_0_start(struct amdgpu_device *adev) |
| 728 | { | 729 | { |
| 729 | struct amdgpu_ring *ring = &adev->uvd.ring; | 730 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 730 | uint32_t rb_bufsz, tmp; | 731 | uint32_t rb_bufsz, tmp; |
| 731 | uint32_t lmi_swap_cntl; | 732 | uint32_t lmi_swap_cntl; |
| 732 | uint32_t mp_swap_cntl; | 733 | uint32_t mp_swap_cntl; |
| @@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) | |||
| 866 | WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); | 867 | WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); |
| 867 | 868 | ||
| 868 | if (uvd_v6_0_enc_support(adev)) { | 869 | if (uvd_v6_0_enc_support(adev)) { |
| 869 | ring = &adev->uvd.ring_enc[0]; | 870 | ring = &adev->uvd.inst->ring_enc[0]; |
| 870 | WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); | 871 | WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); |
| 871 | WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); | 872 | WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); |
| 872 | WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr); | 873 | WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr); |
| 873 | WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); | 874 | WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); |
| 874 | WREG32(mmUVD_RB_SIZE, ring->ring_size / 4); | 875 | WREG32(mmUVD_RB_SIZE, ring->ring_size / 4); |
| 875 | 876 | ||
| 876 | ring = &adev->uvd.ring_enc[1]; | 877 | ring = &adev->uvd.inst->ring_enc[1]; |
| 877 | WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); | 878 | WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); |
| 878 | WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); | 879 | WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); |
| 879 | WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr); | 880 | WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr); |
| @@ -1158,10 +1159,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle) | |||
| 1158 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); | 1159 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); |
| 1159 | 1160 | ||
| 1160 | if (srbm_soft_reset) { | 1161 | if (srbm_soft_reset) { |
| 1161 | adev->uvd.srbm_soft_reset = srbm_soft_reset; | 1162 | adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; |
| 1162 | return true; | 1163 | return true; |
| 1163 | } else { | 1164 | } else { |
| 1164 | adev->uvd.srbm_soft_reset = 0; | 1165 | adev->uvd.inst->srbm_soft_reset = 0; |
| 1165 | return false; | 1166 | return false; |
| 1166 | } | 1167 | } |
| 1167 | } | 1168 | } |
| @@ -1170,7 +1171,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle) | |||
| 1170 | { | 1171 | { |
| 1171 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1172 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1172 | 1173 | ||
| 1173 | if (!adev->uvd.srbm_soft_reset) | 1174 | if (!adev->uvd.inst->srbm_soft_reset) |
| 1174 | return 0; | 1175 | return 0; |
| 1175 | 1176 | ||
| 1176 | uvd_v6_0_stop(adev); | 1177 | uvd_v6_0_stop(adev); |
| @@ -1182,9 +1183,9 @@ static int uvd_v6_0_soft_reset(void *handle) | |||
| 1182 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1183 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1183 | u32 srbm_soft_reset; | 1184 | u32 srbm_soft_reset; |
| 1184 | 1185 | ||
| 1185 | if (!adev->uvd.srbm_soft_reset) | 1186 | if (!adev->uvd.inst->srbm_soft_reset) |
| 1186 | return 0; | 1187 | return 0; |
| 1187 | srbm_soft_reset = adev->uvd.srbm_soft_reset; | 1188 | srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; |
| 1188 | 1189 | ||
| 1189 | if (srbm_soft_reset) { | 1190 | if (srbm_soft_reset) { |
| 1190 | u32 tmp; | 1191 | u32 tmp; |
| @@ -1212,7 +1213,7 @@ static int uvd_v6_0_post_soft_reset(void *handle) | |||
| 1212 | { | 1213 | { |
| 1213 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1214 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1214 | 1215 | ||
| 1215 | if (!adev->uvd.srbm_soft_reset) | 1216 | if (!adev->uvd.inst->srbm_soft_reset) |
| 1216 | return 0; | 1217 | return 0; |
| 1217 | 1218 | ||
| 1218 | mdelay(5); | 1219 | mdelay(5); |
| @@ -1238,17 +1239,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, | |||
| 1238 | 1239 | ||
| 1239 | switch (entry->src_id) { | 1240 | switch (entry->src_id) { |
| 1240 | case 124: | 1241 | case 124: |
| 1241 | amdgpu_fence_process(&adev->uvd.ring); | 1242 | amdgpu_fence_process(&adev->uvd.inst->ring); |
| 1242 | break; | 1243 | break; |
| 1243 | case 119: | 1244 | case 119: |
| 1244 | if (likely(uvd_v6_0_enc_support(adev))) | 1245 | if (likely(uvd_v6_0_enc_support(adev))) |
| 1245 | amdgpu_fence_process(&adev->uvd.ring_enc[0]); | 1246 | amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]); |
| 1246 | else | 1247 | else |
| 1247 | int_handled = false; | 1248 | int_handled = false; |
| 1248 | break; | 1249 | break; |
| 1249 | case 120: | 1250 | case 120: |
| 1250 | if (likely(uvd_v6_0_enc_support(adev))) | 1251 | if (likely(uvd_v6_0_enc_support(adev))) |
| 1251 | amdgpu_fence_process(&adev->uvd.ring_enc[1]); | 1252 | amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]); |
| 1252 | else | 1253 | else |
| 1253 | int_handled = false; | 1254 | int_handled = false; |
| 1254 | break; | 1255 | break; |
| @@ -1612,10 +1613,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = { | |||
| 1612 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) | 1613 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) |
| 1613 | { | 1614 | { |
| 1614 | if (adev->asic_type >= CHIP_POLARIS10) { | 1615 | if (adev->asic_type >= CHIP_POLARIS10) { |
| 1615 | adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs; | 1616 | adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs; |
| 1616 | DRM_INFO("UVD is enabled in VM mode\n"); | 1617 | DRM_INFO("UVD is enabled in VM mode\n"); |
| 1617 | } else { | 1618 | } else { |
| 1618 | adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs; | 1619 | adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs; |
| 1619 | DRM_INFO("UVD is enabled in physical mode\n"); | 1620 | DRM_INFO("UVD is enabled in physical mode\n"); |
| 1620 | } | 1621 | } |
| 1621 | } | 1622 | } |
| @@ -1625,7 +1626,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev) | |||
| 1625 | int i; | 1626 | int i; |
| 1626 | 1627 | ||
| 1627 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) | 1628 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) |
| 1628 | adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; | 1629 | adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; |
| 1629 | 1630 | ||
| 1630 | DRM_INFO("UVD ENC is enabled in VM mode\n"); | 1631 | DRM_INFO("UVD ENC is enabled in VM mode\n"); |
| 1631 | } | 1632 | } |
| @@ -1638,11 +1639,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { | |||
| 1638 | static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) | 1639 | static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) |
| 1639 | { | 1640 | { |
| 1640 | if (uvd_v6_0_enc_support(adev)) | 1641 | if (uvd_v6_0_enc_support(adev)) |
| 1641 | adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1; | 1642 | adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1; |
| 1642 | else | 1643 | else |
| 1643 | adev->uvd.irq.num_types = 1; | 1644 | adev->uvd.inst->irq.num_types = 1; |
| 1644 | 1645 | ||
| 1645 | adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; | 1646 | adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs; |
| 1646 | } | 1647 | } |
| 1647 | 1648 | ||
| 1648 | const struct amdgpu_ip_block_version uvd_v6_0_ip_block = | 1649 | const struct amdgpu_ip_block_version uvd_v6_0_ip_block = |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 0ca63d588670..66d4bea5fb2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
| @@ -72,7 +72,7 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring) | |||
| 72 | { | 72 | { |
| 73 | struct amdgpu_device *adev = ring->adev; | 73 | struct amdgpu_device *adev = ring->adev; |
| 74 | 74 | ||
| 75 | if (ring == &adev->uvd.ring_enc[0]) | 75 | if (ring == &adev->uvd.inst->ring_enc[0]) |
| 76 | return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); | 76 | return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); |
| 77 | else | 77 | else |
| 78 | return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); | 78 | return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); |
| @@ -106,7 +106,7 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring) | |||
| 106 | if (ring->use_doorbell) | 106 | if (ring->use_doorbell) |
| 107 | return adev->wb.wb[ring->wptr_offs]; | 107 | return adev->wb.wb[ring->wptr_offs]; |
| 108 | 108 | ||
| 109 | if (ring == &adev->uvd.ring_enc[0]) | 109 | if (ring == &adev->uvd.inst->ring_enc[0]) |
| 110 | return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); | 110 | return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); |
| 111 | else | 111 | else |
| 112 | return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); | 112 | return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); |
| @@ -144,7 +144,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring) | |||
| 144 | return; | 144 | return; |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | if (ring == &adev->uvd.ring_enc[0]) | 147 | if (ring == &adev->uvd.inst->ring_enc[0]) |
| 148 | WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, | 148 | WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, |
| 149 | lower_32_bits(ring->wptr)); | 149 | lower_32_bits(ring->wptr)); |
| 150 | else | 150 | else |
| @@ -170,8 +170,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) | |||
| 170 | 170 | ||
| 171 | r = amdgpu_ring_alloc(ring, 16); | 171 | r = amdgpu_ring_alloc(ring, 16); |
| 172 | if (r) { | 172 | if (r) { |
| 173 | DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n", | 173 | DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n", |
| 174 | ring->idx, r); | 174 | ring->me, ring->idx, r); |
| 175 | return r; | 175 | return r; |
| 176 | } | 176 | } |
| 177 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END); | 177 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END); |
| @@ -184,11 +184,11 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) | |||
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | if (i < adev->usec_timeout) { | 186 | if (i < adev->usec_timeout) { |
| 187 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | 187 | DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", |
| 188 | ring->idx, i); | 188 | ring->me, ring->idx, i); |
| 189 | } else { | 189 | } else { |
| 190 | DRM_ERROR("amdgpu: ring %d test failed\n", | 190 | DRM_ERROR("amdgpu: (%d)ring %d test failed\n", |
| 191 | ring->idx); | 191 | ring->me, ring->idx); |
| 192 | r = -ETIMEDOUT; | 192 | r = -ETIMEDOUT; |
| 193 | } | 193 | } |
| 194 | 194 | ||
| @@ -342,24 +342,24 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
| 342 | 342 | ||
| 343 | r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL); | 343 | r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL); |
| 344 | if (r) { | 344 | if (r) { |
| 345 | DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); | 345 | DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r); |
| 346 | goto error; | 346 | goto error; |
| 347 | } | 347 | } |
| 348 | 348 | ||
| 349 | r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence); | 349 | r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence); |
| 350 | if (r) { | 350 | if (r) { |
| 351 | DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); | 351 | DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r); |
| 352 | goto error; | 352 | goto error; |
| 353 | } | 353 | } |
| 354 | 354 | ||
| 355 | r = dma_fence_wait_timeout(fence, false, timeout); | 355 | r = dma_fence_wait_timeout(fence, false, timeout); |
| 356 | if (r == 0) { | 356 | if (r == 0) { |
| 357 | DRM_ERROR("amdgpu: IB test timed out.\n"); | 357 | DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me); |
| 358 | r = -ETIMEDOUT; | 358 | r = -ETIMEDOUT; |
| 359 | } else if (r < 0) { | 359 | } else if (r < 0) { |
| 360 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | 360 | DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r); |
| 361 | } else { | 361 | } else { |
| 362 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | 362 | DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx); |
| 363 | r = 0; | 363 | r = 0; |
| 364 | } | 364 | } |
| 365 | error: | 365 | error: |
| @@ -370,6 +370,7 @@ error: | |||
| 370 | static int uvd_v7_0_early_init(void *handle) | 370 | static int uvd_v7_0_early_init(void *handle) |
| 371 | { | 371 | { |
| 372 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 372 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 373 | adev->uvd.num_uvd_inst = 1; | ||
| 373 | 374 | ||
| 374 | if (amdgpu_sriov_vf(adev)) | 375 | if (amdgpu_sriov_vf(adev)) |
| 375 | adev->uvd.num_enc_rings = 1; | 376 | adev->uvd.num_enc_rings = 1; |
| @@ -390,13 +391,13 @@ static int uvd_v7_0_sw_init(void *handle) | |||
| 390 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 391 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 391 | 392 | ||
| 392 | /* UVD TRAP */ | 393 | /* UVD TRAP */ |
| 393 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq); | 394 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst->irq); |
| 394 | if (r) | 395 | if (r) |
| 395 | return r; | 396 | return r; |
| 396 | 397 | ||
| 397 | /* UVD ENC TRAP */ | 398 | /* UVD ENC TRAP */ |
| 398 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | 399 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
| 399 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq); | 400 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst->irq); |
| 400 | if (r) | 401 | if (r) |
| 401 | return r; | 402 | return r; |
| 402 | } | 403 | } |
| @@ -415,9 +416,9 @@ static int uvd_v7_0_sw_init(void *handle) | |||
| 415 | DRM_INFO("PSP loading UVD firmware\n"); | 416 | DRM_INFO("PSP loading UVD firmware\n"); |
| 416 | } | 417 | } |
| 417 | 418 | ||
| 418 | ring = &adev->uvd.ring_enc[0]; | 419 | ring = &adev->uvd.inst->ring_enc[0]; |
| 419 | rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; | 420 | rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; |
| 420 | r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, | 421 | r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc, |
| 421 | rq, NULL); | 422 | rq, NULL); |
| 422 | if (r) { | 423 | if (r) { |
| 423 | DRM_ERROR("Failed setting up UVD ENC run queue.\n"); | 424 | DRM_ERROR("Failed setting up UVD ENC run queue.\n"); |
| @@ -428,15 +429,15 @@ static int uvd_v7_0_sw_init(void *handle) | |||
| 428 | if (r) | 429 | if (r) |
| 429 | return r; | 430 | return r; |
| 430 | if (!amdgpu_sriov_vf(adev)) { | 431 | if (!amdgpu_sriov_vf(adev)) { |
| 431 | ring = &adev->uvd.ring; | 432 | ring = &adev->uvd.inst->ring; |
| 432 | sprintf(ring->name, "uvd"); | 433 | sprintf(ring->name, "uvd"); |
| 433 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); | 434 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); |
| 434 | if (r) | 435 | if (r) |
| 435 | return r; | 436 | return r; |
| 436 | } | 437 | } |
| 437 | 438 | ||
| 438 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | 439 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
| 439 | ring = &adev->uvd.ring_enc[i]; | 440 | ring = &adev->uvd.inst->ring_enc[i]; |
| 440 | sprintf(ring->name, "uvd_enc%d", i); | 441 | sprintf(ring->name, "uvd_enc%d", i); |
| 441 | if (amdgpu_sriov_vf(adev)) { | 442 | if (amdgpu_sriov_vf(adev)) { |
| 442 | ring->use_doorbell = true; | 443 | ring->use_doorbell = true; |
| @@ -449,7 +450,7 @@ static int uvd_v7_0_sw_init(void *handle) | |||
| 449 | else | 450 | else |
| 450 | ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; | 451 | ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; |
| 451 | } | 452 | } |
| 452 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); | 453 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); |
| 453 | if (r) | 454 | if (r) |
| 454 | return r; | 455 | return r; |
| 455 | } | 456 | } |
| @@ -472,10 +473,10 @@ static int uvd_v7_0_sw_fini(void *handle) | |||
| 472 | if (r) | 473 | if (r) |
| 473 | return r; | 474 | return r; |
| 474 | 475 | ||
| 475 | drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); | 476 | drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); |
| 476 | 477 | ||
| 477 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) | 478 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) |
| 478 | amdgpu_ring_fini(&adev->uvd.ring_enc[i]); | 479 | amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); |
| 479 | 480 | ||
| 480 | return amdgpu_uvd_sw_fini(adev); | 481 | return amdgpu_uvd_sw_fini(adev); |
| 481 | } | 482 | } |
| @@ -490,7 +491,7 @@ static int uvd_v7_0_sw_fini(void *handle) | |||
| 490 | static int uvd_v7_0_hw_init(void *handle) | 491 | static int uvd_v7_0_hw_init(void *handle) |
| 491 | { | 492 | { |
| 492 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 493 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 493 | struct amdgpu_ring *ring = &adev->uvd.ring; | 494 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 494 | uint32_t tmp; | 495 | uint32_t tmp; |
| 495 | int i, r; | 496 | int i, r; |
| 496 | 497 | ||
| @@ -543,7 +544,7 @@ static int uvd_v7_0_hw_init(void *handle) | |||
| 543 | } | 544 | } |
| 544 | 545 | ||
| 545 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | 546 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
| 546 | ring = &adev->uvd.ring_enc[i]; | 547 | ring = &adev->uvd.inst->ring_enc[i]; |
| 547 | ring->ready = true; | 548 | ring->ready = true; |
| 548 | r = amdgpu_ring_test_ring(ring); | 549 | r = amdgpu_ring_test_ring(ring); |
| 549 | if (r) { | 550 | if (r) { |
| @@ -569,7 +570,7 @@ done: | |||
| 569 | static int uvd_v7_0_hw_fini(void *handle) | 570 | static int uvd_v7_0_hw_fini(void *handle) |
| 570 | { | 571 | { |
| 571 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 572 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 572 | struct amdgpu_ring *ring = &adev->uvd.ring; | 573 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 573 | 574 | ||
| 574 | if (!amdgpu_sriov_vf(adev)) | 575 | if (!amdgpu_sriov_vf(adev)) |
| 575 | uvd_v7_0_stop(adev); | 576 | uvd_v7_0_stop(adev); |
| @@ -627,9 +628,9 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) | |||
| 627 | offset = 0; | 628 | offset = 0; |
| 628 | } else { | 629 | } else { |
| 629 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, | 630 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
| 630 | lower_32_bits(adev->uvd.gpu_addr)); | 631 | lower_32_bits(adev->uvd.inst->gpu_addr)); |
| 631 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, | 632 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
| 632 | upper_32_bits(adev->uvd.gpu_addr)); | 633 | upper_32_bits(adev->uvd.inst->gpu_addr)); |
| 633 | offset = size; | 634 | offset = size; |
| 634 | } | 635 | } |
| 635 | 636 | ||
| @@ -638,16 +639,16 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) | |||
| 638 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); | 639 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); |
| 639 | 640 | ||
| 640 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, | 641 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, |
| 641 | lower_32_bits(adev->uvd.gpu_addr + offset)); | 642 | lower_32_bits(adev->uvd.inst->gpu_addr + offset)); |
| 642 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, | 643 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, |
| 643 | upper_32_bits(adev->uvd.gpu_addr + offset)); | 644 | upper_32_bits(adev->uvd.inst->gpu_addr + offset)); |
| 644 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); | 645 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); |
| 645 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); | 646 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); |
| 646 | 647 | ||
| 647 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, | 648 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, |
| 648 | lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); | 649 | lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); |
| 649 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, | 650 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, |
| 650 | upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); | 651 | upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); |
| 651 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); | 652 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); |
| 652 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, | 653 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, |
| 653 | AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); | 654 | AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); |
| @@ -688,10 +689,10 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, | |||
| 688 | /* 4, set resp to zero */ | 689 | /* 4, set resp to zero */ |
| 689 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); | 690 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); |
| 690 | 691 | ||
| 691 | WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0); | 692 | WDOORBELL32(adev->uvd.inst->ring_enc[0].doorbell_index, 0); |
| 692 | adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0; | 693 | adev->wb.wb[adev->uvd.inst->ring_enc[0].wptr_offs] = 0; |
| 693 | adev->uvd.ring_enc[0].wptr = 0; | 694 | adev->uvd.inst->ring_enc[0].wptr = 0; |
| 694 | adev->uvd.ring_enc[0].wptr_old = 0; | 695 | adev->uvd.inst->ring_enc[0].wptr_old = 0; |
| 695 | 696 | ||
| 696 | /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ | 697 | /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ |
| 697 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); | 698 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); |
| @@ -742,7 +743,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) | |||
| 742 | 743 | ||
| 743 | init_table += header->uvd_table_offset; | 744 | init_table += header->uvd_table_offset; |
| 744 | 745 | ||
| 745 | ring = &adev->uvd.ring; | 746 | ring = &adev->uvd.inst->ring; |
| 746 | ring->wptr = 0; | 747 | ring->wptr = 0; |
| 747 | size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); | 748 | size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); |
| 748 | 749 | ||
| @@ -757,9 +758,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) | |||
| 757 | offset = 0; | 758 | offset = 0; |
| 758 | } else { | 759 | } else { |
| 759 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), | 760 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), |
| 760 | lower_32_bits(adev->uvd.gpu_addr)); | 761 | lower_32_bits(adev->uvd.inst->gpu_addr)); |
| 761 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), | 762 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), |
| 762 | upper_32_bits(adev->uvd.gpu_addr)); | 763 | upper_32_bits(adev->uvd.inst->gpu_addr)); |
| 763 | offset = size; | 764 | offset = size; |
| 764 | } | 765 | } |
| 765 | 766 | ||
| @@ -768,16 +769,16 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) | |||
| 768 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size); | 769 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size); |
| 769 | 770 | ||
| 770 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), | 771 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), |
| 771 | lower_32_bits(adev->uvd.gpu_addr + offset)); | 772 | lower_32_bits(adev->uvd.inst->gpu_addr + offset)); |
| 772 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), | 773 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), |
| 773 | upper_32_bits(adev->uvd.gpu_addr + offset)); | 774 | upper_32_bits(adev->uvd.inst->gpu_addr + offset)); |
| 774 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); | 775 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); |
| 775 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); | 776 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); |
| 776 | 777 | ||
| 777 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), | 778 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), |
| 778 | lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); | 779 | lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); |
| 779 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), | 780 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), |
| 780 | upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); | 781 | upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); |
| 781 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); | 782 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); |
| 782 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), | 783 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), |
| 783 | AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); | 784 | AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); |
| @@ -841,7 +842,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) | |||
| 841 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); | 842 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); |
| 842 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); | 843 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); |
| 843 | 844 | ||
| 844 | ring = &adev->uvd.ring_enc[0]; | 845 | ring = &adev->uvd.inst->ring_enc[0]; |
| 845 | ring->wptr = 0; | 846 | ring->wptr = 0; |
| 846 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); | 847 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); |
| 847 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); | 848 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); |
| @@ -874,7 +875,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) | |||
| 874 | */ | 875 | */ |
| 875 | static int uvd_v7_0_start(struct amdgpu_device *adev) | 876 | static int uvd_v7_0_start(struct amdgpu_device *adev) |
| 876 | { | 877 | { |
| 877 | struct amdgpu_ring *ring = &adev->uvd.ring; | 878 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 878 | uint32_t rb_bufsz, tmp; | 879 | uint32_t rb_bufsz, tmp; |
| 879 | uint32_t lmi_swap_cntl; | 880 | uint32_t lmi_swap_cntl; |
| 880 | uint32_t mp_swap_cntl; | 881 | uint32_t mp_swap_cntl; |
| @@ -1027,14 +1028,14 @@ static int uvd_v7_0_start(struct amdgpu_device *adev) | |||
| 1027 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, | 1028 | WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, |
| 1028 | ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); | 1029 | ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); |
| 1029 | 1030 | ||
| 1030 | ring = &adev->uvd.ring_enc[0]; | 1031 | ring = &adev->uvd.inst->ring_enc[0]; |
| 1031 | WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); | 1032 | WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); |
| 1032 | WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); | 1033 | WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); |
| 1033 | WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); | 1034 | WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); |
| 1034 | WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); | 1035 | WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); |
| 1035 | WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); | 1036 | WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); |
| 1036 | 1037 | ||
| 1037 | ring = &adev->uvd.ring_enc[1]; | 1038 | ring = &adev->uvd.inst->ring_enc[1]; |
| 1038 | WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); | 1039 | WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); |
| 1039 | WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); | 1040 | WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); |
| 1040 | WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); | 1041 | WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); |
| @@ -1162,8 +1163,8 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
| 1162 | WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD); | 1163 | WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
| 1163 | r = amdgpu_ring_alloc(ring, 3); | 1164 | r = amdgpu_ring_alloc(ring, 3); |
| 1164 | if (r) { | 1165 | if (r) { |
| 1165 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | 1166 | DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n", |
| 1166 | ring->idx, r); | 1167 | ring->me, ring->idx, r); |
| 1167 | return r; | 1168 | return r; |
| 1168 | } | 1169 | } |
| 1169 | amdgpu_ring_write(ring, | 1170 | amdgpu_ring_write(ring, |
| @@ -1178,11 +1179,11 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
| 1178 | } | 1179 | } |
| 1179 | 1180 | ||
| 1180 | if (i < adev->usec_timeout) { | 1181 | if (i < adev->usec_timeout) { |
| 1181 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | 1182 | DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", |
| 1182 | ring->idx, i); | 1183 | ring->me, ring->idx, i); |
| 1183 | } else { | 1184 | } else { |
| 1184 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | 1185 | DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n", |
| 1185 | ring->idx, tmp); | 1186 | ring->me, ring->idx, tmp); |
| 1186 | r = -EINVAL; | 1187 | r = -EINVAL; |
| 1187 | } | 1188 | } |
| 1188 | return r; | 1189 | return r; |
| @@ -1365,10 +1366,10 @@ static bool uvd_v7_0_check_soft_reset(void *handle) | |||
| 1365 | SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); | 1366 | SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); |
| 1366 | 1367 | ||
| 1367 | if (srbm_soft_reset) { | 1368 | if (srbm_soft_reset) { |
| 1368 | adev->uvd.srbm_soft_reset = srbm_soft_reset; | 1369 | adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; |
| 1369 | return true; | 1370 | return true; |
| 1370 | } else { | 1371 | } else { |
| 1371 | adev->uvd.srbm_soft_reset = 0; | 1372 | adev->uvd.inst->srbm_soft_reset = 0; |
| 1372 | return false; | 1373 | return false; |
| 1373 | } | 1374 | } |
| 1374 | } | 1375 | } |
| @@ -1377,7 +1378,7 @@ static int uvd_v7_0_pre_soft_reset(void *handle) | |||
| 1377 | { | 1378 | { |
| 1378 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1379 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1379 | 1380 | ||
| 1380 | if (!adev->uvd.srbm_soft_reset) | 1381 | if (!adev->uvd.inst->srbm_soft_reset) |
| 1381 | return 0; | 1382 | return 0; |
| 1382 | 1383 | ||
| 1383 | uvd_v7_0_stop(adev); | 1384 | uvd_v7_0_stop(adev); |
| @@ -1389,9 +1390,9 @@ static int uvd_v7_0_soft_reset(void *handle) | |||
| 1389 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1390 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1390 | u32 srbm_soft_reset; | 1391 | u32 srbm_soft_reset; |
| 1391 | 1392 | ||
| 1392 | if (!adev->uvd.srbm_soft_reset) | 1393 | if (!adev->uvd.inst->srbm_soft_reset) |
| 1393 | return 0; | 1394 | return 0; |
| 1394 | srbm_soft_reset = adev->uvd.srbm_soft_reset; | 1395 | srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; |
| 1395 | 1396 | ||
| 1396 | if (srbm_soft_reset) { | 1397 | if (srbm_soft_reset) { |
| 1397 | u32 tmp; | 1398 | u32 tmp; |
| @@ -1419,7 +1420,7 @@ static int uvd_v7_0_post_soft_reset(void *handle) | |||
| 1419 | { | 1420 | { |
| 1420 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1421 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1421 | 1422 | ||
| 1422 | if (!adev->uvd.srbm_soft_reset) | 1423 | if (!adev->uvd.inst->srbm_soft_reset) |
| 1423 | return 0; | 1424 | return 0; |
| 1424 | 1425 | ||
| 1425 | mdelay(5); | 1426 | mdelay(5); |
| @@ -1444,14 +1445,14 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev, | |||
| 1444 | DRM_DEBUG("IH: UVD TRAP\n"); | 1445 | DRM_DEBUG("IH: UVD TRAP\n"); |
| 1445 | switch (entry->src_id) { | 1446 | switch (entry->src_id) { |
| 1446 | case 124: | 1447 | case 124: |
| 1447 | amdgpu_fence_process(&adev->uvd.ring); | 1448 | amdgpu_fence_process(&adev->uvd.inst->ring); |
| 1448 | break; | 1449 | break; |
| 1449 | case 119: | 1450 | case 119: |
| 1450 | amdgpu_fence_process(&adev->uvd.ring_enc[0]); | 1451 | amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]); |
| 1451 | break; | 1452 | break; |
| 1452 | case 120: | 1453 | case 120: |
| 1453 | if (!amdgpu_sriov_vf(adev)) | 1454 | if (!amdgpu_sriov_vf(adev)) |
| 1454 | amdgpu_fence_process(&adev->uvd.ring_enc[1]); | 1455 | amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]); |
| 1455 | break; | 1456 | break; |
| 1456 | default: | 1457 | default: |
| 1457 | DRM_ERROR("Unhandled interrupt: %d %d\n", | 1458 | DRM_ERROR("Unhandled interrupt: %d %d\n", |
| @@ -1719,7 +1720,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { | |||
| 1719 | 1720 | ||
| 1720 | static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev) | 1721 | static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev) |
| 1721 | { | 1722 | { |
| 1722 | adev->uvd.ring.funcs = &uvd_v7_0_ring_vm_funcs; | 1723 | adev->uvd.inst->ring.funcs = &uvd_v7_0_ring_vm_funcs; |
| 1723 | DRM_INFO("UVD is enabled in VM mode\n"); | 1724 | DRM_INFO("UVD is enabled in VM mode\n"); |
| 1724 | } | 1725 | } |
| 1725 | 1726 | ||
| @@ -1728,7 +1729,7 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev) | |||
| 1728 | int i; | 1729 | int i; |
| 1729 | 1730 | ||
| 1730 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) | 1731 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) |
| 1731 | adev->uvd.ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; | 1732 | adev->uvd.inst->ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; |
| 1732 | 1733 | ||
| 1733 | DRM_INFO("UVD ENC is enabled in VM mode\n"); | 1734 | DRM_INFO("UVD ENC is enabled in VM mode\n"); |
| 1734 | } | 1735 | } |
| @@ -1740,8 +1741,8 @@ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = { | |||
| 1740 | 1741 | ||
| 1741 | static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev) | 1742 | static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev) |
| 1742 | { | 1743 | { |
| 1743 | adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1; | 1744 | adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1; |
| 1744 | adev->uvd.irq.funcs = &uvd_v7_0_irq_funcs; | 1745 | adev->uvd.inst->irq.funcs = &uvd_v7_0_irq_funcs; |
| 1745 | } | 1746 | } |
| 1746 | 1747 | ||
| 1747 | const struct amdgpu_ip_block_version uvd_v7_0_ip_block = | 1748 | const struct amdgpu_ip_block_version uvd_v7_0_ip_block = |
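Because every register path, ring pointer, and log message in uvd_v7_0.c now carries its instance (the (%d)/ring->me tags added to the DRM_ERROR/DRM_DEBUG strings above), later multi-instance work largely reduces to looping over num_uvd_inst. Purely as an illustration, and not part of this patch, a helper that flags all rings ready might look like the sketch below; uvd_v7_0_mark_rings_ready is an invented name, and it assumes inst is an array sized by num_uvd_inst as suggested by the conversion.

	static void uvd_v7_0_mark_rings_ready(struct amdgpu_device *adev, bool ready)
	{
		int i, j;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			/* decode ring of instance i */
			adev->uvd.inst[i].ring.ready = ready;
			/* its encode rings */
			for (j = 0; j < adev->uvd.num_enc_rings; ++j)
				adev->uvd.inst[i].ring_enc[j].ready = ready;
		}
	}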
