author		Alex Deucher <alexander.deucher@amd.com>	2018-07-25 16:11:34 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2018-07-27 10:07:44 -0400
commit		f1e582ebfd703ea01dc4caf4d339b7c84ec3ff29 (patch)
tree		56bfd3cd4840cf6dee41cbbf77d468823065872b /drivers/gpu/drm/amd
parent		d04cc604a66b07eff8fce824fb6c0fdc0875d2e3 (diff)
drm/amdgpu: implement harvesting support for UVD 7.2 (v3)
Properly handle cases where one or more instances of the IP block
may be harvested.

v2: make sure ip_num_rings is initialized in amdgpu_queue_mgr.c
v3: rebase on Christian's UVD changes, drop unused var

Reviewed-by: James Zhu <James.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
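The recurring idiom in this patch is a harvest check at the top of every per-instance loop: bit i of harvest_config set means instance i is fused off and must be skipped. A minimal user-space sketch of that pattern follows (struct uvd_state, count_active and the sample values are illustrative stand-ins, not driver code):

#include <stdio.h>

#define MAX_INSTANCES 2

/* Hypothetical stand-in for the adev->uvd fields the patch touches. */
struct uvd_state {
	unsigned num_uvd_inst;   /* instances physically present */
	unsigned harvest_config; /* bit i set => instance i is harvested */
};

/* Count usable instances, mirroring the amdgpu_queue_mgr.c loop. */
static unsigned count_active(const struct uvd_state *uvd)
{
	unsigned i, active = 0;

	for (i = 0; i < uvd->num_uvd_inst; i++) {
		if (!(uvd->harvest_config & (1u << i)))
			active++;
	}
	return active;
}

int main(void)
{
	/* Example: instance 1 fused off, as on a harvested part. */
	struct uvd_state uvd = { .num_uvd_inst = MAX_INSTANCES,
				 .harvest_config = 1u << 1 };
	unsigned i;

	for (i = 0; i < uvd.num_uvd_inst; i++) {
		/* Same skip pattern the patch adds to each loop below. */
		if (uvd.harvest_config & (1u << i)) {
			printf("UVD%u: harvested, skipped\n", i);
			continue;
		}
		printf("UVD%u: initialized\n", i);
	}
	printf("%u of %u instances usable\n",
	       count_active(&uvd), uvd.num_uvd_inst);
	return 0;
}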
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c	17
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c	13
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c	11
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h	4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c	56
5 files changed, 89 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c7dce14fd47d..dd2132fa2b89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_crtc *crtc;
 	uint32_t ui32 = 0;
 	uint64_t ui64 = 0;
-	int i, found;
+	int i, j, found;
 	int ui32_size = sizeof(ui32);
 
 	if (!info->return_size || !info->return_pointer)
@@ -348,7 +348,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_HW_IP_UVD:
 		type = AMD_IP_BLOCK_TYPE_UVD;
-		ring_mask |= adev->uvd.inst[0].ring.ready;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (adev->uvd.harvest_config & (1 << i))
+				continue;
+			ring_mask |= adev->uvd.inst[i].ring.ready;
+		}
 		ib_start_alignment = 64;
 		ib_size_alignment = 64;
 		break;
@@ -361,9 +365,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
 		type = AMD_IP_BLOCK_TYPE_UVD;
-		for (i = 0; i < adev->uvd.num_enc_rings; i++)
-			ring_mask |=
-			adev->uvd.inst[0].ring_enc[i].ready << i;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (adev->uvd.harvest_config & (1 << i))
+				continue;
+			for (j = 0; j < adev->uvd.num_enc_rings; j++)
+				ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+		}
 		ib_start_alignment = 64;
 		ib_size_alignment = 64;
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index d8357290ad09..a172bba32b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -214,7 +214,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 			 u32 hw_ip, u32 instance, u32 ring,
 			 struct amdgpu_ring **out_ring)
 {
-	int r, ip_num_rings;
+	int i, r, ip_num_rings = 0;
 	struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
 
 	if (!adev || !mgr || !out_ring)
@@ -243,14 +243,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 		ip_num_rings = adev->sdma.num_instances;
 		break;
 	case AMDGPU_HW_IP_UVD:
-		ip_num_rings = adev->uvd.num_uvd_inst;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (!(adev->uvd.harvest_config & (1 << i)))
+				ip_num_rings++;
+		}
 		break;
 	case AMDGPU_HW_IP_VCE:
 		ip_num_rings = adev->vce.num_rings;
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (!(adev->uvd.harvest_config & (1 << i)))
+				ip_num_rings++;
+		}
 		ip_num_rings =
-			adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
+			adev->uvd.num_enc_rings * ip_num_rings;
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		ip_num_rings = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index fca86d71fafc..632fa5980ff4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -255,7 +255,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 				AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
 				&adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
@@ -308,6 +309,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 	drm_sched_entity_destroy(&adev->uvd.entity);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		kfree(adev->uvd.inst[j].saved_bo);
 
 		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
@@ -343,6 +346,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	}
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
 
@@ -365,6 +370,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		if (adev->uvd.inst[i].vcpu_bo == NULL)
 			return -EINVAL;
 
@@ -1159,6 +1166,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 	unsigned fences = 0, i, j;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
 		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
 			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 66872286ab12..33c5f806f925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -48,6 +48,9 @@ struct amdgpu_uvd_inst {
 	uint32_t		srbm_soft_reset;
 };
 
+#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
+#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
+
 struct amdgpu_uvd {
 	const struct firmware	*fw;	/* UVD firmware */
 	unsigned		fw_version;
@@ -61,6 +64,7 @@ struct amdgpu_uvd {
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_sched_entity	entity;
 	struct delayed_work	idle_work;
+	unsigned		harvest_config;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index d74c1b242667..5fab3560a71d 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -41,6 +41,12 @@
 #include "mmhub/mmhub_1_0_sh_mask.h"
 #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
 
+#define mmUVD_PG0_CC_UVD_HARVESTING				0x00c7
+#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX			1
+//UVD_PG0_CC_UVD_HARVESTING
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT		0x1
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK		0x00000002L
+
 #define UVD7_MAX_HW_INSTANCES_VEGA20	2
 
 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -370,10 +376,25 @@ error:
 static int uvd_v7_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	if (adev->asic_type == CHIP_VEGA20)
+
+	if (adev->asic_type == CHIP_VEGA20) {
+		u32 harvest;
+		int i;
+
 		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
-	else
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
+			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
+				adev->uvd.harvest_config |= 1 << i;
+			}
+		}
+		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
+						 AMDGPU_UVD_HARVEST_UVD1))
+			/* both instances are harvested, disable the block */
+			return -ENOENT;
+	} else {
 		adev->uvd.num_uvd_inst = 1;
+	}
 
 	if (amdgpu_sriov_vf(adev))
 		adev->uvd.num_enc_rings = 1;
@@ -393,6 +414,8 @@ static int uvd_v7_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		/* UVD TRAP */
 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
 		if (r)
@@ -425,6 +448,8 @@ static int uvd_v7_0_sw_init(void *handle)
 		return r;
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		if (!amdgpu_sriov_vf(adev)) {
 			ring = &adev->uvd.inst[j].ring;
 			sprintf(ring->name, "uvd<%d>", j);
@@ -472,6 +497,8 @@ static int uvd_v7_0_sw_fini(void *handle)
 		return r;
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
 	}
@@ -500,6 +527,8 @@ static int uvd_v7_0_hw_init(void *handle)
 		goto done;
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		ring = &adev->uvd.inst[j].ring;
 
 		if (!amdgpu_sriov_vf(adev)) {
@@ -579,8 +608,11 @@ static int uvd_v7_0_hw_fini(void *handle)
 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 	}
 
-	for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
+	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		adev->uvd.inst[i].ring.ready = false;
+	}
 
 	return 0;
 }
@@ -623,6 +655,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
@@ -695,6 +729,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
 		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
 		adev->uvd.inst[i].ring_enc[0].wptr = 0;
@@ -751,6 +787,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 	init_table += header->uvd_table_offset;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		ring = &adev->uvd.inst[i].ring;
 		ring->wptr = 0;
 		size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
@@ -890,6 +928,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
 	int i, j, k, r;
 
 	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+		if (adev->uvd.harvest_config & (1 << k))
+			continue;
 		/* disable DPG */
 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
 				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
@@ -902,6 +942,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
 	uvd_v7_0_mc_resume(adev);
 
 	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+		if (adev->uvd.harvest_config & (1 << k))
+			continue;
 		ring = &adev->uvd.inst[k].ring;
 		/* disable clock gating */
 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
@@ -1069,6 +1111,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
 	uint8_t i = 0;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		/* force RBC into idle state */
 		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
 
@@ -1785,6 +1829,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
 		adev->uvd.inst[i].ring.me = i;
 		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
@@ -1796,6 +1842,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 	int i, j;
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
 			adev->uvd.inst[j].ring_enc[i].me = j;
@@ -1815,6 +1863,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
 		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
 	}