aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJames Zhu <James.Zhu@amd.com>2018-09-21 14:35:32 -0400
committerAlex Deucher <alexander.deucher@amd.com>2018-09-26 22:09:24 -0400
commit63e9bb1d98eff3584190295acb231e631ba5e59e (patch)
tree3276e609c62a4146483e2a8ba2adf5100178564b
parent03d6e3aac81634a91dd2790f8c199ffb3927fe3c (diff)
drm/amdgpu: Add DPG mode support for vcn 1.0
Add DPG mode start/stop/mc_resume/clock_gating to support vcn 1.0 DPG mode. Signed-off-by: James Zhu <James.Zhu@amd.com> Reviewed-by: Alex Deucher <alexander.deucher@amd.com> Reviewed-by: Huang Rui <ray.huang@amd.com> Acked-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c319
1 files changed, 313 insertions, 6 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 2cde0b4046db..63d7f97e81b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -198,7 +198,8 @@ static int vcn_v1_0_hw_init(void *handle)
198 198
199done: 199done:
200 if (!r) 200 if (!r)
201 DRM_INFO("VCN decode and encode initialized successfully.\n"); 201 DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
202 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
202 203
203 return r; 204 return r;
204} 205}
@@ -266,13 +267,13 @@ static int vcn_v1_0_resume(void *handle)
266} 267}
267 268
268/** 269/**
269 * vcn_v1_0_mc_resume - memory controller programming 270 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
270 * 271 *
271 * @adev: amdgpu_device pointer 272 * @adev: amdgpu_device pointer
272 * 273 *
273 * Let the VCN memory controller know its offsets 274 * Let the VCN memory controller know its offsets
274 */ 275 */
275static void vcn_v1_0_mc_resume(struct amdgpu_device *adev) 276static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
276{ 277{
277 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); 278 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
278 uint32_t offset; 279 uint32_t offset;
@@ -319,6 +320,65 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
319 adev->gfx.config.gb_addr_config); 320 adev->gfx.config.gb_addr_config);
320} 321}
321 322
323static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
324{
325 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
326 uint32_t offset;
327
328 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
329 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
330 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
331 0xFFFFFFFF, 0);
332 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
333 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
334 0xFFFFFFFF, 0);
335 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
336 0xFFFFFFFF, 0);
337 offset = 0;
338 } else {
339 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
340 lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
341 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
342 upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
343 offset = size;
344 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
345 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
346 }
347
348 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
349
350 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
351 lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
352 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
353 upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
354 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
355 0xFFFFFFFF, 0);
356 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE,
357 0xFFFFFFFF, 0);
358
359 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
360 lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE),
361 0xFFFFFFFF, 0);
362 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
363 upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE),
364 0xFFFFFFFF, 0);
365 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
366 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
367 AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40),
368 0xFFFFFFFF, 0);
369
370 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
371 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
372 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
373 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
374 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
375 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
376 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
377 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
378 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
379 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
380}
381
322/** 382/**
323 * vcn_v1_0_disable_clock_gating - disable VCN clock gating 383 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
324 * 384 *
@@ -519,6 +579,62 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
519 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); 579 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
520} 580}
521 581
582static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
583{
584 uint32_t reg_data = 0;
585
586 /* disable JPEG CGC */
587 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
588 reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
589 else
590 reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
591 reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
592 reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
593 WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
594
595 WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
596
597 /* enable sw clock gating control */
598 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
599 reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
600 else
601 reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
602 reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
603 reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
604 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
605
606 reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
607 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
608 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
609 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
610 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
611 UVD_CGC_CTRL__SYS_MODE_MASK |
612 UVD_CGC_CTRL__UDEC_MODE_MASK |
613 UVD_CGC_CTRL__MPEG2_MODE_MASK |
614 UVD_CGC_CTRL__REGS_MODE_MASK |
615 UVD_CGC_CTRL__RBC_MODE_MASK |
616 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
617 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
618 UVD_CGC_CTRL__IDCT_MODE_MASK |
619 UVD_CGC_CTRL__MPRD_MODE_MASK |
620 UVD_CGC_CTRL__MPC_MODE_MASK |
621 UVD_CGC_CTRL__LBSI_MODE_MASK |
622 UVD_CGC_CTRL__LRBBM_MODE_MASK |
623 UVD_CGC_CTRL__WCB_MODE_MASK |
624 UVD_CGC_CTRL__VCPU_MODE_MASK |
625 UVD_CGC_CTRL__SCPU_MODE_MASK);
626 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
627
628 /* turn off clock gating */
629 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
630
631 /* turn on SUVD clock gating */
632 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
633
634 /* turn on sw mode in UVD_SUVD_CGC_CTRL */
635 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
636}
637
522static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev) 638static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
523{ 639{
524 uint32_t data = 0; 640 uint32_t data = 0;
@@ -614,7 +730,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
614 * 730 *
615 * Setup and start the VCN block 731 * Setup and start the VCN block
616 */ 732 */
617static int vcn_v1_0_start(struct amdgpu_device *adev) 733static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
618{ 734{
619 struct amdgpu_ring *ring = &adev->vcn.ring_dec; 735 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
620 uint32_t rb_bufsz, tmp; 736 uint32_t rb_bufsz, tmp;
@@ -628,7 +744,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
628 /* disable clock gating */ 744 /* disable clock gating */
629 vcn_v1_0_disable_clock_gating(adev); 745 vcn_v1_0_disable_clock_gating(adev);
630 746
631 vcn_v1_0_mc_resume(adev); 747 vcn_v1_0_mc_resume_spg_mode(adev);
632 748
633 /* disable interrupt */ 749 /* disable interrupt */
634 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, 750 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
@@ -799,6 +915,170 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
799 return 0; 915 return 0;
800} 916}
801 917
918static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
919{
920 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
921 uint32_t rb_bufsz, tmp, reg_data;
922 uint32_t lmi_swap_cntl;
923
924 /* disable byte swapping */
925 lmi_swap_cntl = 0;
926
927 vcn_1_0_enable_static_power_gating(adev);
928
929 /* enable dynamic power gating mode */
930 reg_data = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
931 reg_data |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
932 reg_data |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
933 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data);
934
935 /* enable clock gating */
936 vcn_v1_0_clock_gating_dpg_mode(adev, 0);
937
938 /* enable VCPU clock */
939 reg_data = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
940 reg_data |= UVD_VCPU_CNTL__CLK_EN_MASK;
941 reg_data |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
942 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, reg_data, 0xFFFFFFFF, 0);
943
944 /* disable interupt */
945 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
946 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
947
948 /* stall UMC and register bus before resetting VCPU */
949 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
950 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
951
952 /* put LMI, VCPU, RBC etc... into reset */
953 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET,
954 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
955 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
956 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
957 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
958 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
959 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
960 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
961 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
962 0xFFFFFFFF, 0);
963
964 /* initialize VCN memory controller */
965 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
966 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
967 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
968 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
969 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
970 UVD_LMI_CTRL__REQ_MODE_MASK |
971 0x00100000L, 0xFFFFFFFF, 0);
972
973#ifdef __BIG_ENDIAN
974 /* swap (8 in 32) RB and IB */
975 lmi_swap_cntl = 0xa;
976#endif
977 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
978
979 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040, 0xFFFFFFFF, 0);
980 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0, 0xFFFFFFFF, 0);
981 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040, 0xFFFFFFFF, 0);
982 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0, 0xFFFFFFFF, 0);
983 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_ALU, 0, 0xFFFFFFFF, 0);
984 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX, 0x88, 0xFFFFFFFF, 0);
985
986 vcn_v1_0_mc_resume_dpg_mode(adev);
987
988 /* take all subblocks out of reset, except VCPU */
989 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET,
990 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, 0xFFFFFFFF, 0);
991
992 /* enable VCPU clock */
993 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL,
994 UVD_VCPU_CNTL__CLK_EN_MASK, 0xFFFFFFFF, 0);
995
996 /* enable UMC */
997 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
998 0, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
999
1000 /* boot up the VCPU */
1001 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
1002
1003 /* enable master interrupt */
1004 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
1005 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1006 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), 0);
1007
1008 vcn_v1_0_clock_gating_dpg_mode(adev, 1);
1009 /* setup mmUVD_LMI_CTRL */
1010 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
1011 (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1012 UVD_LMI_CTRL__CRC_RESET_MASK |
1013 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1014 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1015 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1016 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1017 0x00100000L), 0xFFFFFFFF, 1);
1018
1019 tmp = adev->gfx.config.gb_addr_config;
1020 /* setup VCN global tiling registers */
1021 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1022 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1023
1024 /* enable System Interrupt for JRBC */
1025 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN,
1026 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);
1027
1028 /* force RBC into idle state */
1029 rb_bufsz = order_base_2(ring->ring_size);
1030 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1031 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1032 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1033 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1034 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1035 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1036 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
1037
1038 /* set the write pointer delay */
1039 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
1040
1041 /* set the wb address */
1042 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
1043 (upper_32_bits(ring->gpu_addr) >> 2));
1044
1045 /* programm the RB_BASE for ring buffer */
1046 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1047 lower_32_bits(ring->gpu_addr));
1048 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1049 upper_32_bits(ring->gpu_addr));
1050
1051 /* Initialize the ring buffer's read and write pointers */
1052 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
1053
1054 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1055 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1056 lower_32_bits(ring->wptr));
1057
1058 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
1059 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1060
1061 /* initialize wptr */
1062 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
1063
1064 /* copy patch commands to the jpeg ring */
1065 vcn_v1_0_jpeg_ring_set_patch_ring(ring,
1066 (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
1067
1068 return 0;
1069}
1070
1071static int vcn_v1_0_start(struct amdgpu_device *adev)
1072{
1073 int r;
1074
1075 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1076 r = vcn_v1_0_start_dpg_mode(adev);
1077 else
1078 r = vcn_v1_0_start_spg_mode(adev);
1079 return r;
1080}
1081
802/** 1082/**
803 * vcn_v1_0_stop - stop VCN block 1083 * vcn_v1_0_stop - stop VCN block
804 * 1084 *
@@ -806,7 +1086,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
806 * 1086 *
807 * stop the VCN block 1087 * stop the VCN block
808 */ 1088 */
809static int vcn_v1_0_stop(struct amdgpu_device *adev) 1089static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
810{ 1090{
811 /* force RBC into idle state */ 1091 /* force RBC into idle state */
812 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101); 1092 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
@@ -836,6 +1116,33 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
836 return 0; 1116 return 0;
837} 1117}
838 1118
1119static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
1120{
1121 int ret_code;
1122
1123 /* Wait for power status to be 1 */
1124 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
1125 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1126
1127 /* disable dynamic power gating mode */
1128 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
1129 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1130
1131 return 0;
1132}
1133
1134static int vcn_v1_0_stop(struct amdgpu_device *adev)
1135{
1136 int r;
1137
1138 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1139 r = vcn_v1_0_stop_dpg_mode(adev);
1140 else
1141 r = vcn_v1_0_stop_spg_mode(adev);
1142
1143 return r;
1144}
1145
839static bool vcn_v1_0_is_idle(void *handle) 1146static bool vcn_v1_0_is_idle(void *handle)
840{ 1147{
841 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1148 struct amdgpu_device *adev = (struct amdgpu_device *)handle;