author     Dave Airlie <airlied@redhat.com>  2018-05-15 18:21:51 -0400
committer  Dave Airlie <airlied@redhat.com>  2018-05-15 18:31:29 -0400
commit     95d2c3e15da613afd53b4b8f2cdb352dc7d12221 (patch)
tree       b407c31c3ad2a7e133e61ba36edb31274492fb84
parent     b8a71080ad288eb3fe42f101e64526cdd2823f93 (diff)
parent     8344c53f57057b42a5da87e9557c40fcda18fb7a (diff)
Merge branch 'drm-next-4.18' of git://people.freedesktop.org/~agd5f/linux into drm-next
Main changes for 4.18.  I'd like to do a separate pull for vega20 later
this week or next.

Highlights:
- Reserve pre-OS scanout buffer during init for seamless transition from
  console to driver
- VEGAM support
- Improved GPU scheduler documentation
- Initial gfxoff support for raven
- SR-IOV fixes
- Default to non-AGP on PowerPC for radeon
- Fine grained clock voltage control for vega10
- Power profiles for vega10
- Further clean up of powerplay/driver interface
- Underlay fixes
- Display link bandwidth updates
- Gamma fixes
- Scatter/Gather display support on CZ/ST
- Misc bug fixes and clean ups

[airlied: fixup v3d vs scheduler API change]

Link: https://patchwork.freedesktop.org/patch/msgid/20180515185450.1113-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c  244
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c  195
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c  58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  120
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  365
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  110
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c  52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  114
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v1_7.c  112
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v1_7.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h)  38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  267
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c  25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h  67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c  63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c  77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15d.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  130
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig  14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  225
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  20
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c  158
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c  86
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c  22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c  26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h  579
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c  260
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c  74
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c  112
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c  38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c  63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c  30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c  272
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c  22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h  30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c  59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h  28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h  15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c  103
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c  68
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c  33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c  95
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c  33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c  35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c  49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c  36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h  76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c  90
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c  221
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c  187
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h  79
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  288
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c  94
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  303
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c  1490
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h  524
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c  138
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h  64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h  18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h  36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/transform.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/reg_helper.h  56
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq_types.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h  16
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_types.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/include/fixed31_32.h  40
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h  64
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c  382
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.h  48
-rw-r--r--  drivers/gpu/drm/amd/display/modules/stats/stats.c  65
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h  20
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h  19
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h  8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h  26
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h  33
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h  48
-rw-r--r--  drivers/gpu/drm/amd/include/atombios.h  7
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h  170
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h  170
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h  13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c  489
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c  22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c  90
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c  204
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h  15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c  99
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c  39
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c  4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  186
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c  16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  378
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c  206
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c  37
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c  98
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h  27
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c  951
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h  26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c  121
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c  107
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c  95
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c  7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c  37
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h  6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h  4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu75.h  760
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h  886
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c  12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c  24
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c  9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c  25
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c  39
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c  40
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c  19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c  52
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c  56
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c  2382
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h  75
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c  2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c  32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c  10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  53
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c  5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c  20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c  20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h  8
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler.c  76
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler_trace.h (renamed from include/drm/gpu_scheduler_trace.h)  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  51
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c  11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c  1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c  2
-rw-r--r--  include/drm/amd_asic_type.h  1
-rw-r--r--  include/drm/gpu_scheduler.h  55
-rw-r--r--  include/uapi/drm/amdgpu_drm.h  20
259 files changed, 14419 insertions, 4589 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index f3002020df6c..68e9f584c570 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -64,6 +64,10 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
 amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
 
+# add DF block
+amdgpu-y += \
+	df_v1_7.o
+
 # add GMC block
 amdgpu-y += \
 	gmc_v7_0.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c8b605f3dc05..03a2c0be0bf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -129,6 +129,7 @@ extern int amdgpu_lbpw;
 extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
+extern uint amdgpu_smu_memory_pool_size;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
@@ -137,6 +138,7 @@ extern int amdgpu_si_support;
 extern int amdgpu_cik_support;
 #endif
 
+#define AMDGPU_SG_THRESHOLD			(256*1024*1024)
 #define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000 /* 100 ms */
@@ -222,10 +224,10 @@ enum amdgpu_kiq_irq {
 	AMDGPU_CP_KIQ_IRQ_LAST
 };
 
-int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_clockgating_state state);
-int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_powergating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_powergating_state state);
 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
@@ -681,6 +683,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
 
@@ -771,9 +775,18 @@ struct amdgpu_rlc {
 	u32 starting_offsets_start;
 	u32 reg_list_format_size_bytes;
 	u32 reg_list_size_bytes;
+	u32 reg_list_format_direct_reg_list_length;
+	u32 save_restore_list_cntl_size_bytes;
+	u32 save_restore_list_gpm_size_bytes;
+	u32 save_restore_list_srm_size_bytes;
 
 	u32 *register_list_format;
 	u32 *register_restore;
+	u8 *save_restore_list_cntl;
+	u8 *save_restore_list_gpm;
+	u8 *save_restore_list_srm;
+
+	bool is_rlc_v2_1;
 };
 
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
@@ -867,6 +880,8 @@ struct amdgpu_gfx_config {
 
 	/* gfx configure feature */
 	uint32_t double_offchip_lds_buf;
+	/* cached value of DB_DEBUG2 */
+	uint32_t db_debug2;
 };
 
 struct amdgpu_cu_info {
@@ -938,6 +953,12 @@ struct amdgpu_gfx {
 	uint32_t ce_feature_version;
 	uint32_t pfp_feature_version;
 	uint32_t rlc_feature_version;
+	uint32_t rlc_srlc_fw_version;
+	uint32_t rlc_srlc_feature_version;
+	uint32_t rlc_srlg_fw_version;
+	uint32_t rlc_srlg_feature_version;
+	uint32_t rlc_srls_fw_version;
+	uint32_t rlc_srls_feature_version;
 	uint32_t mec_feature_version;
 	uint32_t mec2_feature_version;
 	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
@@ -1204,6 +1225,8 @@ struct amdgpu_asic_funcs {
 	/* invalidate hdp read cache */
 	void (*invalidate_hdp)(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring);
+	/* check if the asic needs a full reset of if soft reset will work */
+	bool (*need_full_reset)(struct amdgpu_device *adev);
 };
 
 /*
@@ -1368,7 +1391,17 @@ struct amdgpu_nbio_funcs {
 	void (*detect_hw_virt)(struct amdgpu_device *adev);
 };
 
-
+struct amdgpu_df_funcs {
+	void (*init)(struct amdgpu_device *adev);
+	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+				      bool enable);
+	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
+	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
+	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+						 bool enable);
+	void (*get_clockgating_state)(struct amdgpu_device *adev,
+				      u32 *flags);
+};
 /* Define the HW IP blocks will be used in driver , add more if necessary */
 enum amd_hw_ip_block_type {
 	GC_HWIP = 1,
@@ -1398,6 +1431,7 @@ enum amd_hw_ip_block_type {
 struct amd_powerplay {
 	void *pp_handle;
 	const struct amd_pm_funcs *pp_funcs;
+	uint32_t pp_feature;
 };
 
 #define AMDGPU_RESET_MAGIC_NUM 64
@@ -1590,6 +1624,7 @@ struct amdgpu_device {
 	uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
 	const struct amdgpu_nbio_funcs *nbio_funcs;
+	const struct amdgpu_df_funcs *df_funcs;
 
 	/* delayed work_func for deferring clockgating during resume */
 	struct delayed_work late_init_work;
@@ -1764,6 +1799,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
 #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
 #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
@@ -1790,6 +1826,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
 #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
+#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
 #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index a29362f9ef41..03ee36739efe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -290,12 +290,11 @@ static int acp_hw_init(void *handle)
 	else if (r)
 		return r;
 
-	r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
-				 0x5289, 0, &acp_base);
-	if (r == -ENODEV)
-		return 0;
-	else if (r)
-		return r;
+	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
+		return -EINVAL;
+
+	acp_base = adev->rmmio_base;
+
 	if (adev->asic_type != CHIP_STONEY) {
 		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
 		if (adev->acp.acp_genpd == NULL)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index cd0e8f192e6a..bd36ee9f7e6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -243,13 +243,19 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
+	struct amdgpu_bo_param bp;
 	int r;
 	uint64_t gpu_addr_tmp = 0;
 	void *cpu_ptr_tmp = NULL;
 
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
-			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
-			     NULL, &bo);
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
+	r = amdgpu_bo_create(adev, &bp, &bo);
 	if (r) {
 		dev_err(adev->dev,
 			"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 5296e24fd662..72ab2b1ffe75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1143,6 +1143,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
 	uint64_t user_addr = 0;
 	struct amdgpu_bo *bo;
+	struct amdgpu_bo_param bp;
 	int byte_align;
 	u32 domain, alloc_domain;
 	u64 alloc_flags;
@@ -1215,8 +1216,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
 			va, size, domain_string(alloc_domain));
 
-	ret = amdgpu_bo_create(adev, size, byte_align,
-			       alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = byte_align;
+	bp.domain = alloc_domain;
+	bp.flags = alloc_flags;
+	bp.type = ttm_bo_type_device;
+	bp.resv = NULL;
+	ret = amdgpu_bo_create(adev, &bp, &bo);
 	if (ret) {
 		pr_debug("Failed to create BO on domain %s. ret %d\n",
 				domain_string(alloc_domain), ret);
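
The two call sites above follow the same conversion: amdgpu_bo_create() now
takes a single struct amdgpu_bo_param rather than a long positional argument
list, so future allocation parameters can be added without touching every
caller. A condensed sketch of the new calling convention (a minimal
illustration using only the fields visible in these hunks, not a complete
list of the struct's members):

	struct amdgpu_bo_param bp;
	struct amdgpu_bo *bo = NULL;
	int r;

	memset(&bp, 0, sizeof(bp));		/* zero unused fields for safe defaults */
	bp.size = size;				/* allocation size in bytes */
	bp.byte_align = PAGE_SIZE;		/* placement alignment */
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;	/* where the BO may be placed */
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;		/* kernel-internal, not user-visible */
	bp.resv = NULL;				/* no shared reservation object */
	r = amdgpu_bo_create(adev, &bp, &bo);	/* all parameters travel in bp */
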
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 1ae5ae8c45a4..1bcb2b247335 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -550,7 +550,7 @@ static int amdgpu_atpx_init(void)
  * look up whether we are the integrated or discrete GPU (all asics).
  * Returns the client id.
  */
-static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
+static enum vga_switcheroo_client_id amdgpu_atpx_get_client_id(struct pci_dev *pdev)
 {
 	if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
 		return VGA_SWITCHEROO_IGD;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 02b849be083b..19cfff31f2e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -75,13 +75,20 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 {
 	struct amdgpu_bo *dobj = NULL;
 	struct amdgpu_bo *sobj = NULL;
+	struct amdgpu_bo_param bp;
 	uint64_t saddr, daddr;
 	int r, n;
 	int time;
 
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = sdomain;
+	bp.flags = 0;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
 	n = AMDGPU_BENCHMARK_ITERATIONS;
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE,sdomain, 0,
-			     ttm_bo_type_kernel, NULL, &sobj);
+	r = amdgpu_bo_create(adev, &bp, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -93,8 +100,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
-			     ttm_bo_type_kernel, NULL, &dobj);
+	bp.domain = ddomain;
+	r = amdgpu_bo_create(adev, &bp, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 71a57b2f7f04..5b3d3bf5b599 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -23,7 +23,6 @@
  */
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/pci.h>
 #include <drm/drmP.h>
 #include <linux/firmware.h>
 #include <drm/amdgpu_drm.h>
@@ -109,121 +108,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
 	WARN(1, "Invalid indirect register space");
 }
 
-static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
-				       enum cgs_resource_type resource_type,
-				       uint64_t size,
-				       uint64_t offset,
-				       uint64_t *resource_base)
-{
-	CGS_FUNC_ADEV;
-
-	if (resource_base == NULL)
-		return -EINVAL;
-
-	switch (resource_type) {
-	case CGS_RESOURCE_TYPE_MMIO:
-		if (adev->rmmio_size == 0)
-			return -ENOENT;
-		if ((offset + size) > adev->rmmio_size)
-			return -EINVAL;
-		*resource_base = adev->rmmio_base;
-		return 0;
-	case CGS_RESOURCE_TYPE_DOORBELL:
-		if (adev->doorbell.size == 0)
-			return -ENOENT;
-		if ((offset + size) > adev->doorbell.size)
-			return -EINVAL;
-		*resource_base = adev->doorbell.base;
-		return 0;
-	case CGS_RESOURCE_TYPE_FB:
-	case CGS_RESOURCE_TYPE_IO:
-	case CGS_RESOURCE_TYPE_ROM:
-	default:
-		return -EINVAL;
-	}
-}
-
-static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
-						  unsigned table, uint16_t *size,
-						  uint8_t *frev, uint8_t *crev)
-{
-	CGS_FUNC_ADEV;
-	uint16_t data_start;
-
-	if (amdgpu_atom_parse_data_header(
-		    adev->mode_info.atom_context, table, size,
-		    frev, crev, &data_start))
-		return (uint8_t*)adev->mode_info.atom_context->bios +
-			data_start;
-
-	return NULL;
-}
-
-static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
-					      uint8_t *frev, uint8_t *crev)
-{
-	CGS_FUNC_ADEV;
-
-	if (amdgpu_atom_parse_cmd_header(
-		    adev->mode_info.atom_context, table,
-		    frev, crev))
-		return 0;
-
-	return -EINVAL;
-}
-
-static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
-					  void *args)
-{
-	CGS_FUNC_ADEV;
-
-	return amdgpu_atom_execute_table(
-		adev->mode_info.atom_context, table, args);
-}
-
-static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
-					    enum amd_ip_block_type block_type,
-					    enum amd_clockgating_state state)
-{
-	CGS_FUNC_ADEV;
-	int i, r = -1;
-
-	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
-			continue;
-
-		if (adev->ip_blocks[i].version->type == block_type) {
-			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
-								(void *)adev,
-								state);
-			break;
-		}
-	}
-	return r;
-}
-
-static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
-					    enum amd_ip_block_type block_type,
-					    enum amd_powergating_state state)
-{
-	CGS_FUNC_ADEV;
-	int i, r = -1;
-
-	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
-			continue;
-
-		if (adev->ip_blocks[i].version->type == block_type) {
-			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
-								(void *)adev,
-								state);
-			break;
-		}
-	}
-	return r;
-}
-
-
 static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 {
 	CGS_FUNC_ADEV;
@@ -271,18 +155,6 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 	return result;
 }
 
-static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
-{
-	CGS_FUNC_ADEV;
-	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
-		release_firmware(adev->pm.fw);
-		adev->pm.fw = NULL;
-		return 0;
-	}
-	/* cannot release other firmware because they are not created by cgs */
-	return -EINVAL;
-}
-
 static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
 					    enum cgs_ucode_id type)
 {
@@ -326,34 +198,6 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
 	return fw_version;
 }
 
-static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
-				      bool en)
-{
-	CGS_FUNC_ADEV;
-
-	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
-	    adev->gfx.rlc.funcs->exit_safe_mode == NULL)
-		return 0;
-
-	if (en)
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
-	else
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
-
-	return 0;
-}
-
-static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
-				     bool lock)
-{
-	CGS_FUNC_ADEV;
-
-	if (lock)
-		mutex_lock(&adev->grbm_idx_mutex);
-	else
-		mutex_unlock(&adev->grbm_idx_mutex);
-}
-
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
@@ -541,6 +385,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	case CHIP_POLARIS12:
 		strcpy(fw_name, "amdgpu/polaris12_smc.bin");
 		break;
+	case CHIP_VEGAM:
+		strcpy(fw_name, "amdgpu/vegam_smc.bin");
+		break;
 	case CHIP_VEGA10:
 		if ((adev->pdev->device == 0x687f) &&
 		    ((adev->pdev->revision == 0xc0) ||
@@ -598,97 +445,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	return 0;
 }
 
-static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
-{
-	CGS_FUNC_ADEV;
-	return amdgpu_sriov_vf(adev);
-}
-
-static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
-					       struct cgs_display_info *info)
-{
-	CGS_FUNC_ADEV;
-	struct cgs_mode_info *mode_info;
-
-	if (info == NULL)
-		return -EINVAL;
-
-	mode_info = info->mode_info;
-	if (mode_info)
-		/* if the displays are off, vblank time is max */
-		mode_info->vblank_time_us = 0xffffffff;
-
-	if (!amdgpu_device_has_dc_support(adev)) {
-		struct amdgpu_crtc *amdgpu_crtc;
-		struct drm_device *ddev = adev->ddev;
-		struct drm_crtc *crtc;
-		uint32_t line_time_us, vblank_lines;
-
-		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
-			list_for_each_entry(crtc,
-					    &ddev->mode_config.crtc_list, head) {
-				amdgpu_crtc = to_amdgpu_crtc(crtc);
-				if (crtc->enabled) {
-					info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
-					info->display_count++;
-				}
-				if (mode_info != NULL &&
-					crtc->enabled && amdgpu_crtc->enabled &&
-					amdgpu_crtc->hw_mode.clock) {
-					line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
-							amdgpu_crtc->hw_mode.clock;
-					vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
-							amdgpu_crtc->hw_mode.crtc_vdisplay +
-							(amdgpu_crtc->v_border * 2);
-					mode_info->vblank_time_us = vblank_lines * line_time_us;
-					mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-					/* we have issues with mclk switching with refresh rates
-					 * over 120 hz on the non-DC code.
-					 */
-					if (mode_info->refresh_rate > 120)
-						mode_info->vblank_time_us = 0;
-					mode_info = NULL;
-				}
-			}
-		}
-	} else {
-		info->display_count = adev->pm.pm_display_cfg.num_display;
-		if (mode_info != NULL) {
-			mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
-			mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
-		}
-	}
-	return 0;
-}
-
-
-static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
-{
-	CGS_FUNC_ADEV;
-
-	adev->pm.dpm_enabled = enabled;
-
-	return 0;
-}
-
 static const struct cgs_ops amdgpu_cgs_ops = {
 	.read_register = amdgpu_cgs_read_register,
 	.write_register = amdgpu_cgs_write_register,
 	.read_ind_register = amdgpu_cgs_read_ind_register,
 	.write_ind_register = amdgpu_cgs_write_ind_register,
-	.get_pci_resource = amdgpu_cgs_get_pci_resource,
-	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
-	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
-	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
 	.get_firmware_info = amdgpu_cgs_get_firmware_info,
-	.rel_firmware = amdgpu_cgs_rel_firmware,
-	.set_powergating_state = amdgpu_cgs_set_powergating_state,
-	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
-	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
-	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
-	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
-	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
-	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
 };
 
 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 96501ff0e55b..8e66851eb427 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -691,7 +691,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
 					     struct drm_display_mode *mode)
 {
 	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
@@ -843,7 +843,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
 					    struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -1172,7 +1172,7 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
 	amdgpu_connector->use_digital = true;
 }
 
-static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
 					    struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -1448,7 +1448,7 @@ out:
 	return ret;
 }
 
-static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
 					   struct drm_display_mode *mode)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 8e66f3702b7c..9c1d491d742e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -382,8 +382,7 @@ retry:
 
 	p->bytes_moved += ctx.bytes_moved;
 	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	    amdgpu_bo_in_cpu_visible_vram(bo))
 		p->bytes_moved_vis += ctx.bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -411,7 +410,6 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 	struct amdgpu_bo_list_entry *candidate = p->evictable;
 	struct amdgpu_bo *bo = candidate->robj;
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	u64 initial_bytes_moved, bytes_moved;
 	bool update_bytes_moved_vis;
 	uint32_t other;
 
@@ -435,18 +433,14 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 			continue;
 
 		/* Good we can try to move this BO somewhere else */
-		amdgpu_ttm_placement_from_domain(bo, other);
 		update_bytes_moved_vis =
 			adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
-			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-			bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
-		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+			amdgpu_bo_in_cpu_visible_vram(bo);
+		amdgpu_ttm_placement_from_domain(bo, other);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
-			initial_bytes_moved;
-		p->bytes_moved += bytes_moved;
+		p->bytes_moved += ctx.bytes_moved;
 		if (update_bytes_moved_vis)
-			p->bytes_moved_vis += bytes_moved;
+			p->bytes_moved_vis += ctx.bytes_moved;
 
 		if (unlikely(r))
 			break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 09d35051fdd6..a8e531d604fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -91,7 +91,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			continue;
 
 		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, amdgpu_sched_jobs, &ctx->guilty);
+					  rq, &ctx->guilty);
 		if (r)
 			goto failed;
 	}
@@ -111,8 +111,9 @@ failed:
 	return r;
 }
 
-static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+static void amdgpu_ctx_fini(struct kref *ref)
 {
+	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned i, j;
 
@@ -125,13 +126,11 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 
-	for (i = 0; i < adev->num_rings; i++)
-		drm_sched_entity_fini(&adev->rings[i]->sched,
-				      &ctx->rings[i].entity);
-
 	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
 
 	mutex_destroy(&ctx->lock);
+
+	kfree(ctx);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
@@ -170,12 +169,15 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+	u32 i;
 
 	ctx = container_of(ref, struct amdgpu_ctx, refcount);
 
-	amdgpu_ctx_fini(ctx);
+	for (i = 0; i < ctx->adev->num_rings; i++)
+		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
+				      &ctx->rings[i].entity);
 
-	kfree(ctx);
+	amdgpu_ctx_fini(ref);
 }
 
 static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
@@ -419,9 +421,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
 
 	if (other) {
 		signed long r;
-		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		r = dma_fence_wait(other, true);
 		if (r < 0) {
-			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+			if (r != -ERESTARTSYS)
+				DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
 			return r;
 		}
 	}
@@ -435,16 +439,62 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 	idr_init(&mgr->ctx_handles);
 }
 
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id, i;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+
+		if (!ctx->adev)
+			return;
+
+		for (i = 0; i < ctx->adev->num_rings; i++)
+			if (kref_read(&ctx->refcount) == 1)
+				drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+						  &ctx->rings[i].entity);
+			else
+				DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+}
+
+void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id, i;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+
+		if (!ctx->adev)
+			return;
+
+		for (i = 0; i < ctx->adev->num_rings; i++)
+			if (kref_read(&ctx->refcount) == 1)
+				drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
+					&ctx->rings[i].entity);
+			else
+				DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+}
+
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 {
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
 	uint32_t id;
 
+	amdgpu_ctx_mgr_entity_cleanup(mgr);
+
 	idp = &mgr->ctx_handles;
 
 	idr_for_each_entry(idp, ctx, id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
 			DRM_ERROR("ctx %p is still alive\n", ctx);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 448d69fe3756..f5fb93795a69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -28,8 +28,13 @@
 #include <linux/debugfs.h>
 #include "amdgpu.h"
 
-/*
- * Debugfs
+/**
+ * amdgpu_debugfs_add_files - Add simple debugfs entries
+ *
+ * @adev:  Device to attach debugfs entries to
+ * @files:  Array of function callbacks that respond to reads
+ * @nfiles: Number of callbacks to register
+ *
  */
 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 			     const struct drm_info_list *files,
@@ -64,7 +69,33 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 
 #if defined(CONFIG_DEBUG_FS)
 
-
+/**
+ * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
+ *
+ * @read: True if reading
+ * @f: open file handle
+ * @buf: User buffer to write/read to
+ * @size: Number of bytes to write/read
+ * @pos:  Offset to seek to
+ *
+ * This debugfs entry has special meaning on the offset being sought.
+ * Various bits have different meanings:
+ *
+ * Bit 62:  Indicates a GRBM bank switch is needed
+ * Bit 61:  Indicates a SRBM bank switch is needed (implies bit 62 is
+ *	    zero)
+ * Bits 24..33: The SE or ME selector if needed
+ * Bits 34..43: The SH (or SA) or PIPE selector if needed
+ * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
+ *
+ * Bit 23:  Indicates that the PM power gating lock should be held
+ *	    This is necessary to read registers that might be
+ *	    unreliable during a power gating transistion.
+ *
+ * The lower bits are the BYTE offset of the register to read.  This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 		char __user *buf, size_t size, loff_t *pos)
 {
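
The offset encoding documented in the new comment above can be exercised from
userspace through the register debugfs file. A hypothetical sketch (the
debugfs path and the register byte offset are illustrative, not taken from
this patch; the bit positions are exactly those listed in the comment):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Request a GRBM bank switch (bit 62) selecting SE 1, SH 0,
		 * INSTANCE 2; the low bits carry the register byte offset.
		 */
		uint64_t se = 1, sh = 0, instance = 2;
		uint64_t reg = 0x1000;	/* illustrative register byte offset */
		uint64_t pos = (1ULL << 62) | (se << 24) | (sh << 34) |
			       (instance << 44) | reg;
		uint32_t val;
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);

		if (fd < 0)
			return 1;
		if (pread(fd, &val, sizeof(val), pos) == sizeof(val))
			printf("reg 0x%llx = 0x%08x\n",
			       (unsigned long long)reg, val);
		close(fd);
		return 0;
	}
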
@@ -164,19 +195,37 @@ end:
 	return result;
 }
 
-
+/**
+ * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
+ */
 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
 	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
 }
 
+/**
+ * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
+ */
 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
 	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
 }
 
+
+/**
+ * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos:  Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read.  This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -204,6 +253,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos:  Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write.  This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
@@ -232,6 +293,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
232 return result; 293 return result;
233} 294}
234 295
296/**
297 * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
298 *
299 * @f: open file handle
300 * @buf: User buffer to store read data in
301 * @size: Number of bytes to read
302 * @pos: Offset to seek to
303 *
304 * The lower bits are the BYTE offset of the register to read. This
305 * allows reading multiple registers in a single call and having
306 * the returned size reflect that.
307 */
235static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, 308static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
236 size_t size, loff_t *pos) 309 size_t size, loff_t *pos)
237{ 310{
@@ -259,6 +332,18 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
259 return result; 332 return result;
260} 333}
261 334
335/**
336 * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
337 *
338 * @f: open file handle
339 * @buf: User buffer to write data from
340 * @size: Number of bytes to write
341 * @pos: Offset to seek to
342 *
343 * The lower bits are the BYTE offset of the register to write. This
344 * allows writing multiple registers in a single call and having
345 * the returned size reflect that.
346 */
262static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, 347static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
263 size_t size, loff_t *pos) 348 size_t size, loff_t *pos)
264{ 349{
@@ -287,6 +372,18 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
287 return result; 372 return result;
288} 373}
289 374
375/**
376 * amdgpu_debugfs_regs_smc_read - Read from a SMC register
377 *
378 * @f: open file handle
379 * @buf: User buffer to store read data in
380 * @size: Number of bytes to read
381 * @pos: Offset to seek to
382 *
383 * The lower bits are the BYTE offset of the register to read. This
384 * allows reading multiple registers in a single call and having
385 * the returned size reflect that.
386 */
290static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, 387static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
291 size_t size, loff_t *pos) 388 size_t size, loff_t *pos)
292{ 389{
@@ -314,6 +411,18 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
314 return result; 411 return result;
315} 412}
316 413
414/**
415 * amdgpu_debugfs_regs_smc_write - Write to a SMC register
416 *
417 * @f: open file handle
418 * @buf: User buffer to write data from
419 * @size: Number of bytes to write
420 * @pos: Offset to seek to
421 *
422 * The lower bits are the BYTE offset of the register to write. This
423 * allows writing multiple registers in a single call and having
424 * the returned size reflect that.
425 */
317static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, 426static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
318 size_t size, loff_t *pos) 427 size_t size, loff_t *pos)
319{ 428{
@@ -342,6 +451,20 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
342 return result; 451 return result;
343} 452}
344 453
454/**
455 * amdgpu_debugfs_gca_config_read - Read from gfx config data
456 *
457 * @f: open file handle
458 * @buf: User buffer to store read data in
459 * @size: Number of bytes to read
460 * @pos: Offset to seek to
461 *
462 * This file is used to access configuration data in a somewhat
463 * stable fashion. The format is a series of DWORDs with the first
464 * indicating which revision it is. New content is appended to the
465 * end so that older software can still read the data.
466 */
467
345static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, 468static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
346 size_t size, loff_t *pos) 469 size_t size, loff_t *pos)
347{ 470{
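
Because the format above is revision-first and append-only, a consumer can read the whole blob and simply ignore trailing DWORDs it does not understand. A hedged sketch of that pattern; the debugfs path is an assumption:

/* Sketch: dump the gca_config revision DWORD and blob size. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t dw[256];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, dw, sizeof(dw));
	if (n >= (ssize_t)sizeof(uint32_t))
		printf("gca_config revision %u, %zd bytes\n", dw[0], n);
	close(fd);
	return 0;
}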
@@ -418,6 +541,19 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset is treated as the BYTE address of one of the sensors
+ * enumerated in amd/include/kgd_pp_interface.h under the
+ * 'amd_pp_sensors' enumeration. For instance, to read the UVD VCLK
+ * you would use the offset 3 * 4 = 12.
+ */
 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 					  size_t size, loff_t *pos)
 {
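
Per the comment, sensor N lives at byte offset N * 4, and the hunk below rejects reads that are not DWORD-sized and DWORD-aligned. A minimal sketch reading the UVD VCLK sensor (enum value 3, so offset 12); the dri/0 path is an assumption:

/* Sketch: read one powerplay sensor as a single DWORD. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t vclk;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_sensors", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &vclk, sizeof(vclk), 3 * 4) == sizeof(vclk))
		printf("UVD VCLK sensor: %u\n", vclk);
	close(fd);
	return 0;
}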
@@ -428,7 +564,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	if (size & 3 || *pos & 0x3)
 		return -EINVAL;
 
-	if (amdgpu_dpm == 0)
+	if (!adev->pm.dpm_enabled)
 		return -EINVAL;
 
 	/* convert offset to sensor number */
@@ -457,6 +593,27 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	return !r ? outsize : r;
 }
 
+/** amdgpu_debugfs_wave_read - Read WAVE STATUS data
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset being sought changes which wave the status data
+ * will be returned for. The bits are used as follows:
+ *
+ * Bits 0..6:   Byte offset into data
+ * Bits 7..14:  SE selector
+ * Bits 15..22: SH/SA selector
+ * Bits 23..30: CU/{WGP+SIMD} selector
+ * Bits 31..36: WAVE ID selector
+ * Bits 37..44: SIMD ID selector
+ *
+ * The returned data begins with one DWORD of version information,
+ * followed by WAVE STATUS registers relevant to the GFX IP version
+ * being used. See gfx_v8_0_read_wave_data() for an example output.
+ */
 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
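
Packing the selector fields by hand is error-prone, so here is a sketch of a helper that builds an amdgpu_wave offset from the bit layout documented above; the values a caller would pass are illustrative:

/* Sketch: assemble a wave-status offset from its documented fields. */
#include <stdint.h>

static uint64_t wave_offset(uint64_t se, uint64_t sh, uint64_t cu,
			    uint64_t wave, uint64_t simd)
{
	return (se   << 7)  |	/* bits 7..14:  SE selector */
	       (sh   << 15) |	/* bits 15..22: SH/SA selector */
	       (cu   << 23) |	/* bits 23..30: CU/{WGP+SIMD} selector */
	       (wave << 31) |	/* bits 31..36: WAVE ID selector */
	       (simd << 37);	/* bits 37..44: SIMD ID selector */
}

A pread() at wave_offset(0, 0, 0, 0, 0) then returns the version DWORD followed by the per-IP WAVE STATUS registers.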
@@ -507,6 +664,28 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/** amdgpu_debugfs_gpr_read - Read wave gprs
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset being sought changes which wave the GPR data
+ * will be returned for. The bits are used as follows:
+ *
+ * Bits 0..11:  Byte offset into data
+ * Bits 12..19: SE selector
+ * Bits 20..27: SH/SA selector
+ * Bits 28..35: CU/{WGP+SIMD} selector
+ * Bits 36..43: WAVE ID selector
+ * Bits 44..51: SIMD ID selector
+ * Bits 52..59: Thread selector
+ * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
+ *
+ * The return data comes from the SGPR or VGPR register bank for
+ * the selected operational unit.
+ */
 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 				       size_t size, loff_t *pos)
 {
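
The same packing idea applies to amdgpu_gpr, with a wider 12-bit data offset and a bank selector that picks VGPRs (0) or SGPRs (1). Again only a sketch derived from the layout above:

/* Sketch: assemble a GPR-read offset from its documented fields. */
#include <stdint.h>

static uint64_t gpr_offset(uint64_t se, uint64_t sh, uint64_t cu,
			   uint64_t wave, uint64_t simd,
			   uint64_t thread, uint64_t bank)
{
	return (se     << 12) | (sh   << 20) | (cu << 28) |
	       (wave   << 36) | (simd << 44) |
	       (thread << 52) | (bank << 60);
}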
@@ -637,6 +816,12 @@ static const char *debugfs_regs_names[] = {
637 "amdgpu_gpr", 816 "amdgpu_gpr",
638}; 817};
639 818
819/**
820 * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
821 * register access.
822 *
823 * @adev: The device to attach the debugfs entries to
824 */
640int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 825int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
641{ 826{
642 struct drm_minor *minor = adev->ddev->primary; 827 struct drm_minor *minor = adev->ddev->primary;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 34af664b9f93..9fb20a53d5b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -83,6 +83,7 @@ static const char *amdgpu_asic_name[] = {
83 "POLARIS10", 83 "POLARIS10",
84 "POLARIS11", 84 "POLARIS11",
85 "POLARIS12", 85 "POLARIS12",
86 "VEGAM",
86 "VEGA10", 87 "VEGA10",
87 "VEGA12", 88 "VEGA12",
88 "RAVEN", 89 "RAVEN",
@@ -690,6 +691,8 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
690{ 691{
691 u64 size_af, size_bf; 692 u64 size_af, size_bf;
692 693
694 mc->gart_size += adev->pm.smu_prv_buffer_size;
695
693 size_af = adev->gmc.mc_mask - mc->vram_end; 696 size_af = adev->gmc.mc_mask - mc->vram_end;
694 size_bf = mc->vram_start; 697 size_bf = mc->vram_start;
695 if (size_bf > size_af) { 698 if (size_bf > size_af) {
@@ -907,6 +910,46 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
907 } 910 }
908} 911}
909 912
913static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
914{
915 struct sysinfo si;
916 bool is_os_64 = (sizeof(void *) == 8) ? true : false;
917 uint64_t total_memory;
918 uint64_t dram_size_seven_GB = 0x1B8000000;
919 uint64_t dram_size_three_GB = 0xB8000000;
920
921 if (amdgpu_smu_memory_pool_size == 0)
922 return;
923
924 if (!is_os_64) {
925 DRM_WARN("Not 64-bit OS, feature not supported\n");
926 goto def_value;
927 }
928 si_meminfo(&si);
929 total_memory = (uint64_t)si.totalram * si.mem_unit;
930
931 if ((amdgpu_smu_memory_pool_size == 1) ||
932 (amdgpu_smu_memory_pool_size == 2)) {
933 if (total_memory < dram_size_three_GB)
934 goto def_value1;
935 } else if ((amdgpu_smu_memory_pool_size == 4) ||
936 (amdgpu_smu_memory_pool_size == 8)) {
937 if (total_memory < dram_size_seven_GB)
938 goto def_value1;
939 } else {
940 DRM_WARN("Smu memory pool size not supported\n");
941 goto def_value;
942 }
943 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
944
945 return;
946
947def_value1:
948 DRM_WARN("No enough system memory\n");
949def_value:
950 adev->pm.smu_prv_buffer_size = 0;
951}
952
910/** 953/**
911 * amdgpu_device_check_arguments - validate module params 954 * amdgpu_device_check_arguments - validate module params
912 * 955 *
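
The "<< 28" in amdgpu_device_check_smu_prv_buffer_size() scales the module parameter by 256 MiB, which is how the 0x1/0x2/0x4/0x8 values map to the sizes named in the parameter description in amdgpu_drv.c. A self-checking sketch of that arithmetic, not part of the patch:

/* Sketch: the pool-size parameter scales by 256 MiB (1 << 28). */
#include <assert.h>

int main(void)
{
	assert((0x1ULL << 28) == (256ULL << 20));	/* 256 MiB */
	assert((0x2ULL << 28) == (512ULL << 20));	/* 512 MiB */
	assert((0x4ULL << 28) == (1024ULL << 20));	/* 1 GiB */
	assert((0x8ULL << 28) == (2048ULL << 20));	/* 2 GiB */
	return 0;
}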
@@ -948,6 +991,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
 		amdgpu_vm_fragment_size = -1;
 	}
 
+	amdgpu_device_check_smu_prv_buffer_size(adev);
+
 	amdgpu_device_check_vm_size(adev);
 
 	amdgpu_device_check_block_size(adev);
@@ -1039,10 +1084,11 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
  * the hardware IP specified.
  * Returns the error code from the last instance.
  */
-int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_clockgating_state state)
 {
+	struct amdgpu_device *adev = dev;
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1072,10 +1118,11 @@ int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
  * the hardware IP specified.
  * Returns the error code from the last instance.
  */
-int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_powergating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_powergating_state state)
 {
+	struct amdgpu_device *adev = dev;
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1320,9 +1367,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 #ifdef CONFIG_DRM_AMDGPU_SI
@@ -1428,9 +1476,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
@@ -1499,6 +1548,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		return -EAGAIN;
 	}
 
+	adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_ERROR("disabled ip block: %d <%s>\n",
@@ -1654,6 +1705,10 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 	if (amdgpu_emu_mode == 1)
 		return 0;
 
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -1704,8 +1759,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 		}
 	}
 
-	mod_delayed_work(system_wq, &adev->late_init_work,
+	queue_delayed_work(system_wq, &adev->late_init_work,
 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
 
 	amdgpu_device_fill_reset_magic(adev);
 
@@ -1850,6 +1905,12 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_request_full_gpu(adev, false);
 
+	/* ungate SMC block powergating */
+	if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+		amdgpu_device_ip_set_powergating_state(adev,
+						       AMD_IP_BLOCK_TYPE_SMC,
+						       AMD_CG_STATE_UNGATE);
+
 	/* ungate SMC block first */
 	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
 						   AMD_CG_STATE_UNGATE);
@@ -2086,14 +2147,12 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_MULLINS:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
-		return amdgpu_dc != 0;
-#endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -2375,10 +2434,6 @@ fence_driver_init:
 		goto failed;
 	}
 
-	r = amdgpu_ib_ring_tests(adev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
-
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_init_data_exchange(adev);
 
@@ -2539,7 +2594,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
+		struct drm_framebuffer *fb = crtc->primary->fb;
 		struct amdgpu_bo *robj;
 
 		if (amdgpu_crtc->cursor_bo) {
@@ -2551,10 +2606,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 			}
 		}
 
-		if (rfb == NULL || rfb->obj == NULL) {
+		if (fb == NULL || fb->obj[0] == NULL) {
 			continue;
 		}
-		robj = gem_to_amdgpu_bo(rfb->obj);
+		robj = gem_to_amdgpu_bo(fb->obj[0]);
 		/* don't unpin kernel fb objects */
 		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
 			r = amdgpu_bo_reserve(robj, true);
@@ -2640,11 +2695,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	}
 	amdgpu_fence_driver_resume(adev);
 
-	if (resume) {
-		r = amdgpu_ib_ring_tests(adev);
-		if (r)
-			DRM_ERROR("ib ring test failed (%d).\n", r);
-	}
 
 	r = amdgpu_device_ip_late_init(adev);
 	if (r)
@@ -2736,6 +2786,9 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		return true;
 
+	if (amdgpu_asic_need_full_reset(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -2792,6 +2845,9 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
 {
 	int i;
 
+	if (amdgpu_asic_need_full_reset(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -3087,20 +3143,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 
 	/* now we are okay to resume SMC/CP/SDMA */
 	r = amdgpu_device_ip_reinit_late_sriov(adev);
-	amdgpu_virt_release_full_gpu(adev, true);
 	if (r)
 		goto error;
 
 	amdgpu_irq_gpu_reset_resume_helper(adev);
 	r = amdgpu_ib_ring_tests(adev);
 
+error:
+	amdgpu_virt_release_full_gpu(adev, true);
 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
 		atomic_inc(&adev->vram_lost_counter);
 		r = amdgpu_device_handle_vram_lost(adev);
 	}
 
-error:
-
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 93f700ab1bfb..76ee8e04ff11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,6 +35,7 @@
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
 
 static void amdgpu_display_flip_callback(struct dma_fence *f,
@@ -151,8 +152,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_framebuffer *old_amdgpu_fb;
-	struct amdgpu_framebuffer *new_amdgpu_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_flip_work *work;
 	struct amdgpu_bo *new_abo;
@@ -174,15 +173,13 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
 
 	/* schedule unpin of the old buffer */
-	old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-	obj = old_amdgpu_fb->obj;
+	obj = crtc->primary->fb->obj[0];
 
 	/* take a reference to the old object */
 	work->old_abo = gem_to_amdgpu_bo(obj);
 	amdgpu_bo_ref(work->old_abo);
 
-	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
-	obj = new_amdgpu_fb->obj;
+	obj = fb->obj[0];
 	new_abo = gem_to_amdgpu_bo(obj);
 
 	/* pin the new buffer */
@@ -192,7 +189,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base);
+	r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
@@ -482,31 +479,12 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
 	return true;
 }
 
-static void amdgpu_display_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
-
-	drm_gem_object_put_unlocked(amdgpu_fb->obj);
-	drm_framebuffer_cleanup(fb);
-	kfree(amdgpu_fb);
-}
-
-static int amdgpu_display_user_framebuffer_create_handle(
-			struct drm_framebuffer *fb,
-			struct drm_file *file_priv,
-			unsigned int *handle)
-{
-	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
-
-	return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
-}
-
 static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
-	.destroy = amdgpu_display_user_framebuffer_destroy,
-	.create_handle = amdgpu_display_user_framebuffer_create_handle,
+	.destroy = drm_gem_fb_destroy,
+	.create_handle = drm_gem_fb_create_handle,
 };
 
-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev)
 {
 	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
 
@@ -526,11 +504,11 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
 				    struct drm_gem_object *obj)
 {
 	int ret;
-	rfb->obj = obj;
+	rfb->base.obj[0] = obj;
 	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
 	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
 	if (ret) {
-		rfb->obj = NULL;
+		rfb->base.obj[0] = NULL;
 		return ret;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 2b11d808f297..f66e3e3fef0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -23,7 +23,7 @@
 #ifndef __AMDGPU_DISPLAY_H__
 #define __AMDGPU_DISPLAY_H__
 
-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev);
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev);
 struct drm_framebuffer *
 amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 				       struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index e997ebbe43ea..def1010ac05e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -115,6 +115,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 	pr_cont("\n");
 }
 
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+{
+	struct drm_device *ddev = adev->ddev;
+	struct drm_crtc *crtc;
+	struct amdgpu_crtc *amdgpu_crtc;
+
+	adev->pm.dpm.new_active_crtcs = 0;
+	adev->pm.dpm.new_active_crtc_count = 0;
+	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc,
+				    &ddev->mode_config.crtc_list, head) {
+			amdgpu_crtc = to_amdgpu_crtc(crtc);
+			if (amdgpu_crtc->enabled) {
+				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
+				adev->pm.dpm.new_active_crtc_count++;
+			}
+		}
+	}
+}
+
 
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 643d008410c6..dd6203a0a6b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -52,8 +52,6 @@ enum amdgpu_dpm_event_src {
 	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
 };
 
-#define SCLK_DEEP_SLEEP_MASK 0x8
-
 struct amdgpu_ps {
 	u32 caps; /* vbios flags */
 	u32 class; /* vbios flags */
@@ -349,12 +347,6 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
 			(adev)->powerplay.pp_handle, msg_id))
 
-#define amdgpu_dpm_notify_smu_memory_info(adev, virtual_addr_low, \
-			virtual_addr_hi, mc_addr_low, mc_addr_hi, size) \
-		((adev)->powerplay.pp_funcs->notify_smu_memory_info)( \
-			(adev)->powerplay.pp_handle, virtual_addr_low, \
-			virtual_addr_hi, mc_addr_low, mc_addr_hi, size)
-
 #define amdgpu_dpm_get_power_profile_mode(adev, buf) \
 		((adev)->powerplay.pp_funcs->get_power_profile_mode(\
 			(adev)->powerplay.pp_handle, buf))
@@ -445,6 +437,8 @@ struct amdgpu_pm {
 	uint32_t pcie_gen_mask;
 	uint32_t pcie_mlw_mask;
 	struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
+	uint32_t smu_prv_buffer_size;
+	struct amdgpu_bo *smu_prv_buffer;
 };
 
 #define R600_SSTU_DFLT 0
@@ -482,6 +476,7 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 				struct amdgpu_ps *rps);
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
 u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
 bool amdgpu_is_uvd_state(u32 class, u32 class2);
 void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
 			      u32 *p, u32 *u);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0b19482b36b8..739e7e09c8b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -75,9 +75,10 @@
  * - 3.23.0 - Add query for VRAM lost counter
  * - 3.24.0 - Add high priority compute support for gfx9
  * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
+ * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	25
+#define KMS_DRIVER_MINOR	26
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -121,7 +122,7 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */
 int amdgpu_ngg = 0;
 int amdgpu_prim_buf_per_se = 0;
 int amdgpu_pos_buf_per_se = 0;
@@ -132,6 +133,7 @@ int amdgpu_lbpw = -1;
 int amdgpu_compute_multipipe = -1;
 int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
+uint amdgpu_smu_memory_pool_size = 0;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -316,6 +318,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
 module_param_named(cik_support, amdgpu_cik_support, int, 0444);
 #endif
 
+MODULE_PARM_DESC(smu_memory_pool_size,
+	"reserve gtt for smu debug usage, 0 = disable, "
+	"0x1 = 256 MByte, 0x2 = 512 MByte, 0x4 = 1 GByte, 0x8 = 2 GByte");
+module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_SI
 	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -534,6 +541,9 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	/* VEGAM */
+	{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+	{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
 	/* Vega 10 */
 	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 12063019751b..bc5fd8ebab5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -137,7 +137,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 	/* need to align pitch with crtc limits */
 	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
 						  fb_tiled);
-	domain = amdgpu_display_framebuffer_domains(adev);
+	domain = amdgpu_display_supported_domains(adev);
 
 	height = ALIGN(mode_cmd->height, 8);
 	size = mode_cmd->pitches[0] * height;
@@ -292,9 +292,9 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
 
 	drm_fb_helper_unregister_fbi(&rfbdev->helper);
 
-	if (rfb->obj) {
-		amdgpufb_destroy_pinned_object(rfb->obj);
-		rfb->obj = NULL;
+	if (rfb->base.obj[0]) {
+		amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
+		rfb->base.obj[0] = NULL;
 		drm_framebuffer_unregister_private(&rfb->base);
 		drm_framebuffer_cleanup(&rfb->base);
 	}
@@ -377,7 +377,7 @@ int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
 	if (!adev->mode_info.rfbdev)
 		return 0;
 
-	robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj);
+	robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]);
 	size += amdgpu_bo_size(robj);
 	return size;
 }
@@ -386,7 +386,7 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 {
 	if (!adev->mode_info.rfbdev)
 		return false;
-	if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj))
+	if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]))
 		return true;
 	return false;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 97449e06a242..d09fcab2398f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -131,7 +131,8 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+		      unsigned flags)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
@@ -149,7 +150,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
 		       adev->fence_context + ring->idx,
 		       seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
-			       seq, AMDGPU_FENCE_FLAG_INT);
+			       seq, flags | AMDGPU_FENCE_FLAG_INT);
 
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 	/* This function can't be called concurrently anyway, otherwise
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cf0f186c6092..17d6b9fb6d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -113,12 +113,17 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->gart.robj == NULL) {
-		r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
-				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     ttm_bo_type_kernel, NULL,
-				     &adev->gart.robj);
+		struct amdgpu_bo_param bp;
+
+		memset(&bp, 0, sizeof(bp));
+		bp.size = adev->gart.table_size;
+		bp.byte_align = PAGE_SIZE;
+		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+		bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			   AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+		bp.type = ttm_bo_type_kernel;
+		bp.resv = NULL;
+		r = amdgpu_bo_create(adev, &bp, &adev->gart.robj);
 		if (r) {
 			return r;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 46b9ea4e6103..2c8e27370284 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -48,17 +48,25 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     struct drm_gem_object **obj)
 {
 	struct amdgpu_bo *bo;
+	struct amdgpu_bo_param bp;
 	int r;
 
+	memset(&bp, 0, sizeof(bp));
 	*obj = NULL;
 	/* At least align on page size */
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
 
+	bp.size = size;
+	bp.byte_align = alignment;
+	bp.type = type;
+	bp.resv = resv;
+	bp.preferred_domain = initial_domain;
 retry:
-	r = amdgpu_bo_create(adev, size, alignment, initial_domain,
-			     flags, type, resv, &bo);
+	bp.flags = flags;
+	bp.domain = initial_domain;
+	r = amdgpu_bo_create(adev, &bp, &bo);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
@@ -221,12 +229,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 
 	/* reject invalid gem domains */
-	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
-				 AMDGPU_GEM_DOMAIN_GTT |
-				 AMDGPU_GEM_DOMAIN_VRAM |
-				 AMDGPU_GEM_DOMAIN_GDS |
-				 AMDGPU_GEM_DOMAIN_GWS |
-				 AMDGPU_GEM_DOMAIN_OA))
+	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
 		return -EINVAL;
 
 	/* create a gem object to contain this object in */
@@ -771,16 +774,23 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 }
 
 #if defined(CONFIG_DEBUG_FS)
+
+#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
+	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
+		seq_printf((m), " " #flag);		\
+	}
+
 static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 {
 	struct drm_gem_object *gobj = ptr;
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
 	struct seq_file *m = data;
 
+	struct dma_buf_attachment *attachment;
+	struct dma_buf *dma_buf;
 	unsigned domain;
 	const char *placement;
 	unsigned pin_count;
-	uint64_t offset;
 
 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 	switch (domain) {
@@ -798,13 +808,27 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 	seq_printf(m, "\t0x%08x: %12ld byte %s",
 		   id, amdgpu_bo_size(bo), placement);
 
-	offset = READ_ONCE(bo->tbo.mem.start);
-	if (offset != AMDGPU_BO_INVALID_OFFSET)
-		seq_printf(m, " @ 0x%010Lx", offset);
-
 	pin_count = READ_ONCE(bo->pin_count);
 	if (pin_count)
 		seq_printf(m, " pin count %d", pin_count);
+
+	dma_buf = READ_ONCE(bo->gem_base.dma_buf);
+	attachment = READ_ONCE(bo->gem_base.import_attach);
+
+	if (attachment)
+		seq_printf(m, " imported from %p", dma_buf);
+	else if (dma_buf)
+		seq_printf(m, " exported as %p", dma_buf);
+
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);
+
 	seq_printf(m, "\n");
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 311589e02d17..f70eeed9ed76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -127,6 +127,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	struct amdgpu_vm *vm;
 	uint64_t fence_ctx;
 	uint32_t status = 0, alloc_size;
+	unsigned fence_flags = 0;
 
 	unsigned i;
 	int r = 0;
@@ -227,7 +228,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 #endif
 	amdgpu_asic_invalidate_hdp(adev, ring);
 
-	r = amdgpu_fence_emit(ring, f);
+	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
+		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+
+	r = amdgpu_fence_emit(ring, f, fence_flags);
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		if (job && job->vmid)
@@ -242,7 +246,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	/* wrap the last IB with fence */
 	if (job && job->uf_addr) {
 		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
-				       AMDGPU_FENCE_FLAG_64BIT);
+				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
 	}
 
 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 4b7824d30e73..eb4785e51573 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -31,6 +31,7 @@
 #include "amdgpu_sched.h"
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
+#include "atom.h"
 
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
@@ -214,6 +215,18 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 		fw_info->ver = adev->gfx.rlc_fw_version;
 		fw_info->feature = adev->gfx.rlc_feature_version;
 		break;
+	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
+		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
+		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
+		break;
+	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
+		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
+		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
+		break;
+	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
+		fw_info->ver = adev->gfx.rlc_srls_fw_version;
+		fw_info->feature = adev->gfx.rlc_srls_feature_version;
+		break;
 	case AMDGPU_INFO_FW_GFX_MEC:
 		if (query_fw->index == 0) {
 			fw_info->ver = adev->gfx.mec_fw_version;
@@ -279,6 +292,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	if (!info->return_size || !info->return_pointer)
 		return -EINVAL;
 
+	/* Ensure IB tests are run on ring */
+	flush_delayed_work(&adev->late_init_work);
+
 	switch (info->query) {
 	case AMDGPU_INFO_ACCEL_WORKING:
 		ui32 = adev->accel_working;
@@ -701,10 +717,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		}
 	}
 	case AMDGPU_INFO_SENSOR: {
-		struct pp_gpu_power query = {0};
-		int query_size = sizeof(query);
-
-		if (amdgpu_dpm == 0)
+		if (!adev->pm.dpm_enabled)
 			return -ENOENT;
 
 		switch (info->sensor_info.type) {
@@ -746,10 +759,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			/* get average GPU power */
 			if (amdgpu_dpm_read_sensor(adev,
 						   AMDGPU_PP_SENSOR_GPU_POWER,
-						   (void *)&query, &query_size)) {
+						   (void *)&ui32, &ui32_size)) {
 				return -EINVAL;
 			}
-			ui32 = query.average_gpu_power >> 8;
+			ui32 >>= 8;
 			break;
 		case AMDGPU_INFO_SENSOR_VDDNB:
 			/* get VDDNB in millivolts */
@@ -913,8 +926,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		return;
 
 	pm_runtime_get_sync(dev->dev);
-
-	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+	amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
 
 	if (adev->asic_type != CHIP_RAVEN) {
 		amdgpu_uvd_free_handles(adev, file_priv);
@@ -935,6 +947,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
 
 	amdgpu_vm_fini(adev, &fpriv->vm);
+	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+
 	if (pasid)
 		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
 	amdgpu_bo_unref(&pd);
@@ -1088,6 +1102,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_amdgpu_info_firmware fw_info;
 	struct drm_amdgpu_query_fw query_fw;
+	struct atom_context *ctx = adev->mode_info.atom_context;
 	int ret, i;
 
 	/* VCE */
@@ -1146,6 +1161,30 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
 	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
 		   fw_info.feature, fw_info.ver);
 
+	/* RLC SAVE RESTORE LIST CNTL */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* RLC SAVE RESTORE LIST GPM MEM */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* RLC SAVE RESTORE LIST SRM MEM */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
 	/* MEC */
 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
 	query_fw.index = 0;
@@ -1210,6 +1249,9 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
 	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
 		   fw_info.feature, fw_info.ver);
 
+
+	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d6416ee52e32..b9e9e8b02fb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -308,7 +308,6 @@ struct amdgpu_display_funcs {
 
 struct amdgpu_framebuffer {
 	struct drm_framebuffer base;
-	struct drm_gem_object *obj;
 
 	/* caching for later use */
 	uint64_t address;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6d08cde8443c..6a9e46ae7f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -191,14 +191,21 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 			      u32 domain, struct amdgpu_bo **bo_ptr,
 			      u64 *gpu_addr, void **cpu_addr)
 {
+	struct amdgpu_bo_param bp;
 	bool free = false;
 	int r;
 
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = align;
+	bp.domain = domain;
+	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+		   AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
+
 	if (!*bo_ptr) {
-		r = amdgpu_bo_create(adev, size, align, domain,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     ttm_bo_type_kernel, NULL, bo_ptr);
+		r = amdgpu_bo_create(adev, &bp, bo_ptr);
 		if (r) {
 			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
 				r);
@@ -341,27 +348,25 @@ fail:
 	return false;
 }
 
-static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
-			       int byte_align, u32 domain,
-			       u64 flags, enum ttm_bo_type type,
-			       struct reservation_object *resv,
+static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+			       struct amdgpu_bo_param *bp,
 			       struct amdgpu_bo **bo_ptr)
 {
 	struct ttm_operation_ctx ctx = {
-		.interruptible = (type != ttm_bo_type_kernel),
+		.interruptible = (bp->type != ttm_bo_type_kernel),
 		.no_wait_gpu = false,
-		.resv = resv,
+		.resv = bp->resv,
 		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
 	};
 	struct amdgpu_bo *bo;
-	unsigned long page_align;
+	unsigned long page_align, size = bp->size;
 	size_t acc_size;
 	int r;
 
-	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 	size = ALIGN(size, PAGE_SIZE);
 
-	if (!amdgpu_bo_validate_size(adev, size, domain))
+	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
 		return -ENOMEM;
 
 	*bo_ptr = NULL;
@@ -375,18 +380,14 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
-	bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
-					  AMDGPU_GEM_DOMAIN_GTT |
-					  AMDGPU_GEM_DOMAIN_CPU |
-					  AMDGPU_GEM_DOMAIN_GDS |
-					  AMDGPU_GEM_DOMAIN_GWS |
-					  AMDGPU_GEM_DOMAIN_OA);
+	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
+		bp->domain;
 	bo->allowed_domains = bo->preferred_domains;
-	if (type != ttm_bo_type_kernel &&
+	if (bp->type != ttm_bo_type_kernel &&
 	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
-	bo->flags = flags;
+	bo->flags = bp->flags;
 
 #ifdef CONFIG_X86_32
 	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
@@ -417,11 +418,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 #endif
 
 	bo->tbo.bdev = &adev->mman.bdev;
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_ttm_placement_from_domain(bo, bp->domain);
+	if (bp->type == ttm_bo_type_kernel)
+		bo->tbo.priority = 1;
 
-	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
+	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
 				 &bo->placement, page_align, &ctx, acc_size,
-				 NULL, resv, &amdgpu_ttm_bo_destroy);
+				 NULL, bp->resv, &amdgpu_ttm_bo_destroy);
 	if (unlikely(r != 0))
 		return r;
 
@@ -433,10 +436,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 	else
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
-	if (type == ttm_bo_type_kernel)
-		bo->tbo.priority = 1;
-
-	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
 		struct dma_fence *fence;
 
@@ -449,20 +449,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 		bo->tbo.moving = dma_fence_get(fence);
 		dma_fence_put(fence);
 	}
-	if (!resv)
+	if (!bp->resv)
 		amdgpu_bo_unreserve(bo);
 	*bo_ptr = bo;
 
 	trace_amdgpu_bo_create(bo);
 
 	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
-	if (type == ttm_bo_type_device)
+	if (bp->type == ttm_bo_type_device)
 		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
 	return 0;
 
 fail_unreserve:
-	if (!resv)
+	if (!bp->resv)
 		ww_mutex_unlock(&bo->tbo.resv->lock);
 	amdgpu_bo_unref(&bo);
 	return r;
@@ -472,16 +472,22 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 				   unsigned long size, int byte_align,
 				   struct amdgpu_bo *bo)
 {
+	struct amdgpu_bo_param bp;
 	int r;
 
 	if (bo->shadow)
 		return 0;
 
-	r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
-				AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-				AMDGPU_GEM_CREATE_SHADOW,
-				ttm_bo_type_kernel,
-				bo->tbo.resv, &bo->shadow);
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = byte_align;
+	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+		   AMDGPU_GEM_CREATE_SHADOW;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = bo->tbo.resv;
+
+	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
 	if (!r) {
 		bo->shadow->parent = amdgpu_bo_ref(bo);
 		mutex_lock(&adev->shadow_list_lock);
@@ -492,28 +498,26 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 	return r;
 }
 
-int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
-		     int byte_align, u32 domain,
-		     u64 flags, enum ttm_bo_type type,
-		     struct reservation_object *resv,
+int amdgpu_bo_create(struct amdgpu_device *adev,
+		     struct amdgpu_bo_param *bp,
 		     struct amdgpu_bo **bo_ptr)
 {
-	uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
+	u64 flags = bp->flags;
 	int r;
 
-	r = amdgpu_bo_do_create(adev, size, byte_align, domain,
+	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
505 parent_flags, type, resv, bo_ptr); 509 r = amdgpu_bo_do_create(adev, bp, bo_ptr);
506 if (r) 510 if (r)
507 return r; 511 return r;
508 512
509 if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) { 513 if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
510 if (!resv) 514 if (!bp->resv)
511 WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv, 515 WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
512 NULL)); 516 NULL));
513 517
514 r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); 518 r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));
515 519
516 if (!resv) 520 if (!bp->resv)
517 reservation_object_unlock((*bo_ptr)->tbo.resv); 521 reservation_object_unlock((*bo_ptr)->tbo.resv);
518 522
519 if (r) 523 if (r)
@@ -689,8 +693,21 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
689 return -EINVAL; 693 return -EINVAL;
690 694
691 /* A shared bo cannot be migrated to VRAM */ 695 /* A shared bo cannot be migrated to VRAM */
692 if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM)) 696 if (bo->prime_shared_count) {
693 return -EINVAL; 697 if (domain & AMDGPU_GEM_DOMAIN_GTT)
698 domain = AMDGPU_GEM_DOMAIN_GTT;
699 else
700 return -EINVAL;
701 }
702
703 /* This assumes only APU display buffers are pinned with (VRAM|GTT).
704 * See function amdgpu_display_supported_domains()
705 */
706 if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
707 domain = AMDGPU_GEM_DOMAIN_VRAM;
708 if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
709 domain = AMDGPU_GEM_DOMAIN_GTT;
710 }
694 711
695 if (bo->pin_count) { 712 if (bo->pin_count) {
696 uint32_t mem_type = bo->tbo.mem.mem_type; 713 uint32_t mem_type = bo->tbo.mem.mem_type;
@@ -838,6 +855,13 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
838 return amdgpu_ttm_init(adev); 855 return amdgpu_ttm_init(adev);
839} 856}
840 857
858int amdgpu_bo_late_init(struct amdgpu_device *adev)
859{
860 amdgpu_ttm_late_init(adev);
861
862 return 0;
863}
864
841void amdgpu_bo_fini(struct amdgpu_device *adev) 865void amdgpu_bo_fini(struct amdgpu_device *adev)
842{ 866{
843 amdgpu_ttm_fini(adev); 867 amdgpu_ttm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 546f77cb7882..540e03fa159f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -33,6 +33,16 @@
33 33
34#define AMDGPU_BO_INVALID_OFFSET LONG_MAX 34#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
35 35
36struct amdgpu_bo_param {
37 unsigned long size;
38 int byte_align;
39 u32 domain;
40 u32 preferred_domain;
41 u64 flags;
42 enum ttm_bo_type type;
43 struct reservation_object *resv;
44};
45
36/* bo virtual addresses in a vm */ 46/* bo virtual addresses in a vm */
37struct amdgpu_bo_va_mapping { 47struct amdgpu_bo_va_mapping {
38 struct amdgpu_bo_va *bo_va; 48 struct amdgpu_bo_va *bo_va;
@@ -196,6 +206,27 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
196} 206}
197 207
198/** 208/**
209 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
210 */
211static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
212{
213 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
214 unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
215 struct drm_mm_node *node = bo->tbo.mem.mm_node;
216 unsigned long pages_left;
217
218 if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
219 return false;
220
221 for (pages_left = bo->tbo.mem.num_pages; pages_left;
222 pages_left -= node->size, node++)
223 if (node->start < fpfn)
224 return true;
225
226 return false;
227}
228
229/**
199 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced 230 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
200 */ 231 */
201static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo) 232static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
@@ -203,10 +234,8 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
203 return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC; 234 return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
204} 235}
205 236
206int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, 237int amdgpu_bo_create(struct amdgpu_device *adev,
207 int byte_align, u32 domain, 238 struct amdgpu_bo_param *bp,
208 u64 flags, enum ttm_bo_type type,
209 struct reservation_object *resv,
210 struct amdgpu_bo **bo_ptr); 239 struct amdgpu_bo **bo_ptr);
211int amdgpu_bo_create_reserved(struct amdgpu_device *adev, 240int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
212 unsigned long size, int align, 241 unsigned long size, int align,
@@ -230,6 +259,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
230int amdgpu_bo_unpin(struct amdgpu_bo *bo); 259int amdgpu_bo_unpin(struct amdgpu_bo *bo);
231int amdgpu_bo_evict_vram(struct amdgpu_device *adev); 260int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
232int amdgpu_bo_init(struct amdgpu_device *adev); 261int amdgpu_bo_init(struct amdgpu_device *adev);
262int amdgpu_bo_late_init(struct amdgpu_device *adev);
233void amdgpu_bo_fini(struct amdgpu_device *adev); 263void amdgpu_bo_fini(struct amdgpu_device *adev);
234int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, 264int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
235 struct vm_area_struct *vma); 265 struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 361975cf45a9..b455da487782 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -77,6 +77,37 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
77 } 77 }
78} 78}
79 79
80/**
81 * DOC: power_dpm_state
82 *
83 * This is a legacy interface and is only provided for backwards compatibility.
84 * The amdgpu driver provides a sysfs API for adjusting certain power
85 * related parameters. The file power_dpm_state is used for this.
86 * It accepts the following arguments:
87 * - battery
88 * - balanced
89 * - performance
90 *
91 * battery
92 *
93 * On older GPUs, the vbios provided a special power state for battery
94 * operation. Selecting battery switched to this state. This is no
95 * longer provided on newer GPUs so the option does nothing in that case.
96 *
97 * balanced
98 *
99 * On older GPUs, the vbios provided a special power state for balanced
100 * operation. Selecting balanced switched to this state. This is no
101 * longer provided on newer GPUs so the option does nothing in that case.
102 *
103 * performance
104 *
105 * On older GPUs, the vbios provided a special power state for performance
106 * operation. Selecting performance switched to this state. This is no
107 * longer provided on newer GPUs so the option does nothing in that case.
108 *
109 */
110
80static ssize_t amdgpu_get_dpm_state(struct device *dev, 111static ssize_t amdgpu_get_dpm_state(struct device *dev,
81 struct device_attribute *attr, 112 struct device_attribute *attr,
82 char *buf) 113 char *buf)
@@ -131,6 +162,59 @@ fail:
131 return count; 162 return count;
132} 163}
133 164
165
166/**
167 * DOC: power_dpm_force_performance_level
168 *
169 * The amdgpu driver provides a sysfs API for adjusting certain power
170 * related parameters. The file power_dpm_force_performance_level is
171 * used for this. It accepts the following arguments:
172 * - auto
173 * - low
174 * - high
175 * - manual
177 * - profile_standard
178 * - profile_min_sclk
179 * - profile_min_mclk
180 * - profile_peak
181 *
182 * auto
183 *
184 * When auto is selected, the driver will attempt to dynamically select
 185 * the optimal power profile for the current conditions.
186 *
187 * low
188 *
189 * When low is selected, the clocks are forced to the lowest power state.
190 *
191 * high
192 *
193 * When high is selected, the clocks are forced to the highest power state.
194 *
195 * manual
196 *
197 * When manual is selected, the user can manually adjust which power states
198 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
199 * and pp_dpm_pcie files and adjust the power state transition heuristics
200 * via the pp_power_profile_mode sysfs file.
201 *
202 * profile_standard
203 * profile_min_sclk
204 * profile_min_mclk
205 * profile_peak
206 *
207 * When the profiling modes are selected, clock and power gating are
208 * disabled and the clocks are set for different profiling cases. This
 209 * mode is recommended for profiling specific workloads where you do
 210 * not want clock or power gating or clock fluctuations to interfere
211 * with your results. profile_standard sets the clocks to a fixed clock
212 * level which varies from asic to asic. profile_min_sclk forces the sclk
213 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
214 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
215 *
216 */
217
134static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, 218static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
135 struct device_attribute *attr, 219 struct device_attribute *attr,
136 char *buf) 220 char *buf)
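
Since several of the interfaces below require manual mode first, a sketch of reading the current level and switching to manual (same card0 path assumption as above):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path =
            "/sys/class/drm/card0/device/power_dpm_force_performance_level";
        char cur[32] = {0};
        int fd = open(path, O_RDONLY);

        if (fd < 0)
            return 1;
        if (read(fd, cur, sizeof(cur) - 1) > 0)
            printf("current level: %s", cur);   /* e.g. "auto\n" */
        close(fd);

        fd = open(path, O_WRONLY);
        if (fd < 0)
            return 1;
        write(fd, "manual", strlen("manual"));
        close(fd);
        return 0;
    }
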
@@ -324,6 +408,17 @@ fail:
324 return count; 408 return count;
325} 409}
326 410
411/**
412 * DOC: pp_table
413 *
414 * The amdgpu driver provides a sysfs API for uploading new powerplay
415 * tables. The file pp_table is used for this. Reading the file
 416 * will dump the current powerplay table. Writing to the file
417 * will attempt to upload a new powerplay table and re-initialize
418 * powerplay using that new table.
419 *
420 */
421
327static ssize_t amdgpu_get_pp_table(struct device *dev, 422static ssize_t amdgpu_get_pp_table(struct device *dev,
328 struct device_attribute *attr, 423 struct device_attribute *attr,
329 char *buf) 424 char *buf)
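
A sketch of dumping the current table to a local file; the table is an opaque binary blob, and the buffer size below is an assumption, not a documented limit:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/sys/class/drm/card0/device/pp_table";
        char buf[64 * 1024];    /* assumed upper bound on table size */
        ssize_t n;
        int in = open(path, O_RDONLY);
        int out = open("pp_table.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (in < 0 || out < 0)
            return 1;
        while ((n = read(in, buf, sizeof(buf))) > 0)
            write(out, buf, n); /* writing such a blob back uploads it */
        close(in);
        close(out);
        return 0;
    }
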
@@ -360,6 +455,29 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
360 return count; 455 return count;
361} 456}
362 457
458/**
459 * DOC: pp_od_clk_voltage
460 *
461 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 462 * in each power level within a power state. The file pp_od_clk_voltage is
 463 * used for this.
464 *
465 * Reading the file will display:
466 * - a list of engine clock levels and voltages labeled OD_SCLK
467 * - a list of memory clock levels and voltages labeled OD_MCLK
468 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
469 *
470 * To manually adjust these settings, first select manual using
471 * power_dpm_force_performance_level. Enter a new value for each
472 * level by writing a string that contains "s/m level clock voltage" to
473 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
474 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
475 * 810 mV. When you have edited all of the states as needed, write
476 * "c" (commit) to the file to commit your changes. If you want to reset to the
477 * default power levels, write "r" (reset) to the file to reset them.
478 *
479 */
480
363static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, 481static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
364 struct device_attribute *attr, 482 struct device_attribute *attr,
365 const char *buf, 483 const char *buf,
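
The edit/commit protocol described above, as a userspace sketch; the level indices, clocks, and voltages are illustrative and must fall within the OD_RANGE limits read back from the file:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* write a short command string to a sysfs file */
    static int put(const char *path, const char *s)
    {
        int fd = open(path, O_WRONLY);

        if (fd < 0)
            return -1;
        write(fd, s, strlen(s));
        close(fd);
        return 0;
    }

    int main(void)
    {
        /* manual mode is required before overdrive edits are accepted */
        put("/sys/class/drm/card0/device/power_dpm_force_performance_level",
            "manual");
        /* sclk level 1 -> 500 MHz at 820 mV (illustrative values) */
        put("/sys/class/drm/card0/device/pp_od_clk_voltage", "s 1 500 820");
        /* mclk level 0 -> 350 MHz at 810 mV (illustrative values) */
        put("/sys/class/drm/card0/device/pp_od_clk_voltage", "m 0 350 810");
        /* commit the edited levels */
        put("/sys/class/drm/card0/device/pp_od_clk_voltage", "c");
        return 0;
    }
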
@@ -437,6 +555,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
437 if (adev->powerplay.pp_funcs->print_clock_levels) { 555 if (adev->powerplay.pp_funcs->print_clock_levels) {
438 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); 556 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
439 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); 557 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
558 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
440 return size; 559 return size;
441 } else { 560 } else {
442 return snprintf(buf, PAGE_SIZE, "\n"); 561 return snprintf(buf, PAGE_SIZE, "\n");
@@ -444,6 +563,23 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
444 563
445} 564}
446 565
566/**
567 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
568 *
569 * The amdgpu driver provides a sysfs API for adjusting what power levels
570 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
571 * and pp_dpm_pcie are used for this.
572 *
573 * Reading back the files will show you the available power levels within
574 * the power state and the clock information for those levels.
575 *
576 * To manually adjust these states, first select manual using
577 * power_dpm_force_performance_level.
 578 * Secondly, enter a new value for each level by writing a space-separated
 579 * list of level indices to the file, e.g. "echo 4 5 6 > pp_dpm_sclk"
 580 * will enable sclk levels 4, 5, and 6.
581 */
582
447static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 583static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
448 struct device_attribute *attr, 584 struct device_attribute *attr,
449 char *buf) 585 char *buf)
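
A sketch of the level-mask write described above (after selecting manual mode); enabling sclk levels 4, 5, and 6 mirrors the example in the comment:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/sys/class/drm/card0/device/pp_dpm_sclk";
        const char *levels = "4 5 6";   /* space-separated level indices */
        int fd = open(path, O_WRONLY);

        if (fd < 0)
            return 1;
        /* the driver parses each index and ORs 1 << level into a mask */
        write(fd, levels, strlen(levels));
        close(fd);
        return 0;
    }
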
@@ -466,23 +602,27 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
466 struct amdgpu_device *adev = ddev->dev_private; 602 struct amdgpu_device *adev = ddev->dev_private;
467 int ret; 603 int ret;
468 long level; 604 long level;
469 uint32_t i, mask = 0; 605 uint32_t mask = 0;
470 char sub_str[2]; 606 char *sub_str = NULL;
607 char *tmp;
 608 char buf_cpy[count + 1];
609 const char delimiter[3] = {' ', '\n', '\0'};
471 610
472 for (i = 0; i < strlen(buf); i++) { 611 memcpy(buf_cpy, buf, count+1);
473 if (*(buf + i) == '\n') 612 tmp = buf_cpy;
474 continue; 613 while (tmp[0]) {
475 sub_str[0] = *(buf + i); 614 sub_str = strsep(&tmp, delimiter);
476 sub_str[1] = '\0'; 615 if (strlen(sub_str)) {
477 ret = kstrtol(sub_str, 0, &level); 616 ret = kstrtol(sub_str, 0, &level);
478 617
479 if (ret) { 618 if (ret) {
480 count = -EINVAL; 619 count = -EINVAL;
481 goto fail; 620 goto fail;
482 } 621 }
483 mask |= 1 << level; 622 mask |= 1 << level;
623 } else
624 break;
484 } 625 }
485
486 if (adev->powerplay.pp_funcs->force_clock_level) 626 if (adev->powerplay.pp_funcs->force_clock_level)
487 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 627 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
488 628
@@ -512,21 +652,26 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
512 struct amdgpu_device *adev = ddev->dev_private; 652 struct amdgpu_device *adev = ddev->dev_private;
513 int ret; 653 int ret;
514 long level; 654 long level;
515 uint32_t i, mask = 0; 655 uint32_t mask = 0;
516 char sub_str[2]; 656 char *sub_str = NULL;
657 char *tmp;
 658 char buf_cpy[count + 1];
659 const char delimiter[3] = {' ', '\n', '\0'};
517 660
518 for (i = 0; i < strlen(buf); i++) { 661 memcpy(buf_cpy, buf, count+1);
519 if (*(buf + i) == '\n') 662 tmp = buf_cpy;
520 continue; 663 while (tmp[0]) {
521 sub_str[0] = *(buf + i); 664 sub_str = strsep(&tmp, delimiter);
522 sub_str[1] = '\0'; 665 if (strlen(sub_str)) {
523 ret = kstrtol(sub_str, 0, &level); 666 ret = kstrtol(sub_str, 0, &level);
524 667
525 if (ret) { 668 if (ret) {
526 count = -EINVAL; 669 count = -EINVAL;
527 goto fail; 670 goto fail;
528 } 671 }
529 mask |= 1 << level; 672 mask |= 1 << level;
673 } else
674 break;
530 } 675 }
531 if (adev->powerplay.pp_funcs->force_clock_level) 676 if (adev->powerplay.pp_funcs->force_clock_level)
532 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 677 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
@@ -557,21 +702,27 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
557 struct amdgpu_device *adev = ddev->dev_private; 702 struct amdgpu_device *adev = ddev->dev_private;
558 int ret; 703 int ret;
559 long level; 704 long level;
560 uint32_t i, mask = 0; 705 uint32_t mask = 0;
561 char sub_str[2]; 706 char *sub_str = NULL;
707 char *tmp;
 708 char buf_cpy[count + 1];
709 const char delimiter[3] = {' ', '\n', '\0'};
562 710
563 for (i = 0; i < strlen(buf); i++) { 711 memcpy(buf_cpy, buf, count+1);
564 if (*(buf + i) == '\n') 712 tmp = buf_cpy;
565 continue;
566 sub_str[0] = *(buf + i);
567 sub_str[1] = '\0';
568 ret = kstrtol(sub_str, 0, &level);
569 713
570 if (ret) { 714 while (tmp[0]) {
571 count = -EINVAL; 715 sub_str = strsep(&tmp, delimiter);
572 goto fail; 716 if (strlen(sub_str)) {
573 } 717 ret = kstrtol(sub_str, 0, &level);
574 mask |= 1 << level; 718
719 if (ret) {
720 count = -EINVAL;
721 goto fail;
722 }
723 mask |= 1 << level;
724 } else
725 break;
575 } 726 }
576 if (adev->powerplay.pp_funcs->force_clock_level) 727 if (adev->powerplay.pp_funcs->force_clock_level)
577 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 728 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
@@ -668,6 +819,26 @@ fail:
668 return count; 819 return count;
669} 820}
670 821
822/**
823 * DOC: pp_power_profile_mode
824 *
825 * The amdgpu driver provides a sysfs API for adjusting the heuristics
826 * related to switching between power levels in a power state. The file
827 * pp_power_profile_mode is used for this.
828 *
829 * Reading this file outputs a list of all of the predefined power profiles
 830 * and the relevant heuristics settings for each profile.
831 *
832 * To select a profile or create a custom profile, first select manual using
833 * power_dpm_force_performance_level. Writing the number of a predefined
834 * profile to pp_power_profile_mode will enable those heuristics. To
835 * create a custom set of heuristics, write a string of numbers to the file
836 * starting with the number of the custom profile along with a setting
 837 * for each heuristic parameter. The heuristic parameters vary from
 838 * asic family to asic family.
839 *
840 */
841
671static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, 842static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
672 struct device_attribute *attr, 843 struct device_attribute *attr,
673 char *buf) 844 char *buf)
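
A sketch of selecting a predefined profile by number; the profile list and any custom-profile parameter count are asic-specific, so read the file first (the index written below is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path =
            "/sys/class/drm/card0/device/pp_power_profile_mode";
        char buf[4096] = {0};
        int fd = open(path, O_RDONLY);

        if (fd < 0)
            return 1;
        if (read(fd, buf, sizeof(buf) - 1) > 0)
            printf("%s", buf);  /* lists profiles and their heuristics */
        close(fd);

        fd = open(path, O_WRONLY);
        if (fd < 0)
            return 1;
        write(fd, "2", 1);      /* illustrative: select profile number 2 */
        close(fd);
        return 0;
    }
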
@@ -1020,8 +1191,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
1020{ 1191{
1021 struct amdgpu_device *adev = dev_get_drvdata(dev); 1192 struct amdgpu_device *adev = dev_get_drvdata(dev);
1022 struct drm_device *ddev = adev->ddev; 1193 struct drm_device *ddev = adev->ddev;
1023 struct pp_gpu_power query = {0}; 1194 u32 query = 0;
1024 int r, size = sizeof(query); 1195 int r, size = sizeof(u32);
1025 unsigned uw; 1196 unsigned uw;
1026 1197
1027 /* Can't get power when the card is off */ 1198 /* Can't get power when the card is off */
@@ -1041,7 +1212,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
1041 return r; 1212 return r;
1042 1213
1043 /* convert to microwatts */ 1214 /* convert to microwatts */
1044 uw = (query.average_gpu_power >> 8) * 1000000; 1215 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
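	/* e.g. query = 0x2A80: (0x2A = 42) * 1000000 + (0x80 = 128) * 1000
	 *      = 42128000 uW as reported by power1_average
	 */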
1045 1216
1046 return snprintf(buf, PAGE_SIZE, "%u\n", uw); 1217 return snprintf(buf, PAGE_SIZE, "%u\n", uw);
1047} 1218}
@@ -1109,6 +1280,46 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
1109 return count; 1280 return count;
1110} 1281}
1111 1282
1283
1284/**
1285 * DOC: hwmon
1286 *
1287 * The amdgpu driver exposes the following sensor interfaces:
1288 * - GPU temperature (via the on-die sensor)
1289 * - GPU voltage
1290 * - Northbridge voltage (APUs only)
1291 * - GPU power
1292 * - GPU fan
1293 *
1294 * hwmon interfaces for GPU temperature:
 1295 * - temp1_input: the on-die GPU temperature in millidegrees Celsius
1296 * - temp1_crit: temperature critical max value in millidegrees Celsius
1297 * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
1298 *
1299 * hwmon interfaces for GPU voltage:
1300 * - in0_input: the voltage on the GPU in millivolts
1301 * - in1_input: the voltage on the Northbridge in millivolts
1302 *
1303 * hwmon interfaces for GPU power:
1304 * - power1_average: average power used by the GPU in microWatts
1305 * - power1_cap_min: minimum cap supported in microWatts
1306 * - power1_cap_max: maximum cap supported in microWatts
1307 * - power1_cap: selected power cap in microWatts
1308 *
1309 * hwmon interfaces for GPU fan:
1310 * - pwm1: pulse width modulation fan level (0-255)
1311 * - pwm1_enable: pulse width modulation fan control method
1312 * 0: no fan speed control
1313 * 1: manual fan speed control using pwm interface
1314 * 2: automatic fan speed control
1315 * - pwm1_min: pulse width modulation fan control minimum level (0)
1316 * - pwm1_max: pulse width modulation fan control maximum level (255)
1317 * - fan1_input: fan speed in RPM
1318 *
1319 * You can use hwmon tools like sensors to view this information on your system.
1320 *
1321 */
1322
1112static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0); 1323static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
1113static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 1324static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
1114static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 1325static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
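
A sketch of consuming these files directly; hwmon0 is an assumption here, and in practice you would walk /sys/class/hwmon and match the device by its "name" file:

    #include <stdio.h>

    static unsigned long read_ul(const char *path)
    {
        FILE *f = fopen(path, "r");
        unsigned long v = 0;

        if (f) {
            if (fscanf(f, "%lu", &v) != 1)
                v = 0;
            fclose(f);
        }
        return v;
    }

    int main(void)
    {
        /* assumed hwmon node; discover the right hwmonN on your system */
        unsigned long uw = read_ul("/sys/class/hwmon/hwmon0/power1_average");
        unsigned long mc = read_ul("/sys/class/hwmon/hwmon0/temp1_input");

        printf("power: %lu.%06lu W\n", uw / 1000000, uw % 1000000); /* uW */
        printf("temp:  %lu.%03lu C\n", mc / 1000, mc % 1000);       /* mC */
        return 0;
    }
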
@@ -1153,19 +1364,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
1153 struct amdgpu_device *adev = dev_get_drvdata(dev); 1364 struct amdgpu_device *adev = dev_get_drvdata(dev);
1154 umode_t effective_mode = attr->mode; 1365 umode_t effective_mode = attr->mode;
1155 1366
1156 /* handle non-powerplay limitations */ 1367
1157 if (!adev->powerplay.pp_handle) { 1368 /* Skip fan attributes if fan is not present */
1158 /* Skip fan attributes if fan is not present */ 1369 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
1159 if (adev->pm.no_fan && 1370 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
1160 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 1371 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
1161 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 1372 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
1162 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 1373 attr == &sensor_dev_attr_fan1_input.dev_attr.attr))
1163 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 1374 return 0;
1164 return 0;
1165 /* requires powerplay */
1166 if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
1167 return 0;
1168 }
1169 1375
1170 /* Skip limit attributes if DPM is not enabled */ 1376 /* Skip limit attributes if DPM is not enabled */
1171 if (!adev->pm.dpm_enabled && 1377 if (!adev->pm.dpm_enabled &&
@@ -1658,9 +1864,6 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
1658 1864
1659void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) 1865void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1660{ 1866{
1661 struct drm_device *ddev = adev->ddev;
1662 struct drm_crtc *crtc;
1663 struct amdgpu_crtc *amdgpu_crtc;
1664 int i = 0; 1867 int i = 0;
1665 1868
1666 if (!adev->pm.dpm_enabled) 1869 if (!adev->pm.dpm_enabled)
@@ -1676,21 +1879,25 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1676 } 1879 }
1677 1880
1678 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1881 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1882 if (!amdgpu_device_has_dc_support(adev)) {
1883 mutex_lock(&adev->pm.mutex);
1884 amdgpu_dpm_get_active_displays(adev);
1885 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
1886 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1887 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1888 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
1889 if (adev->pm.pm_display_cfg.vrefresh > 120)
1890 adev->pm.pm_display_cfg.min_vblank_time = 0;
1891 if (adev->powerplay.pp_funcs->display_configuration_change)
1892 adev->powerplay.pp_funcs->display_configuration_change(
1893 adev->powerplay.pp_handle,
1894 &adev->pm.pm_display_cfg);
1895 mutex_unlock(&adev->pm.mutex);
1896 }
1679 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); 1897 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
1680 } else { 1898 } else {
1681 mutex_lock(&adev->pm.mutex); 1899 mutex_lock(&adev->pm.mutex);
1682 adev->pm.dpm.new_active_crtcs = 0; 1900 amdgpu_dpm_get_active_displays(adev);
1683 adev->pm.dpm.new_active_crtc_count = 0;
1684 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
1685 list_for_each_entry(crtc,
1686 &ddev->mode_config.crtc_list, head) {
1687 amdgpu_crtc = to_amdgpu_crtc(crtc);
1688 if (amdgpu_crtc->enabled) {
1689 adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
1690 adev->pm.dpm.new_active_crtc_count++;
1691 }
1692 }
1693 }
1694 /* update battery/ac status */ 1901 /* update battery/ac status */
1695 if (power_supply_is_system_supplied() > 0) 1902 if (power_supply_is_system_supplied() > 0)
1696 adev->pm.dpm.ac_power = true; 1903 adev->pm.dpm.ac_power = true;
@@ -1711,7 +1918,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1711static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 1918static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
1712{ 1919{
1713 uint32_t value; 1920 uint32_t value;
1714 struct pp_gpu_power query = {0}; 1921 uint32_t query = 0;
1715 int size; 1922 int size;
1716 1923
1717 /* sanity check PP is enabled */ 1924 /* sanity check PP is enabled */
@@ -1734,17 +1941,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
1734 seq_printf(m, "\t%u mV (VDDGFX)\n", value); 1941 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
1735 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 1942 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
1736 seq_printf(m, "\t%u mV (VDDNB)\n", value); 1943 seq_printf(m, "\t%u mV (VDDNB)\n", value);
1737 size = sizeof(query); 1944 size = sizeof(uint32_t);
1738 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) { 1945 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
1739 seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8, 1946 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
1740 query.vddc_power & 0xff);
1741 seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8,
1742 query.vddci_power & 0xff);
1743 seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8,
1744 query.max_gpu_power & 0xff);
1745 seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8,
1746 query.average_gpu_power & 0xff);
1747 }
1748 size = sizeof(value); 1947 size = sizeof(value);
1749 seq_printf(m, "\n"); 1948 seq_printf(m, "\n");
1750 1949
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 4b584cb75bf4..4683626b065f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -102,12 +102,18 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
102 struct reservation_object *resv = attach->dmabuf->resv; 102 struct reservation_object *resv = attach->dmabuf->resv;
103 struct amdgpu_device *adev = dev->dev_private; 103 struct amdgpu_device *adev = dev->dev_private;
104 struct amdgpu_bo *bo; 104 struct amdgpu_bo *bo;
105 struct amdgpu_bo_param bp;
105 int ret; 106 int ret;
106 107
108 memset(&bp, 0, sizeof(bp));
109 bp.size = attach->dmabuf->size;
110 bp.byte_align = PAGE_SIZE;
111 bp.domain = AMDGPU_GEM_DOMAIN_CPU;
112 bp.flags = 0;
113 bp.type = ttm_bo_type_sg;
114 bp.resv = resv;
107 ww_mutex_lock(&resv->lock, NULL); 115 ww_mutex_lock(&resv->lock, NULL);
108 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, 116 ret = amdgpu_bo_create(adev, &bp, &bo);
109 AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
110 resv, &bo);
111 if (ret) 117 if (ret)
112 goto error; 118 goto error;
113 119
@@ -209,7 +215,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
209 struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv); 215 struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
210 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 216 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
211 struct ttm_operation_ctx ctx = { true, false }; 217 struct ttm_operation_ctx ctx = { true, false };
212 u32 domain = amdgpu_display_framebuffer_domains(adev); 218 u32 domain = amdgpu_display_supported_domains(adev);
213 int ret; 219 int ret;
214 bool reads = (direction == DMA_BIDIRECTIONAL || 220 bool reads = (direction == DMA_BIDIRECTIONAL ||
215 direction == DMA_FROM_DEVICE); 221 direction == DMA_FROM_DEVICE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index d5f526f38e50..49cad08b5c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -459,6 +459,26 @@ void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
459 spin_unlock(&adev->ring_lru_list_lock); 459 spin_unlock(&adev->ring_lru_list_lock);
460} 460}
461 461
462/**
463 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
464 *
 465 * @ring: amdgpu_ring pointer
466 * @reg0: register to write
467 * @reg1: register to wait on
468 * @ref: reference value to write/wait on
469 * @mask: mask to wait on
470 *
471 * Helper for rings that don't support write and wait in a
472 * single oneshot packet.
473 */
474void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
475 uint32_t reg0, uint32_t reg1,
476 uint32_t ref, uint32_t mask)
477{
478 amdgpu_ring_emit_wreg(ring, reg0, ref);
479 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
480}
481
462/* 482/*
463 * Debugfs info 483 * Debugfs info
464 */ 484 */
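
Backends whose packet format lacks a fused write+wait can simply point the new emit_reg_write_reg_wait hook at this helper. A sketch of that wiring; the example_* callbacks are hypothetical placeholders and the initializer is abbreviated:

    static const struct amdgpu_ring_funcs example_ring_funcs = {
        /* ...mandatory callbacks elided for brevity... */
        .emit_wreg = example_ring_emit_wreg,          /* hypothetical */
        .emit_reg_wait = example_ring_emit_reg_wait,  /* hypothetical */
        /* no single-packet write+wait on this ring, so reuse the helper */
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
    };
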
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 1a5911882657..4f8dac2d36a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -42,6 +42,7 @@
42 42
43#define AMDGPU_FENCE_FLAG_64BIT (1 << 0) 43#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
44#define AMDGPU_FENCE_FLAG_INT (1 << 1) 44#define AMDGPU_FENCE_FLAG_INT (1 << 1)
45#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
45 46
46enum amdgpu_ring_type { 47enum amdgpu_ring_type {
47 AMDGPU_RING_TYPE_GFX, 48 AMDGPU_RING_TYPE_GFX,
@@ -90,7 +91,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
90 unsigned irq_type); 91 unsigned irq_type);
91void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); 92void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
92void amdgpu_fence_driver_resume(struct amdgpu_device *adev); 93void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
93int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence); 94int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
95 unsigned flags);
94int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s); 96int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
95void amdgpu_fence_process(struct amdgpu_ring *ring); 97void amdgpu_fence_process(struct amdgpu_ring *ring);
96int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); 98int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -154,6 +156,9 @@ struct amdgpu_ring_funcs {
154 void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); 156 void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
155 void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg, 157 void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
156 uint32_t val, uint32_t mask); 158 uint32_t val, uint32_t mask);
159 void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
160 uint32_t reg0, uint32_t reg1,
161 uint32_t ref, uint32_t mask);
157 void (*emit_tmz)(struct amdgpu_ring *ring, bool start); 162 void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
158 /* priority functions */ 163 /* priority functions */
159 void (*set_priority) (struct amdgpu_ring *ring, 164 void (*set_priority) (struct amdgpu_ring *ring,
@@ -228,6 +233,10 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
228 int *blacklist, int num_blacklist, 233 int *blacklist, int num_blacklist,
229 bool lru_pipe_order, struct amdgpu_ring **ring); 234 bool lru_pipe_order, struct amdgpu_ring **ring);
230void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring); 235void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
236void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
 237 uint32_t reg0, uint32_t reg1,
 238 uint32_t ref, uint32_t mask);
239
231static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) 240static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
232{ 241{
233 int i = 0; 242 int i = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 2dbe87591f81..d167e8ab76d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -33,6 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
33 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; 33 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
34 struct amdgpu_bo *vram_obj = NULL; 34 struct amdgpu_bo *vram_obj = NULL;
35 struct amdgpu_bo **gtt_obj = NULL; 35 struct amdgpu_bo **gtt_obj = NULL;
36 struct amdgpu_bo_param bp;
36 uint64_t gart_addr, vram_addr; 37 uint64_t gart_addr, vram_addr;
37 unsigned n, size; 38 unsigned n, size;
38 int i, r; 39 int i, r;
@@ -58,9 +59,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
58 r = 1; 59 r = 1;
59 goto out_cleanup; 60 goto out_cleanup;
60 } 61 }
61 62 memset(&bp, 0, sizeof(bp));
62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0, 63 bp.size = size;
63 ttm_bo_type_kernel, NULL, &vram_obj); 64 bp.byte_align = PAGE_SIZE;
65 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
66 bp.flags = 0;
67 bp.type = ttm_bo_type_kernel;
68 bp.resv = NULL;
69
70 r = amdgpu_bo_create(adev, &bp, &vram_obj);
64 if (r) { 71 if (r) {
65 DRM_ERROR("Failed to create VRAM object\n"); 72 DRM_ERROR("Failed to create VRAM object\n");
66 goto out_cleanup; 73 goto out_cleanup;
@@ -79,9 +86,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
79 void **vram_start, **vram_end; 86 void **vram_start, **vram_end;
80 struct dma_fence *fence = NULL; 87 struct dma_fence *fence = NULL;
81 88
82 r = amdgpu_bo_create(adev, size, PAGE_SIZE, 89 bp.domain = AMDGPU_GEM_DOMAIN_GTT;
83 AMDGPU_GEM_DOMAIN_GTT, 0, 90 r = amdgpu_bo_create(adev, &bp, gtt_obj + i);
84 ttm_bo_type_kernel, NULL, gtt_obj + i);
85 if (r) { 91 if (r) {
86 DRM_ERROR("Failed to create GTT object %d\n", i); 92 DRM_ERROR("Failed to create GTT object %d\n", i);
87 goto out_lclean; 93 goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 532263ab6e16..e96e26d3f3b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -275,7 +275,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
275 ), 275 ),
276 276
277 TP_fast_assign( 277 TP_fast_assign(
278 __entry->bo = bo_va->base.bo; 278 __entry->bo = bo_va ? bo_va->base.bo : NULL;
279 __entry->start = mapping->start; 279 __entry->start = mapping->start;
280 __entry->last = mapping->last; 280 __entry->last = mapping->last;
281 __entry->offset = mapping->offset; 281 __entry->offset = mapping->offset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c713d30cba86..69a2b25b3696 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -111,7 +111,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
111 ring = adev->mman.buffer_funcs_ring; 111 ring = adev->mman.buffer_funcs_ring;
112 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; 112 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
113 r = drm_sched_entity_init(&ring->sched, &adev->mman.entity, 113 r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
114 rq, amdgpu_sched_jobs, NULL); 114 rq, NULL);
115 if (r) { 115 if (r) {
116 DRM_ERROR("Failed setting up TTM BO move run queue.\n"); 116 DRM_ERROR("Failed setting up TTM BO move run queue.\n");
117 goto error_entity; 117 goto error_entity;
@@ -223,20 +223,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
223 if (!adev->mman.buffer_funcs_enabled) { 223 if (!adev->mman.buffer_funcs_enabled) {
224 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); 224 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
225 } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size && 225 } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
226 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { 226 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
227 unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; 227 amdgpu_bo_in_cpu_visible_vram(abo)) {
228 struct drm_mm_node *node = bo->mem.mm_node;
229 unsigned long pages_left;
230
231 for (pages_left = bo->mem.num_pages;
232 pages_left;
233 pages_left -= node->size, node++) {
234 if (node->start < fpfn)
235 break;
236 }
237
238 if (!pages_left)
239 goto gtt;
240 228
241 /* Try evicting to the CPU inaccessible part of VRAM 229 /* Try evicting to the CPU inaccessible part of VRAM
242 * first, but only set GTT as busy placement, so this 230 * first, but only set GTT as busy placement, so this
@@ -245,12 +233,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
245 */ 233 */
246 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | 234 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
247 AMDGPU_GEM_DOMAIN_GTT); 235 AMDGPU_GEM_DOMAIN_GTT);
248 abo->placements[0].fpfn = fpfn; 236 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
249 abo->placements[0].lpfn = 0; 237 abo->placements[0].lpfn = 0;
250 abo->placement.busy_placement = &abo->placements[1]; 238 abo->placement.busy_placement = &abo->placements[1];
251 abo->placement.num_busy_placement = 1; 239 abo->placement.num_busy_placement = 1;
252 } else { 240 } else {
253gtt:
254 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); 241 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
255 } 242 }
256 break; 243 break;
@@ -856,6 +843,45 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
856 sg_free_table(ttm->sg); 843 sg_free_table(ttm->sg);
857} 844}
858 845
846int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
847 struct ttm_buffer_object *tbo,
848 uint64_t flags)
849{
850 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
851 struct ttm_tt *ttm = tbo->ttm;
852 struct amdgpu_ttm_tt *gtt = (void *)ttm;
853 int r;
854
855 if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
856 uint64_t page_idx = 1;
857
858 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
859 ttm->pages, gtt->ttm.dma_address, flags);
860 if (r)
861 goto gart_bind_fail;
862
863 /* Patch mtype of the second part BO */
864 flags &= ~AMDGPU_PTE_MTYPE_MASK;
865 flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);
866
867 r = amdgpu_gart_bind(adev,
868 gtt->offset + (page_idx << PAGE_SHIFT),
869 ttm->num_pages - page_idx,
870 &ttm->pages[page_idx],
871 &(gtt->ttm.dma_address[page_idx]), flags);
872 } else {
873 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
874 ttm->pages, gtt->ttm.dma_address, flags);
875 }
876
877gart_bind_fail:
878 if (r)
879 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
880 ttm->num_pages, gtt->offset);
881
882 return r;
883}
884
859static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, 885static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
860 struct ttm_mem_reg *bo_mem) 886 struct ttm_mem_reg *bo_mem)
861{ 887{
@@ -929,8 +955,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
929 955
930 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); 956 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
931 gtt->offset = (u64)tmp.start << PAGE_SHIFT; 957 gtt->offset = (u64)tmp.start << PAGE_SHIFT;
932 r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages, 958 r = amdgpu_ttm_gart_bind(adev, bo, flags);
933 bo->ttm->pages, gtt->ttm.dma_address, flags);
934 if (unlikely(r)) { 959 if (unlikely(r)) {
935 ttm_bo_mem_put(bo, &tmp); 960 ttm_bo_mem_put(bo, &tmp);
936 return r; 961 return r;
@@ -947,19 +972,15 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
947int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) 972int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
948{ 973{
949 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 974 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
950 struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
951 uint64_t flags; 975 uint64_t flags;
952 int r; 976 int r;
953 977
954 if (!gtt) 978 if (!tbo->ttm)
955 return 0; 979 return 0;
956 980
957 flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem); 981 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
958 r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages, 982 r = amdgpu_ttm_gart_bind(adev, tbo, flags);
959 gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags); 983
960 if (r)
961 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
962 gtt->ttm.ttm.num_pages, gtt->offset);
963 return r; 984 return r;
964} 985}
965 986
@@ -1349,6 +1370,7 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1349static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) 1370static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1350{ 1371{
1351 struct ttm_operation_ctx ctx = { false, false }; 1372 struct ttm_operation_ctx ctx = { false, false };
1373 struct amdgpu_bo_param bp;
1352 int r = 0; 1374 int r = 0;
1353 int i; 1375 int i;
1354 u64 vram_size = adev->gmc.visible_vram_size; 1376 u64 vram_size = adev->gmc.visible_vram_size;
@@ -1356,17 +1378,21 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1356 u64 size = adev->fw_vram_usage.size; 1378 u64 size = adev->fw_vram_usage.size;
1357 struct amdgpu_bo *bo; 1379 struct amdgpu_bo *bo;
1358 1380
1381 memset(&bp, 0, sizeof(bp));
1382 bp.size = adev->fw_vram_usage.size;
1383 bp.byte_align = PAGE_SIZE;
1384 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
1385 bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1386 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1387 bp.type = ttm_bo_type_kernel;
1388 bp.resv = NULL;
1359 adev->fw_vram_usage.va = NULL; 1389 adev->fw_vram_usage.va = NULL;
1360 adev->fw_vram_usage.reserved_bo = NULL; 1390 adev->fw_vram_usage.reserved_bo = NULL;
1361 1391
1362 if (adev->fw_vram_usage.size > 0 && 1392 if (adev->fw_vram_usage.size > 0 &&
1363 adev->fw_vram_usage.size <= vram_size) { 1393 adev->fw_vram_usage.size <= vram_size) {
1364 1394
1365 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE, 1395 r = amdgpu_bo_create(adev, &bp,
1366 AMDGPU_GEM_DOMAIN_VRAM,
1367 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1368 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1369 ttm_bo_type_kernel, NULL,
1370 &adev->fw_vram_usage.reserved_bo); 1396 &adev->fw_vram_usage.reserved_bo);
1371 if (r) 1397 if (r)
1372 goto error_create; 1398 goto error_create;
@@ -1474,12 +1500,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
1474 return r; 1500 return r;
1475 } 1501 }
1476 1502
1477 r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, 1503 if (adev->gmc.stolen_size) {
1478 AMDGPU_GEM_DOMAIN_VRAM, 1504 r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1479 &adev->stolen_vga_memory, 1505 AMDGPU_GEM_DOMAIN_VRAM,
1480 NULL, NULL); 1506 &adev->stolen_vga_memory,
1481 if (r) 1507 NULL, NULL);
1482 return r; 1508 if (r)
1509 return r;
1510 }
1483 DRM_INFO("amdgpu: %uM of VRAM memory ready\n", 1511 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1484 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); 1512 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1485 1513
@@ -1548,13 +1576,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
1548 return 0; 1576 return 0;
1549} 1577}
1550 1578
1579void amdgpu_ttm_late_init(struct amdgpu_device *adev)
1580{
1581 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
1582}
1583
1551void amdgpu_ttm_fini(struct amdgpu_device *adev) 1584void amdgpu_ttm_fini(struct amdgpu_device *adev)
1552{ 1585{
1553 if (!adev->mman.initialized) 1586 if (!adev->mman.initialized)
1554 return; 1587 return;
1555 1588
1556 amdgpu_ttm_debugfs_fini(adev); 1589 amdgpu_ttm_debugfs_fini(adev);
1557 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
1558 amdgpu_ttm_fw_reserve_vram_fini(adev); 1590 amdgpu_ttm_fw_reserve_vram_fini(adev);
1559 if (adev->mman.aper_base_kaddr) 1591 if (adev->mman.aper_base_kaddr)
1560 iounmap(adev->mman.aper_base_kaddr); 1592 iounmap(adev->mman.aper_base_kaddr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 6ea7de863041..e969c879d87e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -77,6 +77,7 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
77uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); 77uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
78 78
79int amdgpu_ttm_init(struct amdgpu_device *adev); 79int amdgpu_ttm_init(struct amdgpu_device *adev);
80void amdgpu_ttm_late_init(struct amdgpu_device *adev);
80void amdgpu_ttm_fini(struct amdgpu_device *adev); 81void amdgpu_ttm_fini(struct amdgpu_device *adev);
81void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, 82void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
82 bool enable); 83 bool enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5916cc25e28b..75592bd04d6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -161,8 +161,38 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
161 le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes)); 161 le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
162 DRM_DEBUG("reg_list_separate_size_bytes: %u\n", 162 DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
163 le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes)); 163 le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
164 DRM_DEBUG("reg_list_separate_size_bytes: %u\n", 164 DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
165 le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes)); 165 le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
166 if (version_minor == 1) {
167 const struct rlc_firmware_header_v2_1 *v2_1 =
168 container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
169 DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
170 le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
171 DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
172 le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
173 DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
174 le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
175 DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
176 le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
177 DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
178 le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
179 DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
180 le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
181 DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
182 le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
183 DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
184 le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
185 DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
186 le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
187 DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
188 le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
189 DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
190 le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
191 DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
192 le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
193 DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
194 le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
195 }
166 } else { 196 } else {
167 DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor); 197 DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
168 } 198 }
@@ -265,6 +295,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
265 case CHIP_POLARIS10: 295 case CHIP_POLARIS10:
266 case CHIP_POLARIS11: 296 case CHIP_POLARIS11:
267 case CHIP_POLARIS12: 297 case CHIP_POLARIS12:
298 case CHIP_VEGAM:
268 if (!load_type) 299 if (!load_type)
269 return AMDGPU_FW_LOAD_DIRECT; 300 return AMDGPU_FW_LOAD_DIRECT;
270 else 301 else
@@ -307,7 +338,10 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
307 (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 && 338 (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
308 ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 && 339 ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
309 ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT && 340 ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
310 ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT)) { 341 ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
342 ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
343 ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
344 ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
311 ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); 345 ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
312 346
313 memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + 347 memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
@@ -329,6 +363,18 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
329 le32_to_cpu(header->ucode_array_offset_bytes) + 363 le32_to_cpu(header->ucode_array_offset_bytes) +
330 le32_to_cpu(cp_hdr->jt_offset) * 4), 364 le32_to_cpu(cp_hdr->jt_offset) * 4),
331 ucode->ucode_size); 365 ucode->ucode_size);
366 } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
367 ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
368 memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
369 ucode->ucode_size);
370 } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) {
371 ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
372 memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm,
373 ucode->ucode_size);
374 } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
375 ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
376 memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
377 ucode->ucode_size);
332 } 378 }
333 379
334 return 0; 380 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 30b5500dc152..08e38579af24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -98,6 +98,24 @@ struct rlc_firmware_header_v2_0 {
98 uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */ 98 uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */
99}; 99};
100 100
101/* version_major=2, version_minor=1 */
102struct rlc_firmware_header_v2_1 {
103 struct rlc_firmware_header_v2_0 v2_0;
104 uint32_t reg_list_format_direct_reg_list_length; /* length of direct reg list format array */
105 uint32_t save_restore_list_cntl_ucode_ver;
106 uint32_t save_restore_list_cntl_feature_ver;
107 uint32_t save_restore_list_cntl_size_bytes;
108 uint32_t save_restore_list_cntl_offset_bytes;
109 uint32_t save_restore_list_gpm_ucode_ver;
110 uint32_t save_restore_list_gpm_feature_ver;
111 uint32_t save_restore_list_gpm_size_bytes;
112 uint32_t save_restore_list_gpm_offset_bytes;
113 uint32_t save_restore_list_srm_ucode_ver;
114 uint32_t save_restore_list_srm_feature_ver;
115 uint32_t save_restore_list_srm_size_bytes;
116 uint32_t save_restore_list_srm_offset_bytes;
117};
118
101/* version_major=1, version_minor=0 */ 119/* version_major=1, version_minor=0 */
102struct sdma_firmware_header_v1_0 { 120struct sdma_firmware_header_v1_0 {
103 struct common_firmware_header header; 121 struct common_firmware_header header;
@@ -148,6 +166,7 @@ union amdgpu_firmware_header {
148 struct gfx_firmware_header_v1_0 gfx; 166 struct gfx_firmware_header_v1_0 gfx;
149 struct rlc_firmware_header_v1_0 rlc; 167 struct rlc_firmware_header_v1_0 rlc;
150 struct rlc_firmware_header_v2_0 rlc_v2_0; 168 struct rlc_firmware_header_v2_0 rlc_v2_0;
169 struct rlc_firmware_header_v2_1 rlc_v2_1;
151 struct sdma_firmware_header_v1_0 sdma; 170 struct sdma_firmware_header_v1_0 sdma;
152 struct sdma_firmware_header_v1_1 sdma_v1_1; 171 struct sdma_firmware_header_v1_1 sdma_v1_1;
153 struct gpu_info_firmware_header_v1_0 gpu_info; 172 struct gpu_info_firmware_header_v1_0 gpu_info;
@@ -168,6 +187,9 @@ enum AMDGPU_UCODE_ID {
168 AMDGPU_UCODE_ID_CP_MEC2, 187 AMDGPU_UCODE_ID_CP_MEC2,
169 AMDGPU_UCODE_ID_CP_MEC2_JT, 188 AMDGPU_UCODE_ID_CP_MEC2_JT,
170 AMDGPU_UCODE_ID_RLC_G, 189 AMDGPU_UCODE_ID_RLC_G,
190 AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
191 AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
192 AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
171 AMDGPU_UCODE_ID_STORAGE, 193 AMDGPU_UCODE_ID_STORAGE,
172 AMDGPU_UCODE_ID_SMC, 194 AMDGPU_UCODE_ID_SMC,
173 AMDGPU_UCODE_ID_UVD, 195 AMDGPU_UCODE_ID_UVD,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 627542b22ae4..de4d77af02ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -66,6 +66,7 @@
66#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin" 66#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
67#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin" 67#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
68#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin" 68#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
69#define FIRMWARE_VEGAM "amdgpu/vegam_uvd.bin"
69 70
70#define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin" 71#define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin"
71#define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin" 72#define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
@@ -109,6 +110,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
109MODULE_FIRMWARE(FIRMWARE_POLARIS10); 110MODULE_FIRMWARE(FIRMWARE_POLARIS10);
110MODULE_FIRMWARE(FIRMWARE_POLARIS11); 111MODULE_FIRMWARE(FIRMWARE_POLARIS11);
111MODULE_FIRMWARE(FIRMWARE_POLARIS12); 112MODULE_FIRMWARE(FIRMWARE_POLARIS12);
113MODULE_FIRMWARE(FIRMWARE_VEGAM);
112 114
113MODULE_FIRMWARE(FIRMWARE_VEGA10); 115MODULE_FIRMWARE(FIRMWARE_VEGA10);
114MODULE_FIRMWARE(FIRMWARE_VEGA12); 116MODULE_FIRMWARE(FIRMWARE_VEGA12);
@@ -172,6 +174,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
172 case CHIP_VEGA12: 174 case CHIP_VEGA12:
173 fw_name = FIRMWARE_VEGA12; 175 fw_name = FIRMWARE_VEGA12;
174 break; 176 break;
177 case CHIP_VEGAM:
178 fw_name = FIRMWARE_VEGAM;
179 break;
175 default: 180 default:
176 return -EINVAL; 181 return -EINVAL;
177 } 182 }
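The UVD init path keys the firmware file off the ASIC type, so VEGAM only
needs the new switch case plus the MODULE_FIRMWARE() line above for the blob
to be discoverable. A sketch of the load that typically follows the switch,
assuming the standard firmware API (error handling trimmed):

	/* Sketch: fetch and sanity-check the chosen UVD blob. */
	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r == 0)
		r = amdgpu_ucode_validate(adev->uvd.fw);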
@@ -237,7 +242,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
237 ring = &adev->uvd.ring; 242 ring = &adev->uvd.ring;
238 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 243 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
239 r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity, 244 r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
240 rq, amdgpu_sched_jobs, NULL); 245 rq, NULL);
241 if (r != 0) { 246 if (r != 0) {
242 DRM_ERROR("Failed setting up UVD run queue.\n"); 247 DRM_ERROR("Failed setting up UVD run queue.\n");
243 return r; 248 return r;
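This hunk is the scheduler API change: drm_sched_entity_init() no longer takes
a per-entity jobs limit, so every caller simply drops the amdgpu_sched_jobs
argument. The same mechanical change repeats below for VCE, VCN (dec and enc)
and the VM entity:

	/* before */ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
						 rq, amdgpu_sched_jobs, NULL);
	/* after  */ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
						 rq, NULL);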
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index a33804bd3314..a86322f5164f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -51,8 +51,9 @@
51#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin" 51#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
52#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin" 52#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
53#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin" 53#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
54#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin" 54#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
55#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin" 55#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
56#define FIRMWARE_VEGAM "amdgpu/vegam_vce.bin"
56 57
57#define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin" 58#define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin"
58#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin" 59#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
@@ -71,6 +72,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
71MODULE_FIRMWARE(FIRMWARE_POLARIS10); 72MODULE_FIRMWARE(FIRMWARE_POLARIS10);
72MODULE_FIRMWARE(FIRMWARE_POLARIS11); 73MODULE_FIRMWARE(FIRMWARE_POLARIS11);
73MODULE_FIRMWARE(FIRMWARE_POLARIS12); 74MODULE_FIRMWARE(FIRMWARE_POLARIS12);
75MODULE_FIRMWARE(FIRMWARE_VEGAM);
74 76
75MODULE_FIRMWARE(FIRMWARE_VEGA10); 77MODULE_FIRMWARE(FIRMWARE_VEGA10);
76MODULE_FIRMWARE(FIRMWARE_VEGA12); 78MODULE_FIRMWARE(FIRMWARE_VEGA12);
@@ -132,6 +134,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
132 case CHIP_POLARIS12: 134 case CHIP_POLARIS12:
133 fw_name = FIRMWARE_POLARIS12; 135 fw_name = FIRMWARE_POLARIS12;
134 break; 136 break;
137 case CHIP_VEGAM:
138 fw_name = FIRMWARE_VEGAM;
139 break;
135 case CHIP_VEGA10: 140 case CHIP_VEGA10:
136 fw_name = FIRMWARE_VEGA10; 141 fw_name = FIRMWARE_VEGA10;
137 break; 142 break;
@@ -181,7 +186,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
181 ring = &adev->vce.ring[0]; 186 ring = &adev->vce.ring[0];
182 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 187 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
183 r = drm_sched_entity_init(&ring->sched, &adev->vce.entity, 188 r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
184 rq, amdgpu_sched_jobs, NULL); 189 rq, NULL);
185 if (r != 0) { 190 if (r != 0) {
186 DRM_ERROR("Failed setting up VCE run queue.\n"); 191 DRM_ERROR("Failed setting up VCE run queue.\n");
187 return r; 192 return r;
@@ -755,6 +760,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
755 if (r) 760 if (r)
756 goto out; 761 goto out;
757 break; 762 break;
763
764 case 0x0500000d: /* MV buffer */
765 r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
766 idx + 2, 0, 0);
767 if (r)
768 goto out;
769
770 r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
771 idx + 7, 0, 0);
772 if (r)
773 goto out;
774 break;
758 } 775 }
759 776
760 idx += len / 4; 777 idx += len / 4;
@@ -860,6 +877,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
860 goto out; 877 goto out;
861 break; 878 break;
862 879
880 case 0x0500000d: /* MV buffer */
881 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
882 idx + 2, *size, 0);
883 if (r)
884 goto out;
885
886 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
887 idx + 7, *size / 12, 0);
888 if (r)
889 goto out;
890 break;
891
863 default: 892 default:
864 DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); 893 DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
865 r = -EINVAL; 894 r = -EINVAL;
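Command 0x0500000d carries two buffer references (the MV buffer operands at
idx + 2/3 and a companion at idx + 7/8), and the parser touches it twice: the
first hunk's amdgpu_vce_validate_bo() pass bounds-checks the BOs before any
patching, and the second hunk's amdgpu_vce_cs_reloc() pass resolves the
addresses, sizing the companion at *size / 12. A condensed sketch of the
two-pass shape, using only the calls visible above:

	/* Sketch: pass 1 validates, pass 2 relocates the same operands. */
	r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2, 0, 0);
	/* ... later, once sizes are known ... */
	r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, *size, 0);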
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 58e495330b38..e5d234cf804f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -105,7 +105,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
105 ring = &adev->vcn.ring_dec; 105 ring = &adev->vcn.ring_dec;
106 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 106 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
107 r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec, 107 r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
108 rq, amdgpu_sched_jobs, NULL); 108 rq, NULL);
109 if (r != 0) { 109 if (r != 0) {
110 DRM_ERROR("Failed setting up VCN dec run queue.\n"); 110 DRM_ERROR("Failed setting up VCN dec run queue.\n");
111 return r; 111 return r;
@@ -114,7 +114,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
114 ring = &adev->vcn.ring_enc[0]; 114 ring = &adev->vcn.ring_enc[0];
115 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 115 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
116 r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc, 116 r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
117 rq, amdgpu_sched_jobs, NULL); 117 rq, NULL);
118 if (r != 0) { 118 if (r != 0) {
119 DRM_ERROR("Failed setting up VCN enc run queue.\n"); 119 DRM_ERROR("Failed setting up VCN enc run queue.\n");
120 return r; 120 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index da55a78d7380..1a8f4e0dd023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -94,6 +94,36 @@ struct amdgpu_prt_cb {
94 struct dma_fence_cb cb; 94 struct dma_fence_cb cb;
95}; 95};
96 96
97static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
98 struct amdgpu_vm *vm,
99 struct amdgpu_bo *bo)
100{
101 base->vm = vm;
102 base->bo = bo;
103 INIT_LIST_HEAD(&base->bo_list);
104 INIT_LIST_HEAD(&base->vm_status);
105
106 if (!bo)
107 return;
108 list_add_tail(&base->bo_list, &bo->va);
109
110 if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
111 return;
112
113 if (bo->preferred_domains &
114 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
115 return;
116
117	/*
118	 * We checked all the prerequisites, but it looks like this per-VM BO
119	 * is currently evicted. Add the BO to the evicted list to make sure
120	 * it is validated on next VM use to avoid faults.
121	 */
122 spin_lock(&vm->status_lock);
123 list_move_tail(&base->vm_status, &vm->evicted);
124 spin_unlock(&vm->status_lock);
125}
126
97/** 127/**
98 * amdgpu_vm_level_shift - return the addr shift for each level 128 * amdgpu_vm_level_shift - return the addr shift for each level
99 * 129 *
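amdgpu_vm_bo_base_init() pulls the list setup and the "re-add to evicted if
not in the preferred domain" check into one place; the rest of this patch
converts amdgpu_vm_alloc_levels(), amdgpu_vm_bo_add() and amdgpu_vm_init() to
call it instead of open-coding the same steps. Usage as it appears later in
the patch:

	amdgpu_vm_bo_base_init(&entry->base, vm, pt);     /* page table */
	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);     /* per-VM BO, bo may be NULL */
	amdgpu_vm_bo_base_init(&vm->root.base, vm, root); /* root page directory */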
@@ -412,11 +442,16 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
412 struct amdgpu_bo *pt; 442 struct amdgpu_bo *pt;
413 443
414 if (!entry->base.bo) { 444 if (!entry->base.bo) {
415 r = amdgpu_bo_create(adev, 445 struct amdgpu_bo_param bp;
416 amdgpu_vm_bo_size(adev, level), 446
417 AMDGPU_GPU_PAGE_SIZE, 447 memset(&bp, 0, sizeof(bp));
418 AMDGPU_GEM_DOMAIN_VRAM, flags, 448 bp.size = amdgpu_vm_bo_size(adev, level);
419 ttm_bo_type_kernel, resv, &pt); 449 bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
450 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
451 bp.flags = flags;
452 bp.type = ttm_bo_type_kernel;
453 bp.resv = resv;
454 r = amdgpu_bo_create(adev, &bp, &pt);
420 if (r) 455 if (r)
421 return r; 456 return r;
422 457
@@ -441,11 +476,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
441 */ 476 */
442 pt->parent = amdgpu_bo_ref(parent->base.bo); 477 pt->parent = amdgpu_bo_ref(parent->base.bo);
443 478
444 entry->base.vm = vm; 479 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
445 entry->base.bo = pt;
446 list_add_tail(&entry->base.bo_list, &pt->va);
447 spin_lock(&vm->status_lock); 480 spin_lock(&vm->status_lock);
448 list_add(&entry->base.vm_status, &vm->relocated); 481 list_move(&entry->base.vm_status, &vm->relocated);
449 spin_unlock(&vm->status_lock); 482 spin_unlock(&vm->status_lock);
450 } 483 }
451 484
@@ -628,7 +661,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
628 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); 661 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
629 662
630 if (vm_flush_needed || pasid_mapping_needed) { 663 if (vm_flush_needed || pasid_mapping_needed) {
631 r = amdgpu_fence_emit(ring, &fence); 664 r = amdgpu_fence_emit(ring, &fence, 0);
632 if (r) 665 if (r)
633 return r; 666 return r;
634 } 667 }
@@ -1557,6 +1590,15 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1557 1590
1558 spin_lock(&vm->status_lock); 1591 spin_lock(&vm->status_lock);
1559 list_del_init(&bo_va->base.vm_status); 1592 list_del_init(&bo_va->base.vm_status);
1593
1594 /* If the BO is not in its preferred location add it back to
1595 * the evicted list so that it gets validated again on the
1596 * next command submission.
1597 */
1598 if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
1599 !(bo->preferred_domains &
1600 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
1601 list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1560 spin_unlock(&vm->status_lock); 1602 spin_unlock(&vm->status_lock);
1561 1603
1562 list_splice_init(&bo_va->invalids, &bo_va->valids); 1604 list_splice_init(&bo_va->invalids, &bo_va->valids);
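The update path now mirrors the check done at init time: once a mapping is
rewritten, a per-VM BO that sits outside its preferred domains goes back on
the evicted list so the next submission revalidates it. The predicate is the
same one used by amdgpu_vm_bo_base_init() above:

	/* Sketch: "shares the root reservation and is out of place". */
	bool needs_revalidate = bo &&
		bo->tbo.resv == vm->root.base.bo->tbo.resv &&
		!(bo->preferred_domains &
		  amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type));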
@@ -1827,36 +1869,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1827 if (bo_va == NULL) { 1869 if (bo_va == NULL) {
1828 return NULL; 1870 return NULL;
1829 } 1871 }
1830 bo_va->base.vm = vm; 1872 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1831 bo_va->base.bo = bo;
1832 INIT_LIST_HEAD(&bo_va->base.bo_list);
1833 INIT_LIST_HEAD(&bo_va->base.vm_status);
1834 1873
1835 bo_va->ref_count = 1; 1874 bo_va->ref_count = 1;
1836 INIT_LIST_HEAD(&bo_va->valids); 1875 INIT_LIST_HEAD(&bo_va->valids);
1837 INIT_LIST_HEAD(&bo_va->invalids); 1876 INIT_LIST_HEAD(&bo_va->invalids);
1838 1877
1839 if (!bo)
1840 return bo_va;
1841
1842 list_add_tail(&bo_va->base.bo_list, &bo->va);
1843
1844 if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
1845 return bo_va;
1846
1847 if (bo->preferred_domains &
1848 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
1849 return bo_va;
1850
1851 /*
1852 * We checked all the prerequisites, but it looks like this per VM BO
1853 * is currently evicted. add the BO to the evicted list to make sure it
1854 * is validated on next VM use to avoid fault.
1855 * */
1856 spin_lock(&vm->status_lock);
1857 list_move_tail(&bo_va->base.vm_status, &vm->evicted);
1858 spin_unlock(&vm->status_lock);
1859
1860 return bo_va; 1878 return bo_va;
1861} 1879}
1862 1880
@@ -2234,6 +2252,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2234{ 2252{
2235 struct amdgpu_vm_bo_base *bo_base; 2253 struct amdgpu_vm_bo_base *bo_base;
2236 2254
2255 /* shadow bo doesn't have bo base, its validation needs its parent */
2256 if (bo->parent && bo->parent->shadow == bo)
2257 bo = bo->parent;
2258
2237 list_for_each_entry(bo_base, &bo->va, bo_list) { 2259 list_for_each_entry(bo_base, &bo->va, bo_list) {
2238 struct amdgpu_vm *vm = bo_base->vm; 2260 struct amdgpu_vm *vm = bo_base->vm;
2239 2261
@@ -2355,6 +2377,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
2355int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, 2377int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2356 int vm_context, unsigned int pasid) 2378 int vm_context, unsigned int pasid)
2357{ 2379{
2380 struct amdgpu_bo_param bp;
2381 struct amdgpu_bo *root;
2358 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, 2382 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2359 AMDGPU_VM_PTE_COUNT(adev) * 8); 2383 AMDGPU_VM_PTE_COUNT(adev) * 8);
2360 unsigned ring_instance; 2384 unsigned ring_instance;
@@ -2380,7 +2404,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2380 ring = adev->vm_manager.vm_pte_rings[ring_instance]; 2404 ring = adev->vm_manager.vm_pte_rings[ring_instance];
2381 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; 2405 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2382 r = drm_sched_entity_init(&ring->sched, &vm->entity, 2406 r = drm_sched_entity_init(&ring->sched, &vm->entity,
2383 rq, amdgpu_sched_jobs, NULL); 2407 rq, NULL);
2384 if (r) 2408 if (r)
2385 return r; 2409 return r;
2386 2410
@@ -2409,24 +2433,28 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2409 flags |= AMDGPU_GEM_CREATE_SHADOW; 2433 flags |= AMDGPU_GEM_CREATE_SHADOW;
2410 2434
2411 size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level); 2435 size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2412 r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags, 2436 memset(&bp, 0, sizeof(bp));
2413 ttm_bo_type_kernel, NULL, &vm->root.base.bo); 2437 bp.size = size;
2438 bp.byte_align = align;
2439 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
2440 bp.flags = flags;
2441 bp.type = ttm_bo_type_kernel;
2442 bp.resv = NULL;
2443 r = amdgpu_bo_create(adev, &bp, &root);
2414 if (r) 2444 if (r)
2415 goto error_free_sched_entity; 2445 goto error_free_sched_entity;
2416 2446
2417 r = amdgpu_bo_reserve(vm->root.base.bo, true); 2447 r = amdgpu_bo_reserve(root, true);
2418 if (r) 2448 if (r)
2419 goto error_free_root; 2449 goto error_free_root;
2420 2450
2421 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, 2451 r = amdgpu_vm_clear_bo(adev, vm, root,
2422 adev->vm_manager.root_level, 2452 adev->vm_manager.root_level,
2423 vm->pte_support_ats); 2453 vm->pte_support_ats);
2424 if (r) 2454 if (r)
2425 goto error_unreserve; 2455 goto error_unreserve;
2426 2456
2427 vm->root.base.vm = vm; 2457 amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2428 list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
2429 list_add_tail(&vm->root.base.vm_status, &vm->evicted);
2430 amdgpu_bo_unreserve(vm->root.base.bo); 2458 amdgpu_bo_unreserve(vm->root.base.bo);
2431 2459
2432 if (pasid) { 2460 if (pasid) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 30f080364c97..4cf678684a12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -75,11 +75,12 @@ struct amdgpu_bo_list_entry;
75/* PDE Block Fragment Size for VEGA10 */ 75/* PDE Block Fragment Size for VEGA10 */
76#define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59) 76#define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59)
77 77
78/* VEGA10 only */ 78
79/* For GFX9 */
79#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57) 80#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
80#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL) 81#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
81 82
82/* For Raven */ 83#define AMDGPU_MTYPE_NC 0
83#define AMDGPU_MTYPE_CC 2 84#define AMDGPU_MTYPE_CC 2
84 85
85#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \ 86#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 47ef3e6e7178..a266dcf5daed 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5903,7 +5903,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
5903 pi->pcie_dpm_key_disabled = 0; 5903 pi->pcie_dpm_key_disabled = 0;
5904 pi->thermal_sclk_dpm_enabled = 0; 5904 pi->thermal_sclk_dpm_enabled = 0;
5905 5905
5906 if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK) 5906 if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
5907 pi->caps_sclk_ds = true; 5907 pi->caps_sclk_ds = true;
5908 else 5908 else
5909 pi->caps_sclk_ds = false; 5909 pi->caps_sclk_ds = false;
@@ -6255,7 +6255,7 @@ static int ci_dpm_late_init(void *handle)
6255 int ret; 6255 int ret;
6256 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6256 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6257 6257
6258 if (!amdgpu_dpm) 6258 if (!adev->pm.dpm_enabled)
6259 return 0; 6259 return 0;
6260 6260
6261 /* init the sysfs and debugfs files late */ 6261 /* init the sysfs and debugfs files late */
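Both ci_dpm hunks replace module-wide knobs with per-device state: the
deep-sleep capability now reads the pp_feature mask cached on adev->powerplay,
and the late-init bail-out checks whether DPM actually came up on this device
instead of consulting the amdgpu_dpm module parameter.

	/* before */ if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
	/* after  */ if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)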
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 0df22030e713..8ff4c60d1b59 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1735,6 +1735,12 @@ static void cik_invalidate_hdp(struct amdgpu_device *adev,
1735 } 1735 }
1736} 1736}
1737 1737
1738static bool cik_need_full_reset(struct amdgpu_device *adev)
1739{
1740 /* change this when we support soft reset */
1741 return true;
1742}
1743
1738static const struct amdgpu_asic_funcs cik_asic_funcs = 1744static const struct amdgpu_asic_funcs cik_asic_funcs =
1739{ 1745{
1740 .read_disabled_bios = &cik_read_disabled_bios, 1746 .read_disabled_bios = &cik_read_disabled_bios,
@@ -1748,6 +1754,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
1748 .get_config_memsize = &cik_get_config_memsize, 1754 .get_config_memsize = &cik_get_config_memsize,
1749 .flush_hdp = &cik_flush_hdp, 1755 .flush_hdp = &cik_flush_hdp,
1750 .invalidate_hdp = &cik_invalidate_hdp, 1756 .invalidate_hdp = &cik_invalidate_hdp,
1757 .need_full_reset = &cik_need_full_reset,
1751}; 1758};
1752 1759
1753static int cik_common_early_init(void *handle) 1760static int cik_common_early_init(void *handle)
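need_full_reset is a new amdgpu_asic_funcs callback; CIK reports true
unconditionally until soft reset is supported, so generic reset code can ask
the ASIC layer rather than hard-coding per-chip knowledge. A sketch of a
consumer, under the assumption that callers go through the function table in
the usual amdgpu style:

	/* Sketch: generic code queries the per-ASIC reset policy. */
	if (adev->asic_funcs->need_full_reset(adev))
		/* fall back to a full adapter reset */;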
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 452f88ea46a2..ada241bfeee9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1823,7 +1823,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
1823 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1823 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1824 struct drm_device *dev = crtc->dev; 1824 struct drm_device *dev = crtc->dev;
1825 struct amdgpu_device *adev = dev->dev_private; 1825 struct amdgpu_device *adev = dev->dev_private;
1826 struct amdgpu_framebuffer *amdgpu_fb;
1827 struct drm_framebuffer *target_fb; 1826 struct drm_framebuffer *target_fb;
1828 struct drm_gem_object *obj; 1827 struct drm_gem_object *obj;
1829 struct amdgpu_bo *abo; 1828 struct amdgpu_bo *abo;
@@ -1842,18 +1841,15 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
1842 return 0; 1841 return 0;
1843 } 1842 }
1844 1843
1845 if (atomic) { 1844 if (atomic)
1846 amdgpu_fb = to_amdgpu_framebuffer(fb);
1847 target_fb = fb; 1845 target_fb = fb;
1848 } else { 1846 else
1849 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1850 target_fb = crtc->primary->fb; 1847 target_fb = crtc->primary->fb;
1851 }
1852 1848
1853 /* If atomic, assume fb object is pinned & idle & fenced and 1849 /* If atomic, assume fb object is pinned & idle & fenced and
1854 * just update base pointers 1850 * just update base pointers
1855 */ 1851 */
1856 obj = amdgpu_fb->obj; 1852 obj = target_fb->obj[0];
1857 abo = gem_to_amdgpu_bo(obj); 1853 abo = gem_to_amdgpu_bo(obj);
1858 r = amdgpu_bo_reserve(abo, false); 1854 r = amdgpu_bo_reserve(abo, false);
1859 if (unlikely(r != 0)) 1855 if (unlikely(r != 0))
@@ -2043,8 +2039,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
2043 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 2039 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2044 2040
2045 if (!atomic && fb && fb != crtc->primary->fb) { 2041 if (!atomic && fb && fb != crtc->primary->fb) {
2046 amdgpu_fb = to_amdgpu_framebuffer(fb); 2042 abo = gem_to_amdgpu_bo(fb->obj[0]);
2047 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2048 r = amdgpu_bo_reserve(abo, true); 2043 r = amdgpu_bo_reserve(abo, true);
2049 if (unlikely(r != 0)) 2044 if (unlikely(r != 0))
2050 return r; 2045 return r;
@@ -2526,11 +2521,9 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
2526 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2521 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2527 if (crtc->primary->fb) { 2522 if (crtc->primary->fb) {
2528 int r; 2523 int r;
2529 struct amdgpu_framebuffer *amdgpu_fb;
2530 struct amdgpu_bo *abo; 2524 struct amdgpu_bo *abo;
2531 2525
2532 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2526 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2533 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2534 r = amdgpu_bo_reserve(abo, true); 2527 r = amdgpu_bo_reserve(abo, true);
2535 if (unlikely(r)) 2528 if (unlikely(r))
2536 DRM_ERROR("failed to reserve abo before unpin\n"); 2529 DRM_ERROR("failed to reserve abo before unpin\n");
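struct drm_framebuffer now carries its GEM objects directly in obj[], so the
driver no longer needs to upcast to amdgpu_framebuffer just to reach the
backing object, and the local amdgpu_fb variables go away. The conversion is
one line at each use site:

	/* before */ abo = gem_to_amdgpu_bo(to_amdgpu_framebuffer(fb)->obj);
	/* after  */ abo = gem_to_amdgpu_bo(fb->obj[0]);

The same mechanical change repeats below for dce_v11_0, dce_v6_0, dce_v8_0 and
dce_virtual.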
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index a7c1c584a191..a5b96eac3033 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -173,6 +173,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
173 ARRAY_SIZE(polaris11_golden_settings_a11)); 173 ARRAY_SIZE(polaris11_golden_settings_a11));
174 break; 174 break;
175 case CHIP_POLARIS10: 175 case CHIP_POLARIS10:
176 case CHIP_VEGAM:
176 amdgpu_device_program_register_sequence(adev, 177 amdgpu_device_program_register_sequence(adev,
177 polaris10_golden_settings_a11, 178 polaris10_golden_settings_a11,
178 ARRAY_SIZE(polaris10_golden_settings_a11)); 179 ARRAY_SIZE(polaris10_golden_settings_a11));
@@ -473,6 +474,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
473 num_crtc = 2; 474 num_crtc = 2;
474 break; 475 break;
475 case CHIP_POLARIS10: 476 case CHIP_POLARIS10:
477 case CHIP_VEGAM:
476 num_crtc = 6; 478 num_crtc = 6;
477 break; 479 break;
478 case CHIP_POLARIS11: 480 case CHIP_POLARIS11:
@@ -1445,6 +1447,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
1445 adev->mode_info.audio.num_pins = 7; 1447 adev->mode_info.audio.num_pins = 7;
1446 break; 1448 break;
1447 case CHIP_POLARIS10: 1449 case CHIP_POLARIS10:
1450 case CHIP_VEGAM:
1448 adev->mode_info.audio.num_pins = 8; 1451 adev->mode_info.audio.num_pins = 8;
1449 break; 1452 break;
1450 case CHIP_POLARIS11: 1453 case CHIP_POLARIS11:
@@ -1862,7 +1865,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
1862 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1865 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1863 struct drm_device *dev = crtc->dev; 1866 struct drm_device *dev = crtc->dev;
1864 struct amdgpu_device *adev = dev->dev_private; 1867 struct amdgpu_device *adev = dev->dev_private;
1865 struct amdgpu_framebuffer *amdgpu_fb;
1866 struct drm_framebuffer *target_fb; 1868 struct drm_framebuffer *target_fb;
1867 struct drm_gem_object *obj; 1869 struct drm_gem_object *obj;
1868 struct amdgpu_bo *abo; 1870 struct amdgpu_bo *abo;
@@ -1881,18 +1883,15 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
1881 return 0; 1883 return 0;
1882 } 1884 }
1883 1885
1884 if (atomic) { 1886 if (atomic)
1885 amdgpu_fb = to_amdgpu_framebuffer(fb);
1886 target_fb = fb; 1887 target_fb = fb;
1887 } else { 1888 else
1888 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1889 target_fb = crtc->primary->fb; 1889 target_fb = crtc->primary->fb;
1890 }
1891 1890
1892 /* If atomic, assume fb object is pinned & idle & fenced and 1891 /* If atomic, assume fb object is pinned & idle & fenced and
1893 * just update base pointers 1892 * just update base pointers
1894 */ 1893 */
1895 obj = amdgpu_fb->obj; 1894 obj = target_fb->obj[0];
1896 abo = gem_to_amdgpu_bo(obj); 1895 abo = gem_to_amdgpu_bo(obj);
1897 r = amdgpu_bo_reserve(abo, false); 1896 r = amdgpu_bo_reserve(abo, false);
1898 if (unlikely(r != 0)) 1897 if (unlikely(r != 0))
@@ -2082,8 +2081,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
2082 WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 2081 WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2083 2082
2084 if (!atomic && fb && fb != crtc->primary->fb) { 2083 if (!atomic && fb && fb != crtc->primary->fb) {
2085 amdgpu_fb = to_amdgpu_framebuffer(fb); 2084 abo = gem_to_amdgpu_bo(fb->obj[0]);
2086 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2087 r = amdgpu_bo_reserve(abo, true); 2085 r = amdgpu_bo_reserve(abo, true);
2088 if (unlikely(r != 0)) 2086 if (unlikely(r != 0))
2089 return r; 2087 return r;
@@ -2253,7 +2251,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
2253 2251
2254 if ((adev->asic_type == CHIP_POLARIS10) || 2252 if ((adev->asic_type == CHIP_POLARIS10) ||
2255 (adev->asic_type == CHIP_POLARIS11) || 2253 (adev->asic_type == CHIP_POLARIS11) ||
2256 (adev->asic_type == CHIP_POLARIS12)) { 2254 (adev->asic_type == CHIP_POLARIS12) ||
2255 (adev->asic_type == CHIP_VEGAM)) {
2257 struct amdgpu_encoder *amdgpu_encoder = 2256 struct amdgpu_encoder *amdgpu_encoder =
2258 to_amdgpu_encoder(amdgpu_crtc->encoder); 2257 to_amdgpu_encoder(amdgpu_crtc->encoder);
2259 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2258 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2601,11 +2600,9 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
2601 dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2600 dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2602 if (crtc->primary->fb) { 2601 if (crtc->primary->fb) {
2603 int r; 2602 int r;
2604 struct amdgpu_framebuffer *amdgpu_fb;
2605 struct amdgpu_bo *abo; 2603 struct amdgpu_bo *abo;
2606 2604
2607 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2605 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2608 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2609 r = amdgpu_bo_reserve(abo, true); 2606 r = amdgpu_bo_reserve(abo, true);
2610 if (unlikely(r)) 2607 if (unlikely(r))
2611 DRM_ERROR("failed to reserve abo before unpin\n"); 2608 DRM_ERROR("failed to reserve abo before unpin\n");
@@ -2673,7 +2670,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
2673 2670
2674 if ((adev->asic_type == CHIP_POLARIS10) || 2671 if ((adev->asic_type == CHIP_POLARIS10) ||
2675 (adev->asic_type == CHIP_POLARIS11) || 2672 (adev->asic_type == CHIP_POLARIS11) ||
2676 (adev->asic_type == CHIP_POLARIS12)) { 2673 (adev->asic_type == CHIP_POLARIS12) ||
2674 (adev->asic_type == CHIP_VEGAM)) {
2677 struct amdgpu_encoder *amdgpu_encoder = 2675 struct amdgpu_encoder *amdgpu_encoder =
2678 to_amdgpu_encoder(amdgpu_crtc->encoder); 2676 to_amdgpu_encoder(amdgpu_crtc->encoder);
2679 int encoder_mode = 2677 int encoder_mode =
@@ -2830,6 +2828,7 @@ static int dce_v11_0_early_init(void *handle)
2830 adev->mode_info.num_dig = 9; 2828 adev->mode_info.num_dig = 9;
2831 break; 2829 break;
2832 case CHIP_POLARIS10: 2830 case CHIP_POLARIS10:
2831 case CHIP_VEGAM:
2833 adev->mode_info.num_hpd = 6; 2832 adev->mode_info.num_hpd = 6;
2834 adev->mode_info.num_dig = 6; 2833 adev->mode_info.num_dig = 6;
2835 break; 2834 break;
@@ -2949,7 +2948,8 @@ static int dce_v11_0_hw_init(void *handle)
2949 amdgpu_atombios_encoder_init_dig(adev); 2948 amdgpu_atombios_encoder_init_dig(adev);
2950 if ((adev->asic_type == CHIP_POLARIS10) || 2949 if ((adev->asic_type == CHIP_POLARIS10) ||
2951 (adev->asic_type == CHIP_POLARIS11) || 2950 (adev->asic_type == CHIP_POLARIS11) ||
2952 (adev->asic_type == CHIP_POLARIS12)) { 2951 (adev->asic_type == CHIP_POLARIS12) ||
2952 (adev->asic_type == CHIP_VEGAM)) {
2953 amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk, 2953 amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
2954 DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS); 2954 DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
2955 amdgpu_atombios_crtc_set_dce_clock(adev, 0, 2955 amdgpu_atombios_crtc_set_dce_clock(adev, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 9f67b7fd3487..394cc1e8fe20 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1780,7 +1780,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1780 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1780 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1781 struct drm_device *dev = crtc->dev; 1781 struct drm_device *dev = crtc->dev;
1782 struct amdgpu_device *adev = dev->dev_private; 1782 struct amdgpu_device *adev = dev->dev_private;
1783 struct amdgpu_framebuffer *amdgpu_fb;
1784 struct drm_framebuffer *target_fb; 1783 struct drm_framebuffer *target_fb;
1785 struct drm_gem_object *obj; 1784 struct drm_gem_object *obj;
1786 struct amdgpu_bo *abo; 1785 struct amdgpu_bo *abo;
@@ -1798,18 +1797,15 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1798 return 0; 1797 return 0;
1799 } 1798 }
1800 1799
1801 if (atomic) { 1800 if (atomic)
1802 amdgpu_fb = to_amdgpu_framebuffer(fb);
1803 target_fb = fb; 1801 target_fb = fb;
1804 } else { 1802 else
1805 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1806 target_fb = crtc->primary->fb; 1803 target_fb = crtc->primary->fb;
1807 }
1808 1804
1809 /* If atomic, assume fb object is pinned & idle & fenced and 1805 /* If atomic, assume fb object is pinned & idle & fenced and
1810 * just update base pointers 1806 * just update base pointers
1811 */ 1807 */
1812 obj = amdgpu_fb->obj; 1808 obj = target_fb->obj[0];
1813 abo = gem_to_amdgpu_bo(obj); 1809 abo = gem_to_amdgpu_bo(obj);
1814 r = amdgpu_bo_reserve(abo, false); 1810 r = amdgpu_bo_reserve(abo, false);
1815 if (unlikely(r != 0)) 1811 if (unlikely(r != 0))
@@ -1978,8 +1974,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1978 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 1974 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1979 1975
1980 if (!atomic && fb && fb != crtc->primary->fb) { 1976 if (!atomic && fb && fb != crtc->primary->fb) {
1981 amdgpu_fb = to_amdgpu_framebuffer(fb); 1977 abo = gem_to_amdgpu_bo(fb->obj[0]);
1982 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1983 r = amdgpu_bo_reserve(abo, true); 1978 r = amdgpu_bo_reserve(abo, true);
1984 if (unlikely(r != 0)) 1979 if (unlikely(r != 0))
1985 return r; 1980 return r;
@@ -2414,11 +2409,9 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2414 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2409 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2415 if (crtc->primary->fb) { 2410 if (crtc->primary->fb) {
2416 int r; 2411 int r;
2417 struct amdgpu_framebuffer *amdgpu_fb;
2418 struct amdgpu_bo *abo; 2412 struct amdgpu_bo *abo;
2419 2413
2420 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2414 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2421 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2422 r = amdgpu_bo_reserve(abo, true); 2415 r = amdgpu_bo_reserve(abo, true);
2423 if (unlikely(r)) 2416 if (unlikely(r))
2424 DRM_ERROR("failed to reserve abo before unpin\n"); 2417 DRM_ERROR("failed to reserve abo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f55422cbd77a..c9b9ab8f1b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1754,7 +1754,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1754 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1754 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1755 struct drm_device *dev = crtc->dev; 1755 struct drm_device *dev = crtc->dev;
1756 struct amdgpu_device *adev = dev->dev_private; 1756 struct amdgpu_device *adev = dev->dev_private;
1757 struct amdgpu_framebuffer *amdgpu_fb;
1758 struct drm_framebuffer *target_fb; 1757 struct drm_framebuffer *target_fb;
1759 struct drm_gem_object *obj; 1758 struct drm_gem_object *obj;
1760 struct amdgpu_bo *abo; 1759 struct amdgpu_bo *abo;
@@ -1773,18 +1772,15 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1773 return 0; 1772 return 0;
1774 } 1773 }
1775 1774
1776 if (atomic) { 1775 if (atomic)
1777 amdgpu_fb = to_amdgpu_framebuffer(fb);
1778 target_fb = fb; 1776 target_fb = fb;
1779 } else { 1777 else
1780 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1781 target_fb = crtc->primary->fb; 1778 target_fb = crtc->primary->fb;
1782 }
1783 1779
1784 /* If atomic, assume fb object is pinned & idle & fenced and 1780 /* If atomic, assume fb object is pinned & idle & fenced and
1785 * just update base pointers 1781 * just update base pointers
1786 */ 1782 */
1787 obj = amdgpu_fb->obj; 1783 obj = target_fb->obj[0];
1788 abo = gem_to_amdgpu_bo(obj); 1784 abo = gem_to_amdgpu_bo(obj);
1789 r = amdgpu_bo_reserve(abo, false); 1785 r = amdgpu_bo_reserve(abo, false);
1790 if (unlikely(r != 0)) 1786 if (unlikely(r != 0))
@@ -1955,8 +1951,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1955 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 1951 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1956 1952
1957 if (!atomic && fb && fb != crtc->primary->fb) { 1953 if (!atomic && fb && fb != crtc->primary->fb) {
1958 amdgpu_fb = to_amdgpu_framebuffer(fb); 1954 abo = gem_to_amdgpu_bo(fb->obj[0]);
1959 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1960 r = amdgpu_bo_reserve(abo, true); 1955 r = amdgpu_bo_reserve(abo, true);
1961 if (unlikely(r != 0)) 1956 if (unlikely(r != 0))
1962 return r; 1957 return r;
@@ -2430,11 +2425,9 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2430 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2425 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2431 if (crtc->primary->fb) { 2426 if (crtc->primary->fb) {
2432 int r; 2427 int r;
2433 struct amdgpu_framebuffer *amdgpu_fb;
2434 struct amdgpu_bo *abo; 2428 struct amdgpu_bo *abo;
2435 2429
2436 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2430 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2437 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2438 r = amdgpu_bo_reserve(abo, true); 2431 r = amdgpu_bo_reserve(abo, true);
2439 if (unlikely(r)) 2432 if (unlikely(r))
2440 DRM_ERROR("failed to reserve abo before unpin\n"); 2433 DRM_ERROR("failed to reserve abo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index b51f05dc9582..de7be3de0f41 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -168,11 +168,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
168 dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 168 dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
169 if (crtc->primary->fb) { 169 if (crtc->primary->fb) {
170 int r; 170 int r;
171 struct amdgpu_framebuffer *amdgpu_fb;
172 struct amdgpu_bo *abo; 171 struct amdgpu_bo *abo;
173 172
174 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 173 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
175 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
176 r = amdgpu_bo_reserve(abo, true); 174 r = amdgpu_bo_reserve(abo, true);
177 if (unlikely(r)) 175 if (unlikely(r))
178 DRM_ERROR("failed to reserve abo before unpin\n"); 176 DRM_ERROR("failed to reserve abo before unpin\n");
@@ -329,7 +327,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
329 return 0; 327 return 0;
330} 328}
331 329
332static int dce_virtual_mode_valid(struct drm_connector *connector, 330static enum drm_mode_status dce_virtual_mode_valid(struct drm_connector *connector,
333 struct drm_display_mode *mode) 331 struct drm_display_mode *mode)
334{ 332{
335 return MODE_OK; 333 return MODE_OK;
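The mode_valid fix aligns the virtual connector with the
drm_connector_helper_funcs prototype, which returns enum drm_mode_status
rather than int; with an int-returning function the assignment into the helper
vtable is an incompatible-pointer initialization. A sketch of the slot it must
match (the struct variable name is assumed, not shown in this excerpt):

	static const struct drm_connector_helper_funcs
	dce_virtual_connector_helper_funcs = {
		.get_modes = dce_virtual_get_modes,
		.mode_valid = dce_virtual_mode_valid, /* enum drm_mode_status */
	};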
@@ -462,8 +460,9 @@ static int dce_virtual_hw_init(void *handle)
462 break; 460 break;
463 case CHIP_CARRIZO: 461 case CHIP_CARRIZO:
464 case CHIP_STONEY: 462 case CHIP_STONEY:
465 case CHIP_POLARIS11:
466 case CHIP_POLARIS10: 463 case CHIP_POLARIS10:
464 case CHIP_POLARIS11:
465 case CHIP_VEGAM:
467 dce_v11_0_disable_dce(adev); 466 dce_v11_0_disable_dce(adev);
468 break; 467 break;
469 case CHIP_TOPAZ: 468 case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
new file mode 100644
index 000000000000..4ffda996660f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "amdgpu.h"
24#include "df_v1_7.h"
25
26#include "df/df_1_7_default.h"
27#include "df/df_1_7_offset.h"
28#include "df/df_1_7_sh_mask.h"
29
30static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
31
32static void df_v1_7_init(struct amdgpu_device *adev)
33{
34}
35
36static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
37 bool enable)
38{
39 u32 tmp;
40
41 if (enable) {
42 tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
43 tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
44 WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
45 } else
46 WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
47 mmFabricConfigAccessControl_DEFAULT);
48}
49
50static u32 df_v1_7_get_fb_channel_number(struct amdgpu_device *adev)
51{
52 u32 tmp;
53
54 tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
55 tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
56 tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
57
58 return tmp;
59}
60
61static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
62{
63 int fb_channel_number;
64
65 fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
66
67 return df_v1_7_channel_number[fb_channel_number];
68}
69
70static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
71 bool enable)
72{
73 u32 tmp;
74
 75	/* Put DF in broadcast mode */
76 adev->df_funcs->enable_broadcast_mode(adev, true);
77
78 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
79 tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
80 tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
81 tmp |= DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY;
82 WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
83 } else {
84 tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
85 tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
86 tmp |= DF_V1_7_MGCG_DISABLE;
87 WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
88 }
89
 90	/* Exit broadcast mode */
91 adev->df_funcs->enable_broadcast_mode(adev, false);
92}
93
94static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
95 u32 *flags)
96{
97 u32 tmp;
98
99 /* AMD_CG_SUPPORT_DF_MGCG */
100 tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
101 if (tmp & DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY)
102 *flags |= AMD_CG_SUPPORT_DF_MGCG;
103}
104
105const struct amdgpu_df_funcs df_v1_7_funcs = {
106 .init = df_v1_7_init,
107 .enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
108 .get_fb_channel_number = df_v1_7_get_fb_channel_number,
109 .get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
110 .update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating,
111 .get_clockgating_state = df_v1_7_get_clockgating_state,
112};
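df_v1_7 is a new data-fabric IP block; everything is reached through the
amdgpu_df_funcs table so later DF generations can provide their own
implementation without touching callers. Note that even the file's own MGCG
routine goes through adev->df_funcs rather than calling its siblings directly.
A sketch of the wiring, assuming SoC init assigns the table (the assignment
site is not shown in this excerpt):

	/* Sketch: hook up and consume the DF callbacks. */
	u32 channels;

	adev->df_funcs = &df_v1_7_funcs;
	channels = adev->df_funcs->get_hbm_channel_number(adev);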
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/amdgpu/df_v1_7.h
index 214f370c5efd..74621104c487 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2016 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -20,33 +20,21 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#ifndef PP_SOC15_H
24#define PP_SOC15_H
25 23
26#include "soc15_hw_ip.h" 24#ifndef __DF_V1_7_H__
27#include "vega10_ip_offset.h" 25#define __DF_V1_7_H__
28 26
29inline static uint32_t soc15_get_register_offset( 27#include "soc15_common.h"
30 uint32_t hw_id, 28enum DF_V1_7_MGCG
31 uint32_t inst,
32 uint32_t segment,
33 uint32_t offset)
34{ 29{
35 uint32_t reg = 0; 30 DF_V1_7_MGCG_DISABLE = 0,
 31 DF_V1_7_MGCG_ENABLE_00_CYCLE_DELAY = 1,
 32 DF_V1_7_MGCG_ENABLE_01_CYCLE_DELAY = 2,
 33 DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY = 13,
 34 DF_V1_7_MGCG_ENABLE_31_CYCLE_DELAY = 14,
 35 DF_V1_7_MGCG_ENABLE_63_CYCLE_DELAY = 15
36};
36 37
37 if (hw_id == THM_HWID) 38extern const struct amdgpu_df_funcs df_v1_7_funcs;
38 reg = THM_BASE.instance[inst].segment[segment] + offset;
39 else if (hw_id == NBIF_HWID)
40 reg = NBIF_BASE.instance[inst].segment[segment] + offset;
41 else if (hw_id == MP1_HWID)
42 reg = MP1_BASE.instance[inst].segment[segment] + offset;
43 else if (hw_id == DF_HWID)
44 reg = DF_BASE.instance[inst].segment[segment] + offset;
45 else if (hw_id == GC_HWID)
46 reg = GC_BASE.instance[inst].segment[segment] + offset;
47 else if (hw_id == SMUIO_HWID)
48 reg = SMUIO_BASE.instance[inst].segment[segment] + offset;
49 return reg;
50}
51 39
52#endif 40#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e14263fca1c9..818874b13c99 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -125,18 +125,6 @@ MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
125MODULE_FIRMWARE("amdgpu/fiji_mec2.bin"); 125MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
126MODULE_FIRMWARE("amdgpu/fiji_rlc.bin"); 126MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
127 127
128MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
129MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
130MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
131MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
132MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
133MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
134MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
135MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
136MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
137MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
138MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
139
140MODULE_FIRMWARE("amdgpu/polaris10_ce.bin"); 128MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
141MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin"); 129MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
142MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin"); 130MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
@@ -149,6 +137,18 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
149MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin"); 137MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
150MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin"); 138MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
151 139
140MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
141MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
142MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
143MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
144MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
145MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
146MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
147MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
148MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
149MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
150MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
151
152MODULE_FIRMWARE("amdgpu/polaris12_ce.bin"); 152MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
153MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin"); 153MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
154MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin"); 154MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
@@ -161,6 +161,13 @@ MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
161MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin"); 161MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
162MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin"); 162MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
163 163
164MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
165MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
166MODULE_FIRMWARE("amdgpu/vegam_me.bin");
167MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
168MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
169MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");
170
164static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = 171static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
165{ 172{
166 {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, 173 {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -292,6 +299,37 @@ static const u32 tonga_mgcg_cgcg_init[] =
292 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 299 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
293}; 300};
294 301
302static const u32 golden_settings_vegam_a11[] =
303{
304 mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
305 mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
306 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
307 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
308 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
309 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
310 mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
311 mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
312 mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
313 mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
314 mmSQ_CONFIG, 0x07f80000, 0x01180000,
315 mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
316 mmTCC_CTRL, 0x00100000, 0xf31fff7f,
317 mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
318 mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
319 mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
320 mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
321};
322
323static const u32 vegam_golden_common_all[] =
324{
325 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
326 mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
327 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
328 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
329 mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
330 mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
331};
332
295static const u32 golden_settings_polaris11_a11[] = 333static const u32 golden_settings_polaris11_a11[] =
296{ 334{
297 mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208, 335 mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
@@ -712,6 +750,14 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
712 tonga_golden_common_all, 750 tonga_golden_common_all,
713 ARRAY_SIZE(tonga_golden_common_all)); 751 ARRAY_SIZE(tonga_golden_common_all));
714 break; 752 break;
753 case CHIP_VEGAM:
754 amdgpu_device_program_register_sequence(adev,
755 golden_settings_vegam_a11,
756 ARRAY_SIZE(golden_settings_vegam_a11));
757 amdgpu_device_program_register_sequence(adev,
758 vegam_golden_common_all,
759 ARRAY_SIZE(vegam_golden_common_all));
760 break;
715 case CHIP_POLARIS11: 761 case CHIP_POLARIS11:
716 case CHIP_POLARIS12: 762 case CHIP_POLARIS12:
717 amdgpu_device_program_register_sequence(adev, 763 amdgpu_device_program_register_sequence(adev,
@@ -918,17 +964,20 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
918 case CHIP_FIJI: 964 case CHIP_FIJI:
919 chip_name = "fiji"; 965 chip_name = "fiji";
920 break; 966 break;
921 case CHIP_POLARIS11: 967 case CHIP_STONEY:
922 chip_name = "polaris11"; 968 chip_name = "stoney";
923 break; 969 break;
924 case CHIP_POLARIS10: 970 case CHIP_POLARIS10:
925 chip_name = "polaris10"; 971 chip_name = "polaris10";
926 break; 972 break;
973 case CHIP_POLARIS11:
974 chip_name = "polaris11";
975 break;
927 case CHIP_POLARIS12: 976 case CHIP_POLARIS12:
928 chip_name = "polaris12"; 977 chip_name = "polaris12";
929 break; 978 break;
930 case CHIP_STONEY: 979 case CHIP_VEGAM:
931 chip_name = "stoney"; 980 chip_name = "vegam";
932 break; 981 break;
933 default: 982 default:
934 BUG(); 983 BUG();
@@ -1770,6 +1819,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1770 gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN; 1819 gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
1771 break; 1820 break;
1772 case CHIP_POLARIS10: 1821 case CHIP_POLARIS10:
1822 case CHIP_VEGAM:
1773 ret = amdgpu_atombios_get_gfx_info(adev); 1823 ret = amdgpu_atombios_get_gfx_info(adev);
1774 if (ret) 1824 if (ret)
1775 return ret; 1825 return ret;
@@ -1957,12 +2007,13 @@ static int gfx_v8_0_sw_init(void *handle)
1957 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2007 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1958 2008
1959 switch (adev->asic_type) { 2009 switch (adev->asic_type) {
1960 case CHIP_FIJI:
1961 case CHIP_TONGA: 2010 case CHIP_TONGA:
2011 case CHIP_CARRIZO:
2012 case CHIP_FIJI:
2013 case CHIP_POLARIS10:
1962 case CHIP_POLARIS11: 2014 case CHIP_POLARIS11:
1963 case CHIP_POLARIS12: 2015 case CHIP_POLARIS12:
1964 case CHIP_POLARIS10: 2016 case CHIP_VEGAM:
1965 case CHIP_CARRIZO:
1966 adev->gfx.mec.num_mec = 2; 2017 adev->gfx.mec.num_mec = 2;
1967 break; 2018 break;
1968 case CHIP_TOPAZ: 2019 case CHIP_TOPAZ:
@@ -2323,6 +2374,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
2323 2374
2324 break; 2375 break;
2325 case CHIP_FIJI: 2376 case CHIP_FIJI:
2377 case CHIP_VEGAM:
2326 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2378 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2327 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2379 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2328 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 2380 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@@ -3504,6 +3556,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
3504{ 3556{
3505 switch (adev->asic_type) { 3557 switch (adev->asic_type) {
3506 case CHIP_FIJI: 3558 case CHIP_FIJI:
3559 case CHIP_VEGAM:
3507 *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) | 3560 *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
3508 RB_XSEL2(1) | PKR_MAP(2) | 3561 RB_XSEL2(1) | PKR_MAP(2) |
3509 PKR_XSEL(1) | PKR_YSEL(1) | 3562 PKR_XSEL(1) | PKR_YSEL(1) |
@@ -4071,7 +4124,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
4071 gfx_v8_0_init_power_gating(adev); 4124 gfx_v8_0_init_power_gating(adev);
4072 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); 4125 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4073 } else if ((adev->asic_type == CHIP_POLARIS11) || 4126 } else if ((adev->asic_type == CHIP_POLARIS11) ||
4074 (adev->asic_type == CHIP_POLARIS12)) { 4127 (adev->asic_type == CHIP_POLARIS12) ||
4128 (adev->asic_type == CHIP_VEGAM)) {
4075 gfx_v8_0_init_csb(adev); 4129 gfx_v8_0_init_csb(adev);
4076 gfx_v8_0_init_save_restore_list(adev); 4130 gfx_v8_0_init_save_restore_list(adev);
4077 gfx_v8_0_enable_save_restore_machine(adev); 4131 gfx_v8_0_enable_save_restore_machine(adev);
@@ -4146,7 +4200,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
4146 WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); 4200 WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
4147 if (adev->asic_type == CHIP_POLARIS11 || 4201 if (adev->asic_type == CHIP_POLARIS11 ||
4148 adev->asic_type == CHIP_POLARIS10 || 4202 adev->asic_type == CHIP_POLARIS10 ||
4149 adev->asic_type == CHIP_POLARIS12) { 4203 adev->asic_type == CHIP_POLARIS12 ||
4204 adev->asic_type == CHIP_VEGAM) {
4150 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D); 4205 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
4151 tmp &= ~0x3; 4206 tmp &= ~0x3;
4152 WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp); 4207 WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@@ -5498,7 +5553,8 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
5498 bool enable) 5553 bool enable)
5499{ 5554{
5500 if ((adev->asic_type == CHIP_POLARIS11) || 5555 if ((adev->asic_type == CHIP_POLARIS11) ||
5501 (adev->asic_type == CHIP_POLARIS12)) 5556 (adev->asic_type == CHIP_POLARIS12) ||
5557 (adev->asic_type == CHIP_VEGAM))
5502 /* Send msg to SMU via Powerplay */ 5558 /* Send msg to SMU via Powerplay */
5503 amdgpu_device_ip_set_powergating_state(adev, 5559 amdgpu_device_ip_set_powergating_state(adev,
5504 AMD_IP_BLOCK_TYPE_SMC, 5560 AMD_IP_BLOCK_TYPE_SMC,
@@ -5588,6 +5644,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
5588 break; 5644 break;
5589 case CHIP_POLARIS11: 5645 case CHIP_POLARIS11:
5590 case CHIP_POLARIS12: 5646 case CHIP_POLARIS12:
5647 case CHIP_VEGAM:
5591 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) 5648 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5592 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); 5649 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5593 else 5650 else
@@ -6154,6 +6211,7 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
6154 case CHIP_POLARIS10: 6211 case CHIP_POLARIS10:
6155 case CHIP_POLARIS11: 6212 case CHIP_POLARIS11:
6156 case CHIP_POLARIS12: 6213 case CHIP_POLARIS12:
6214 case CHIP_VEGAM:
6157 gfx_v8_0_polaris_update_gfx_clock_gating(adev, state); 6215 gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
6158 break; 6216 break;
6159 default: 6217 default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e5962e61beb5..fc1911834ab5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -41,7 +41,6 @@
41#define GFX9_MEC_HPD_SIZE 2048 41#define GFX9_MEC_HPD_SIZE 2048
42#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 42#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
43#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L 43#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
44#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34
45 44
46#define mmPWR_MISC_CNTL_STATUS 0x0183 45#define mmPWR_MISC_CNTL_STATUS 0x0183
47#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0 46#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
@@ -185,6 +184,30 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
 };
 
+static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
+{
+	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+};
+
+static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
+{
+	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+};
+
 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
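
The two tables above map index i to the offset of the i-th RLC_SRM_INDEX_CNTL_ADDR/_DATA register instance, replacing the old "first register + i" arithmetic; indexing through a table stays correct even if the eight instances are not laid out contiguously in the register map. A minimal usage sketch, mirroring gfx_v9_1_init_rlc_save_restore_list later in this diff:

	/* write the i-th unique indirect register through the offset tables */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
	       + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
	       unique_indirect_regs[i] & 0x3FFFF);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
	       + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
	       unique_indirect_regs[i] >> 20);
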
@@ -401,6 +424,27 @@ static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
 	kfree(adev->gfx.rlc.register_list_format);
 }
 
+static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
+{
+	const struct rlc_firmware_header_v2_1 *rlc_hdr;
+
+	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
+	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
+	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
+	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
+	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
+	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
+	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
+	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
+	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
+	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
+	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
+	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
+	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
+	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
+			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+}
+
 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
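
For reference, a sketch of the extra fields the v2.1 RLC firmware header carries, inferred purely from the accessors above (the authoritative definition lives in amdgpu_ucode.h and may differ in ordering):

	struct rlc_firmware_header_v2_1 {	/* sketch, not the full layout */
		/* ... everything from rlc_firmware_header_v2_0 ... */
		uint32_t reg_list_format_direct_reg_list_length;
		uint32_t save_restore_list_cntl_ucode_ver;
		uint32_t save_restore_list_cntl_feature_ver;
		uint32_t save_restore_list_cntl_size_bytes;
		uint32_t save_restore_list_cntl_offset_bytes;
		/* ... the same ver/feature/size/offset quartet repeats for the
		 * gpm and srm save/restore lists ... */
	};
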
@@ -412,6 +456,8 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
 	unsigned int *tmp = NULL;
 	unsigned int i = 0;
+	uint16_t version_major;
+	uint16_t version_minor;
 
 	DRM_DEBUG("\n");
 
@@ -468,6 +514,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 		goto out;
 	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+	if (version_major == 2 && version_minor == 1)
+		adev->gfx.rlc.is_rlc_v2_1 = true;
+
 	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
 	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
 	adev->gfx.rlc.save_and_restore_offset =
@@ -508,6 +560,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
 		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
 
+	if (adev->gfx.rlc.is_rlc_v2_1)
+		gfx_v9_0_init_rlc_ext_microcode(adev);
+
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
 	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
 	if (err)
@@ -566,6 +621,26 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 		adev->firmware.fw_size +=
 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
+	if (adev->gfx.rlc.is_rlc_v2_1) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
+		info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
+		info->fw = adev->gfx.rlc_fw;
+		adev->firmware.fw_size +=
+			ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
+
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
+		info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
+		info->fw = adev->gfx.rlc_fw;
+		adev->firmware.fw_size +=
+			ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
+
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
+		info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
+		info->fw = adev->gfx.rlc_fw;
+		adev->firmware.fw_size +=
+			ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+	}
+
 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
 	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
 	info->fw = adev->gfx.mec_fw;
@@ -1600,6 +1675,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
 
 	gfx_v9_0_setup_rb(adev);
 	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
+	adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
 
 	/* XXX SH_MEM regs */
 	/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -1616,7 +1692,10 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
 			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
-			tmp = adev->gmc.shared_aperture_start >> 48;
+			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
+				(adev->gmc.private_aperture_start >> 48));
+			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
+				(adev->gmc.shared_aperture_start >> 48));
 			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
 		}
 	}
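
A worked example of the new SH_MEM_BASES programming, assuming PRIVATE_BASE occupies the low half of the register and SHARED_BASE the high half (the field positions are an assumption here; the aperture constants come from the gmc_v9_0 hunk later in this diff):

	uint32_t tmp = 0;
	tmp |= (uint32_t)(0x1000000000000000ULL >> 48);       /* PRIVATE_BASE = 0x1000 */
	tmp |= (uint32_t)(0x2000000000000000ULL >> 48) << 16; /* SHARED_BASE  = 0x2000 */
	/* tmp == 0x20001000, where the old code programmed only the shared base */
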
@@ -1708,55 +1787,42 @@ static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
 			adev->gfx.rlc.clear_state_size);
 }
 
-static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
+static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
 				int indirect_offset,
 				int list_size,
 				int *unique_indirect_regs,
 				int *unique_indirect_reg_count,
-				int max_indirect_reg_count,
 				int *indirect_start_offsets,
-				int *indirect_start_offsets_count,
-				int max_indirect_start_offsets_count)
+				int *indirect_start_offsets_count)
 {
 	int idx;
-	bool new_entry = true;
 
 	for (; indirect_offset < list_size; indirect_offset++) {
+		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
+		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
 
-		if (new_entry) {
-			new_entry = false;
-			indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
-			*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
-			BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
-		}
+		while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
+			indirect_offset += 2;
 
-		if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
-			new_entry = true;
-			continue;
-		}
+			/* look for the matching index */
+			for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
+				if (unique_indirect_regs[idx] ==
+					register_list_format[indirect_offset] ||
+					!unique_indirect_regs[idx])
+					break;
+			}
 
-		indirect_offset += 2;
+			BUG_ON(idx >= *unique_indirect_reg_count);
 
-		/* look for the matching indice */
-		for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
-			if (unique_indirect_regs[idx] ==
-				register_list_format[indirect_offset])
-				break;
-		}
+			if (!unique_indirect_regs[idx])
+				unique_indirect_regs[idx] = register_list_format[indirect_offset];
 
-		if (idx >= *unique_indirect_reg_count) {
-			unique_indirect_regs[*unique_indirect_reg_count] =
-				register_list_format[indirect_offset];
-			idx = *unique_indirect_reg_count;
-			*unique_indirect_reg_count = *unique_indirect_reg_count + 1;
-			BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
+			indirect_offset++;
 		}
-
-		register_list_format[indirect_offset] = idx;
 	}
 }
 
-static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
+static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
 {
 	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
 	int unique_indirect_reg_count = 0;
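
Reading the rewritten parser, the register_list_format blob appears to begin with reg_list_format_direct_reg_list_length direct entries and then carry groups of indirect records, each record three dwords wide and each group closed by an 0xFFFFFFFF sentinel. This layout is inferred from the code above, not from firmware documentation:

	/*
	 * register_list_format, as consumed here (sketch):
	 *
	 *   direct entries .......... reg_list_format_direct_reg_list_length dwords
	 *   [dw0 dw1 indirect_reg] ... 0xFFFFFFFF   group 0 (start offset recorded)
	 *   [dw0 dw1 indirect_reg] ... 0xFFFFFFFF   group 1
	 *   ...
	 */
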
@@ -1765,7 +1831,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
 	int indirect_start_offsets_count = 0;
 
 	int list_size = 0;
-	int i = 0;
+	int i = 0, j = 0;
 	u32 tmp = 0;
 
 	u32 *register_list_format =
@@ -1776,15 +1842,14 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
 		adev->gfx.rlc.reg_list_format_size_bytes);
 
 	/* setup unique_indirect_regs array and indirect_start_offsets array */
-	gfx_v9_0_parse_ind_reg_list(register_list_format,
-				GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
-				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
-				unique_indirect_regs,
-				&unique_indirect_reg_count,
-				ARRAY_SIZE(unique_indirect_regs),
-				indirect_start_offsets,
-				&indirect_start_offsets_count,
-				ARRAY_SIZE(indirect_start_offsets));
+	unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
+	gfx_v9_1_parse_ind_reg_list(register_list_format,
+				    adev->gfx.rlc.reg_list_format_direct_reg_list_length,
+				    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
+				    unique_indirect_regs,
+				    &unique_indirect_reg_count,
+				    indirect_start_offsets,
+				    &indirect_start_offsets_count);
 
 	/* enable auto inc in case it is disabled */
 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@@ -1798,19 +1863,37 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
 		       adev->gfx.rlc.register_restore[i]);
 
-	/* load direct register */
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
-	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
-		       adev->gfx.rlc.register_restore[i]);
-
 	/* load indirect register */
 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
 	       adev->gfx.rlc.reg_list_format_start);
-	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
+
+	/* direct register portion */
+	for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
 		       register_list_format[i]);
 
+	/* indirect register portion */
+	while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
+		if (register_list_format[i] == 0xFFFFFFFF) {
+			WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
+			continue;
+		}
+
+		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
+		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
+
+		for (j = 0; j < unique_indirect_reg_count; j++) {
+			if (register_list_format[i] == unique_indirect_regs[j]) {
+				WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
+				break;
+			}
+		}
+
+		BUG_ON(j >= unique_indirect_reg_count);
+
+		i++;
+	}
+
 	/* set save/restore list size */
 	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
 	list_size = list_size >> 1;
@@ -1823,14 +1906,19 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
 	       adev->gfx.rlc.starting_offsets_start);
 	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
 		       indirect_start_offsets[i]);
 
 	/* load unique indirect regs*/
 	for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
-		       unique_indirect_regs[i] & 0x3FFFF);
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
-		       unique_indirect_regs[i] >> 20);
+		if (unique_indirect_regs[i] != 0) {
+			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
+			       + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
+			       unique_indirect_regs[i] & 0x3FFFF);
+
+			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
+			       + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
+			       unique_indirect_regs[i] >> 20);
+		}
 	}
 
 	kfree(register_list_format);
@@ -2010,6 +2098,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
 
 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
 {
+	if (!adev->gfx.rlc.is_rlc_v2_1)
+		return;
+
 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
 			      AMD_PG_SUPPORT_GFX_SMG |
 			      AMD_PG_SUPPORT_GFX_DMG |
@@ -2017,27 +2108,12 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
 			      AMD_PG_SUPPORT_GDS |
 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
 		gfx_v9_0_init_csb(adev);
-		gfx_v9_0_init_rlc_save_restore_list(adev);
+		gfx_v9_1_init_rlc_save_restore_list(adev);
 		gfx_v9_0_enable_save_restore_machine(adev);
 
-		if (adev->asic_type == CHIP_RAVEN) {
-			WREG32(mmRLC_JUMP_TABLE_RESTORE,
-			       adev->gfx.rlc.cp_table_gpu_addr >> 8);
-			gfx_v9_0_init_gfx_power_gating(adev);
-
-			if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
-				gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
-				gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
-			} else {
-				gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
-				gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
-			}
-
-			if (adev->pg_flags & AMD_PG_SUPPORT_CP)
-				gfx_v9_0_enable_cp_power_gating(adev, true);
-			else
-				gfx_v9_0_enable_cp_power_gating(adev, false);
-		}
+		WREG32(mmRLC_JUMP_TABLE_RESTORE,
+		       adev->gfx.rlc.cp_table_gpu_addr >> 8);
+		gfx_v9_0_init_gfx_power_gating(adev);
 	}
 }
 
@@ -3061,6 +3137,9 @@ static int gfx_v9_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i;
 
+	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+					       AMD_PG_STATE_UNGATE);
+
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 
@@ -3279,6 +3358,11 @@ static int gfx_v9_0_late_init(void *handle)
 	if (r)
 		return r;
 
+	r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+						   AMD_PG_STATE_GATE);
+	if (r)
+		return r;
+
 	return 0;
 }
 
@@ -3339,8 +3423,7 @@ static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
 						bool enable)
 {
-	/* TODO: double check if we need to perform under safe mdoe */
-	/* gfx_v9_0_enter_rlc_safe_mode(adev); */
+	gfx_v9_0_enter_rlc_safe_mode(adev);
 
 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
 		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -3351,7 +3434,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
 		gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
 	}
 
-	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
+	gfx_v9_0_exit_rlc_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -3742,7 +3825,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	}
 
 	amdgpu_ring_write(ring, header);
-BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
+	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
 	amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
 		(2 << 0) |
@@ -3774,13 +3857,16 @@ static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 {
 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
 
 	/* RELEASE_MEM - flush caches, send int */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
-	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
-				 EOP_TC_ACTION_EN |
-				 EOP_TC_WB_ACTION_EN |
-				 EOP_TC_MD_ACTION_EN |
+	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
+					       EOP_TC_NC_ACTION_EN) :
+					      (EOP_TCL1_ACTION_EN |
+					       EOP_TC_ACTION_EN |
+					       EOP_TC_WB_ACTION_EN |
+					       EOP_TC_MD_ACTION_EN)) |
 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
 				 EVENT_INDEX(5)));
 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
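
The net effect of the new flag, written out (grounded in the hunk above): when AMDGPU_FENCE_FLAG_TC_WB_ONLY is set, the RELEASE_MEM packet requests only a TC write-back instead of the full L1/TC flush-and-invalidate:

	unsigned int eop_action =
		writeback ? (EOP_TC_WB_ACTION_EN | EOP_TC_NC_ACTION_EN)
			  : (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
			     EOP_TC_WB_ACTION_EN | EOP_TC_MD_ACTION_EN);
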
@@ -4137,6 +4223,20 @@ static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
 }
 
+static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+						  uint32_t reg0, uint32_t reg1,
+						  uint32_t ref, uint32_t mask)
+{
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+	if (amdgpu_sriov_vf(ring->adev))
+		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
+				      ref, mask, 0x20);
+	else
+		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+							   ref, mask);
+}
+
 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						 enum amdgpu_interrupt_state state)
 {
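
Under SR-IOV the write and the poll are folded into a single WAIT_REG_MEM packet, presumably so that a world switch cannot land between writing reg0 and polling reg1; on bare metal the generic two-packet helper is used instead. The first caller is the TLB flush in the gmc_v9_0 hunk later in this diff:

	/* from gmc_v9_0_emit_flush_gpu_tlb below: write the invalidate
	 * request and wait for the per-VMID ack bit in one ring call */
	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);
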
@@ -4458,6 +4558,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
 	.emit_tmz = gfx_v9_0_ring_emit_tmz,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -4492,6 +4593,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 	.set_priority = gfx_v9_0_ring_set_priority_compute,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -4522,6 +4624,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
 	.emit_rreg = gfx_v9_0_ring_emit_rreg,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 };
 
 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 5617cf62c566..79f9ac29019b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -819,12 +819,33 @@ static int gmc_v6_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_bo_late_init(adev);
+
 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 	else
 		return 0;
 }
 
+static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
+	unsigned size;
+
+	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+	} else {
+		u32 viewport = RREG32(mmVIEWPORT_SIZE);
+		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+			4);
+	}
+	/* return 0 if the pre-OS buffer uses up most of vram */
+	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+		return 0;
+	return size;
+}
+
 static int gmc_v6_0_sw_init(void *handle)
 {
 	int r;
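
A worked example of the reservation math above: with VGA emulation off and a 1920x1080 pre-OS framebuffer at 4 bytes per pixel, the reserved size is

	size = 1080 * 1920 * 4;	/* 8294400 bytes, just under 8 MB */

while VGA emulation forces a fixed 9 MB (8 MB for the emulator plus 1 MB of framebuffer). In either case the reservation is dropped (0 is returned) when honouring it would leave less than 8 MB of VRAM free.
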
@@ -851,8 +872,6 @@ static int gmc_v6_0_sw_init(void *handle)
 
 	adev->gmc.mc_mask = 0xffffffffffULL;
 
-	adev->gmc.stolen_size = 256 * 1024;
-
 	adev->need_dma32 = false;
 	dma_bits = adev->need_dma32 ? 32 : 40;
 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
@@ -878,6 +897,8 @@ static int gmc_v6_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);
+
 	r = amdgpu_bo_init(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 80054f36e487..7147bfe25a23 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -958,12 +958,33 @@ static int gmc_v7_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_bo_late_init(adev);
+
 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 	else
 		return 0;
 }
 
+static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
+	unsigned size;
+
+	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+	} else {
+		u32 viewport = RREG32(mmVIEWPORT_SIZE);
+		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+			4);
+	}
+	/* return 0 if the pre-OS buffer uses up most of vram */
+	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+		return 0;
+	return size;
+}
+
 static int gmc_v7_0_sw_init(void *handle)
 {
 	int r;
@@ -998,8 +1019,6 @@ static int gmc_v7_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
-	adev->gmc.stolen_size = 256 * 1024;
-
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 40-bits.
 	 * IGP - can handle 40-bits
@@ -1030,6 +1049,8 @@ static int gmc_v7_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev);
+
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d71d4cb68f9c..1edbe6b477b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -138,6 +138,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 		break;
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 		amdgpu_device_program_register_sequence(adev,
 							golden_settings_polaris11_a11,
 							ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -231,6 +232,7 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_FIJI:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
+	case CHIP_VEGAM:
 		return 0;
 	default: BUG();
 	}
@@ -567,9 +569,10 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 	/* set the gart size */
 	if (amdgpu_gart_size == -1) {
 		switch (adev->asic_type) {
-		case CHIP_POLARIS11: /* all engines support GPUVM */
 		case CHIP_POLARIS10: /* all engines support GPUVM */
+		case CHIP_POLARIS11: /* all engines support GPUVM */
 		case CHIP_POLARIS12: /* all engines support GPUVM */
+		case CHIP_VEGAM:     /* all engines support GPUVM */
 		default:
 			adev->gmc.gart_size = 256ULL << 20;
 			break;
@@ -1049,12 +1052,33 @@ static int gmc_v8_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_bo_late_init(adev);
+
 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 	else
 		return 0;
 }
 
+static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
+	unsigned size;
+
+	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+	} else {
+		u32 viewport = RREG32(mmVIEWPORT_SIZE);
+		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+			4);
+	}
+	/* return 0 if the pre-OS buffer uses up most of vram */
+	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+		return 0;
+	return size;
+}
+
 #define mmMC_SEQ_MISC0_FIJI 0xA71
 
 static int gmc_v8_0_sw_init(void *handle)
@@ -1068,7 +1092,8 @@ static int gmc_v8_0_sw_init(void *handle)
 	} else {
 		u32 tmp;
 
-		if (adev->asic_type == CHIP_FIJI)
+		if ((adev->asic_type == CHIP_FIJI) ||
+		    (adev->asic_type == CHIP_VEGAM))
 			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
 		else
 			tmp = RREG32(mmMC_SEQ_MISC0);
@@ -1096,8 +1121,6 @@ static int gmc_v8_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
-	adev->gmc.stolen_size = 256 * 1024;
-
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 40-bits.
 	 * IGP - can handle 40-bits
@@ -1128,6 +1151,8 @@ static int gmc_v8_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);
+
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e687363900bb..6cccf0e0acd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -43,19 +43,13 @@
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 
-#define mmDF_CS_AON0_DramBaseAddress0				0x0044
-#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX			0
-//DF_CS_AON0_DramBaseAddress0
-#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT		0x0
-#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT	0x1
-#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT	0x4
-#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT	0x8
-#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT	0xc
-#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK		0x00000001L
-#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK	0x00000002L
-#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK		0x000000F0L
-#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK		0x00000700L
-#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK		0xFFFFF000L
+/* add these here since we already include dce12 headers and these are for DCN */
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L
 
 /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
 #define AMDGPU_NUM_OF_VMIDS			8
@@ -385,11 +379,9 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
 			      upper_32_bits(pd_addr));
 
-	amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
-
-	/* wait for the invalidate to complete */
-	amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
-				  1 << vmid, 1 << vmid);
+	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+					    hub->vm_inv_eng0_ack + eng,
+					    req, 1 << vmid);
 
 	return pd_addr;
 }
@@ -556,8 +548,7 @@ static int gmc_v9_0_early_init(void *handle)
 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
 	adev->gmc.shared_aperture_end =
 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
-	adev->gmc.private_aperture_start =
-		adev->gmc.shared_aperture_end + 1;
+	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
 	adev->gmc.private_aperture_end =
 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 
@@ -659,6 +650,11 @@ static int gmc_v9_0_late_init(void *handle)
 	unsigned i;
 	int r;
 
+	/*
+	 * TODO - Uncomment once GART corruption issue is fixed.
+	 */
+	/* amdgpu_bo_late_init(adev); */
+
 	for(i = 0; i < adev->num_rings; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		unsigned vmhub = ring->funcs->vmhub;
@@ -714,7 +710,6 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
  */
 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 {
-	u32 tmp;
 	int chansize, numchan;
 	int r;
 
@@ -727,39 +722,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 		else
 			chansize = 128;
 
-		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
-		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
-		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
-		switch (tmp) {
-		case 0:
-		default:
-			numchan = 1;
-			break;
-		case 1:
-			numchan = 2;
-			break;
-		case 2:
-			numchan = 0;
-			break;
-		case 3:
-			numchan = 4;
-			break;
-		case 4:
-			numchan = 0;
-			break;
-		case 5:
-			numchan = 8;
-			break;
-		case 6:
-			numchan = 0;
-			break;
-		case 7:
-			numchan = 16;
-			break;
-		case 8:
-			numchan = 2;
-			break;
-		}
+		numchan = adev->df_funcs->get_hbm_channel_number(adev);
 		adev->gmc.vram_width = numchan * chansize;
 	}
 
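
The df_funcs->get_hbm_channel_number callback presumably encapsulates the same IntLvNumChan decode that is deleted above; expressed as a table for reference (derived from the removed switch statement, not from the DF code itself):

	/* IntLvNumChan encoding -> HBM channel count */
	static const int hbm_channels[] = { 1, 2, 0, 4, 0, 8, 0, 16, 2 };
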
@@ -826,6 +789,52 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
 	return amdgpu_gart_table_vram_alloc(adev);
 }
 
+static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+#if 0
+	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+#endif
+	unsigned size;
+
+	/*
+	 * TODO Remove once GART corruption is resolved
+	 * Check related code in gmc_v9_0_sw_fini
+	 */
+	size = 9 * 1024 * 1024;
+
+#if 0
+	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+	} else {
+		u32 viewport;
+
+		switch (adev->asic_type) {
+		case CHIP_RAVEN:
+			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+			size = (REG_GET_FIELD(viewport,
+					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+				REG_GET_FIELD(viewport,
+					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
+				4);
+			break;
+		case CHIP_VEGA10:
+		case CHIP_VEGA12:
+		default:
+			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
+			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+				4);
+			break;
+		}
+	}
+	/* return 0 if the pre-OS buffer uses up most of vram */
+	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+		return 0;
+
+#endif
+	return size;
+}
+
 static int gmc_v9_0_sw_init(void *handle)
 {
 	int r;
@@ -877,12 +886,6 @@ static int gmc_v9_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
 
-	/*
-	 * It needs to reserve 8M stolen memory for vega10
-	 * TODO: Figure out how to avoid that...
-	 */
-	adev->gmc.stolen_size = 8 * 1024 * 1024;
-
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 44-bits.
 	 * IGP - can handle 44-bits
@@ -907,6 +910,8 @@ static int gmc_v9_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
+
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
 	if (r)
@@ -950,6 +955,18 @@ static int gmc_v9_0_sw_fini(void *handle)
 	amdgpu_gem_force_release(adev);
 	amdgpu_vm_manager_fini(adev);
 	gmc_v9_0_gart_fini(adev);
+
+	/*
+	 * TODO:
+	 * Currently there is a bug where some memory client outside
+	 * of the driver writes to the first 8M of VRAM on S3 resume.
+	 * This overrides GART, which by default gets placed in the first 8M,
+	 * and causes VM_FAULTS once GTT is accessed.
+	 * Keep the stolen memory reservation while this is not solved.
+	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
+	 */
+	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+
 	amdgpu_bo_fini(adev);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 26ba984ab2b7..17f7f074cedc 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2817,7 +2817,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
 		pi->caps_tcp_ramping = true;
 	}
 
-	if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
+	if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
 		pi->caps_sclk_ds = true;
 	else
 		pi->caps_sclk_ds = false;
@@ -2974,7 +2974,7 @@ static int kv_dpm_late_init(void *handle)
 	/* powerdown unused blocks for now */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!amdgpu_dpm)
+	if (!adev->pm.dpm_enabled)
 		return 0;
 
 	kv_dpm_powergate_acp(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 493348672475..078f70faedcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -260,8 +260,10 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	} while (timeout > 1);
 
 flr_done:
-	if (locked)
+	if (locked) {
+		adev->in_gpu_reset = 0;
 		mutex_unlock(&adev->lock_reset);
+	}
 
 	/* Trigger recovery for world switch failure if no TDR */
 	if (amdgpu_lockup_timeout == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 8da6da90b1c9..0cf48d26c676 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -40,11 +40,20 @@ enum psp_gfx_crtl_cmd_id
     GFX_CTRL_CMD_ID_INIT_GPCOM_RING = 0x00020000,   /* initialize GPCOM ring */
    GFX_CTRL_CMD_ID_DESTROY_RINGS   = 0x00030000,   /* destroy rings */
     GFX_CTRL_CMD_ID_CAN_INIT_RINGS  = 0x00040000,   /* is it allowed to initialized the rings */
+    GFX_CTRL_CMD_ID_ENABLE_INT      = 0x00050000,   /* enable PSP-to-Gfx interrupt */
+    GFX_CTRL_CMD_ID_DISABLE_INT     = 0x00060000,   /* disable PSP-to-Gfx interrupt */
+    GFX_CTRL_CMD_ID_MODE1_RST       = 0x00070000,   /* trigger the Mode 1 reset */
 
     GFX_CTRL_CMD_ID_MAX             = 0x000F0000,   /* max command ID */
 };
 
 
+/*-----------------------------------------------------------------------------
+    NOTE:   All physical addresses used in this interface are actually
+            GPU Virtual Addresses.
+*/
+
+
 /* Control registers of the TEE Gfx interface. These are located in
 *  SRBM-to-PSP mailbox registers (total 8 registers).
 */
@@ -55,8 +64,8 @@ struct psp_gfx_ctrl
     volatile uint32_t   rbi_rptr;         /* +8   Read pointer (index) of RBI ring */
     volatile uint32_t   gpcom_wptr;       /* +12  Write pointer (index) of GPCOM ring */
     volatile uint32_t   gpcom_rptr;       /* +16  Read pointer (index) of GPCOM ring */
-    volatile uint32_t   ring_addr_lo;     /* +20  bits [31:0] of physical address of ring buffer */
-    volatile uint32_t   ring_addr_hi;     /* +24  bits [63:32] of physical address of ring buffer */
+    volatile uint32_t   ring_addr_lo;     /* +20  bits [31:0] of GPU Virtual address of ring buffer (VMID=0) */
+    volatile uint32_t   ring_addr_hi;     /* +24  bits [63:32] of GPU Virtual address of ring buffer (VMID=0) */
     volatile uint32_t   ring_buf_size;    /* +28  Ring buffer size (in bytes) */
 
 };
@@ -78,6 +87,8 @@ enum psp_gfx_cmd_id
     GFX_CMD_ID_LOAD_ASD     = 0x00000004,   /* load ASD Driver */
     GFX_CMD_ID_SETUP_TMR    = 0x00000005,   /* setup TMR region */
     GFX_CMD_ID_LOAD_IP_FW   = 0x00000006,   /* load HW IP FW */
+    GFX_CMD_ID_DESTROY_TMR  = 0x00000007,   /* destroy TMR region */
+    GFX_CMD_ID_SAVE_RESTORE = 0x00000008,   /* save/restore HW IP FW */
 
 };
 
@@ -85,11 +96,11 @@ enum psp_gfx_cmd_id
 /* Command to load Trusted Application binary into PSP OS. */
 struct psp_gfx_cmd_load_ta
 {
-    uint32_t        app_phy_addr_lo;        /* bits [31:0] of the physical address of the TA binary (must be 4 KB aligned) */
-    uint32_t        app_phy_addr_hi;        /* bits [63:32] of the physical address of the TA binary */
+    uint32_t        app_phy_addr_lo;        /* bits [31:0] of the GPU Virtual address of the TA binary (must be 4 KB aligned) */
+    uint32_t        app_phy_addr_hi;        /* bits [63:32] of the GPU Virtual address of the TA binary */
     uint32_t        app_len;                /* length of the TA binary in bytes */
-    uint32_t        cmd_buf_phy_addr_lo;    /* bits [31:0] of the physical address of CMD buffer (must be 4 KB aligned) */
-    uint32_t        cmd_buf_phy_addr_hi;    /* bits [63:32] of the physical address of CMD buffer */
+    uint32_t        cmd_buf_phy_addr_lo;    /* bits [31:0] of the GPU Virtual address of CMD buffer (must be 4 KB aligned) */
+    uint32_t        cmd_buf_phy_addr_hi;    /* bits [63:32] of the GPU Virtual address of CMD buffer */
     uint32_t        cmd_buf_len;            /* length of the CMD buffer in bytes; must be multiple of 4 KB */
 
     /* Note: CmdBufLen can be set to 0. In this case no persistent CMD buffer is provided
@@ -111,8 +122,8 @@ struct psp_gfx_cmd_unload_ta
 */
 struct psp_gfx_buf_desc
 {
-    uint32_t        buf_phy_addr_lo;       /* bits [31:0] of physical address of the buffer (must be 4 KB aligned) */
-    uint32_t        buf_phy_addr_hi;       /* bits [63:32] of physical address of the buffer */
+    uint32_t        buf_phy_addr_lo;       /* bits [31:0] of GPU Virtual address of the buffer (must be 4 KB aligned) */
+    uint32_t        buf_phy_addr_hi;       /* bits [63:32] of GPU Virtual address of the buffer */
     uint32_t        buf_size;              /* buffer size in bytes (must be multiple of 4 KB and no bigger than 64 MB) */
 
 };
@@ -145,8 +156,8 @@ struct psp_gfx_cmd_invoke_cmd
 /* Command to setup TMR region. */
 struct psp_gfx_cmd_setup_tmr
 {
-    uint32_t        buf_phy_addr_lo;       /* bits [31:0] of physical address of TMR buffer (must be 4 KB aligned) */
-    uint32_t        buf_phy_addr_hi;       /* bits [63:32] of physical address of TMR buffer */
+    uint32_t        buf_phy_addr_lo;       /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */
+    uint32_t        buf_phy_addr_hi;       /* bits [63:32] of GPU Virtual address of TMR buffer */
     uint32_t        buf_size;              /* buffer size in bytes (must be multiple of 4 KB) */
 
 };
@@ -174,18 +185,32 @@ enum psp_gfx_fw_type
     GFX_FW_TYPE_ISP         = 16,
     GFX_FW_TYPE_ACP         = 17,
     GFX_FW_TYPE_SMU         = 18,
+    GFX_FW_TYPE_MMSCH       = 19,
+    GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM = 20,
+    GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM = 21,
+    GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL = 22,
+    GFX_FW_TYPE_MAX         = 23
 };
 
 /* Command to load HW IP FW. */
 struct psp_gfx_cmd_load_ip_fw
 {
-    uint32_t                fw_phy_addr_lo;    /* bits [31:0] of physical address of FW location (must be 4 KB aligned) */
-    uint32_t                fw_phy_addr_hi;    /* bits [63:32] of physical address of FW location */
+    uint32_t                fw_phy_addr_lo;    /* bits [31:0] of GPU Virtual address of FW location (must be 4 KB aligned) */
+    uint32_t                fw_phy_addr_hi;    /* bits [63:32] of GPU Virtual address of FW location */
     uint32_t                fw_size;           /* FW buffer size in bytes */
     enum psp_gfx_fw_type    fw_type;           /* FW type */
 
 };
 
+/* Command to save/restore HW IP FW. */
+struct psp_gfx_cmd_save_restore_ip_fw
+{
+    uint32_t                save_fw;              /* if set, command is used for saving FW, otherwise for restoring */
+    uint32_t                save_restore_addr_lo; /* bits [31:0] of FB address of GART memory used as save/restore buffer (must be 4 KB aligned) */
+    uint32_t                save_restore_addr_hi; /* bits [63:32] of FB address of GART memory used as save/restore buffer */
+    uint32_t                buf_size;             /* Size of the save/restore buffer in bytes */
+    enum psp_gfx_fw_type    fw_type;              /* FW type */
+};
 
 /* All GFX ring buffer commands. */
 union psp_gfx_commands
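
A hedged sketch of how a PSP client might fill the new command; buf_mc_addr and buf_size are hypothetical names for the GART-backed save/restore buffer, and the surrounding psp_gfx_cmd_resp setup and ring submission are elided:

	struct psp_gfx_cmd_save_restore_ip_fw cmd = {0};

	cmd.save_fw              = 1;	/* save; 0 would request a restore */
	cmd.save_restore_addr_lo = lower_32_bits(buf_mc_addr);
	cmd.save_restore_addr_hi = upper_32_bits(buf_mc_addr);
	cmd.buf_size             = buf_size;
	cmd.fw_type              = GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL;
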
@@ -195,7 +220,7 @@ union psp_gfx_commands
     struct psp_gfx_cmd_invoke_cmd       cmd_invoke_cmd;
     struct psp_gfx_cmd_setup_tmr        cmd_setup_tmr;
     struct psp_gfx_cmd_load_ip_fw       cmd_load_ip_fw;
-
+    struct psp_gfx_cmd_save_restore_ip_fw cmd_save_restore_ip_fw;
 };
 
 
@@ -226,8 +251,8 @@ struct psp_gfx_cmd_resp
 
     /* These fields are used for RBI only. They are all 0 in GPCOM commands
     */
-    uint32_t        resp_buf_addr_lo;   /* +12 bits [31:0] of physical address of response buffer (must be 4 KB aligned) */
-    uint32_t        resp_buf_addr_hi;   /* +16 bits [63:32] of physical address of response buffer */
+    uint32_t        resp_buf_addr_lo;   /* +12 bits [31:0] of GPU Virtual address of response buffer (must be 4 KB aligned) */
+    uint32_t        resp_buf_addr_hi;   /* +16 bits [63:32] of GPU Virtual address of response buffer */
     uint32_t        resp_offset;        /* +20 offset within response buffer */
     uint32_t        resp_buf_size;      /* +24 total size of the response buffer in bytes */
 
@@ -251,19 +276,19 @@ struct psp_gfx_cmd_resp
 /* Structure of the Ring Buffer Frame */
 struct psp_gfx_rb_frame
 {
-    uint32_t    cmd_buf_addr_lo;    /* +0  bits [31:0] of physical address of command buffer (must be 4 KB aligned) */
-    uint32_t    cmd_buf_addr_hi;    /* +4  bits [63:32] of physical address of command buffer */
+    uint32_t    cmd_buf_addr_lo;    /* +0  bits [31:0] of GPU Virtual address of command buffer (must be 4 KB aligned) */
+    uint32_t    cmd_buf_addr_hi;    /* +4  bits [63:32] of GPU Virtual address of command buffer */
     uint32_t    cmd_buf_size;       /* +8  command buffer size in bytes */
-    uint32_t    fence_addr_lo;      /* +12 bits [31:0] of physical address of Fence for this frame */
-    uint32_t    fence_addr_hi;      /* +16 bits [63:32] of physical address of Fence for this frame */
+    uint32_t    fence_addr_lo;      /* +12 bits [31:0] of GPU Virtual address of Fence for this frame */
+    uint32_t    fence_addr_hi;      /* +16 bits [63:32] of GPU Virtual address of Fence for this frame */
     uint32_t    fence_value;        /* +20 Fence value */
     uint32_t    sid_lo;             /* +24 bits [31:0] of SID value (used only for RBI frames) */
     uint32_t    sid_hi;             /* +28 bits [63:32] of SID value (used only for RBI frames) */
     uint8_t     vmid;               /* +32 VMID value used for mapping of all addresses for this frame */
     uint8_t     frame_type;         /* +33 1: destory context frame, 0: all other frames; used only for RBI frames */
     uint8_t     reserved1[2];       /* +34 reserved, must be 0 */
-    uint32_t    reserved2[7];       /* +40 reserved, must be 0 */
+    uint32_t    reserved2[7];       /* +36 reserved, must be 0 */
     /* total 64 bytes */
 };
 
 #endif /* _PSP_TEE_GFX_IF_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 8873d833a7f7..0ff136d02d9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -70,6 +70,15 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
 	case AMDGPU_UCODE_ID_RLC_G:
 		*type = GFX_FW_TYPE_RLC_G;
 		break;
+	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL;
+		break;
+	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
+		break;
+	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
+		break;
 	case AMDGPU_UCODE_ID_SMC:
 		*type = GFX_FW_TYPE_SMU;
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index be20a387d961..aa9ab299fd32 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -62,6 +62,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
62MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin"); 62MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
63MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin"); 63MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
64MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin"); 64MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
65MODULE_FIRMWARE("amdgpu/vegam_sdma.bin");
66MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin");
65 67
66 68
67static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = 69static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -209,6 +211,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
209 break; 211 break;
210 case CHIP_POLARIS11: 212 case CHIP_POLARIS11:
211 case CHIP_POLARIS12: 213 case CHIP_POLARIS12:
214 case CHIP_VEGAM:
212 amdgpu_device_program_register_sequence(adev, 215 amdgpu_device_program_register_sequence(adev,
213 golden_settings_polaris11_a11, 216 golden_settings_polaris11_a11,
214 ARRAY_SIZE(golden_settings_polaris11_a11)); 217 ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -275,15 +278,18 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
275 case CHIP_FIJI: 278 case CHIP_FIJI:
276 chip_name = "fiji"; 279 chip_name = "fiji";
277 break; 280 break;
278 case CHIP_POLARIS11:
279 chip_name = "polaris11";
280 break;
281 case CHIP_POLARIS10: 281 case CHIP_POLARIS10:
282 chip_name = "polaris10"; 282 chip_name = "polaris10";
283 break; 283 break;
284 case CHIP_POLARIS11:
285 chip_name = "polaris11";
286 break;
284 case CHIP_POLARIS12: 287 case CHIP_POLARIS12:
285 chip_name = "polaris12"; 288 chip_name = "polaris12";
286 break; 289 break;
290 case CHIP_VEGAM:
291 chip_name = "vegam";
292 break;
287 case CHIP_CARRIZO: 293 case CHIP_CARRIZO:
288 chip_name = "carrizo"; 294 chip_name = "carrizo";
289 break; 295 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 399f876f9cad..03a36cbe7557 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -360,6 +360,31 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
360 360
361} 361}
362 362
363static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
364 int mem_space, int hdp,
365 uint32_t addr0, uint32_t addr1,
366 uint32_t ref, uint32_t mask,
367 uint32_t inv)
368{
369 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
370 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
371 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
372 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
373 if (mem_space) {
374 /* memory */
375 amdgpu_ring_write(ring, addr0);
376 amdgpu_ring_write(ring, addr1);
377 } else {
378 /* registers */
379 amdgpu_ring_write(ring, addr0 << 2);
380 amdgpu_ring_write(ring, addr1 << 2);
381 }
382 amdgpu_ring_write(ring, ref); /* reference */
383 amdgpu_ring_write(ring, mask); /* mask */
384 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
385 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
386}
387
363/** 388/**
364 * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring 389 * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
365 * 390 *
@@ -378,15 +403,10 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
378 else 403 else
379 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1; 404 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
380 405
381 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | 406 sdma_v4_0_wait_reg_mem(ring, 0, 1,
382 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | 407 adev->nbio_funcs->get_hdp_flush_done_offset(adev),
383 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ 408 adev->nbio_funcs->get_hdp_flush_req_offset(adev),
384 amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2); 409 ref_and_mask, ref_and_mask, 10);
385 amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
386 amdgpu_ring_write(ring, ref_and_mask); /* reference */
387 amdgpu_ring_write(ring, ref_and_mask); /* mask */
388 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
389 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
390} 410}
391 411
392/** 412/**
@@ -1114,16 +1134,10 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1114 uint64_t addr = ring->fence_drv.gpu_addr; 1134 uint64_t addr = ring->fence_drv.gpu_addr;
1115 1135
1116 /* wait for idle */ 1136 /* wait for idle */
1117 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | 1137 sdma_v4_0_wait_reg_mem(ring, 1, 0,
1118 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | 1138 addr & 0xfffffffc,
1119 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */ 1139 upper_32_bits(addr) & 0xffffffff,
1120 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1)); 1140 seq, 0xffffffff, 4);
1121 amdgpu_ring_write(ring, addr & 0xfffffffc);
1122 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1123 amdgpu_ring_write(ring, seq); /* reference */
1124 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1125 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1126 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1127} 1141}
1128 1142
1129 1143
@@ -1154,15 +1168,7 @@ static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
1154static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 1168static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1155 uint32_t val, uint32_t mask) 1169 uint32_t val, uint32_t mask)
1156{ 1170{
1157 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | 1171 sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
1158 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1159 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1160 amdgpu_ring_write(ring, reg << 2);
1161 amdgpu_ring_write(ring, 0);
1162 amdgpu_ring_write(ring, val); /* reference */
1163 amdgpu_ring_write(ring, mask); /* mask */
1164 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1165 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1166} 1172}
1167 1173
1168static int sdma_v4_0_early_init(void *handle) 1174static int sdma_v4_0_early_init(void *handle)
@@ -1605,6 +1611,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
1605 .pad_ib = sdma_v4_0_ring_pad_ib, 1611 .pad_ib = sdma_v4_0_ring_pad_ib,
1606 .emit_wreg = sdma_v4_0_ring_emit_wreg, 1612 .emit_wreg = sdma_v4_0_ring_emit_wreg,
1607 .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait, 1613 .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
1614 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1608}; 1615};
1609 1616
1610static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) 1617static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
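
sdma_v4_0_wait_reg_mem() above folds three open-coded emitters (hdp_flush, pipeline_sync, reg_wait) into one six-dword POLL_REGMEM frame. A host-side model of that frame layout; the opcode and bit positions in the header and retry dword are illustrative placeholders, the authoritative values live in the vega10 SDMA packet headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed opcode and shifts; the real values are in the SDMA packet headers. */
#define SDMA_OP_POLL_REGMEM   8u
#define HDR_OP(x)             ((x) & 0xffu)
#define HDR_HDP_FLUSH(x)      ((uint32_t)(x) << 26) /* assumed shift */
#define HDR_FUNC(x)           ((uint32_t)(x) << 28) /* assumed shift */
#define HDR_MEM_POLL(x)       ((uint32_t)(x) << 31) /* assumed shift */

/* Mirror of sdma_v4_0_wait_reg_mem(): fill a six-dword POLL_REGMEM frame. */
static int wait_reg_mem(uint32_t *pkt, int mem_space, int hdp,
                        uint32_t addr0, uint32_t addr1,
                        uint32_t ref, uint32_t mask, uint32_t inv)
{
        int n = 0;

        pkt[n++] = HDR_OP(SDMA_OP_POLL_REGMEM) | HDR_HDP_FLUSH(hdp) |
                   HDR_MEM_POLL(mem_space) | HDR_FUNC(3); /* func 3: equal */
        if (mem_space) {                /* poll a memory location */
                pkt[n++] = addr0;       /* address bits [31:0]  */
                pkt[n++] = addr1;       /* address bits [63:32] */
        } else {                        /* poll registers: dword offsets */
                pkt[n++] = addr0 << 2;
                pkt[n++] = addr1 << 2;
        }
        pkt[n++] = ref;                 /* reference value */
        pkt[n++] = mask;                /* compare mask */
        pkt[n++] = (0xfffu << 16) | (inv & 0xffffu); /* retry count, interval (assumed shifts) */
        return n;                       /* always six dwords */
}

int main(void)
{
        uint32_t pkt[6];
        int n = wait_reg_mem(pkt, 0, 0, 0x1234, 0, 0xdead, 0xffff, 10);

        for (int i = 0; i < n; i++)
                printf("dw%d: 0x%08x\n", i, pkt[i]);
        return 0;
}
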
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index a675ec6d2811..c364ef94cc36 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1252,6 +1252,12 @@ static void si_invalidate_hdp(struct amdgpu_device *adev,
1252 } 1252 }
1253} 1253}
1254 1254
1255static bool si_need_full_reset(struct amdgpu_device *adev)
1256{
1257 /* change this when we support soft reset */
1258 return true;
1259}
1260
1255static int si_get_pcie_lanes(struct amdgpu_device *adev) 1261static int si_get_pcie_lanes(struct amdgpu_device *adev)
1256{ 1262{
1257 u32 link_width_cntl; 1263 u32 link_width_cntl;
@@ -1332,6 +1338,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
1332 .get_config_memsize = &si_get_config_memsize, 1338 .get_config_memsize = &si_get_config_memsize,
1333 .flush_hdp = &si_flush_hdp, 1339 .flush_hdp = &si_flush_hdp,
1334 .invalidate_hdp = &si_invalidate_hdp, 1340 .invalidate_hdp = &si_invalidate_hdp,
1341 .need_full_reset = &si_need_full_reset,
1335}; 1342};
1336 1343
1337static uint32_t si_get_rev_id(struct amdgpu_device *adev) 1344static uint32_t si_get_rev_id(struct amdgpu_device *adev)
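
si_need_full_reset() is the SI implementation of the new need_full_reset ASIC hook, which lets common code pick a reset strategy per chip. A standalone model of the indirection; the gpu_recover() caller here is hypothetical, since the core consumer is not part of this patch:

#include <stdbool.h>
#include <stdio.h>

struct amdgpu_device;

/* Stand-in for the relevant slice of amdgpu_asic_funcs. */
struct amdgpu_asic_funcs {
        bool (*need_full_reset)(struct amdgpu_device *adev);
};

struct amdgpu_device {
        const struct amdgpu_asic_funcs *asic_funcs;
};

static bool si_need_full_reset(struct amdgpu_device *adev)
{
        (void)adev;
        return true; /* SI has no soft reset yet, mirroring the patch */
}

static const struct amdgpu_asic_funcs si_asic_funcs = {
        .need_full_reset = si_need_full_reset,
};

/* Hypothetical recovery path choosing between reset flavours. */
static void gpu_recover(struct amdgpu_device *adev)
{
        if (adev->asic_funcs->need_full_reset(adev))
                puts("full adapter reset");
        else
                puts("per-block soft reset");
}

int main(void)
{
        struct amdgpu_device adev = { .asic_funcs = &si_asic_funcs };

        gpu_recover(&adev);
        return 0;
}
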
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 797d505bf9ee..b12d7c9d42a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7580,7 +7580,7 @@ static int si_dpm_late_init(void *handle)
7580 int ret; 7580 int ret;
7581 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7581 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7582 7582
7583 if (!amdgpu_dpm) 7583 if (!adev->pm.dpm_enabled)
7584 return 0; 7584 return 0;
7585 7585
7586 ret = si_set_temperature_range(adev); 7586 ret = si_set_temperature_range(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 51cf8a30f6c2..90065766fffb 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -52,6 +52,7 @@
52#include "gmc_v9_0.h" 52#include "gmc_v9_0.h"
53#include "gfxhub_v1_0.h" 53#include "gfxhub_v1_0.h"
54#include "mmhub_v1_0.h" 54#include "mmhub_v1_0.h"
55#include "df_v1_7.h"
55#include "vega10_ih.h" 56#include "vega10_ih.h"
56#include "sdma_v4_0.h" 57#include "sdma_v4_0.h"
57#include "uvd_v7_0.h" 58#include "uvd_v7_0.h"
@@ -60,33 +61,6 @@
60#include "dce_virtual.h" 61#include "dce_virtual.h"
61#include "mxgpu_ai.h" 62#include "mxgpu_ai.h"
62 63
63#define mmFabricConfigAccessControl 0x0410
64#define mmFabricConfigAccessControl_BASE_IDX 0
65#define mmFabricConfigAccessControl_DEFAULT 0x00000000
66//FabricConfigAccessControl
67#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0
68#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1
69#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10
70#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L
71#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L
72#define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L
73
74
75#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
76#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0
77//DF_PIE_AON0_DfGlobalClkGater
78#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
79#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL
80
81enum {
82 DF_MGCG_DISABLE = 0,
83 DF_MGCG_ENABLE_00_CYCLE_DELAY =1,
84 DF_MGCG_ENABLE_01_CYCLE_DELAY =2,
85 DF_MGCG_ENABLE_15_CYCLE_DELAY =13,
86 DF_MGCG_ENABLE_31_CYCLE_DELAY =14,
87 DF_MGCG_ENABLE_63_CYCLE_DELAY =15
88};
89
90#define mmMP0_MISC_CGTT_CTRL0 0x01b9 64#define mmMP0_MISC_CGTT_CTRL0 0x01b9
91#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0 65#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
92#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba 66#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
@@ -313,6 +287,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
313 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, 287 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
314 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, 288 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
315 { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, 289 { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
290 { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
316}; 291};
317 292
318static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num, 293static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
@@ -341,6 +316,8 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
341 } else { 316 } else {
342 if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)) 317 if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
343 return adev->gfx.config.gb_addr_config; 318 return adev->gfx.config.gb_addr_config;
319 else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
320 return adev->gfx.config.db_debug2;
344 return RREG32(reg_offset); 321 return RREG32(reg_offset);
345 } 322 }
346} 323}
@@ -521,6 +498,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
521 else 498 else
522 adev->nbio_funcs = &nbio_v6_1_funcs; 499 adev->nbio_funcs = &nbio_v6_1_funcs;
523 500
501 adev->df_funcs = &df_v1_7_funcs;
524 adev->nbio_funcs->detect_hw_virt(adev); 502 adev->nbio_funcs->detect_hw_virt(adev);
525 503
526 if (amdgpu_sriov_vf(adev)) 504 if (amdgpu_sriov_vf(adev))
@@ -593,6 +571,12 @@ static void soc15_invalidate_hdp(struct amdgpu_device *adev,
593 HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); 571 HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
594} 572}
595 573
574static bool soc15_need_full_reset(struct amdgpu_device *adev)
575{
576 /* change this when we implement soft reset */
577 return true;
578}
579
596static const struct amdgpu_asic_funcs soc15_asic_funcs = 580static const struct amdgpu_asic_funcs soc15_asic_funcs =
597{ 581{
598 .read_disabled_bios = &soc15_read_disabled_bios, 582 .read_disabled_bios = &soc15_read_disabled_bios,
@@ -606,6 +590,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
606 .get_config_memsize = &soc15_get_config_memsize, 590 .get_config_memsize = &soc15_get_config_memsize,
607 .flush_hdp = &soc15_flush_hdp, 591 .flush_hdp = &soc15_flush_hdp,
608 .invalidate_hdp = &soc15_invalidate_hdp, 592 .invalidate_hdp = &soc15_invalidate_hdp,
593 .need_full_reset = &soc15_need_full_reset,
609}; 594};
610 595
611static int soc15_common_early_init(void *handle) 596static int soc15_common_early_init(void *handle)
@@ -697,6 +682,11 @@ static int soc15_common_early_init(void *handle)
697 AMD_CG_SUPPORT_SDMA_LS; 682 AMD_CG_SUPPORT_SDMA_LS;
698 adev->pg_flags = AMD_PG_SUPPORT_SDMA; 683 adev->pg_flags = AMD_PG_SUPPORT_SDMA;
699 684
685 if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
686 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
687 AMD_PG_SUPPORT_CP |
688 AMD_PG_SUPPORT_RLC_SMU_HS;
689
700 adev->external_rev_id = 0x1; 690 adev->external_rev_id = 0x1;
701 break; 691 break;
702 default: 692 default:
@@ -871,32 +861,6 @@ static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *ade
871 WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data); 861 WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
872} 862}
873 863
874static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
875 bool enable)
876{
877 uint32_t data;
878
879 /* Put DF on broadcast mode */
880 data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
881 data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
882 WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);
883
884 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
885 data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
886 data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
887 data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
888 WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
889 } else {
890 data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
891 data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
892 data |= DF_MGCG_DISABLE;
893 WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
894 }
895
896 WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
897 mmFabricConfigAccessControl_DEFAULT);
898}
899
900static int soc15_common_set_clockgating_state(void *handle, 864static int soc15_common_set_clockgating_state(void *handle,
901 enum amd_clockgating_state state) 865 enum amd_clockgating_state state)
902{ 866{
@@ -920,7 +884,7 @@ static int soc15_common_set_clockgating_state(void *handle,
920 state == AMD_CG_STATE_GATE ? true : false); 884 state == AMD_CG_STATE_GATE ? true : false);
921 soc15_update_rom_medium_grain_clock_gating(adev, 885 soc15_update_rom_medium_grain_clock_gating(adev,
922 state == AMD_CG_STATE_GATE ? true : false); 886 state == AMD_CG_STATE_GATE ? true : false);
923 soc15_update_df_medium_grain_clock_gating(adev, 887 adev->df_funcs->update_medium_grain_clock_gating(adev,
924 state == AMD_CG_STATE_GATE ? true : false); 888 state == AMD_CG_STATE_GATE ? true : false);
925 break; 889 break;
926 case CHIP_RAVEN: 890 case CHIP_RAVEN:
@@ -973,10 +937,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
973 if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) 937 if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
974 *flags |= AMD_CG_SUPPORT_ROM_MGCG; 938 *flags |= AMD_CG_SUPPORT_ROM_MGCG;
975 939
976 /* AMD_CG_SUPPORT_DF_MGCG */ 940 adev->df_funcs->get_clockgating_state(adev, flags);
977 data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
978 if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY)
979 *flags |= AMD_CG_SUPPORT_DF_MGCG;
980} 941}
981 942
982static int soc15_common_set_powergating_state(void *handle, 943static int soc15_common_set_powergating_state(void *handle,
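
The DF (data fabric) clock-gating code deleted above moves behind the new adev->df_funcs ops table, implemented by df_v1_7, so soc15.c no longer hard-codes fabric registers. A standalone sketch of the indirection with stand-in types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct amdgpu_device;

/* Stand-in for the new per-fabric ops table; illustration only. */
struct amdgpu_df_funcs {
        void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
                                                 bool enable);
        void (*get_clockgating_state)(struct amdgpu_device *adev,
                                      uint32_t *flags);
};

struct amdgpu_device {
        const struct amdgpu_df_funcs *df_funcs;
};

static void df_v1_7_update_mgcg(struct amdgpu_device *adev, bool enable)
{
        (void)adev;
        printf("DF MGCG %s\n", enable ? "on" : "off");
}

static void df_v1_7_get_cg_state(struct amdgpu_device *adev, uint32_t *flags)
{
        (void)adev;
        *flags |= 1u; /* pretend AMD_CG_SUPPORT_DF_MGCG is active */
}

static const struct amdgpu_df_funcs df_v1_7_funcs = {
        .update_medium_grain_clock_gating = df_v1_7_update_mgcg,
        .get_clockgating_state = df_v1_7_get_cg_state,
};

int main(void)
{
        struct amdgpu_device adev = { .df_funcs = &df_v1_7_funcs };
        uint32_t flags = 0;

        adev.df_funcs->update_medium_grain_clock_gating(&adev, true);
        adev.df_funcs->get_clockgating_state(&adev, &flags);
        printf("flags: 0x%x\n", flags);
        return 0;
}

Future fabrics can then slot in their own ops table without further churn in soc15.c.
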
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index f22f7a88ce0f..8dc29107228f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -159,6 +159,7 @@
159#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ 159#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
160#define EOP_TCL1_ACTION_EN (1 << 16) 160#define EOP_TCL1_ACTION_EN (1 << 16)
161#define EOP_TC_ACTION_EN (1 << 17) /* L2 */ 161#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
162#define EOP_TC_NC_ACTION_EN (1 << 19)
162#define EOP_TC_MD_ACTION_EN (1 << 21) /* L2 metadata */ 163#define EOP_TC_MD_ACTION_EN (1 << 21) /* L2 metadata */
163 164
164#define DATA_SEL(x) ((x) << 29) 165#define DATA_SEL(x) ((x) << 29)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 948bb9437757..87cbb142dd0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -688,7 +688,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
688 688
689 if (state == AMD_PG_STATE_GATE) { 689 if (state == AMD_PG_STATE_GATE) {
690 uvd_v4_2_stop(adev); 690 uvd_v4_2_stop(adev);
691 if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { 691 if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
692 if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & 692 if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
693 CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) { 693 CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
694 WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | 694 WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
@@ -699,7 +699,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
699 } 699 }
700 return 0; 700 return 0;
701 } else { 701 } else {
702 if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { 702 if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
703 if (RREG32_SMC(ixCURRENT_PG_STATUS) & 703 if (RREG32_SMC(ixCURRENT_PG_STATUS) &
704 CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { 704 CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
705 WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | 705 WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index f26f515db2fb..ca6ab56357b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -62,7 +62,7 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
62static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev) 62static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
63{ 63{
64 return ((adev->asic_type >= CHIP_POLARIS10) && 64 return ((adev->asic_type >= CHIP_POLARIS10) &&
65 (adev->asic_type <= CHIP_POLARIS12) && 65 (adev->asic_type <= CHIP_VEGAM) &&
66 (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16)); 66 (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
67} 67}
68 68
@@ -429,7 +429,7 @@ static int uvd_v6_0_sw_init(void *handle)
429 ring = &adev->uvd.ring_enc[0]; 429 ring = &adev->uvd.ring_enc[0];
430 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 430 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
431 r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, 431 r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
432 rq, amdgpu_sched_jobs, NULL); 432 rq, NULL);
433 if (r) { 433 if (r) {
434 DRM_ERROR("Failed setting up UVD ENC run queue.\n"); 434 DRM_ERROR("Failed setting up UVD ENC run queue.\n");
435 return r; 435 return r;
@@ -964,6 +964,16 @@ static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
964} 964}
965 965
966/** 966/**
967 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
968 *
969 * @ring: amdgpu_ring pointer
970 */
971static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
972{
973 /* The firmware doesn't seem to like touching registers at this point. */
974}
975
976/**
967 * uvd_v6_0_ring_test_ring - register write test 977 * uvd_v6_0_ring_test_ring - register write test
968 * 978 *
969 * @ring: amdgpu_ring pointer 979 * @ring: amdgpu_ring pointer
@@ -1528,12 +1538,13 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
1528 .set_wptr = uvd_v6_0_ring_set_wptr, 1538 .set_wptr = uvd_v6_0_ring_set_wptr,
1529 .parse_cs = amdgpu_uvd_ring_parse_cs, 1539 .parse_cs = amdgpu_uvd_ring_parse_cs,
1530 .emit_frame_size = 1540 .emit_frame_size =
1531 6 + 6 + /* hdp flush / invalidate */ 1541 6 + /* hdp invalidate */
1532 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ 1542 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
1533 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */ 1543 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
1534 .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */ 1544 .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
1535 .emit_ib = uvd_v6_0_ring_emit_ib, 1545 .emit_ib = uvd_v6_0_ring_emit_ib,
1536 .emit_fence = uvd_v6_0_ring_emit_fence, 1546 .emit_fence = uvd_v6_0_ring_emit_fence,
1547 .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1537 .test_ring = uvd_v6_0_ring_test_ring, 1548 .test_ring = uvd_v6_0_ring_test_ring,
1538 .test_ib = amdgpu_uvd_ring_test_ib, 1549 .test_ib = amdgpu_uvd_ring_test_ib,
1539 .insert_nop = amdgpu_ring_insert_nop, 1550 .insert_nop = amdgpu_ring_insert_nop,
@@ -1552,7 +1563,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
1552 .get_wptr = uvd_v6_0_ring_get_wptr, 1563 .get_wptr = uvd_v6_0_ring_get_wptr,
1553 .set_wptr = uvd_v6_0_ring_set_wptr, 1564 .set_wptr = uvd_v6_0_ring_set_wptr,
1554 .emit_frame_size = 1565 .emit_frame_size =
1555 6 + 6 + /* hdp flush / invalidate */ 1566 6 + /* hdp invalidate */
1556 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ 1567 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
1557 VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */ 1568 VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
1558 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */ 1569 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
@@ -1561,6 +1572,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
1561 .emit_fence = uvd_v6_0_ring_emit_fence, 1572 .emit_fence = uvd_v6_0_ring_emit_fence,
1562 .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, 1573 .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
1563 .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync, 1574 .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
1575 .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1564 .test_ring = uvd_v6_0_ring_test_ring, 1576 .test_ring = uvd_v6_0_ring_test_ring,
1565 .test_ib = amdgpu_uvd_ring_test_ib, 1577 .test_ib = amdgpu_uvd_ring_test_ib,
1566 .insert_nop = amdgpu_ring_insert_nop, 1578 .insert_nop = amdgpu_ring_insert_nop,
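
uvd_v6_0_ring_emit_hdp_flush() is deliberately empty. Assuming the submission path falls back to a register-based ASIC flush only when the hook is absent (the usual amdgpu pattern), installing an empty stub is how a ring opts out of HDP flushing entirely; a standalone model of that assumed behaviour:

#include <stdio.h>

struct ring;

struct ring_funcs {
        void (*emit_hdp_flush)(struct ring *r); /* optional hook */
};

struct ring {
        const struct ring_funcs *funcs;
};

static void asic_flush_hdp(void)
{
        puts("default register-based HDP flush");
}

/* Empty stub, as UVD installs above: opt out of HDP flushing. */
static void uvd_emit_hdp_flush(struct ring *r)
{
        (void)r; /* the firmware dislikes register writes at this point */
}

/* Assumed core behaviour: NULL hook -> ASIC fallback, stub -> no-op. */
static void flush_hdp(struct ring *r)
{
        if (r->funcs->emit_hdp_flush)
                r->funcs->emit_hdp_flush(r);
        else
                asic_flush_hdp();
}

static const struct ring_funcs uvd_funcs = { .emit_hdp_flush = uvd_emit_hdp_flush };
static const struct ring_funcs plain_funcs = { .emit_hdp_flush = NULL };

int main(void)
{
        struct ring uvd = { &uvd_funcs }, plain = { &plain_funcs };

        flush_hdp(&uvd);   /* silent */
        flush_hdp(&plain); /* falls back to the register flush */
        return 0;
}
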
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index eddc57f3b72a..0ca63d588670 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -418,7 +418,7 @@ static int uvd_v7_0_sw_init(void *handle)
418 ring = &adev->uvd.ring_enc[0]; 418 ring = &adev->uvd.ring_enc[0];
419 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 419 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
420 r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, 420 r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
421 rq, amdgpu_sched_jobs, NULL); 421 rq, NULL);
422 if (r) { 422 if (r) {
423 DRM_ERROR("Failed setting up UVD ENC run queue.\n"); 423 DRM_ERROR("Failed setting up UVD ENC run queue.\n");
424 return r; 424 return r;
@@ -1136,6 +1136,16 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1136} 1136}
1137 1137
1138/** 1138/**
1139 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
1140 *
1141 * @ring: amdgpu_ring pointer
1142 */
1143static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1144{
1145 /* The firmware doesn't seem to like touching registers at this point. */
1146}
1147
1148/**
1139 * uvd_v7_0_ring_test_ring - register write test 1149 * uvd_v7_0_ring_test_ring - register write test
1140 * 1150 *
1141 * @ring: amdgpu_ring pointer 1151 * @ring: amdgpu_ring pointer
@@ -1654,7 +1664,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1654 .get_wptr = uvd_v7_0_ring_get_wptr, 1664 .get_wptr = uvd_v7_0_ring_get_wptr,
1655 .set_wptr = uvd_v7_0_ring_set_wptr, 1665 .set_wptr = uvd_v7_0_ring_set_wptr,
1656 .emit_frame_size = 1666 .emit_frame_size =
1657 6 + 6 + /* hdp flush / invalidate */ 1667 6 + /* hdp invalidate */
1658 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 1668 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1659 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + 1669 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1660 8 + /* uvd_v7_0_ring_emit_vm_flush */ 1670 8 + /* uvd_v7_0_ring_emit_vm_flush */
@@ -1663,6 +1673,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1663 .emit_ib = uvd_v7_0_ring_emit_ib, 1673 .emit_ib = uvd_v7_0_ring_emit_ib,
1664 .emit_fence = uvd_v7_0_ring_emit_fence, 1674 .emit_fence = uvd_v7_0_ring_emit_fence,
1665 .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush, 1675 .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1676 .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1666 .test_ring = uvd_v7_0_ring_test_ring, 1677 .test_ring = uvd_v7_0_ring_test_ring,
1667 .test_ib = amdgpu_uvd_ring_test_ib, 1678 .test_ib = amdgpu_uvd_ring_test_ib,
1668 .insert_nop = uvd_v7_0_ring_insert_nop, 1679 .insert_nop = uvd_v7_0_ring_insert_nop,
@@ -1671,6 +1682,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1671 .end_use = amdgpu_uvd_ring_end_use, 1682 .end_use = amdgpu_uvd_ring_end_use,
1672 .emit_wreg = uvd_v7_0_ring_emit_wreg, 1683 .emit_wreg = uvd_v7_0_ring_emit_wreg,
1673 .emit_reg_wait = uvd_v7_0_ring_emit_reg_wait, 1684 .emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1685 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1674}; 1686};
1675 1687
1676static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { 1688static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
@@ -1702,6 +1714,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1702 .end_use = amdgpu_uvd_ring_end_use, 1714 .end_use = amdgpu_uvd_ring_end_use,
1703 .emit_wreg = uvd_v7_0_enc_ring_emit_wreg, 1715 .emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1704 .emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait, 1716 .emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1717 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1705}; 1718};
1706 1719
1707static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev) 1720static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 428d1928e44e..0999c843f623 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -388,7 +388,8 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
388 default: 388 default:
389 if ((adev->asic_type == CHIP_POLARIS10) || 389 if ((adev->asic_type == CHIP_POLARIS10) ||
390 (adev->asic_type == CHIP_POLARIS11) || 390 (adev->asic_type == CHIP_POLARIS11) ||
391 (adev->asic_type == CHIP_POLARIS12)) 391 (adev->asic_type == CHIP_POLARIS12) ||
392 (adev->asic_type == CHIP_VEGAM))
392 return AMDGPU_VCE_HARVEST_VCE1; 393 return AMDGPU_VCE_HARVEST_VCE1;
393 394
394 return 0; 395 return 0;
@@ -467,8 +468,8 @@ static int vce_v3_0_hw_init(void *handle)
467 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 468 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
468 469
469 vce_v3_0_override_vce_clock_gating(adev, true); 470 vce_v3_0_override_vce_clock_gating(adev, true);
470 if (!(adev->flags & AMD_IS_APU)) 471
471 amdgpu_asic_set_vce_clocks(adev, 10000, 10000); 472 amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
472 473
473 for (i = 0; i < adev->vce.num_rings; i++) 474 for (i = 0; i < adev->vce.num_rings; i++)
474 adev->vce.ring[i].ready = false; 475 adev->vce.ring[i].ready = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 73fd48d6c756..8fd1b742985a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -1081,6 +1081,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
1081 .end_use = amdgpu_vce_ring_end_use, 1081 .end_use = amdgpu_vce_ring_end_use,
1082 .emit_wreg = vce_v4_0_emit_wreg, 1082 .emit_wreg = vce_v4_0_emit_wreg,
1083 .emit_reg_wait = vce_v4_0_emit_reg_wait, 1083 .emit_reg_wait = vce_v4_0_emit_reg_wait,
1084 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1084}; 1085};
1085 1086
1086static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev) 1087static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 8c132673bc79..0501746b6c2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1109,6 +1109,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
1109 .end_use = amdgpu_vcn_ring_end_use, 1109 .end_use = amdgpu_vcn_ring_end_use,
1110 .emit_wreg = vcn_v1_0_dec_ring_emit_wreg, 1110 .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
1111 .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait, 1111 .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
1112 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1112}; 1113};
1113 1114
1114static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { 1115static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
@@ -1139,6 +1140,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
1139 .end_use = amdgpu_vcn_ring_end_use, 1140 .end_use = amdgpu_vcn_ring_end_use,
1140 .emit_wreg = vcn_v1_0_enc_ring_emit_wreg, 1141 .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
1141 .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait, 1142 .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
1143 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1142}; 1144};
1143 1145
1144static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) 1146static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
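
Several ring-funcs tables above gain .emit_reg_write_reg_wait through a shared helper. Its body is not shown in this patch; a plausible standalone model, built from the two hooks these rings already provide (write one register, then poll another), is:

#include <stdint.h>
#include <stdio.h>

struct ring;

struct ring_funcs {
        void (*emit_wreg)(struct ring *r, uint32_t reg, uint32_t val);
        void (*emit_reg_wait)(struct ring *r, uint32_t reg, uint32_t val,
                              uint32_t mask);
};

struct ring {
        const struct ring_funcs *funcs;
};

static void demo_wreg(struct ring *r, uint32_t reg, uint32_t val)
{
        (void)r;
        printf("write reg 0x%04x = 0x%08x\n", reg, val);
}

static void demo_reg_wait(struct ring *r, uint32_t reg, uint32_t val,
                          uint32_t mask)
{
        (void)r;
        printf("wait  reg 0x%04x & 0x%08x == 0x%08x\n", reg, mask, val);
}

/* Generic two-step fallback: write reg0, then wait for reg1 to reflect it.
 * Rings with a fused write-and-wait packet would override this hook. */
static void reg_write_reg_wait_helper(struct ring *r, uint32_t reg0,
                                      uint32_t reg1, uint32_t ref,
                                      uint32_t mask)
{
        r->funcs->emit_wreg(r, reg0, ref);
        r->funcs->emit_reg_wait(r, reg1, ref, mask);
}

int main(void)
{
        static const struct ring_funcs funcs = { demo_wreg, demo_reg_wait };
        struct ring r = { &funcs };

        reg_write_reg_wait_helper(&r, 0x10, 0x14, 0x1, 0x1);
        return 0;
}
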
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 126f1276d347..4ac1288ab7df 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -305,9 +305,10 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
305 stoney_mgcg_cgcg_init, 305 stoney_mgcg_cgcg_init,
306 ARRAY_SIZE(stoney_mgcg_cgcg_init)); 306 ARRAY_SIZE(stoney_mgcg_cgcg_init));
307 break; 307 break;
308 case CHIP_POLARIS11:
309 case CHIP_POLARIS10: 308 case CHIP_POLARIS10:
309 case CHIP_POLARIS11:
310 case CHIP_POLARIS12: 310 case CHIP_POLARIS12:
311 case CHIP_VEGAM:
311 default: 312 default:
312 break; 313 break;
313 } 314 }
@@ -728,33 +729,59 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
728 return r; 729 return r;
729 730
730 tmp = RREG32_SMC(cntl_reg); 731 tmp = RREG32_SMC(cntl_reg);
731 tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK | 732
732 CG_DCLK_CNTL__DCLK_DIVIDER_MASK); 733 if (adev->flags & AMD_IS_APU)
734 tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
735 else
736 tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
737 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
733 tmp |= dividers.post_divider; 738 tmp |= dividers.post_divider;
734 WREG32_SMC(cntl_reg, tmp); 739 WREG32_SMC(cntl_reg, tmp);
735 740
736 for (i = 0; i < 100; i++) { 741 for (i = 0; i < 100; i++) {
737 if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK) 742 tmp = RREG32_SMC(status_reg);
738 break; 743 if (adev->flags & AMD_IS_APU) {
744 if (tmp & 0x10000)
745 break;
746 } else {
747 if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
748 break;
749 }
739 mdelay(10); 750 mdelay(10);
740 } 751 }
741 if (i == 100) 752 if (i == 100)
742 return -ETIMEDOUT; 753 return -ETIMEDOUT;
743
744 return 0; 754 return 0;
745} 755}
746 756
757#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
758#define ixGNB_CLK1_STATUS 0xD822010C
759#define ixGNB_CLK2_DFS_CNTL 0xD8220110
760#define ixGNB_CLK2_STATUS 0xD822012C
761#define ixGNB_CLK3_DFS_CNTL 0xD8220130
762#define ixGNB_CLK3_STATUS 0xD822014C
763
747static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) 764static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
748{ 765{
749 int r; 766 int r;
750 767
751 r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); 768 if (adev->flags & AMD_IS_APU) {
752 if (r) 769 r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
753 return r; 770 if (r)
771 return r;
754 772
755 r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); 773 r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
756 if (r) 774 if (r)
757 return r; 775 return r;
776 } else {
777 r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
778 if (r)
779 return r;
780
781 r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
782 if (r)
783 return r;
784 }
758 785
759 return 0; 786 return 0;
760} 787}
@@ -764,6 +791,22 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
764 int r, i; 791 int r, i;
765 struct atom_clock_dividers dividers; 792 struct atom_clock_dividers dividers;
766 u32 tmp; 793 u32 tmp;
794 u32 reg_ctrl;
795 u32 reg_status;
796 u32 status_mask;
797 u32 reg_mask;
798
799 if (adev->flags & AMD_IS_APU) {
800 reg_ctrl = ixGNB_CLK3_DFS_CNTL;
801 reg_status = ixGNB_CLK3_STATUS;
802 status_mask = 0x00010000;
803 reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
804 } else {
805 reg_ctrl = ixCG_ECLK_CNTL;
806 reg_status = ixCG_ECLK_STATUS;
807 status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
808 reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
809 }
767 810
768 r = amdgpu_atombios_get_clock_dividers(adev, 811 r = amdgpu_atombios_get_clock_dividers(adev,
769 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 812 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
@@ -772,24 +815,25 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
772 return r; 815 return r;
773 816
774 for (i = 0; i < 100; i++) { 817 for (i = 0; i < 100; i++) {
775 if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK) 818 if (RREG32_SMC(reg_status) & status_mask)
776 break; 819 break;
777 mdelay(10); 820 mdelay(10);
778 } 821 }
822
779 if (i == 100) 823 if (i == 100)
780 return -ETIMEDOUT; 824 return -ETIMEDOUT;
781 825
782 tmp = RREG32_SMC(ixCG_ECLK_CNTL); 826 tmp = RREG32_SMC(reg_ctrl);
783 tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | 827 tmp &= ~reg_mask;
784 CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
785 tmp |= dividers.post_divider; 828 tmp |= dividers.post_divider;
786 WREG32_SMC(ixCG_ECLK_CNTL, tmp); 829 WREG32_SMC(reg_ctrl, tmp);
787 830
788 for (i = 0; i < 100; i++) { 831 for (i = 0; i < 100; i++) {
789 if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK) 832 if (RREG32_SMC(reg_status) & status_mask)
790 break; 833 break;
791 mdelay(10); 834 mdelay(10);
792 } 835 }
836
793 if (i == 100) 837 if (i == 100)
794 return -ETIMEDOUT; 838 return -ETIMEDOUT;
795 839
@@ -876,6 +920,27 @@ static void vi_invalidate_hdp(struct amdgpu_device *adev,
876 } 920 }
877} 921}
878 922
923static bool vi_need_full_reset(struct amdgpu_device *adev)
924{
925 switch (adev->asic_type) {
926 case CHIP_CARRIZO:
927 case CHIP_STONEY:
928 /* CZ has hang issues with full reset at the moment */
929 return false;
930 case CHIP_FIJI:
931 case CHIP_TONGA:
932 /* XXX: soft reset should work on fiji and tonga */
933 return true;
934 case CHIP_POLARIS10:
935 case CHIP_POLARIS11:
936 case CHIP_POLARIS12:
937 case CHIP_TOPAZ:
938 default:
939 /* change this when we support soft reset */
940 return true;
941 }
942}
943
879static const struct amdgpu_asic_funcs vi_asic_funcs = 944static const struct amdgpu_asic_funcs vi_asic_funcs =
880{ 945{
881 .read_disabled_bios = &vi_read_disabled_bios, 946 .read_disabled_bios = &vi_read_disabled_bios,
@@ -889,6 +954,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
889 .get_config_memsize = &vi_get_config_memsize, 954 .get_config_memsize = &vi_get_config_memsize,
890 .flush_hdp = &vi_flush_hdp, 955 .flush_hdp = &vi_flush_hdp,
891 .invalidate_hdp = &vi_invalidate_hdp, 956 .invalidate_hdp = &vi_invalidate_hdp,
957 .need_full_reset = &vi_need_full_reset,
892}; 958};
893 959
894#define CZ_REV_BRISTOL(rev) \ 960#define CZ_REV_BRISTOL(rev) \
@@ -1031,6 +1097,30 @@ static int vi_common_early_init(void *handle)
1031 adev->pg_flags = 0; 1097 adev->pg_flags = 0;
1032 adev->external_rev_id = adev->rev_id + 0x64; 1098 adev->external_rev_id = adev->rev_id + 0x64;
1033 break; 1099 break;
1100 case CHIP_VEGAM:
1101 adev->cg_flags = 0;
1102 /*AMD_CG_SUPPORT_GFX_MGCG |
1103 AMD_CG_SUPPORT_GFX_RLC_LS |
1104 AMD_CG_SUPPORT_GFX_CP_LS |
1105 AMD_CG_SUPPORT_GFX_CGCG |
1106 AMD_CG_SUPPORT_GFX_CGLS |
1107 AMD_CG_SUPPORT_GFX_3D_CGCG |
1108 AMD_CG_SUPPORT_GFX_3D_CGLS |
1109 AMD_CG_SUPPORT_SDMA_MGCG |
1110 AMD_CG_SUPPORT_SDMA_LS |
1111 AMD_CG_SUPPORT_BIF_MGCG |
1112 AMD_CG_SUPPORT_BIF_LS |
1113 AMD_CG_SUPPORT_HDP_MGCG |
1114 AMD_CG_SUPPORT_HDP_LS |
1115 AMD_CG_SUPPORT_ROM_MGCG |
1116 AMD_CG_SUPPORT_MC_MGCG |
1117 AMD_CG_SUPPORT_MC_LS |
1118 AMD_CG_SUPPORT_DRM_LS |
1119 AMD_CG_SUPPORT_UVD_MGCG |
1120 AMD_CG_SUPPORT_VCE_MGCG;*/
1121 adev->pg_flags = 0;
1122 adev->external_rev_id = adev->rev_id + 0x6E;
1123 break;
1034 case CHIP_CARRIZO: 1124 case CHIP_CARRIZO:
1035 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | 1125 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1036 AMD_CG_SUPPORT_GFX_MGCG | 1126 AMD_CG_SUPPORT_GFX_MGCG |
@@ -1422,6 +1512,7 @@ static int vi_common_set_clockgating_state(void *handle,
1422 case CHIP_POLARIS10: 1512 case CHIP_POLARIS10:
1423 case CHIP_POLARIS11: 1513 case CHIP_POLARIS11:
1424 case CHIP_POLARIS12: 1514 case CHIP_POLARIS12:
1515 case CHIP_VEGAM:
1425 vi_common_set_clockgating_state_by_smu(adev, state); 1516 vi_common_set_clockgating_state_by_smu(adev, state);
1426 default: 1517 default:
1427 break; 1518 break;
@@ -1551,9 +1642,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1551 amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block); 1642 amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
1552 } 1643 }
1553 break; 1644 break;
1554 case CHIP_POLARIS11:
1555 case CHIP_POLARIS10: 1645 case CHIP_POLARIS10:
1646 case CHIP_POLARIS11:
1556 case CHIP_POLARIS12: 1647 case CHIP_POLARIS12:
1648 case CHIP_VEGAM:
1557 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1649 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1558 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block); 1650 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
1559 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1651 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
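
The vi_set_uvd_clock()/vi_set_vce_clocks() changes above select the GNB register set on APUs while dGPUs keep the CG_* set, and both flavours share the same bounded poll. A standalone sketch of that pattern; the register IDs and the dGPU status mask are stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in register IDs; the real ix* constants live in the SMC headers. */
enum { REG_CG_ECLK_STATUS, REG_GNB_CLK3_STATUS };

static uint32_t rreg32_smc(int reg)
{
        /* fake hardware: only the GNB clock ever reports stable here */
        return reg == REG_GNB_CLK3_STATUS ? 0x00010000u : 0u;
}

/* Pick the status register and mask per flavour, then poll with a bound. */
static int wait_clock_stable(bool is_apu)
{
        int reg = is_apu ? REG_GNB_CLK3_STATUS : REG_CG_ECLK_STATUS;
        uint32_t mask = is_apu ? 0x00010000u  /* GNB status bit, from the patch */
                               : 0x00000001u; /* assumed ECLK_STATUS bit */

        for (int i = 0; i < 100; i++) {
                if (rreg32_smc(reg) & mask)
                        return 0;
                /* the kernel mdelay(10)s between polls; omitted host-side */
        }
        return -1; /* -ETIMEDOUT in the kernel */
}

int main(void)
{
        printf("apu: %d, dgpu: %d\n",
               wait_clock_stable(true), wait_clock_stable(false));
        return 0;
}

With the fake hardware above, the dGPU path deliberately exhausts its 100 polls and times out, exercising both outcomes of the bound.
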
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 5b124a67404c..e6ca72c0d347 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -9,14 +9,6 @@ config DRM_AMD_DC
9 support for AMDGPU. This adds required support for Vega and 9 support for AMDGPU. This adds required support for Vega and
10 Raven ASICs. 10 Raven ASICs.
11 11
12config DRM_AMD_DC_PRE_VEGA
13 bool "DC support for Polaris and older ASICs"
14 default y
15 help
16 Choose this option to enable the new DC support for older asics
17 by default. This includes Polaris, Carrizo, Tonga, Bonaire,
18 and Hawaii.
19
20config DRM_AMD_DC_FBC 12config DRM_AMD_DC_FBC
21 bool "AMD FBC - Enable Frame Buffer Compression" 13 bool "AMD FBC - Enable Frame Buffer Compression"
22 depends on DRM_AMD_DC 14 depends on DRM_AMD_DC
@@ -42,4 +34,10 @@ config DEBUG_KERNEL_DC
42 if you want to hit 34 if you want to hit
43 kgdb_break in assert. 35
44 36
37config DRM_AMD_DC_VEGAM
38 bool "VEGAM support"
39 depends on DRM_AMD_DC
40 help
41 Choose this option if you want to have
42 VEGAM support for the display engine
45endmenu 43endmenu
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1dd1142246c2..f2f54a9df56f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -433,11 +433,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
433 433
434 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; 434 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
435 435
436 if (amdgpu_dc_log)
437 init_data.log_mask = DC_DEFAULT_LOG_MASK;
438 else
439 init_data.log_mask = DC_MIN_LOG_MASK;
440
441 /* 436 /*
442 * TODO debug why this doesn't work on Raven 437 * TODO debug why this doesn't work on Raven
443 */ 438 */
@@ -649,18 +644,6 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
649static int dm_resume(void *handle) 644static int dm_resume(void *handle)
650{ 645{
651 struct amdgpu_device *adev = handle; 646 struct amdgpu_device *adev = handle;
652 struct amdgpu_display_manager *dm = &adev->dm;
653 int ret = 0;
654
655 /* power on hardware */
656 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
657
658 ret = amdgpu_dm_display_resume(adev);
659 return ret;
660}
661
662int amdgpu_dm_display_resume(struct amdgpu_device *adev)
663{
664 struct drm_device *ddev = adev->ddev; 647 struct drm_device *ddev = adev->ddev;
665 struct amdgpu_display_manager *dm = &adev->dm; 648 struct amdgpu_display_manager *dm = &adev->dm;
666 struct amdgpu_dm_connector *aconnector; 649 struct amdgpu_dm_connector *aconnector;
@@ -671,10 +654,12 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
671 struct drm_plane *plane; 654 struct drm_plane *plane;
672 struct drm_plane_state *new_plane_state; 655 struct drm_plane_state *new_plane_state;
673 struct dm_plane_state *dm_new_plane_state; 656 struct dm_plane_state *dm_new_plane_state;
674 657 int ret;
675 int ret = 0;
676 int i; 658 int i;
677 659
660 /* power on hardware */
661 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
662
678 /* program HPD filter */ 663 /* program HPD filter */
679 dc_resume(dm->dc); 664 dc_resume(dm->dc);
680 665
@@ -688,8 +673,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
688 amdgpu_dm_irq_resume_early(adev); 673 amdgpu_dm_irq_resume_early(adev);
689 674
690 /* Do detection */ 675
691 list_for_each_entry(connector, 676 list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
692 &ddev->mode_config.connector_list, head) {
693 aconnector = to_amdgpu_dm_connector(connector); 677 aconnector = to_amdgpu_dm_connector(connector);
694 678
695 /* 679 /*
@@ -711,7 +695,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
711 } 695 }
712 696
713 /* Force mode set in atomic commit */ 697
714 for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) 698 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
715 new_crtc_state->active_changed = true; 699 new_crtc_state->active_changed = true;
716 700
717 /* 701 /*
@@ -719,7 +703,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
719 * them here, since they were duplicated as part of the suspend 703 * them here, since they were duplicated as part of the suspend
720 * procedure. 704 * procedure.
721 */ 705 */
722 for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) { 706 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
723 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 707 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
724 if (dm_new_crtc_state->stream) { 708 if (dm_new_crtc_state->stream) {
725 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 709 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
@@ -728,7 +712,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
728 } 712 }
729 } 713 }
730 714
731 for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) { 715 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
732 dm_new_plane_state = to_dm_plane_state(new_plane_state); 716 dm_new_plane_state = to_dm_plane_state(new_plane_state);
733 if (dm_new_plane_state->dc_state) { 717 if (dm_new_plane_state->dc_state) {
734 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 718 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
@@ -737,9 +721,9 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
737 } 721 }
738 } 722 }
739 723
740 ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state); 724 ret = drm_atomic_helper_resume(ddev, dm->cached_state);
741 725
742 adev->dm.cached_state = NULL; 726 dm->cached_state = NULL;
743 727
744 amdgpu_dm_irq_resume_late(adev); 728 amdgpu_dm_irq_resume_late(adev);
745 729
@@ -1529,6 +1513,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1529 case CHIP_POLARIS11: 1513 case CHIP_POLARIS11:
1530 case CHIP_POLARIS10: 1514 case CHIP_POLARIS10:
1531 case CHIP_POLARIS12: 1515 case CHIP_POLARIS12:
1516#if defined(CONFIG_DRM_AMD_DC_VEGAM)
1517 case CHIP_VEGAM:
1518#endif
1532 case CHIP_VEGA10: 1519 case CHIP_VEGA10:
1533 case CHIP_VEGA12: 1520 case CHIP_VEGA12:
1534 if (dce110_register_irq_handlers(dm->adev)) { 1521 if (dce110_register_irq_handlers(dm->adev)) {
@@ -1549,7 +1536,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1549 break; 1536 break;
1550#endif 1537#endif
1551 default: 1538 default:
1552 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); 1539 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1553 goto fail; 1540 goto fail;
1554 } 1541 }
1555 1542
@@ -1657,7 +1644,6 @@ static ssize_t s3_debug_store(struct device *device,
1657 if (ret == 0) { 1644 if (ret == 0) {
1658 if (s3_state) { 1645 if (s3_state) {
1659 dm_resume(adev); 1646 dm_resume(adev);
1660 amdgpu_dm_display_resume(adev);
1661 drm_kms_helper_hotplug_event(adev->ddev); 1647 drm_kms_helper_hotplug_event(adev->ddev);
1662 } else 1648 } else
1663 dm_suspend(adev); 1649 dm_suspend(adev);
@@ -1722,6 +1708,9 @@ static int dm_early_init(void *handle)
1722 adev->mode_info.plane_type = dm_plane_type_default; 1708 adev->mode_info.plane_type = dm_plane_type_default;
1723 break; 1709 break;
1724 case CHIP_POLARIS10: 1710 case CHIP_POLARIS10:
1711#if defined(CONFIG_DRM_AMD_DC_VEGAM)
1712 case CHIP_VEGAM:
1713#endif
1725 adev->mode_info.num_crtc = 6; 1714 adev->mode_info.num_crtc = 6;
1726 adev->mode_info.num_hpd = 6; 1715 adev->mode_info.num_hpd = 6;
1727 adev->mode_info.num_dig = 6; 1716 adev->mode_info.num_dig = 6;
@@ -1743,7 +1732,7 @@ static int dm_early_init(void *handle)
1743 break; 1732 break;
1744#endif 1733#endif
1745 default: 1734 default:
1746 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); 1735 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1747 return -EINVAL; 1736 return -EINVAL;
1748 } 1737 }
1749 1738
@@ -1848,7 +1837,7 @@ static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1848static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, 1837static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1849 uint64_t *tiling_flags) 1838 uint64_t *tiling_flags)
1850{ 1839{
1851 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); 1840 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
1852 int r = amdgpu_bo_reserve(rbo, false); 1841 int r = amdgpu_bo_reserve(rbo, false);
1853 1842
1854 if (unlikely(r)) { 1843 if (unlikely(r)) {
@@ -2017,7 +2006,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
2017 const struct amdgpu_framebuffer *amdgpu_fb = 2006 const struct amdgpu_framebuffer *amdgpu_fb =
2018 to_amdgpu_framebuffer(plane_state->fb); 2007 to_amdgpu_framebuffer(plane_state->fb);
2019 const struct drm_crtc *crtc = plane_state->crtc; 2008 const struct drm_crtc *crtc = plane_state->crtc;
2020 struct dc_transfer_func *input_tf;
2021 int ret = 0; 2009 int ret = 0;
2022 2010
2023 if (!fill_rects_from_plane_state(plane_state, dc_plane_state)) 2011 if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
@@ -2031,13 +2019,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
2031 if (ret) 2019 if (ret)
2032 return ret; 2020 return ret;
2033 2021
2034 input_tf = dc_create_transfer_func();
2035
2036 if (input_tf == NULL)
2037 return -ENOMEM;
2038
2039 dc_plane_state->in_transfer_func = input_tf;
2040
2041 /* 2022 /*
2042 * Always set input transfer function, since plane state is refreshed 2023 * Always set input transfer function, since plane state is refreshed
2043 * every time. 2024 * every time.
@@ -2206,7 +2187,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2206 const struct drm_connector *connector) 2187 const struct drm_connector *connector)
2207{ 2188{
2208 struct dc_crtc_timing *timing_out = &stream->timing; 2189 struct dc_crtc_timing *timing_out = &stream->timing;
2209 struct dc_transfer_func *tf = dc_create_transfer_func();
2210 2190
2211 memset(timing_out, 0, sizeof(struct dc_crtc_timing)); 2191 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2212 2192
@@ -2250,9 +2230,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2250 2230
2251 stream->output_color_space = get_output_color_space(timing_out); 2231 stream->output_color_space = get_output_color_space(timing_out);
2252 2232
2253 tf->type = TF_TYPE_PREDEFINED; 2233 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
2254 tf->tf = TRANSFER_FUNCTION_SRGB; 2234 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
2255 stream->out_transfer_func = tf;
2256} 2235}
2257 2236
2258static void fill_audio_info(struct audio_info *audio_info, 2237static void fill_audio_info(struct audio_info *audio_info,
@@ -2488,6 +2467,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2488 2467
2489 update_stream_signal(stream); 2468 update_stream_signal(stream);
2490 2469
2470 if (dm_state && dm_state->freesync_capable)
2471 stream->ignore_msa_timing_param = true;
2472
2491 return stream; 2473 return stream;
2492} 2474}
2493 2475
@@ -2710,18 +2692,15 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
2710 const struct dc_link *link = aconnector->dc_link; 2692 const struct dc_link *link = aconnector->dc_link;
2711 struct amdgpu_device *adev = connector->dev->dev_private; 2693 struct amdgpu_device *adev = connector->dev->dev_private;
2712 struct amdgpu_display_manager *dm = &adev->dm; 2694 struct amdgpu_display_manager *dm = &adev->dm;
2695
2713#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 2696#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2714 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 2697 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2715 2698
2716 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && 2699 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2717 link->type != dc_connection_none) { 2700 link->type != dc_connection_none &&
2718 amdgpu_dm_register_backlight_device(dm); 2701 dm->backlight_dev) {
2719 2702 backlight_device_unregister(dm->backlight_dev);
2720 if (dm->backlight_dev) { 2703 dm->backlight_dev = NULL;
2721 backlight_device_unregister(dm->backlight_dev);
2722 dm->backlight_dev = NULL;
2723 }
2724
2725 } 2704 }
2726#endif 2705#endif
2727 drm_connector_unregister(connector); 2706 drm_connector_unregister(connector);
@@ -2855,7 +2834,7 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
2855 create_eml_sink(aconnector); 2834 create_eml_sink(aconnector);
2856} 2835}
2857 2836
2858int amdgpu_dm_connector_mode_valid(struct drm_connector *connector, 2837enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
2859 struct drm_display_mode *mode) 2838 struct drm_display_mode *mode)
2860{ 2839{
2861 int result = MODE_ERROR; 2840 int result = MODE_ERROR;
@@ -3058,8 +3037,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3058 } 3037 }
3059 3038
3060 afb = to_amdgpu_framebuffer(new_state->fb); 3039 afb = to_amdgpu_framebuffer(new_state->fb);
3061 3040 obj = new_state->fb->obj[0];
3062 obj = afb->obj;
3063 rbo = gem_to_amdgpu_bo(obj); 3041 rbo = gem_to_amdgpu_bo(obj);
3064 adev = amdgpu_ttm_adev(rbo->tbo.bdev); 3042 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
3065 r = amdgpu_bo_reserve(rbo, false); 3043 r = amdgpu_bo_reserve(rbo, false);
@@ -3067,12 +3045,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3067 return r; 3045 return r;
3068 3046
3069 if (plane->type != DRM_PLANE_TYPE_CURSOR) 3047 if (plane->type != DRM_PLANE_TYPE_CURSOR)
3070 domain = amdgpu_display_framebuffer_domains(adev); 3048 domain = amdgpu_display_supported_domains(adev);
3071 else 3049 else
3072 domain = AMDGPU_GEM_DOMAIN_VRAM; 3050 domain = AMDGPU_GEM_DOMAIN_VRAM;
3073 3051
3074 r = amdgpu_bo_pin(rbo, domain, &afb->address); 3052 r = amdgpu_bo_pin(rbo, domain, &afb->address);
3075
3076 amdgpu_bo_unreserve(rbo); 3053 amdgpu_bo_unreserve(rbo);
3077 3054
3078 if (unlikely(r != 0)) { 3055 if (unlikely(r != 0)) {
@@ -3123,14 +3100,12 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3123 struct drm_plane_state *old_state) 3100 struct drm_plane_state *old_state)
3124{ 3101{
3125 struct amdgpu_bo *rbo; 3102 struct amdgpu_bo *rbo;
3126 struct amdgpu_framebuffer *afb;
3127 int r; 3103 int r;
3128 3104
3129 if (!old_state->fb) 3105 if (!old_state->fb)
3130 return; 3106 return;
3131 3107
3132 afb = to_amdgpu_framebuffer(old_state->fb); 3108 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
3133 rbo = gem_to_amdgpu_bo(afb->obj);
3134 r = amdgpu_bo_reserve(rbo, false); 3109 r = amdgpu_bo_reserve(rbo, false);
3135 if (unlikely(r)) { 3110 if (unlikely(r)) {
3136 DRM_ERROR("failed to reserve rbo before unpin\n"); 3111 DRM_ERROR("failed to reserve rbo before unpin\n");
@@ -3773,7 +3748,7 @@ static void remove_stream(struct amdgpu_device *adev,
3773static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, 3748static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3774 struct dc_cursor_position *position) 3749 struct dc_cursor_position *position)
3775{ 3750{
3776 struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc); 3751 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3777 int x, y; 3752 int x, y;
3778 int xorigin = 0, yorigin = 0; 3753 int xorigin = 0, yorigin = 0;
3779 3754
@@ -3905,7 +3880,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3905 int r, vpos, hpos; 3880 int r, vpos, hpos;
3906 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3881 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3907 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 3882 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
3908 struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj); 3883 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
3909 struct amdgpu_device *adev = crtc->dev->dev_private; 3884 struct amdgpu_device *adev = crtc->dev->dev_private;
3910 bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; 3885 bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
3911 struct dc_flip_addrs addr = { {0} }; 3886 struct dc_flip_addrs addr = { {0} };
@@ -3986,6 +3961,96 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3986 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 3961 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3987} 3962}
3988 3963
3964/*
3965 * TODO this whole function needs to go
3966 *
3967 * dc_surface_update is needlessly complex. See if we can just replace this
3968 * with a dc_plane_state and follow the atomic model a bit more closely here.
3969 */
3970static bool commit_planes_to_stream(
3971 struct dc *dc,
3972 struct dc_plane_state **plane_states,
3973 uint8_t new_plane_count,
3974 struct dm_crtc_state *dm_new_crtc_state,
3975 struct dm_crtc_state *dm_old_crtc_state,
3976 struct dc_state *state)
3977{
3978 /* no need to dynamically allocate this. it's pretty small */
3979 struct dc_surface_update updates[MAX_SURFACES];
3980 struct dc_flip_addrs *flip_addr;
3981 struct dc_plane_info *plane_info;
3982 struct dc_scaling_info *scaling_info;
3983 int i;
3984 struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
3985 struct dc_stream_update *stream_update =
3986 kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
3987
3988 if (!stream_update) {
3989 BREAK_TO_DEBUGGER();
3990 return false;
3991 }
3992
3993 flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
3994 GFP_KERNEL);
3995 plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
3996 GFP_KERNEL);
3997 scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
3998 GFP_KERNEL);
3999
4000 if (!flip_addr || !plane_info || !scaling_info) {
4001 kfree(flip_addr);
4002 kfree(plane_info);
4003 kfree(scaling_info);
4004 kfree(stream_update);
4005 return false;
4006 }
4007
4008 memset(updates, 0, sizeof(updates));
4009
4010 stream_update->src = dc_stream->src;
4011 stream_update->dst = dc_stream->dst;
4012 stream_update->out_transfer_func = dc_stream->out_transfer_func;
4013
4014 for (i = 0; i < new_plane_count; i++) {
4015 updates[i].surface = plane_states[i];
4016 updates[i].gamma =
4017 (struct dc_gamma *)plane_states[i]->gamma_correction;
4018 updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
4019 flip_addr[i].address = plane_states[i]->address;
4020 flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
4021 plane_info[i].color_space = plane_states[i]->color_space;
4022 plane_info[i].format = plane_states[i]->format;
4023 plane_info[i].plane_size = plane_states[i]->plane_size;
4024 plane_info[i].rotation = plane_states[i]->rotation;
4025 plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
4026 plane_info[i].stereo_format = plane_states[i]->stereo_format;
4027 plane_info[i].tiling_info = plane_states[i]->tiling_info;
4028 plane_info[i].visible = plane_states[i]->visible;
4029 plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
4030 plane_info[i].dcc = plane_states[i]->dcc;
4031 scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
4032 scaling_info[i].src_rect = plane_states[i]->src_rect;
4033 scaling_info[i].dst_rect = plane_states[i]->dst_rect;
4034 scaling_info[i].clip_rect = plane_states[i]->clip_rect;
4035
4036 updates[i].flip_addr = &flip_addr[i];
4037 updates[i].plane_info = &plane_info[i];
4038 updates[i].scaling_info = &scaling_info[i];
4039 }
4040
4041 dc_commit_updates_for_stream(
4042 dc,
4043 updates,
4044 new_plane_count,
4045 dc_stream, stream_update, plane_states, state);
4046
4047 kfree(flip_addr);
4048 kfree(plane_info);
4049 kfree(scaling_info);
4050 kfree(stream_update);
4051 return true;
4052}
4053
3989static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 4054static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
3990 struct drm_device *dev, 4055 struct drm_device *dev,
3991 struct amdgpu_display_manager *dm, 4056 struct amdgpu_display_manager *dm,
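The new commit_planes_to_stream() allocates three MAX_SURFACES-sized arrays plus the stream update and, on any allocation failure, frees all four unconditionally; this works because kfree(NULL) is defined to be a no-op, so a single cleanup block covers every partial-allocation case. A minimal sketch of that idiom with hypothetical arrays a and b:

        /* Sketch of the "allocate everything, check once, free everything"
         * idiom used above; kfree(NULL) does nothing, so no per-pointer tests. */
        static bool example_alloc_pair(int n, int **pa, int **pb)
        {
                int *a = kcalloc(n, sizeof(*a), GFP_KERNEL);
                int *b = kcalloc(n, sizeof(*b), GFP_KERNEL);

                if (!a || !b) {
                        kfree(a);
                        kfree(b);
                        return false;
                }
                *pa = a;
                *pb = b;
                return true;
        }
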
@@ -4001,6 +4066,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4001 struct drm_crtc_state *new_pcrtc_state = 4066 struct drm_crtc_state *new_pcrtc_state =
4002 drm_atomic_get_new_crtc_state(state, pcrtc); 4067 drm_atomic_get_new_crtc_state(state, pcrtc);
4003 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 4068 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
4069 struct dm_crtc_state *dm_old_crtc_state =
4070 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
4004 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 4071 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4005 int planes_count = 0; 4072 int planes_count = 0;
4006 unsigned long flags; 4073 unsigned long flags;
@@ -4037,7 +4104,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4037 } 4104 }
4038 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 4105 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4039 4106
4040 if (!pflip_needed) { 4107 if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
4041 WARN_ON(!dm_new_plane_state->dc_state); 4108 WARN_ON(!dm_new_plane_state->dc_state);
4042 4109
4043 plane_states_constructed[planes_count] = dm_new_plane_state->dc_state; 4110 plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
@@ -4079,10 +4146,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4079 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 4146 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
4080 } 4147 }
4081 4148
4082 if (false == dc_commit_planes_to_stream(dm->dc, 4149
4150 if (false == commit_planes_to_stream(dm->dc,
4083 plane_states_constructed, 4151 plane_states_constructed,
4084 planes_count, 4152 planes_count,
4085 dc_stream_attach, 4153 acrtc_state,
4154 dm_old_crtc_state,
4086 dm_state->context)) 4155 dm_state->context))
4087 dm_error("%s: Failed to attach plane!\n", __func__); 4156 dm_error("%s: Failed to attach plane!\n", __func__);
4088 } else { 4157 } else {
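The widened condition means overlay planes never take the fast page-flip shortcut: they are always rebuilt through the dc_state path and handed to commit_planes_to_stream(), which now also receives the old CRTC state so dc can diff the stream settings. The predicate, factored out as a sketch:

        /* Sketch: only non-overlay planes may use the fast-flip shortcut. */
        static bool example_can_fast_flip(const struct drm_plane *plane,
                                          bool pflip_needed)
        {
                return pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY;
        }
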
@@ -4307,8 +4376,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4307 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 4376 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4308 struct dc_stream_status *status = NULL; 4377 struct dc_stream_status *status = NULL;
4309 4378
4310 if (acrtc) 4379 if (acrtc) {
4311 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 4380 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4381 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
4382 }
4312 4383
4313 /* Skip any modesets/resets */ 4384 /* Skip any modesets/resets */
4314 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 4385 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
@@ -4331,11 +4402,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4331 WARN_ON(!status->plane_count); 4402 WARN_ON(!status->plane_count);
4332 4403
4333 /*TODO How it works with MPO ?*/ 4404 /*TODO How it works with MPO ?*/
4334 if (!dc_commit_planes_to_stream( 4405 if (!commit_planes_to_stream(
4335 dm->dc, 4406 dm->dc,
4336 status->plane_states, 4407 status->plane_states,
4337 status->plane_count, 4408 status->plane_count,
4338 dm_new_crtc_state->stream, 4409 dm_new_crtc_state,
4410 to_dm_crtc_state(old_crtc_state),
4339 dm_state->context)) 4411 dm_state->context))
4340 dm_error("%s: Failed to update stream scaling!\n", __func__); 4412 dm_error("%s: Failed to update stream scaling!\n", __func__);
4341 } 4413 }
@@ -4578,7 +4650,7 @@ static int dm_update_crtcs_state(struct dc *dc,
4578 if (aconnector && enable) { 4650 if (aconnector && enable) {
4579 // Make sure fake sink is created in plug-in scenario 4651 // Make sure fake sink is created in plug-in scenario
4580 new_con_state = drm_atomic_get_connector_state(state, 4652 new_con_state = drm_atomic_get_connector_state(state,
4581 &aconnector->base); 4653 &aconnector->base);
4582 4654
4583 if (IS_ERR(new_con_state)) { 4655 if (IS_ERR(new_con_state)) {
4584 ret = PTR_ERR_OR_ZERO(new_con_state); 4656 ret = PTR_ERR_OR_ZERO(new_con_state);
@@ -4755,7 +4827,8 @@ static int dm_update_planes_state(struct dc *dc,
4755 4827
4756 /* Remove any changed/removed planes */ 4828 /* Remove any changed/removed planes */
4757 if (!enable) { 4829 if (!enable) {
4758 if (pflip_needed) 4830 if (pflip_needed &&
4831 plane->type != DRM_PLANE_TYPE_OVERLAY)
4759 continue; 4832 continue;
4760 4833
4761 if (!old_plane_crtc) 4834 if (!old_plane_crtc)
@@ -4802,7 +4875,8 @@ static int dm_update_planes_state(struct dc *dc,
4802 if (!dm_new_crtc_state->stream) 4875 if (!dm_new_crtc_state->stream)
4803 continue; 4876 continue;
4804 4877
4805 if (pflip_needed) 4878 if (pflip_needed &&
4879 plane->type != DRM_PLANE_TYPE_OVERLAY)
4806 continue; 4880 continue;
4807 4881
4808 WARN_ON(dm_new_plane_state->dc_state); 4882 WARN_ON(dm_new_plane_state->dc_state);
@@ -5009,17 +5083,24 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
5009 struct edid *edid) 5083 struct edid *edid)
5010{ 5084{
5011 int i; 5085 int i;
5012 uint64_t val_capable;
5013 bool edid_check_required; 5086 bool edid_check_required;
5014 struct detailed_timing *timing; 5087 struct detailed_timing *timing;
5015 struct detailed_non_pixel *data; 5088 struct detailed_non_pixel *data;
5016 struct detailed_data_monitor_range *range; 5089 struct detailed_data_monitor_range *range;
5017 struct amdgpu_dm_connector *amdgpu_dm_connector = 5090 struct amdgpu_dm_connector *amdgpu_dm_connector =
5018 to_amdgpu_dm_connector(connector); 5091 to_amdgpu_dm_connector(connector);
5092 struct dm_connector_state *dm_con_state;
5019 5093
5020 struct drm_device *dev = connector->dev; 5094 struct drm_device *dev = connector->dev;
5021 struct amdgpu_device *adev = dev->dev_private; 5095 struct amdgpu_device *adev = dev->dev_private;
5022 5096
5097 if (!connector->state) {
5098 DRM_ERROR("%s - Connector has no state", __func__);
5099 return;
5100 }
5101
5102 dm_con_state = to_dm_connector_state(connector->state);
5103
5023 edid_check_required = false; 5104 edid_check_required = false;
5024 if (!amdgpu_dm_connector->dc_sink) { 5105 if (!amdgpu_dm_connector->dc_sink) {
5025 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); 5106 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
@@ -5038,7 +5119,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
5038 amdgpu_dm_connector); 5119 amdgpu_dm_connector);
5039 } 5120 }
5040 } 5121 }
5041 val_capable = 0; 5122 dm_con_state->freesync_capable = false;
5042 if (edid_check_required == true && (edid->version > 1 || 5123 if (edid_check_required == true && (edid->version > 1 ||
5043 (edid->version == 1 && edid->revision > 1))) { 5124 (edid->version == 1 && edid->revision > 1))) {
5044 for (i = 0; i < 4; i++) { 5125 for (i = 0; i < 4; i++) {
@@ -5074,7 +5155,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
5074 amdgpu_dm_connector->min_vfreq * 1000000; 5155 amdgpu_dm_connector->min_vfreq * 1000000;
5075 amdgpu_dm_connector->caps.max_refresh_in_micro_hz = 5156 amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
5076 amdgpu_dm_connector->max_vfreq * 1000000; 5157 amdgpu_dm_connector->max_vfreq * 1000000;
5077 val_capable = 1; 5158 dm_con_state->freesync_capable = true;
5078 } 5159 }
5079 } 5160 }
5080 5161
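Replacing the local val_capable with dm_con_state->freesync_capable parks the result in the connector's atomic state, which is why the function now bails out early if that state does not exist yet. A hypothetical consumer would read it back the same way:

        /* Hypothetical consumer: read the cached capability from atomic state. */
        static bool example_freesync_capable(struct drm_connector *connector)
        {
                struct dm_connector_state *s;

                if (!connector->state)
                        return false;

                s = to_dm_connector_state(connector->state);
                return s->freesync_capable;
        }
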
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b68400c1154b..d5aa89ad5571 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -28,7 +28,6 @@
28 28
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include <drm/drm_atomic.h> 30#include <drm/drm_atomic.h>
31#include "dc.h"
32 31
33/* 32/*
34 * This file contains the definition for amdgpu_display_manager 33 * This file contains the definition for amdgpu_display_manager
@@ -53,6 +52,7 @@
53struct amdgpu_device; 52struct amdgpu_device;
54struct drm_device; 53struct drm_device;
55struct amdgpu_dm_irq_handler_data; 54struct amdgpu_dm_irq_handler_data;
55struct dc;
56 56
57struct amdgpu_dm_prev_state { 57struct amdgpu_dm_prev_state {
58 struct drm_framebuffer *fb; 58 struct drm_framebuffer *fb;
@@ -220,6 +220,7 @@ struct dm_connector_state {
220 uint8_t underscan_hborder; 220 uint8_t underscan_hborder;
221 bool underscan_enable; 221 bool underscan_enable;
222 struct mod_freesync_user_enable user_enable; 222 struct mod_freesync_user_enable user_enable;
223 bool freesync_capable;
223}; 224};
224 225
225#define to_dm_connector_state(x)\ 226#define to_dm_connector_state(x)\
@@ -246,7 +247,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
246 struct dc_link *link, 247 struct dc_link *link,
247 int link_index); 248 int link_index);
248 249
249int amdgpu_dm_connector_mode_valid(struct drm_connector *connector, 250enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
250 struct drm_display_mode *mode); 251 struct drm_display_mode *mode);
251 252
252void dm_restore_drm_connector_state(struct drm_device *dev, 253void dm_restore_drm_connector_state(struct drm_device *dev,
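Swapping the dc.h include for a plain "struct dc;" forward declaration works because this header only ever stores pointers to dc objects; a pointer to an incomplete type has a known size, and only translation units that actually dereference it (amdgpu_dm_color.c gains the include below) need the full definition. In miniature:

        /* header: a pointer member needs only an incomplete type */
        struct dc;

        struct example_dm {
                struct dc *dc;  /* fine: the size of a pointer is known */
        };

        /* .c file: include dc.h only where struct dc members are accessed */
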
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 25f064c01038..e3d90e918d1b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -25,6 +25,7 @@
25 25
26#include "amdgpu_mode.h" 26#include "amdgpu_mode.h"
27#include "amdgpu_dm.h" 27#include "amdgpu_dm.h"
28#include "dc.h"
28#include "modules/color/color_gamma.h" 29#include "modules/color/color_gamma.h"
29 30
30#define MAX_DRM_LUT_VALUE 0xFFFF 31#define MAX_DRM_LUT_VALUE 0xFFFF
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index ace9ad578ca0..4304d9e408b8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -83,21 +83,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
83 enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? 83 enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
84 I2C_MOT_TRUE : I2C_MOT_FALSE; 84 I2C_MOT_TRUE : I2C_MOT_FALSE;
85 enum ddc_result res; 85 enum ddc_result res;
86 ssize_t read_bytes; 86 uint32_t read_bytes = msg->size;
87 87
88 if (WARN_ON(msg->size > 16)) 88 if (WARN_ON(msg->size > 16))
89 return -E2BIG; 89 return -E2BIG;
90 90
91 switch (msg->request & ~DP_AUX_I2C_MOT) { 91 switch (msg->request & ~DP_AUX_I2C_MOT) {
92 case DP_AUX_NATIVE_READ: 92 case DP_AUX_NATIVE_READ:
93 read_bytes = dal_ddc_service_read_dpcd_data( 93 res = dal_ddc_service_read_dpcd_data(
94 TO_DM_AUX(aux)->ddc_service, 94 TO_DM_AUX(aux)->ddc_service,
95 false, 95 false,
96 I2C_MOT_UNDEF, 96 I2C_MOT_UNDEF,
97 msg->address, 97 msg->address,
98 msg->buffer, 98 msg->buffer,
99 msg->size); 99 msg->size,
100 return read_bytes; 100 &read_bytes);
101 break;
101 case DP_AUX_NATIVE_WRITE: 102 case DP_AUX_NATIVE_WRITE:
102 res = dal_ddc_service_write_dpcd_data( 103 res = dal_ddc_service_write_dpcd_data(
103 TO_DM_AUX(aux)->ddc_service, 104 TO_DM_AUX(aux)->ddc_service,
@@ -108,14 +109,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
108 msg->size); 109 msg->size);
109 break; 110 break;
110 case DP_AUX_I2C_READ: 111 case DP_AUX_I2C_READ:
111 read_bytes = dal_ddc_service_read_dpcd_data( 112 res = dal_ddc_service_read_dpcd_data(
112 TO_DM_AUX(aux)->ddc_service, 113 TO_DM_AUX(aux)->ddc_service,
113 true, 114 true,
114 mot, 115 mot,
115 msg->address, 116 msg->address,
116 msg->buffer, 117 msg->buffer,
117 msg->size); 118 msg->size,
118 return read_bytes; 119 &read_bytes);
120 break;
119 case DP_AUX_I2C_WRITE: 121 case DP_AUX_I2C_WRITE:
120 res = dal_ddc_service_write_dpcd_data( 122 res = dal_ddc_service_write_dpcd_data(
121 TO_DM_AUX(aux)->ddc_service, 123 TO_DM_AUX(aux)->ddc_service,
@@ -137,7 +139,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
137 r == DDC_RESULT_SUCESSFULL); 139 r == DDC_RESULT_SUCESSFULL);
138#endif 140#endif
139 141
140 return msg->size; 142 if (res != DDC_RESULT_SUCESSFULL)
143 return -EIO;
144 return read_bytes;
141} 145}
142 146
143static enum drm_connector_status 147static enum drm_connector_status
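dal_ddc_service_read_dpcd_data() now reports the transferred length through a uint32_t out-parameter and its status through the ddc_result return value, which lets dm_dp_aux_transfer() return -EIO for any failed request and the (possibly short) byte count on success, as the DRM aux core expects. A sketch of the calling convention, with read_dpcd() standing in for the real service call:

        /* Sketch: status via return code, length via out-parameter.
         * read_dpcd() is hypothetical. */
        static ssize_t example_aux_read(struct drm_dp_aux_msg *msg)
        {
                uint32_t read_bytes = msg->size;
                enum ddc_result res = read_dpcd(msg->address, msg->buffer,
                                                msg->size, &read_bytes);

                if (res != DDC_RESULT_SUCESSFULL)
                        return -EIO;    /* negative errno on failure */
                return read_bytes;      /* may be shorter than requested */
        }
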
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 89342b48be6b..0229c7edb8ad 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -37,8 +37,17 @@
37 37
38unsigned long long dm_get_timestamp(struct dc_context *ctx) 38unsigned long long dm_get_timestamp(struct dc_context *ctx)
39{ 39{
40 /* TODO: return actual timestamp */ 40 struct timespec64 time;
41 return 0; 41
42 getrawmonotonic64(&time);
43 return timespec64_to_ns(&time);
44}
45
46unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
47 unsigned long long current_time_stamp,
48 unsigned long long last_time_stamp)
49{
50 return current_time_stamp - last_time_stamp;
42} 51}
43 52
44void dm_perf_trace_timestamp(const char *func_name, unsigned int line) 53void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
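dm_get_timestamp() stops returning the 0 placeholder and reads the raw monotonic clock in nanoseconds (getrawmonotonic64() in this kernel generation), and dm_get_elapse_time_in_ns() is a plain subtraction since both stamps come from the same clock. Typical usage:

        /* Sketch: timing a section of code with the two services above. */
        unsigned long long start = dm_get_timestamp(ctx);

        /* ... work being measured ... */

        unsigned long long elapsed_ns =
                dm_get_elapse_time_in_ns(ctx, dm_get_timestamp(ctx), start);
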
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 8a9bba879207..7191c3213743 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -26,13 +26,13 @@
26#include "dm_services.h" 26#include "dm_services.h"
27#include "include/fixed31_32.h" 27#include "include/fixed31_32.h"
28 28
29static inline uint64_t abs_i64( 29static inline unsigned long long abs_i64(
30 int64_t arg) 30 long long arg)
31{ 31{
32 if (arg > 0) 32 if (arg > 0)
33 return (uint64_t)arg; 33 return (unsigned long long)arg;
34 else 34 else
35 return (uint64_t)(-arg); 35 return (unsigned long long)(-arg);
36} 36}
37 37
38/* 38/*
@@ -40,12 +40,12 @@ static inline uint64_t abs_i64(
40 * result = dividend / divisor 40 * result = dividend / divisor
41 * *remainder = dividend % divisor 41 * *remainder = dividend % divisor
42 */ 42 */
43static inline uint64_t complete_integer_division_u64( 43static inline unsigned long long complete_integer_division_u64(
44 uint64_t dividend, 44 unsigned long long dividend,
45 uint64_t divisor, 45 unsigned long long divisor,
46 uint64_t *remainder) 46 unsigned long long *remainder)
47{ 47{
48 uint64_t result; 48 unsigned long long result;
49 49
50 ASSERT(divisor); 50 ASSERT(divisor);
51 51
@@ -65,29 +65,29 @@ static inline uint64_t complete_integer_division_u64(
65 (FRACTIONAL_PART_MASK & (x)) 65 (FRACTIONAL_PART_MASK & (x))
66 66
67struct fixed31_32 dal_fixed31_32_from_fraction( 67struct fixed31_32 dal_fixed31_32_from_fraction(
68 int64_t numerator, 68 long long numerator,
69 int64_t denominator) 69 long long denominator)
70{ 70{
71 struct fixed31_32 res; 71 struct fixed31_32 res;
72 72
73 bool arg1_negative = numerator < 0; 73 bool arg1_negative = numerator < 0;
74 bool arg2_negative = denominator < 0; 74 bool arg2_negative = denominator < 0;
75 75
76 uint64_t arg1_value = arg1_negative ? -numerator : numerator; 76 unsigned long long arg1_value = arg1_negative ? -numerator : numerator;
77 uint64_t arg2_value = arg2_negative ? -denominator : denominator; 77 unsigned long long arg2_value = arg2_negative ? -denominator : denominator;
78 78
79 uint64_t remainder; 79 unsigned long long remainder;
80 80
81 /* determine integer part */ 81 /* determine integer part */
82 82
83 uint64_t res_value = complete_integer_division_u64( 83 unsigned long long res_value = complete_integer_division_u64(
84 arg1_value, arg2_value, &remainder); 84 arg1_value, arg2_value, &remainder);
85 85
86 ASSERT(res_value <= LONG_MAX); 86 ASSERT(res_value <= LONG_MAX);
87 87
88 /* determine fractional part */ 88 /* determine fractional part */
89 { 89 {
90 uint32_t i = FIXED31_32_BITS_PER_FRACTIONAL_PART; 90 unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
91 91
92 do { 92 do {
93 remainder <<= 1; 93 remainder <<= 1;
@@ -103,14 +103,14 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
103 103
104 /* round up LSB */ 104 /* round up LSB */
105 { 105 {
106 uint64_t summand = (remainder << 1) >= arg2_value; 106 unsigned long long summand = (remainder << 1) >= arg2_value;
107 107
108 ASSERT(res_value <= LLONG_MAX - summand); 108 ASSERT(res_value <= LLONG_MAX - summand);
109 109
110 res_value += summand; 110 res_value += summand;
111 } 111 }
112 112
113 res.value = (int64_t)res_value; 113 res.value = (long long)res_value;
114 114
115 if (arg1_negative ^ arg2_negative) 115 if (arg1_negative ^ arg2_negative)
116 res.value = -res.value; 116 res.value = -res.value;
@@ -119,7 +119,7 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
119} 119}
120 120
121struct fixed31_32 dal_fixed31_32_from_int_nonconst( 121struct fixed31_32 dal_fixed31_32_from_int_nonconst(
122 int64_t arg) 122 long long arg)
123{ 123{
124 struct fixed31_32 res; 124 struct fixed31_32 res;
125 125
@@ -132,7 +132,7 @@ struct fixed31_32 dal_fixed31_32_from_int_nonconst(
132 132
133struct fixed31_32 dal_fixed31_32_shl( 133struct fixed31_32 dal_fixed31_32_shl(
134 struct fixed31_32 arg, 134 struct fixed31_32 arg,
135 uint8_t shift) 135 unsigned char shift)
136{ 136{
137 struct fixed31_32 res; 137 struct fixed31_32 res;
138 138
@@ -181,16 +181,16 @@ struct fixed31_32 dal_fixed31_32_mul(
181 bool arg1_negative = arg1.value < 0; 181 bool arg1_negative = arg1.value < 0;
182 bool arg2_negative = arg2.value < 0; 182 bool arg2_negative = arg2.value < 0;
183 183
184 uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value; 184 unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value;
185 uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value; 185 unsigned long long arg2_value = arg2_negative ? -arg2.value : arg2.value;
186 186
187 uint64_t arg1_int = GET_INTEGER_PART(arg1_value); 187 unsigned long long arg1_int = GET_INTEGER_PART(arg1_value);
188 uint64_t arg2_int = GET_INTEGER_PART(arg2_value); 188 unsigned long long arg2_int = GET_INTEGER_PART(arg2_value);
189 189
190 uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value); 190 unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value);
191 uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value); 191 unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value);
192 192
193 uint64_t tmp; 193 unsigned long long tmp;
194 194
195 res.value = arg1_int * arg2_int; 195 res.value = arg1_int * arg2_int;
196 196
@@ -200,22 +200,22 @@ struct fixed31_32 dal_fixed31_32_mul(
200 200
201 tmp = arg1_int * arg2_fra; 201 tmp = arg1_int * arg2_fra;
202 202
203 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); 203 ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
204 204
205 res.value += tmp; 205 res.value += tmp;
206 206
207 tmp = arg2_int * arg1_fra; 207 tmp = arg2_int * arg1_fra;
208 208
209 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); 209 ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
210 210
211 res.value += tmp; 211 res.value += tmp;
212 212
213 tmp = arg1_fra * arg2_fra; 213 tmp = arg1_fra * arg2_fra;
214 214
215 tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + 215 tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
216 (tmp >= (uint64_t)dal_fixed31_32_half.value); 216 (tmp >= (unsigned long long)dal_fixed31_32_half.value);
217 217
218 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); 218 ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
219 219
220 res.value += tmp; 220 res.value += tmp;
221 221
@@ -230,13 +230,13 @@ struct fixed31_32 dal_fixed31_32_sqr(
230{ 230{
231 struct fixed31_32 res; 231 struct fixed31_32 res;
232 232
233 uint64_t arg_value = abs_i64(arg.value); 233 unsigned long long arg_value = abs_i64(arg.value);
234 234
235 uint64_t arg_int = GET_INTEGER_PART(arg_value); 235 unsigned long long arg_int = GET_INTEGER_PART(arg_value);
236 236
237 uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value); 237 unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value);
238 238
239 uint64_t tmp; 239 unsigned long long tmp;
240 240
241 res.value = arg_int * arg_int; 241 res.value = arg_int * arg_int;
242 242
@@ -246,20 +246,20 @@ struct fixed31_32 dal_fixed31_32_sqr(
246 246
247 tmp = arg_int * arg_fra; 247 tmp = arg_int * arg_fra;
248 248
249 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); 249 ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
250 250
251 res.value += tmp; 251 res.value += tmp;
252 252
253 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); 253 ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
254 254
255 res.value += tmp; 255 res.value += tmp;
256 256
257 tmp = arg_fra * arg_fra; 257 tmp = arg_fra * arg_fra;
258 258
259 tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + 259 tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
260 (tmp >= (uint64_t)dal_fixed31_32_half.value); 260 (tmp >= (unsigned long long)dal_fixed31_32_half.value);
261 261
262 ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); 262 ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
263 263
264 res.value += tmp; 264 res.value += tmp;
265 265
@@ -288,7 +288,7 @@ struct fixed31_32 dal_fixed31_32_sinc(
288 288
289 struct fixed31_32 res = dal_fixed31_32_one; 289 struct fixed31_32 res = dal_fixed31_32_one;
290 290
291 int32_t n = 27; 291 int n = 27;
292 292
293 struct fixed31_32 arg_norm = arg; 293 struct fixed31_32 arg_norm = arg;
294 294
@@ -299,7 +299,7 @@ struct fixed31_32 dal_fixed31_32_sinc(
299 arg_norm, 299 arg_norm,
300 dal_fixed31_32_mul_int( 300 dal_fixed31_32_mul_int(
301 dal_fixed31_32_two_pi, 301 dal_fixed31_32_two_pi,
302 (int32_t)div64_s64( 302 (int)div64_s64(
303 arg_norm.value, 303 arg_norm.value,
304 dal_fixed31_32_two_pi.value))); 304 dal_fixed31_32_two_pi.value)));
305 } 305 }
@@ -343,7 +343,7 @@ struct fixed31_32 dal_fixed31_32_cos(
343 343
344 struct fixed31_32 res = dal_fixed31_32_one; 344 struct fixed31_32 res = dal_fixed31_32_one;
345 345
346 int32_t n = 26; 346 int n = 26;
347 347
348 do { 348 do {
349 res = dal_fixed31_32_sub( 349 res = dal_fixed31_32_sub(
@@ -370,7 +370,7 @@ struct fixed31_32 dal_fixed31_32_cos(
370static struct fixed31_32 fixed31_32_exp_from_taylor_series( 370static struct fixed31_32 fixed31_32_exp_from_taylor_series(
371 struct fixed31_32 arg) 371 struct fixed31_32 arg)
372{ 372{
373 uint32_t n = 9; 373 unsigned int n = 9;
374 374
375 struct fixed31_32 res = dal_fixed31_32_from_fraction( 375 struct fixed31_32 res = dal_fixed31_32_from_fraction(
376 n + 2, 376 n + 2,
@@ -409,7 +409,7 @@ struct fixed31_32 dal_fixed31_32_exp(
409 if (dal_fixed31_32_le( 409 if (dal_fixed31_32_le(
410 dal_fixed31_32_ln2_div_2, 410 dal_fixed31_32_ln2_div_2,
411 dal_fixed31_32_abs(arg))) { 411 dal_fixed31_32_abs(arg))) {
412 int32_t m = dal_fixed31_32_round( 412 int m = dal_fixed31_32_round(
413 dal_fixed31_32_div( 413 dal_fixed31_32_div(
414 arg, 414 arg,
415 dal_fixed31_32_ln2)); 415 dal_fixed31_32_ln2));
@@ -429,7 +429,7 @@ struct fixed31_32 dal_fixed31_32_exp(
429 if (m > 0) 429 if (m > 0)
430 return dal_fixed31_32_shl( 430 return dal_fixed31_32_shl(
431 fixed31_32_exp_from_taylor_series(r), 431 fixed31_32_exp_from_taylor_series(r),
432 (uint8_t)m); 432 (unsigned char)m);
433 else 433 else
434 return dal_fixed31_32_div_int( 434 return dal_fixed31_32_div_int(
435 fixed31_32_exp_from_taylor_series(r), 435 fixed31_32_exp_from_taylor_series(r),
@@ -482,50 +482,50 @@ struct fixed31_32 dal_fixed31_32_pow(
482 arg2)); 482 arg2));
483} 483}
484 484
485int32_t dal_fixed31_32_floor( 485int dal_fixed31_32_floor(
486 struct fixed31_32 arg) 486 struct fixed31_32 arg)
487{ 487{
488 uint64_t arg_value = abs_i64(arg.value); 488 unsigned long long arg_value = abs_i64(arg.value);
489 489
490 if (arg.value >= 0) 490 if (arg.value >= 0)
491 return (int32_t)GET_INTEGER_PART(arg_value); 491 return (int)GET_INTEGER_PART(arg_value);
492 else 492 else
493 return -(int32_t)GET_INTEGER_PART(arg_value); 493 return -(int)GET_INTEGER_PART(arg_value);
494} 494}
495 495
496int32_t dal_fixed31_32_round( 496int dal_fixed31_32_round(
497 struct fixed31_32 arg) 497 struct fixed31_32 arg)
498{ 498{
499 uint64_t arg_value = abs_i64(arg.value); 499 unsigned long long arg_value = abs_i64(arg.value);
500 500
501 const int64_t summand = dal_fixed31_32_half.value; 501 const long long summand = dal_fixed31_32_half.value;
502 502
503 ASSERT(LLONG_MAX - (int64_t)arg_value >= summand); 503 ASSERT(LLONG_MAX - (long long)arg_value >= summand);
504 504
505 arg_value += summand; 505 arg_value += summand;
506 506
507 if (arg.value >= 0) 507 if (arg.value >= 0)
508 return (int32_t)GET_INTEGER_PART(arg_value); 508 return (int)GET_INTEGER_PART(arg_value);
509 else 509 else
510 return -(int32_t)GET_INTEGER_PART(arg_value); 510 return -(int)GET_INTEGER_PART(arg_value);
511} 511}
512 512
513int32_t dal_fixed31_32_ceil( 513int dal_fixed31_32_ceil(
514 struct fixed31_32 arg) 514 struct fixed31_32 arg)
515{ 515{
516 uint64_t arg_value = abs_i64(arg.value); 516 unsigned long long arg_value = abs_i64(arg.value);
517 517
518 const int64_t summand = dal_fixed31_32_one.value - 518 const long long summand = dal_fixed31_32_one.value -
519 dal_fixed31_32_epsilon.value; 519 dal_fixed31_32_epsilon.value;
520 520
521 ASSERT(LLONG_MAX - (int64_t)arg_value >= summand); 521 ASSERT(LLONG_MAX - (long long)arg_value >= summand);
522 522
523 arg_value += summand; 523 arg_value += summand;
524 524
525 if (arg.value >= 0) 525 if (arg.value >= 0)
526 return (int32_t)GET_INTEGER_PART(arg_value); 526 return (int)GET_INTEGER_PART(arg_value);
527 else 527 else
528 return -(int32_t)GET_INTEGER_PART(arg_value); 528 return -(int)GET_INTEGER_PART(arg_value);
529} 529}
530 530
531/* this function is a generic helper to translate fixed point value to 531/* this function is a generic helper to translate fixed point value to
@@ -535,15 +535,15 @@ int32_t dal_fixed31_32_ceil(
535 * part in 32 bits. It is used in hw programming (scaler) 535 * part in 32 bits. It is used in hw programming (scaler)
536 */ 536 */
537 537
538static inline uint32_t ux_dy( 538static inline unsigned int ux_dy(
539 int64_t value, 539 long long value,
540 uint32_t integer_bits, 540 unsigned int integer_bits,
541 uint32_t fractional_bits) 541 unsigned int fractional_bits)
542{ 542{
543 /* 1. create mask of integer part */ 543 /* 1. create mask of integer part */
544 uint32_t result = (1 << integer_bits) - 1; 544 unsigned int result = (1 << integer_bits) - 1;
545 /* 2. mask out fractional part */ 545 /* 2. mask out fractional part */
546 uint32_t fractional_part = FRACTIONAL_PART_MASK & value; 546 unsigned int fractional_part = FRACTIONAL_PART_MASK & value;
547 /* 3. shrink fixed point integer part to be of integer_bits width*/ 547 /* 3. shrink fixed point integer part to be of integer_bits width*/
548 result &= GET_INTEGER_PART(value); 548 result &= GET_INTEGER_PART(value);
549 /* 4. make space for fractional part to be filled in after integer */ 549 /* 4. make space for fractional part to be filled in after integer */
@@ -554,13 +554,13 @@ static inline uint32_t ux_dy(
554 return result | fractional_part; 554 return result | fractional_part;
555} 555}
556 556
557static inline uint32_t clamp_ux_dy( 557static inline unsigned int clamp_ux_dy(
558 int64_t value, 558 long long value,
559 uint32_t integer_bits, 559 unsigned int integer_bits,
560 uint32_t fractional_bits, 560 unsigned int fractional_bits,
561 uint32_t min_clamp) 561 unsigned int min_clamp)
562{ 562{
563 uint32_t truncated_val = ux_dy(value, integer_bits, fractional_bits); 563 unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits);
564 564
565 if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART))) 565 if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
566 return (1 << (integer_bits + fractional_bits)) - 1; 566 return (1 << (integer_bits + fractional_bits)) - 1;
@@ -570,35 +570,35 @@ static inline uint32_t clamp_ux_dy(
570 return min_clamp; 570 return min_clamp;
571} 571}
572 572
573uint32_t dal_fixed31_32_u2d19( 573unsigned int dal_fixed31_32_u2d19(
574 struct fixed31_32 arg) 574 struct fixed31_32 arg)
575{ 575{
576 return ux_dy(arg.value, 2, 19); 576 return ux_dy(arg.value, 2, 19);
577} 577}
578 578
579uint32_t dal_fixed31_32_u0d19( 579unsigned int dal_fixed31_32_u0d19(
580 struct fixed31_32 arg) 580 struct fixed31_32 arg)
581{ 581{
582 return ux_dy(arg.value, 0, 19); 582 return ux_dy(arg.value, 0, 19);
583} 583}
584 584
585uint32_t dal_fixed31_32_clamp_u0d14( 585unsigned int dal_fixed31_32_clamp_u0d14(
586 struct fixed31_32 arg) 586 struct fixed31_32 arg)
587{ 587{
588 return clamp_ux_dy(arg.value, 0, 14, 1); 588 return clamp_ux_dy(arg.value, 0, 14, 1);
589} 589}
590 590
591uint32_t dal_fixed31_32_clamp_u0d10( 591unsigned int dal_fixed31_32_clamp_u0d10(
592 struct fixed31_32 arg) 592 struct fixed31_32 arg)
593{ 593{
594 return clamp_ux_dy(arg.value, 0, 10, 1); 594 return clamp_ux_dy(arg.value, 0, 10, 1);
595} 595}
596 596
597int32_t dal_fixed31_32_s4d19( 597int dal_fixed31_32_s4d19(
598 struct fixed31_32 arg) 598 struct fixed31_32 arg)
599{ 599{
600 if (arg.value < 0) 600 if (arg.value < 0)
601 return -(int32_t)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19); 601 return -(int)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
602 else 602 else
603 return ux_dy(arg.value, 4, 19); 603 return ux_dy(arg.value, 4, 19);
604} 604}
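The fixpt31_32.c hunks are a mechanical type change with no behavioural difference: on every architecture the kernel supports, unsigned long long is exactly 64 bits, so the 31.32 layout (signed integer part in the upper 32 bits, fraction in the lower 32) is unchanged. For orientation:

        /* Illustration of the 31.32 fixed-point encoding used above:
         * value = integer_part * 2^32 + fractional_part */
        long long one  = 1LL << 32;                                 /* 1.0  */
        long long half = 1LL << 31;                                 /* 0.5  */
        long long three_and_a_quarter = (3LL << 32) | (1LL << 30); /* 3.25 */

        /* floor-style extraction of the integer part, as in
         * dal_fixed31_32_floor() for non-negative values: */
        int int_part = (int)(three_and_a_quarter >> 32);            /* 3 */
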
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 985fe8c22875..10a5807a7e8b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1(
70 struct bios_parser *bp, 70 struct bios_parser *bp,
71 struct dc_firmware_info *info); 71 struct dc_firmware_info *info);
72 72
73static enum bp_result get_firmware_info_v3_2(
74 struct bios_parser *bp,
75 struct dc_firmware_info *info);
76
73static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp, 77static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
74 struct atom_display_object_path_v2 *object); 78 struct atom_display_object_path_v2 *object);
75 79
@@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info(
1321 case 3: 1325 case 3:
1322 switch (revision.minor) { 1326 switch (revision.minor) {
1323 case 1: 1327 case 1:
1324 case 2:
1325 result = get_firmware_info_v3_1(bp, info); 1328 result = get_firmware_info_v3_1(bp, info);
1326 break; 1329 break;
1330 case 2:
1331 result = get_firmware_info_v3_2(bp, info);
1332 break;
1327 default: 1333 default:
1328 break; 1334 break;
1329 } 1335 }
@@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1(
1383 return BP_RESULT_OK; 1389 return BP_RESULT_OK;
1384} 1390}
1385 1391
1392static enum bp_result get_firmware_info_v3_2(
1393 struct bios_parser *bp,
1394 struct dc_firmware_info *info)
1395{
1396 struct atom_firmware_info_v3_2 *firmware_info;
1397 struct atom_display_controller_info_v4_1 *dce_info = NULL;
1398 struct atom_common_table_header *header;
1399 struct atom_data_revision revision;
1400 struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL;
1401 struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL;
1402
1403 if (!info)
1404 return BP_RESULT_BADINPUT;
1405
1406 firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2,
1407 DATA_TABLES(firmwareinfo));
1408
1409 dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
1410 DATA_TABLES(dce_info));
1411
1412 if (!firmware_info || !dce_info)
1413 return BP_RESULT_BADBIOSTABLE;
1414
1415 memset(info, 0, sizeof(*info));
1416
1417 header = GET_IMAGE(struct atom_common_table_header,
1418 DATA_TABLES(smu_info));
1419 get_atom_data_table_revision(header, &revision);
1420
1421 if (revision.minor == 2) {
1422 /* Vega12 */
1423 smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
1424 DATA_TABLES(smu_info));
1425
1426 if (!smu_info_v3_2)
1427 return BP_RESULT_BADBIOSTABLE;
1428
1429 info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
1430 } else if (revision.minor == 3) {
1431 /* Vega20 */
1432 smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
1433 DATA_TABLES(smu_info));
1434
1435 if (!smu_info_v3_3)
1436 return BP_RESULT_BADBIOSTABLE;
1437
1438 info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
1439 }
1440
1441 // We need to convert from 10KHz units into KHz units.
1442 info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
1443
1444 /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */
1445 info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
1446 /* Hardcode frequency if BIOS gives no DCE Ref Clk */
1447 if (info->pll_info.crystal_frequency == 0) {
1448 if (revision.minor == 2)
1449 info->pll_info.crystal_frequency = 27000;
1450 else if (revision.minor == 3)
1451 info->pll_info.crystal_frequency = 100000;
1452 }
1453 /*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/
1454 info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10;
1455 info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
1456
1457 /* Get GPU PLL VCO Clock */
1458 if (bp->cmd_tbl.get_smu_clock_info != NULL) {
1459 if (revision.minor == 2)
1460 info->smu_gpu_pll_output_freq =
1461 bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
1462 else if (revision.minor == 3)
1463 info->smu_gpu_pll_output_freq =
1464 bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;
1465 }
1466
1467 return BP_RESULT_OK;
1468}
1469
1386static enum bp_result bios_parser_get_encoder_cap_info( 1470static enum bp_result bios_parser_get_encoder_cap_info(
1387 struct dc_bios *dcb, 1471 struct dc_bios *dcb,
1388 struct graphics_object_id object_id, 1472 struct graphics_object_id object_id,
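get_firmware_info_v3_2() follows the same pattern as the v3_1 parser but reads the SMU table whose minor revision matches the ASIC (v3_2 for Vega12, v3_3 for Vega20), converts the clocks ATOM stores in 10 kHz units into the kHz the rest of dc expects, and hardcodes the crystal frequency when the BIOS reports none. The unit conversion is a single decimal shift:

        /* Sketch: ATOM firmware tables store clocks in 10 kHz units. */
        uint32_t bootup_mclk_in10khz = 80000;          /* hypothetical value */
        uint32_t mclk_khz = bootup_mclk_in10khz * 10;  /* 800000 kHz = 800 MHz */
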
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 4b5fdd577848..651e1fd4622f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -24,7 +24,7 @@
24 */ 24 */
25 25
26#include "dm_services.h" 26#include "dm_services.h"
27 27#include "amdgpu.h"
28#include "atom.h" 28#include "atom.h"
29 29
30#include "include/bios_parser_interface.h" 30#include "include/bios_parser_interface.h"
@@ -35,16 +35,16 @@
35#include "bios_parser_types_internal.h" 35#include "bios_parser_types_internal.h"
36 36
37#define EXEC_BIOS_CMD_TABLE(command, params)\ 37#define EXEC_BIOS_CMD_TABLE(command, params)\
38 (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \ 38 (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
39 GetIndexIntoMasterTable(COMMAND, command), \ 39 GetIndexIntoMasterTable(COMMAND, command), \
40 &params) == 0) 40 (uint32_t *)&params) == 0)
41 41
42#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\ 42#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
43 cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \ 43 amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
44 GetIndexIntoMasterTable(COMMAND, command), &frev, &crev) 44 GetIndexIntoMasterTable(COMMAND, command), &frev, &crev)
45 45
46#define BIOS_CMD_TABLE_PARA_REVISION(command)\ 46#define BIOS_CMD_TABLE_PARA_REVISION(command)\
47 bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \ 47 bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
48 GetIndexIntoMasterTable(COMMAND, command)) 48 GetIndexIntoMasterTable(COMMAND, command))
49 49
50static void init_dig_encoder_control(struct bios_parser *bp); 50static void init_dig_encoder_control(struct bios_parser *bp);
@@ -82,16 +82,18 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
82 init_set_dce_clock(bp); 82 init_set_dce_clock(bp);
83} 83}
84 84
85static uint32_t bios_cmd_table_para_revision(void *cgs_device, 85static uint32_t bios_cmd_table_para_revision(void *dev,
86 uint32_t index) 86 uint32_t index)
87{ 87{
88 struct amdgpu_device *adev = dev;
88 uint8_t frev, crev; 89 uint8_t frev, crev;
89 90
90 if (cgs_atom_get_cmd_table_revs(cgs_device, 91 if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
91 index, 92 index,
92 &frev, &crev) != 0) 93 &frev, &crev))
94 return crev;
95 else
93 return 0; 96 return 0;
94 return crev;
95} 97}
96 98
97/******************************************************************************* 99/*******************************************************************************
@@ -368,7 +370,7 @@ static void init_transmitter_control(struct bios_parser *bp)
368 uint8_t crev; 370 uint8_t crev;
369 371
370 if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl, 372 if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
371 frev, crev) != 0) 373 frev, crev) == false)
372 BREAK_TO_DEBUGGER(); 374 BREAK_TO_DEBUGGER();
373 switch (crev) { 375 switch (crev) {
374 case 2: 376 case 2:
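With CGS being unwound, the command-table macros now reach the ATOM interpreter directly through the amdgpu_device stashed in ctx->driver_context. Note the inverted convention this forces: cgs_atom_get_cmd_table_revs() returned 0 on success, while amdgpu_atom_parse_cmd_header() returns true, hence the checks flipping from "!= 0" to "== false". The revision lookup, consolidated:

        /* The new lookup in one piece; dev carries a struct amdgpu_device *. */
        static uint32_t example_para_revision(void *dev, uint32_t index)
        {
                struct amdgpu_device *adev = dev;
                uint8_t frev, crev;

                /* amdgpu_atom_parse_cmd_header() returns true on success */
                if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
                                                 index, &frev, &crev))
                        return crev;
                return 0;
        }
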
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 3f63f712c8a4..752b08a42d3e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -26,14 +26,18 @@
26#include "dm_services.h" 26#include "dm_services.h"
27 27
28#include "ObjectID.h" 28#include "ObjectID.h"
29#include "atomfirmware.h"
30 29
30#include "atomfirmware.h"
31#include "atom.h"
31#include "include/bios_parser_interface.h" 32#include "include/bios_parser_interface.h"
32 33
33#include "command_table2.h" 34#include "command_table2.h"
34#include "command_table_helper2.h" 35#include "command_table_helper2.h"
35#include "bios_parser_helper.h" 36#include "bios_parser_helper.h"
36#include "bios_parser_types_internal2.h" 37#include "bios_parser_types_internal2.h"
38#include "amdgpu.h"
39
40
37#define DC_LOGGER \ 41#define DC_LOGGER \
38 bp->base.ctx->logger 42 bp->base.ctx->logger
39 43
@@ -43,16 +47,16 @@
43 ->FieldName)-(char *)0)/sizeof(uint16_t)) 47 ->FieldName)-(char *)0)/sizeof(uint16_t))
44 48
45#define EXEC_BIOS_CMD_TABLE(fname, params)\ 49#define EXEC_BIOS_CMD_TABLE(fname, params)\
46 (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \ 50 (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
47 GET_INDEX_INTO_MASTER_TABLE(command, fname), \ 51 GET_INDEX_INTO_MASTER_TABLE(command, fname), \
48 &params) == 0) 52 (uint32_t *)&params) == 0)
49 53
50#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\ 54#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
51 cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \ 55 amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
52 GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev) 56 GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev)
53 57
54#define BIOS_CMD_TABLE_PARA_REVISION(fname)\ 58#define BIOS_CMD_TABLE_PARA_REVISION(fname)\
55 bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \ 59 bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
56 GET_INDEX_INTO_MASTER_TABLE(command, fname)) 60 GET_INDEX_INTO_MASTER_TABLE(command, fname))
57 61
58static void init_dig_encoder_control(struct bios_parser *bp); 62static void init_dig_encoder_control(struct bios_parser *bp);
@@ -86,16 +90,18 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
86 init_get_smu_clock_info(bp); 90 init_get_smu_clock_info(bp);
87} 91}
88 92
89static uint32_t bios_cmd_table_para_revision(void *cgs_device, 93static uint32_t bios_cmd_table_para_revision(void *dev,
90 uint32_t index) 94 uint32_t index)
91{ 95{
96 struct amdgpu_device *adev = dev;
92 uint8_t frev, crev; 97 uint8_t frev, crev;
93 98
94 if (cgs_atom_get_cmd_table_revs(cgs_device, 99 if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
95 index, 100 index,
96 &frev, &crev) != 0) 101 &frev, &crev))
102 return crev;
103 else
97 return 0; 104 return 0;
98 return crev;
99} 105}
100 106
101/****************************************************************************** 107/******************************************************************************
@@ -201,7 +207,7 @@ static void init_transmitter_control(struct bios_parser *bp)
201 uint8_t frev; 207 uint8_t frev;
202 uint8_t crev; 208 uint8_t crev;
203 209
204 if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) != 0) 210 if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false)
205 BREAK_TO_DEBUGGER(); 211 BREAK_TO_DEBUGGER();
206 switch (crev) { 212 switch (crev) {
207 case 6: 213 case 6:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index 2979358c6a55..be066c49b984 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -51,6 +51,9 @@ bool dal_bios_parser_init_cmd_tbl_helper(
51 return true; 51 return true;
52 52
53 case DCE_VERSION_11_2: 53 case DCE_VERSION_11_2:
54#if defined(CONFIG_DRM_AMD_DC_VEGAM)
55 case DCE_VERSION_11_22:
56#endif
54 *h = dal_cmd_tbl_helper_dce112_get_table(); 57 *h = dal_cmd_tbl_helper_dce112_get_table();
55 return true; 58 return true;
56 59
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 9a4d30dd4969..9b9e06995805 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -52,6 +52,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
52 return true; 52 return true;
53 53
54 case DCE_VERSION_11_2: 54 case DCE_VERSION_11_2:
55#if defined(CONFIG_DRM_AMD_DC_VEGAM)
56 case DCE_VERSION_11_22:
57#endif
55 *h = dal_cmd_tbl_helper_dce112_get_table2(); 58 *h = dal_cmd_tbl_helper_dce112_get_table2();
56 return true; 59 return true;
57#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 60#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
new file mode 100644
index 000000000000..fc3f98fb09ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
@@ -0,0 +1,579 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef _CALCS_CALCS_LOGGER_H_
27#define _CALCS_CALCS_LOGGER_H_
28#define DC_LOGGER \
29 logger
30
31static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)
32{
33
34 DC_LOG_BANDWIDTH_CALCS("#####################################################################");
35 DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_dceip");
36 DC_LOG_BANDWIDTH_CALCS("#####################################################################");
37 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_calcs_version version %d", dceip->version);
38 DC_LOG_BANDWIDTH_CALCS(" [bool] large_cursor: %d", dceip->large_cursor);
39 DC_LOG_BANDWIDTH_CALCS(" [bool] dmif_pipe_en_fbc_chunk_tracker: %d", dceip->dmif_pipe_en_fbc_chunk_tracker);
40 DC_LOG_BANDWIDTH_CALCS(" [bool] display_write_back_supported: %d", dceip->display_write_back_supported);
41 DC_LOG_BANDWIDTH_CALCS(" [bool] argb_compression_support: %d", dceip->argb_compression_support);
42 DC_LOG_BANDWIDTH_CALCS(" [bool] pre_downscaler_enabled: %d", dceip->pre_downscaler_enabled);
43 DC_LOG_BANDWIDTH_CALCS(" [bool] underlay_downscale_prefetch_enabled: %d",
44 dceip->underlay_downscale_prefetch_enabled);
45 DC_LOG_BANDWIDTH_CALCS(" [bool] graphics_lb_nodownscaling_multi_line_prefetching: %d",
46 dceip->graphics_lb_nodownscaling_multi_line_prefetching);
47 DC_LOG_BANDWIDTH_CALCS(" [bool] limit_excessive_outstanding_dmif_requests: %d",
48 dceip->limit_excessive_outstanding_dmif_requests);
49 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_max_outstanding_group_num: %d",
50 dceip->cursor_max_outstanding_group_num);
51 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lines_interleaved_into_lb: %d", dceip->lines_interleaved_into_lb);
52 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] low_power_tiling_mode: %d", dceip->low_power_tiling_mode);
53 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_width: %d", dceip->chunk_width);
54 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_graphics_pipes: %d", dceip->number_of_graphics_pipes);
55 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_pipes: %d", dceip->number_of_underlay_pipes);
56 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_dmif_buffer_allocated: %d", dceip->max_dmif_buffer_allocated);
57 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_dmif_size: %d", dceip->graphics_dmif_size);
58 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_luma_dmif_size: %d", dceip->underlay_luma_dmif_size);
59 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_chroma_dmif_size: %d", dceip->underlay_chroma_dmif_size);
60 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_lines_of_pte_prefetching_in_linear_mode: %d",
61 dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
62 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_luma_mcifwr_buffer_size: %d",
63 dceip->display_write_back420_luma_mcifwr_buffer_size);
64 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_chroma_mcifwr_buffer_size: %d",
65 dceip->display_write_back420_chroma_mcifwr_buffer_size);
66 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_pte_request_rows_in_tiling_mode: %d",
67 dceip->scatter_gather_pte_request_rows_in_tiling_mode);
68 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency10_bit_per_component: %d",
69 bw_fixed_to_int(dceip->underlay_vscaler_efficiency10_bit_per_component));
70 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency12_bit_per_component: %d",
71 bw_fixed_to_int(dceip->underlay_vscaler_efficiency12_bit_per_component));
72 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency6_bit_per_component: %d",
73 bw_fixed_to_int(dceip->graphics_vscaler_efficiency6_bit_per_component));
74 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency8_bit_per_component: %d",
75 bw_fixed_to_int(dceip->graphics_vscaler_efficiency8_bit_per_component));
76 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency10_bit_per_component: %d",
77 bw_fixed_to_int(dceip->graphics_vscaler_efficiency10_bit_per_component));
78 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency12_bit_per_component: %d",
79 bw_fixed_to_int(dceip->graphics_vscaler_efficiency12_bit_per_component));
80 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] alpha_vscaler_efficiency: %d",
81 bw_fixed_to_int(dceip->alpha_vscaler_efficiency));
82 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_write_pixels_per_dispclk: %d",
83 bw_fixed_to_int(dceip->lb_write_pixels_per_dispclk));
84 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component444: %d",
85 bw_fixed_to_int(dceip->lb_size_per_component444));
86 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_and_dram_clock_state_change_gated_before_cursor: %d",
87 bw_fixed_to_int(dceip->stutter_and_dram_clock_state_change_gated_before_cursor));
88 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_luma_lb_size_per_component: %d",
89 bw_fixed_to_int(dceip->underlay420_luma_lb_size_per_component));
90 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_chroma_lb_size_per_component: %d",
91 bw_fixed_to_int(dceip->underlay420_chroma_lb_size_per_component));
92 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay422_lb_size_per_component: %d",
93 bw_fixed_to_int(dceip->underlay422_lb_size_per_component));
94 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_chunk_width: %d", bw_fixed_to_int(dceip->cursor_chunk_width));
95 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_dcp_buffer_lines: %d",
96 bw_fixed_to_int(dceip->cursor_dcp_buffer_lines));
97 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_width_efficient_for_tiling: %d",
98 bw_fixed_to_int(dceip->underlay_maximum_width_efficient_for_tiling));
99 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_height_efficient_for_tiling: %d",
100 bw_fixed_to_int(dceip->underlay_maximum_height_efficient_for_tiling));
101 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display: %d",
102 bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display));
103 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation: %d",
104 bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation));
105 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_outstanding_pte_request_limit: %d",
106 bw_fixed_to_int(dceip->minimum_outstanding_pte_request_limit));
107 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_total_outstanding_pte_requests_allowed_by_saw: %d",
108 bw_fixed_to_int(dceip->maximum_total_outstanding_pte_requests_allowed_by_saw));
109 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] linear_mode_line_request_alternation_slice: %d",
110 bw_fixed_to_int(dceip->linear_mode_line_request_alternation_slice));
111 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_efficiency: %d", bw_fixed_to_int(dceip->request_efficiency));
112 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_per_request: %d", bw_fixed_to_int(dceip->dispclk_per_request));
113 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_ramping_factor: %d",
114 bw_fixed_to_int(dceip->dispclk_ramping_factor));
115 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_throughput_factor: %d",
116 bw_fixed_to_int(dceip->display_pipe_throughput_factor));
117 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_all_surfaces_burst_time: %d",
118 bw_fixed_to_int(dceip->mcifwr_all_surfaces_burst_time));
119 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_request_buffer_size: %d",
120 bw_fixed_to_int(dceip->dmif_request_buffer_size));
121
122
123}
124
125static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)
126{
127
128 DC_LOG_BANDWIDTH_CALCS("#####################################################################");
129 DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_vbios vbios");
130 DC_LOG_BANDWIDTH_CALCS("#####################################################################");
131 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type);
132 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type);
133 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] dram_channel_width_in_bits: %d", vbios->dram_channel_width_in_bits);
134 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", vbios->number_of_dram_channels);
135 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_banks: %d", vbios->number_of_dram_banks);
136 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_yclk: %d", bw_fixed_to_int(vbios->low_yclk));
137 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_yclk: %d", bw_fixed_to_int(vbios->mid_yclk));
138 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_yclk: %d", bw_fixed_to_int(vbios->high_yclk));
139 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_sclk: %d", bw_fixed_to_int(vbios->low_sclk));
140 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid1_sclk: %d", bw_fixed_to_int(vbios->mid1_sclk));
141 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid2_sclk: %d", bw_fixed_to_int(vbios->mid2_sclk));
142 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid3_sclk: %d", bw_fixed_to_int(vbios->mid3_sclk));
143 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid4_sclk: %d", bw_fixed_to_int(vbios->mid4_sclk));
144 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid5_sclk: %d", bw_fixed_to_int(vbios->mid5_sclk));
145 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid6_sclk: %d", bw_fixed_to_int(vbios->mid6_sclk));
146 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_sclk: %d", bw_fixed_to_int(vbios->high_sclk));
147 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_dispclk: %d",
148 bw_fixed_to_int(vbios->low_voltage_max_dispclk));
149 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_dispclk;: %d",
150 bw_fixed_to_int(vbios->mid_voltage_max_dispclk));
151 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_dispclk;: %d",
152 bw_fixed_to_int(vbios->high_voltage_max_dispclk));
153 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_phyclk: %d",
154 bw_fixed_to_int(vbios->low_voltage_max_phyclk));
155 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_phyclk: %d",
156 bw_fixed_to_int(vbios->mid_voltage_max_phyclk));
157 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_phyclk: %d",
158 bw_fixed_to_int(vbios->high_voltage_max_phyclk));
159 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_return_bus_width: %d", bw_fixed_to_int(vbios->data_return_bus_width));
160 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] trc: %d", bw_fixed_to_int(vbios->trc));
161 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency: %d", bw_fixed_to_int(vbios->dmifmc_urgent_latency));
162 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_exit_latency: %d",
163 bw_fixed_to_int(vbios->stutter_self_refresh_exit_latency));
164 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_entry_latency: %d",
165 bw_fixed_to_int(vbios->stutter_self_refresh_entry_latency));
166 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_latency: %d",
167 bw_fixed_to_int(vbios->nbp_state_change_latency));
168 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwrmc_urgent_latency: %d",
169 bw_fixed_to_int(vbios->mcifwrmc_urgent_latency));
170 DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable: %d", vbios->scatter_gather_enable);
171 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] down_spread_percentage: %d",
172 bw_fixed_to_int(vbios->down_spread_percentage));
173 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_width: %d", vbios->cursor_width);
174 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] average_compression_rate: %d", vbios->average_compression_rate);
175 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_request_slots_gmc_reserves_for_dmif_per_channel: %d",
176 vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel);
177 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration: %d", bw_fixed_to_int(vbios->blackout_duration));
178 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_blackout_recovery_time: %d",
179 bw_fixed_to_int(vbios->maximum_blackout_recovery_time));
180
181
182}
183
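These print helpers take an explicit logger parameter because DC_LOG_BANDWIDTH_CALCS is a thin macro over the dm logger. A plausible expansion, shown purely as an assumption for illustration (dm_logger_write() and LOG_BANDWIDTH_CALCS are the assumed underlying API, not something this patch defines):

/* Hypothetical expansion; every call above lands in the
 * bandwidth-calcs log category, which the logger can mask off. */
#define DC_LOG_BANDWIDTH_CALCS_EX(...) \
	dm_logger_write(logger, LOG_BANDWIDTH_CALCS, ##__VA_ARGS__)
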
184static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data)
185{
186
187 int i, j, k;
188
189 DC_LOG_BANDWIDTH_CALCS("#####################################################################");
190 DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_data data");
191 DC_LOG_BANDWIDTH_CALCS("#####################################################################");
192 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_displays: %d", data->number_of_displays);
193 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_surface_type: %d", data->underlay_surface_type);
194 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines panning_and_bezel_adjustment: %d",
195 data->panning_and_bezel_adjustment);
196 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_tiling_mode: %d", data->graphics_tiling_mode);
197 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_lb_bpc: %d", data->graphics_lb_bpc);
198 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_lb_bpc: %d", data->underlay_lb_bpc);
199 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_tiling_mode: %d", data->underlay_tiling_mode);
200 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d0_underlay_mode: %d", data->d0_underlay_mode);
201 DC_LOG_BANDWIDTH_CALCS(" [bool] d1_display_write_back_dwb_enable: %d", data->d1_display_write_back_dwb_enable);
202 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d1_underlay_mode: %d", data->d1_underlay_mode);
203 DC_LOG_BANDWIDTH_CALCS(" [bool] cpup_state_change_enable: %d", data->cpup_state_change_enable);
204 DC_LOG_BANDWIDTH_CALCS(" [bool] cpuc_state_change_enable: %d", data->cpuc_state_change_enable);
205 DC_LOG_BANDWIDTH_CALCS(" [bool] nbp_state_change_enable: %d", data->nbp_state_change_enable);
206 DC_LOG_BANDWIDTH_CALCS(" [bool] stutter_mode_enable: %d", data->stutter_mode_enable);
207 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] y_clk_level: %d", data->y_clk_level);
208 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] sclk_level: %d", data->sclk_level);
209 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_surfaces: %d", data->number_of_underlay_surfaces);
210 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_wrchannels: %d", data->number_of_dram_wrchannels);
211 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_request_delay: %d", data->chunk_request_delay);
212 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", data->number_of_dram_channels);
213 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_micro_tile_mode: %d", data->underlay_micro_tile_mode);
214 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_micro_tile_mode: %d", data->graphics_micro_tile_mode);
215 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] max_phyclk: %d", bw_fixed_to_int(data->max_phyclk));
216 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_efficiency: %d", bw_fixed_to_int(data->dram_efficiency));
217 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_surface_type: %d",
218 bw_fixed_to_int(data->src_width_after_surface_type));
219 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_surface_type: %d",
220 bw_fixed_to_int(data->src_height_after_surface_type));
221 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_surface_type: %d",
222 bw_fixed_to_int(data->hsr_after_surface_type));
223 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_surface_type: %d", bw_fixed_to_int(data->vsr_after_surface_type));
224 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_rotation: %d",
225 bw_fixed_to_int(data->src_width_after_rotation));
226 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_rotation: %d",
227 bw_fixed_to_int(data->src_height_after_rotation));
228 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_rotation: %d", bw_fixed_to_int(data->hsr_after_rotation));
229 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_rotation: %d", bw_fixed_to_int(data->vsr_after_rotation));
230 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_pixels: %d", bw_fixed_to_int(data->source_height_pixels));
231 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_stereo: %d", bw_fixed_to_int(data->hsr_after_stereo));
232 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_stereo: %d", bw_fixed_to_int(data->vsr_after_stereo));
233 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_in_lb: %d", bw_fixed_to_int(data->source_width_in_lb));
234 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_line_pitch: %d", bw_fixed_to_int(data->lb_line_pitch));
235 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_source_efficient_for_tiling: %d",
236 bw_fixed_to_int(data->underlay_maximum_source_efficient_for_tiling));
237 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] num_lines_at_frame_start: %d",
238 bw_fixed_to_int(data->num_lines_at_frame_start));
239 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dmif_size_in_time: %d", bw_fixed_to_int(data->min_dmif_size_in_time));
240 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_mcifwr_size_in_time: %d",
241 bw_fixed_to_int(data->min_mcifwr_size_in_time));
242 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_dmif_size: %d",
243 bw_fixed_to_int(data->total_requests_for_dmif_size));
244 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting: %d",
245 bw_fixed_to_int(data->peak_pte_request_to_eviction_ratio_limiting));
246 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_pte_per_pte_request: %d",
247 bw_fixed_to_int(data->useful_pte_per_pte_request));
248 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_rows: %d",
249 bw_fixed_to_int(data->scatter_gather_pte_request_rows));
250 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_row_height: %d",
251 bw_fixed_to_int(data->scatter_gather_row_height));
252 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_vblank: %d",
253 bw_fixed_to_int(data->scatter_gather_pte_requests_in_vblank));
254 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] inefficient_linear_pitch_in_bytes: %d",
255 bw_fixed_to_int(data->inefficient_linear_pitch_in_bytes));
256 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_data: %d", bw_fixed_to_int(data->cursor_total_data));
257 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_request_groups: %d",
258 bw_fixed_to_int(data->cursor_total_request_groups));
259 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_requests: %d",
260 bw_fixed_to_int(data->scatter_gather_total_pte_requests));
261 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_request_groups: %d",
262 bw_fixed_to_int(data->scatter_gather_total_pte_request_groups));
263 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] tile_width_in_pixels: %d", bw_fixed_to_int(data->tile_width_in_pixels));
264 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_number_of_data_request_page_close_open: %d",
265 bw_fixed_to_int(data->dmif_total_number_of_data_request_page_close_open));
266 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_number_of_data_request_page_close_open: %d",
267 bw_fixed_to_int(data->mcifwr_total_number_of_data_request_page_close_open));
268 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_page_close_open: %d",
269 bw_fixed_to_int(data->bytes_per_page_close_open));
270 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_page_close_open_time: %d",
271 bw_fixed_to_int(data->mcifwr_total_page_close_open_time));
272 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_adjusted_dmif_size: %d",
273 bw_fixed_to_int(data->total_requests_for_adjusted_dmif_size));
274 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_trips: %d",
275 bw_fixed_to_int(data->total_dmifmc_urgent_trips));
276 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_latency: %d",
277 bw_fixed_to_int(data->total_dmifmc_urgent_latency));
278 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_data: %d",
279 bw_fixed_to_int(data->total_display_reads_required_data));
280 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_dram_access_data: %d",
281 bw_fixed_to_int(data->total_display_reads_required_dram_access_data));
282 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_data: %d",
283 bw_fixed_to_int(data->total_display_writes_required_data));
284 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_dram_access_data: %d",
285 bw_fixed_to_int(data->total_display_writes_required_dram_access_data));
286 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_data: %d",
287 bw_fixed_to_int(data->display_reads_required_data));
288 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_dram_access_data: %d",
289 bw_fixed_to_int(data->display_reads_required_dram_access_data));
290 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_page_close_open_time: %d",
291 bw_fixed_to_int(data->dmif_total_page_close_open_time));
292 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_cursor_memory_interface_buffer_size_in_time: %d",
293 bw_fixed_to_int(data->min_cursor_memory_interface_buffer_size_in_time));
294 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_read_buffer_size_in_time: %d",
295 bw_fixed_to_int(data->min_read_buffer_size_in_time));
296 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer: %d",
297 bw_fixed_to_int(data->display_reads_time_for_data_transfer));
298 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_writes_time_for_data_transfer: %d",
299 bw_fixed_to_int(data->display_writes_time_for_data_transfer));
300 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_dram_bandwidth: %d",
301 bw_fixed_to_int(data->dmif_required_dram_bandwidth));
302 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_dram_bandwidth: %d",
303 bw_fixed_to_int(data->mcifwr_required_dram_bandwidth));
304 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dmifmc_urgent_latency_for_page_close_open: %d",
305 bw_fixed_to_int(data->required_dmifmc_urgent_latency_for_page_close_open));
306 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_mcifmcwr_urgent_latency: %d",
307 bw_fixed_to_int(data->required_mcifmcwr_urgent_latency));
308 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dram_bandwidth_gbyte_per_second: %d",
309 bw_fixed_to_int(data->required_dram_bandwidth_gbyte_per_second));
310 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_bandwidth: %d", bw_fixed_to_int(data->dram_bandwidth));
311 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_sclk: %d", bw_fixed_to_int(data->dmif_required_sclk));
312 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_sclk: %d", bw_fixed_to_int(data->mcifwr_required_sclk));
313 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_sclk: %d", bw_fixed_to_int(data->required_sclk));
314 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] downspread_factor: %d", bw_fixed_to_int(data->downspread_factor));
315 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scaler_efficiency: %d", bw_fixed_to_int(data->v_scaler_efficiency));
316 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scaler_limits_factor: %d", bw_fixed_to_int(data->scaler_limits_factor));
317 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_pixel_throughput: %d",
318 bw_fixed_to_int(data->display_pipe_pixel_throughput));
319 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping: %d",
320 bw_fixed_to_int(data->total_dispclk_required_with_ramping));
321 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping: %d",
322 bw_fixed_to_int(data->total_dispclk_required_without_ramping));
323 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_read_request_bandwidth: %d",
324 bw_fixed_to_int(data->total_read_request_bandwidth));
325 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_write_request_bandwidth: %d",
326 bw_fixed_to_int(data->total_write_request_bandwidth));
327 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_total_read_request_bandwidth: %d",
328 bw_fixed_to_int(data->dispclk_required_for_total_read_request_bandwidth));
329 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping_with_request_bandwidth: %d",
330 bw_fixed_to_int(data->total_dispclk_required_with_ramping_with_request_bandwidth));
331 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping_with_request_bandwidth: %d",
332 bw_fixed_to_int(data->total_dispclk_required_without_ramping_with_request_bandwidth));
333 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk: %d", bw_fixed_to_int(data->dispclk));
334 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_recovery_time: %d", bw_fixed_to_int(data->blackout_recovery_time));
335 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_pixels_per_data_fifo_entry: %d",
336 bw_fixed_to_int(data->min_pixels_per_data_fifo_entry));
337 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] sclk_deep_sleep: %d", bw_fixed_to_int(data->sclk_deep_sleep));
338 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] chunk_request_time: %d", bw_fixed_to_int(data->chunk_request_time));
339 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_request_time: %d", bw_fixed_to_int(data->cursor_request_time));
340 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] line_source_pixels_transfer_time: %d",
341 bw_fixed_to_int(data->line_source_pixels_transfer_time));
342 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifdram_access_efficiency: %d",
343 bw_fixed_to_int(data->dmifdram_access_efficiency));
344 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwrdram_access_efficiency: %d",
345 bw_fixed_to_int(data->mcifwrdram_access_efficiency));
346 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth_no_compression: %d",
347 bw_fixed_to_int(data->total_average_bandwidth_no_compression));
348 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth: %d",
349 bw_fixed_to_int(data->total_average_bandwidth));
350 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_stutter_cycle_duration: %d",
351 bw_fixed_to_int(data->total_stutter_cycle_duration));
352 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_burst_time: %d", bw_fixed_to_int(data->stutter_burst_time));
353 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] time_in_self_refresh: %d", bw_fixed_to_int(data->time_in_self_refresh));
354 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_efficiency: %d", bw_fixed_to_int(data->stutter_efficiency));
355 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] worst_number_of_trips_to_memory: %d",
356 bw_fixed_to_int(data->worst_number_of_trips_to_memory));
357 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] immediate_flip_time: %d", bw_fixed_to_int(data->immediate_flip_time));
358 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_dmif_clients: %d",
359 bw_fixed_to_int(data->latency_for_non_dmif_clients));
360 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_mcifwr_clients: %d",
361 bw_fixed_to_int(data->latency_for_non_mcifwr_clients));
362 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency_supported_in_high_sclk_and_yclk: %d",
363 bw_fixed_to_int(data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk));
364 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_margin: %d",
365 bw_fixed_to_int(data->nbp_state_dram_speed_change_margin));
366 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer_and_urgent_latency: %d",
367 bw_fixed_to_int(data->display_reads_time_for_data_transfer_and_urgent_latency));
368 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_margin: %d",
369 bw_fixed_to_int(data->dram_speed_change_margin));
370 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_vblank_dram_speed_change_margin: %d",
371 bw_fixed_to_int(data->min_vblank_dram_speed_change_margin));
372 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_stutter_refresh_duration: %d",
373 bw_fixed_to_int(data->min_stutter_refresh_duration));
374 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_stutter_dmif_buffer_size: %d", data->total_stutter_dmif_buffer_size);
375 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_bytes_requested: %d", data->total_bytes_requested);
376 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] min_stutter_dmif_buffer_size: %d", data->min_stutter_dmif_buffer_size);
377 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] num_stutter_bursts: %d", data->num_stutter_bursts);
378 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_nbp_state_dram_speed_change_latency_supported: %d",
379 bw_fixed_to_int(data->v_blank_nbp_state_dram_speed_change_latency_supported));
380 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_latency_supported: %d",
381 bw_fixed_to_int(data->nbp_state_dram_speed_change_latency_supported));
382
383 for (i = 0; i < maximum_number_of_surfaces; i++) {
384 DC_LOG_BANDWIDTH_CALCS(" [bool] fbc_en[%d]:%d\n", i, data->fbc_en[i]);
385 DC_LOG_BANDWIDTH_CALCS(" [bool] lpt_en[%d]:%d", i, data->lpt_en[i]);
386 DC_LOG_BANDWIDTH_CALCS(" [bool] displays_match_flag[%d]:%d", i, data->displays_match_flag[i]);
387 DC_LOG_BANDWIDTH_CALCS(" [bool] use_alpha[%d]:%d", i, data->use_alpha[i]);
388 DC_LOG_BANDWIDTH_CALCS(" [bool] orthogonal_rotation[%d]:%d", i, data->orthogonal_rotation[i]);
389 DC_LOG_BANDWIDTH_CALCS(" [bool] enable[%d]:%d", i, data->enable[i]);
390 DC_LOG_BANDWIDTH_CALCS(" [bool] access_one_channel_only[%d]:%d", i, data->access_one_channel_only[i]);
391 DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable_for_pipe[%d]:%d",
392 i, data->scatter_gather_enable_for_pipe[i]);
393 DC_LOG_BANDWIDTH_CALCS(" [bool] interlace_mode[%d]:%d",
394 i, data->interlace_mode[i]);
395 DC_LOG_BANDWIDTH_CALCS(" [bool] display_pstate_change_enable[%d]:%d",
396 i, data->display_pstate_change_enable[i]);
397 DC_LOG_BANDWIDTH_CALCS(" [bool] line_buffer_prefetch[%d]:%d", i, data->line_buffer_prefetch[i]);
398 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] bytes_per_pixel[%d]:%d", i, data->bytes_per_pixel[i]);
399 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_chunks_non_fbc_mode[%d]:%d",
400 i, data->max_chunks_non_fbc_mode[i]);
401 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lb_bpc[%d]:%d", i, data->lb_bpc[i]);
402 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bpphdmi[%d]:%d", i, data->output_bpphdmi[i]);
403 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr[%d]:%d", i, data->output_bppdp4_lane_hbr[i]);
404 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr2[%d]:%d",
405 i, data->output_bppdp4_lane_hbr2[i]);
406 DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr3[%d]:%d",
407 i, data->output_bppdp4_lane_hbr3[i]);
408 DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines stereo_mode[%d]:%d", i, data->stereo_mode[i]);
409 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_buffer_transfer_time[%d]:%d",
410 i, bw_fixed_to_int(data->dmif_buffer_transfer_time[i]));
411 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] displays_with_same_mode[%d]:%d",
412 i, bw_fixed_to_int(data->displays_with_same_mode[i]));
413 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_dmif_buffer_size[%d]:%d",
414 i, bw_fixed_to_int(data->stutter_dmif_buffer_size[i]));
415 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_refresh_duration[%d]:%d",
416 i, bw_fixed_to_int(data->stutter_refresh_duration[i]));
417 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_exit_watermark[%d]:%d",
418 i, bw_fixed_to_int(data->stutter_exit_watermark[i]));
419 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_entry_watermark[%d]:%d",
420 i, bw_fixed_to_int(data->stutter_entry_watermark[i]));
421 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_total[%d]:%d", i, bw_fixed_to_int(data->h_total[i]));
422 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_total[%d]:%d", i, bw_fixed_to_int(data->v_total[i]));
423 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixel_rate[%d]:%d", i, bw_fixed_to_int(data->pixel_rate[i]));
424 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width[%d]:%d", i, bw_fixed_to_int(data->src_width[i]));
425 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels[%d]:%d",
426 i, bw_fixed_to_int(data->pitch_in_pixels[i]));
427 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels_after_surface_type[%d]:%d",
428 i, bw_fixed_to_int(data->pitch_in_pixels_after_surface_type[i]));
429 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height[%d]:%d", i, bw_fixed_to_int(data->src_height[i]));
430 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scale_ratio[%d]:%d", i, bw_fixed_to_int(data->scale_ratio[i]));
431 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_taps[%d]:%d", i, bw_fixed_to_int(data->h_taps[i]));
432 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_taps[%d]:%d", i, bw_fixed_to_int(data->v_taps[i]));
433 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->h_scale_ratio[i]));
434 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->v_scale_ratio[i]));
435 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] rotation_angle[%d]:%d",
436 i, bw_fixed_to_int(data->rotation_angle[i]));
437 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] compression_rate[%d]:%d",
438 i, bw_fixed_to_int(data->compression_rate[i]));
439 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr[%d]:%d", i, bw_fixed_to_int(data->hsr[i]));
440 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr[%d]:%d", i, bw_fixed_to_int(data->vsr[i]));
441 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_rounded_up_to_chunks[%d]:%d",
442 i, bw_fixed_to_int(data->source_width_rounded_up_to_chunks[i]));
443 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_pixels[%d]:%d",
444 i, bw_fixed_to_int(data->source_width_pixels[i]));
445 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_rounded_up_to_chunks[%d]:%d",
446 i, bw_fixed_to_int(data->source_height_rounded_up_to_chunks[i]));
447 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_bandwidth[%d]:%d",
448 i, bw_fixed_to_int(data->display_bandwidth[i]));
449 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_bandwidth[%d]:%d",
450 i, bw_fixed_to_int(data->request_bandwidth[i]));
451 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_request[%d]:%d",
452 i, bw_fixed_to_int(data->bytes_per_request[i]));
453 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_bytes_per_request[%d]:%d",
454 i, bw_fixed_to_int(data->useful_bytes_per_request[i]));
455 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lines_interleaved_in_mem_access[%d]:%d",
456 i, bw_fixed_to_int(data->lines_interleaved_in_mem_access[i]));
457 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_hiding_lines[%d]:%d",
458 i, bw_fixed_to_int(data->latency_hiding_lines[i]));
459 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions[%d]:%d",
460 i, bw_fixed_to_int(data->lb_partitions[i]));
461 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions_max[%d]:%d",
462 i, bw_fixed_to_int(data->lb_partitions_max[i]));
463 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_with_ramping[%d]:%d",
464 i, bw_fixed_to_int(data->dispclk_required_with_ramping[i]));
465 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_without_ramping[%d]:%d",
466 i, bw_fixed_to_int(data->dispclk_required_without_ramping[i]));
467 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_buffer_size[%d]:%d",
468 i, bw_fixed_to_int(data->data_buffer_size[i]));
469 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] outstanding_chunk_request_limit[%d]:%d",
470 i, bw_fixed_to_int(data->outstanding_chunk_request_limit[i]));
471 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] urgent_watermark[%d]:%d",
472 i, bw_fixed_to_int(data->urgent_watermark[i]));
473 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_watermark[%d]:%d",
474 i, bw_fixed_to_int(data->nbp_state_change_watermark[i]));
475 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_filter_init[%d]:%d", i, bw_fixed_to_int(data->v_filter_init[i]));
476 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_cycle_duration[%d]:%d",
477 i, bw_fixed_to_int(data->stutter_cycle_duration[i]));
478 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth[%d]:%d",
479 i, bw_fixed_to_int(data->average_bandwidth[i]));
480 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth_no_compression[%d]:%d",
481 i, bw_fixed_to_int(data->average_bandwidth_no_compression[i]));
482 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_limit[%d]:%d",
483 i, bw_fixed_to_int(data->scatter_gather_pte_request_limit[i]));
484 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component[%d]:%d",
485 i, bw_fixed_to_int(data->lb_size_per_component[i]));
486 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] memory_chunk_size_in_bytes[%d]:%d",
487 i, bw_fixed_to_int(data->memory_chunk_size_in_bytes[i]));
488 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pipe_chunk_size_in_bytes[%d]:%d",
489 i, bw_fixed_to_int(data->pipe_chunk_size_in_bytes[i]));
490 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] number_of_trips_to_memory_for_getting_apte_row[%d]:%d",
491 i, bw_fixed_to_int(data->number_of_trips_to_memory_for_getting_apte_row[i]));
492 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size[%d]:%d",
493 i, bw_fixed_to_int(data->adjusted_data_buffer_size[i]));
494 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size_in_memory[%d]:%d",
495 i, bw_fixed_to_int(data->adjusted_data_buffer_size_in_memory[i]));
496 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixels_per_data_fifo_entry[%d]:%d",
497 i, bw_fixed_to_int(data->pixels_per_data_fifo_entry[i]));
498 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_row[%d]:%d",
499 i, bw_fixed_to_int(data->scatter_gather_pte_requests_in_row[i]));
500 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pte_request_per_chunk[%d]:%d",
501 i, bw_fixed_to_int(data->pte_request_per_chunk[i]));
502 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_width[%d]:%d",
503 i, bw_fixed_to_int(data->scatter_gather_page_width[i]));
504 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_height[%d]:%d",
505 i, bw_fixed_to_int(data->scatter_gather_page_height[i]));
506 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_beginning_of_frame[%d]:%d",
507 i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_beginning_of_frame[i]));
508 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_middle_of_frame[%d]:%d",
509 i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_middle_of_frame[i]));
510 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_width_pixels[%d]:%d",
511 i, bw_fixed_to_int(data->cursor_width_pixels[i]));
512 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding[%d]:%d",
513 i, bw_fixed_to_int(data->minimum_latency_hiding[i]));
514 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding[%d]:%d",
515 i, bw_fixed_to_int(data->maximum_latency_hiding[i]));
516 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding_with_cursor[%d]:%d",
517 i, bw_fixed_to_int(data->minimum_latency_hiding_with_cursor[i]));
518 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding_with_cursor[%d]:%d",
519 i, bw_fixed_to_int(data->maximum_latency_hiding_with_cursor[i]));
520 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_first_output_pixel[%d]:%d",
521 i, bw_fixed_to_int(data->src_pixels_for_first_output_pixel[i]));
522 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_last_output_pixel[%d]:%d",
523 i, bw_fixed_to_int(data->src_pixels_for_last_output_pixel[i]));
524 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_first_output_pixel[%d]:%d",
525 i, bw_fixed_to_int(data->src_data_for_first_output_pixel[i]));
526 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_last_output_pixel[%d]:%d",
527 i, bw_fixed_to_int(data->src_data_for_last_output_pixel[i]));
528 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] active_time[%d]:%d", i, bw_fixed_to_int(data->active_time[i]));
529 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] horizontal_blank_and_chunk_granularity_factor[%d]:%d",
530 i, bw_fixed_to_int(data->horizontal_blank_and_chunk_granularity_factor[i]));
531 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_latency_hiding[%d]:%d",
532 i, bw_fixed_to_int(data->cursor_latency_hiding[i]));
533 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_dram_speed_change_margin[%d]:%d",
534 i, bw_fixed_to_int(data->v_blank_dram_speed_change_margin[i]));
535 }
536
537 for (i = 0; i < maximum_number_of_surfaces; i++) {
538 for (j = 0; j < 3; j++) {
539 for (k = 0; k < 8; k++) {
540
541 DC_LOG_BANDWIDTH_CALCS("\n [bw_fixed] line_source_transfer_time[%d][%d][%d]:%d",
542 i, j, k, bw_fixed_to_int(data->line_source_transfer_time[i][j][k]));
543 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_line_source_transfer_time[%d][%d][%d]:%d",
544 i, j, k,
545 bw_fixed_to_int(data->dram_speed_change_line_source_transfer_time[i][j][k]));
546 }
547 }
548 }
549
550 for (i = 0; i < 3; i++) {
551 for (j = 0; j < 8; j++) {
552
553 DC_LOG_BANDWIDTH_CALCS("\n [uint32_t] num_displays_with_margin[%d][%d]:%d",
554 i, j, data->num_displays_with_margin[i][j]);
555 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_burst_time[%d][%d]:%d",
556 i, j, bw_fixed_to_int(data->dmif_burst_time[i][j]));
557 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_burst_time[%d][%d]:%d",
558 i, j, bw_fixed_to_int(data->mcifwr_burst_time[i][j]));
559 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dram_speed_change_margin[%d][%d]:%d",
560 i, j, bw_fixed_to_int(data->min_dram_speed_change_margin[i][j]));
561 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_dram_speed_change[%d][%d]:%d",
562 i, j, bw_fixed_to_int(data->dispclk_required_for_dram_speed_change[i][j]));
563 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration_margin[%d][%d]:%d",
564 i, j, bw_fixed_to_int(data->blackout_duration_margin[i][j]));
565 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_duration[%d][%d]:%d",
566 i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_duration[i][j]));
567 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_recovery[%d][%d]:%d",
568 i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_recovery[i][j]));
569 }
570 }
571
572 for (i = 0; i < 6; i++) {
573 DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_sclk_for_urgent_latency[%d]:%d",
574 i, bw_fixed_to_int(data->dmif_required_sclk_for_urgent_latency[i]));
575 }
576}
578
579#endif /* _CALCS_CALCS_LOGGER_H_ */
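
Every [bw_fixed] field above is funneled through bw_fixed_to_int() because the calcs code stores its quantities as 64-bit fixed-point values rather than floats. A minimal sketch of that representation, assuming the 24-bit fractional width used by DC's fixed-point helpers (the exact constant and sign handling live outside this patch):

#include <stdint.h>

#define EX_FRAC_BITS 24	/* assumed fractional width */

struct ex_bw_fixed {
	int64_t value;	/* real number scaled by 2^EX_FRAC_BITS */
};

static int64_t ex_bw_fixed_to_int(struct ex_bw_fixed v)
{
	/* integer part only; the logger's %d output drops the fraction
	 * (the real helper also handles negative values explicitly) */
	return v.value >> EX_FRAC_BITS;
}
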
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 0cbab81ab304..4ee3c26f7c13 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -28,6 +28,7 @@
28#include "dc.h" 28#include "dc.h"
29#include "core_types.h" 29#include "core_types.h"
30#include "dal_asic_id.h" 30#include "dal_asic_id.h"
31#include "calcs_logger.h"
31 32
32/* 33/*
33 * NOTE: 34 * NOTE:
@@ -52,11 +53,16 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi
 		return BW_CALCS_VERSION_CARRIZO;
 
 	case FAMILY_VI:
+		if (ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+			return BW_CALCS_VERSION_POLARIS12;
 		if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
 			return BW_CALCS_VERSION_POLARIS10;
-		if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
-				ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+		if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
 			return BW_CALCS_VERSION_POLARIS11;
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+		if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
+			return BW_CALCS_VERSION_VEGAM;
+#endif
 		return BW_CALCS_VERSION_INVALID;
 
 	case FAMILY_AI:
@@ -2145,6 +2151,11 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
 		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
 		break;
 	case BW_CALCS_VERSION_POLARIS10:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+		/* TODO: Treat VEGAM the same as P10 for now
+		 * Need to tune the para for VEGAM if needed */
+	case BW_CALCS_VERSION_VEGAM:
+#endif
 		vbios.memory_type = bw_def_gddr5;
 		vbios.dram_channel_width_in_bits = 32;
 		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
@@ -2373,6 +2384,122 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
 		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
 		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
 		break;
+	case BW_CALCS_VERSION_POLARIS12:
+		vbios.memory_type = bw_def_gddr5;
+		vbios.dram_channel_width_in_bits = 32;
+		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+		vbios.number_of_dram_banks = 8;
+		vbios.high_yclk = bw_int_to_fixed(6000);
+		vbios.mid_yclk = bw_int_to_fixed(3200);
+		vbios.low_yclk = bw_int_to_fixed(1000);
+		vbios.low_sclk = bw_int_to_fixed(678);
+		vbios.mid1_sclk = bw_int_to_fixed(864);
+		vbios.mid2_sclk = bw_int_to_fixed(900);
+		vbios.mid3_sclk = bw_int_to_fixed(920);
+		vbios.mid4_sclk = bw_int_to_fixed(940);
+		vbios.mid5_sclk = bw_int_to_fixed(960);
+		vbios.mid6_sclk = bw_int_to_fixed(980);
+		vbios.high_sclk = bw_int_to_fixed(1049);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(48);
+		if (vbios.number_of_dram_channels == 2) // 64-bit
+			vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+		else
+			vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+		vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+		vbios.nbp_state_change_latency = bw_int_to_fixed(250);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = false;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+		vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+		dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+		dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+		dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 5;
+		dceip.number_of_underlay_pipes = 0;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = true;
+		dceip.argb_compression_support = true;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 4;
+		dceip.graphics_dmif_size = 12288;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = true;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(1);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+		break;
 	case BW_CALCS_VERSION_STONEY:
 		vbios.memory_type = bw_def_gddr5;
 		vbios.dram_channel_width_in_bits = 64;
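
The new table leans on two constructors throughout: bw_int_to_fixed() for whole numbers and bw_frc_to_fixed(numerator, denominator) for fractions. A sketch of both, under the same assumed 24-bit layout as the earlier bw_fixed sketch (the real helpers also round and handle sign):

#include <stdint.h>

#define EX_FRAC_BITS 24	/* assumed fractional width, as before */

static int64_t ex_int_to_fixed(int64_t v)
{
	return v << EX_FRAC_BITS;
}

static int64_t ex_frc_to_fixed(int64_t num, int64_t den)
{
	return (num << EX_FRAC_BITS) / den;	/* scaled division */
}

/* ex_frc_to_fixed(105, 100) encodes the 1.05 dispclk ramping factor
 * above; ex_frc_to_fixed(5, 10) the 0.5 down-spread percentage. */
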
@@ -2815,6 +2942,19 @@ static void populate_initial_data(
 			data->bytes_per_pixel[num_displays + 4] = 4;
 			break;
 		}
+	} else if (pipe[i].stream->dst.width != 0 &&
+			pipe[i].stream->dst.height != 0 &&
+			pipe[i].stream->src.width != 0 &&
+			pipe[i].stream->src.height != 0) {
+		data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.width);
+		data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+		data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.height);
+		data->h_taps[num_displays + 4] = pipe[i].stream->src.width == pipe[i].stream->dst.width ? bw_int_to_fixed(1) : bw_int_to_fixed(2);
+		data->v_taps[num_displays + 4] = pipe[i].stream->src.height == pipe[i].stream->dst.height ? bw_int_to_fixed(1) : bw_int_to_fixed(2);
+		data->h_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.width, pipe[i].stream->dst.width);
+		data->v_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.height, pipe[i].stream->dst.height);
+		data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+		data->bytes_per_pixel[num_displays + 4] = 4;
 	} else {
 		data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable);
 		data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
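
The new branch covers streams that carry valid src/dst rectangles but no plane data: taps collapse to 1 when no scaling is needed, otherwise 2, and each ratio is src/dst. Restated standalone (hypothetical helper; double is used only for illustration where the driver keeps bw_fixed):

struct ex_scale {
	int taps;	/* 1 when unscaled, 2 otherwise */
	double ratio;	/* src/dst; > 1.0 means downscaling */
};

static struct ex_scale ex_scale_params(int src, int dst)
{
	struct ex_scale s;

	s.taps = (src == dst) ? 1 : 2;
	s.ratio = (double)src / dst;
	return s;
}

/* e.g. ex_scale_params(1920, 1280) yields taps = 2, ratio = 1.5 */
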
@@ -2873,6 +3013,11 @@ bool bw_calcs(struct dc_context *ctx,
 	struct bw_fixed mid_yclk = vbios->mid_yclk;
 	struct bw_fixed low_yclk = vbios->low_yclk;
 
+	if (ctx->dc->debug.bandwidth_calcs_trace) {
+		print_bw_calcs_dceip(ctx->logger, dceip);
+		print_bw_calcs_vbios(ctx->logger, vbios);
+		print_bw_calcs_data(ctx->logger, data);
+	}
 	calculate_bandwidth(dceip, vbios, data);
 
 	yclk_lvl = data->y_clk_level;
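
The dump is gated on a per-device debug option rather than a compile-time switch, so a display manager build can flip it when constructing DC. Illustrative sketch only; the field name matches the check above, the surrounding code is assumed:

/* Sketch: enable the bandwidth-calcs dump before the next recalc. */
static void ex_enable_bw_trace(struct dc *dc)
{
	dc->debug.bandwidth_calcs_trace = true;
}
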
@@ -2968,7 +3113,33 @@ bool bw_calcs(struct dc_context *ctx,
 		bw_fixed_to_int(bw_mul(data->
 			stutter_exit_watermark[9], bw_int_to_fixed(1000)));
 
-
+	calcs_output->stutter_entry_wm_ns[0].a_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+	calcs_output->stutter_entry_wm_ns[1].a_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+	calcs_output->stutter_entry_wm_ns[2].a_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+	if (ctx->dc->caps.max_slave_planes) {
+		calcs_output->stutter_entry_wm_ns[3].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[4].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+	} else {
+		calcs_output->stutter_entry_wm_ns[3].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[4].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+	}
+	calcs_output->stutter_entry_wm_ns[5].a_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[9], bw_int_to_fixed(1000)));
 
 	calcs_output->urgent_wm_ns[0].a_mark =
 		bw_fixed_to_int(bw_mul(data->
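
The source indices follow the surface layout used by populate_initial_data(): rows 4..9 hold regular displays (hence the num_displays + 4 offset seen earlier), while the low rows are reserved for underlay/slave planes. The slot selection for wm_ns[0..5], restated as a hypothetical helper:

#include <stdbool.h>

/* Returns which stutter_entry_watermark[] row feeds wm_ns[wm_slot]. */
static int ex_wm_src_row(bool has_slave_planes, int wm_slot)
{
	if (wm_slot < 3)
		return wm_slot + 4;	/* always display rows 4..6 */
	if (wm_slot == 5)
		return 9;		/* last display row */
	/* slots 3..4 pull from underlay rows 0..1 when slave planes exist */
	return has_slave_planes ? wm_slot - 3 : wm_slot + 4;
}

The bw_mul(..., bw_int_to_fixed(1000)) pattern converts the microsecond-scale watermark into the integer nanosecond value the _wm_ns naming implies.
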
@@ -3063,7 +3234,33 @@ bool bw_calcs(struct dc_context *ctx,
 		bw_fixed_to_int(bw_mul(data->
 			stutter_exit_watermark[9], bw_int_to_fixed(1000)));
 
-
+	calcs_output->stutter_entry_wm_ns[0].b_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+	calcs_output->stutter_entry_wm_ns[1].b_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+	calcs_output->stutter_entry_wm_ns[2].b_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+	if (ctx->dc->caps.max_slave_planes) {
+		calcs_output->stutter_entry_wm_ns[3].b_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[4].b_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+	} else {
+		calcs_output->stutter_entry_wm_ns[3].b_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[4].b_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+	}
+	calcs_output->stutter_entry_wm_ns[5].b_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[9], bw_int_to_fixed(1000)));
 
 	calcs_output->urgent_wm_ns[0].b_mark =
 		bw_fixed_to_int(bw_mul(data->
@@ -3156,6 +3353,34 @@ bool bw_calcs(struct dc_context *ctx,
 		bw_fixed_to_int(bw_mul(data->
 			stutter_exit_watermark[9], bw_int_to_fixed(1000)));
 
+	calcs_output->stutter_entry_wm_ns[0].c_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+	calcs_output->stutter_entry_wm_ns[1].c_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+	calcs_output->stutter_entry_wm_ns[2].c_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+	if (ctx->dc->caps.max_slave_planes) {
+		calcs_output->stutter_entry_wm_ns[3].c_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[4].c_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+	} else {
+		calcs_output->stutter_entry_wm_ns[3].c_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[4].c_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+	}
+	calcs_output->stutter_entry_wm_ns[5].c_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[9], bw_int_to_fixed(1000)));
+
 	calcs_output->urgent_wm_ns[0].c_mark =
 		bw_fixed_to_int(bw_mul(data->
 			urgent_watermark[4], bw_int_to_fixed(1000)));
@@ -3260,6 +3485,33 @@ bool bw_calcs(struct dc_context *ctx,
 		bw_fixed_to_int(bw_mul(data->
 			stutter_exit_watermark[9], bw_int_to_fixed(1000)));
 
+	calcs_output->stutter_entry_wm_ns[0].d_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+	calcs_output->stutter_entry_wm_ns[1].d_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+	calcs_output->stutter_entry_wm_ns[2].d_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+	if (ctx->dc->caps.max_slave_planes) {
+		calcs_output->stutter_entry_wm_ns[3].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[4].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+	} else {
+		calcs_output->stutter_entry_wm_ns[3].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[4].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+	}
+	calcs_output->stutter_entry_wm_ns[5].d_mark =
+		bw_fixed_to_int(bw_mul(data->
+			stutter_entry_watermark[9], bw_int_to_fixed(1000)));
 
 	calcs_output->urgent_wm_ns[0].d_mark =
 		bw_fixed_to_int(bw_mul(data->
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 4bb43a371292..a102c192328d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1459,39 +1459,39 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
 void dcn_bw_sync_calcs_and_dml(struct dc *dc)
 {
 	kernel_fpu_begin();
-	DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %d ns\n"
-			"sr_enter_plus_exit_time: %d ns\n"
-			"urgent_latency: %d ns\n"
-			"write_back_latency: %d ns\n"
-			"percent_of_ideal_drambw_received_after_urg_latency: %d %\n"
+	DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
+			"sr_enter_plus_exit_time: %f ns\n"
+			"urgent_latency: %f ns\n"
+			"write_back_latency: %f ns\n"
+			"percent_of_ideal_drambw_received_after_urg_latency: %f %%\n"
 			"max_request_size: %d bytes\n"
-			"dcfclkv_max0p9: %d kHz\n"
-			"dcfclkv_nom0p8: %d kHz\n"
-			"dcfclkv_mid0p72: %d kHz\n"
-			"dcfclkv_min0p65: %d kHz\n"
-			"max_dispclk_vmax0p9: %d kHz\n"
-			"max_dispclk_vnom0p8: %d kHz\n"
-			"max_dispclk_vmid0p72: %d kHz\n"
-			"max_dispclk_vmin0p65: %d kHz\n"
-			"max_dppclk_vmax0p9: %d kHz\n"
-			"max_dppclk_vnom0p8: %d kHz\n"
-			"max_dppclk_vmid0p72: %d kHz\n"
-			"max_dppclk_vmin0p65: %d kHz\n"
-			"socclk: %d kHz\n"
-			"fabric_and_dram_bandwidth_vmax0p9: %d MB/s\n"
-			"fabric_and_dram_bandwidth_vnom0p8: %d MB/s\n"
-			"fabric_and_dram_bandwidth_vmid0p72: %d MB/s\n"
-			"fabric_and_dram_bandwidth_vmin0p65: %d MB/s\n"
-			"phyclkv_max0p9: %d kHz\n"
-			"phyclkv_nom0p8: %d kHz\n"
-			"phyclkv_mid0p72: %d kHz\n"
-			"phyclkv_min0p65: %d kHz\n"
-			"downspreading: %d %\n"
+			"dcfclkv_max0p9: %f kHz\n"
+			"dcfclkv_nom0p8: %f kHz\n"
+			"dcfclkv_mid0p72: %f kHz\n"
+			"dcfclkv_min0p65: %f kHz\n"
+			"max_dispclk_vmax0p9: %f kHz\n"
+			"max_dispclk_vnom0p8: %f kHz\n"
+			"max_dispclk_vmid0p72: %f kHz\n"
+			"max_dispclk_vmin0p65: %f kHz\n"
+			"max_dppclk_vmax0p9: %f kHz\n"
+			"max_dppclk_vnom0p8: %f kHz\n"
+			"max_dppclk_vmid0p72: %f kHz\n"
+			"max_dppclk_vmin0p65: %f kHz\n"
+			"socclk: %f kHz\n"
+			"fabric_and_dram_bandwidth_vmax0p9: %f MB/s\n"
+			"fabric_and_dram_bandwidth_vnom0p8: %f MB/s\n"
+			"fabric_and_dram_bandwidth_vmid0p72: %f MB/s\n"
+			"fabric_and_dram_bandwidth_vmin0p65: %f MB/s\n"
+			"phyclkv_max0p9: %f kHz\n"
+			"phyclkv_nom0p8: %f kHz\n"
+			"phyclkv_mid0p72: %f kHz\n"
+			"phyclkv_min0p65: %f kHz\n"
+			"downspreading: %f %%\n"
 			"round_trip_ping_latency_cycles: %d DCFCLK Cycles\n"
 			"urgent_out_of_order_return_per_channel: %d Bytes\n"
 			"number_of_channels: %d\n"
 			"vmm_page_size: %d Bytes\n"
-			"dram_clock_change_latency: %d ns\n"
+			"dram_clock_change_latency: %f ns\n"
 			"return_bus_width: %d Bytes\n",
 			dc->dcn_soc->sr_exit_time * 1000,
 			dc->dcn_soc->sr_enter_plus_exit_time * 1000,
@@ -1527,11 +1527,11 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
 			dc->dcn_soc->vmm_page_size,
 			dc->dcn_soc->dram_clock_change_latency * 1000,
 			dc->dcn_soc->return_bus_width);
-	DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %d\n"
-			"det_buffer_size_in_kbyte: %d\n"
-			"dpp_output_buffer_pixels: %d\n"
-			"opp_output_buffer_lines: %d\n"
-			"pixel_chunk_size_in_kbyte: %d\n"
+	DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %f\n"
+			"det_buffer_size_in_kbyte: %f\n"
+			"dpp_output_buffer_pixels: %f\n"
+			"opp_output_buffer_lines: %f\n"
+			"pixel_chunk_size_in_kbyte: %f\n"
 			"pte_enable: %d\n"
 			"pte_chunk_size: %d kbytes\n"
 			"meta_chunk_size: %d kbytes\n"
@@ -1550,13 +1550,13 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
1550 "max_pscl_tolb_throughput: %d pixels/dppclk\n" 1550 "max_pscl_tolb_throughput: %d pixels/dppclk\n"
1551 "max_lb_tovscl_throughput: %d pixels/dppclk\n" 1551 "max_lb_tovscl_throughput: %d pixels/dppclk\n"
1552 "max_vscl_tohscl_throughput: %d pixels/dppclk\n" 1552 "max_vscl_tohscl_throughput: %d pixels/dppclk\n"
1553 "max_hscl_ratio: %d\n" 1553 "max_hscl_ratio: %f\n"
1554 "max_vscl_ratio: %d\n" 1554 "max_vscl_ratio: %f\n"
1555 "max_hscl_taps: %d\n" 1555 "max_hscl_taps: %d\n"
1556 "max_vscl_taps: %d\n" 1556 "max_vscl_taps: %d\n"
1557 "pte_buffer_size_in_requests: %d\n" 1557 "pte_buffer_size_in_requests: %d\n"
1558 "dispclk_ramping_margin: %d %\n" 1558 "dispclk_ramping_margin: %f %%\n"
1559 "under_scan_factor: %d %\n" 1559 "under_scan_factor: %f %%\n"
1560 "max_inter_dcn_tile_repeaters: %d\n" 1560 "max_inter_dcn_tile_repeaters: %d\n"
1561 "can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n" 1561 "can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n"
1562 "bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n" 1562 "bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n"
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 9cd3566def8d..644b2187507b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -936,95 +936,6 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 	return true;
 }
 
-/*
- * TODO this whole function needs to go
- *
- * dc_surface_update is needlessly complex. See if we can just replace this
- * with a dc_plane_state and follow the atomic model a bit more closely here.
- */
-bool dc_commit_planes_to_stream(
-		struct dc *dc,
-		struct dc_plane_state **plane_states,
-		uint8_t new_plane_count,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *state)
-{
-	/* no need to dynamically allocate this. it's pretty small */
-	struct dc_surface_update updates[MAX_SURFACES];
-	struct dc_flip_addrs *flip_addr;
-	struct dc_plane_info *plane_info;
-	struct dc_scaling_info *scaling_info;
-	int i;
-	struct dc_stream_update *stream_update =
-			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
-
-	if (!stream_update) {
-		BREAK_TO_DEBUGGER();
-		return false;
-	}
-
-	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
-			GFP_KERNEL);
-	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
-			GFP_KERNEL);
-	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
-			GFP_KERNEL);
-
-	if (!flip_addr || !plane_info || !scaling_info) {
-		kfree(flip_addr);
-		kfree(plane_info);
-		kfree(scaling_info);
-		kfree(stream_update);
-		return false;
-	}
-
-	memset(updates, 0, sizeof(updates));
-
-	stream_update->src = dc_stream->src;
-	stream_update->dst = dc_stream->dst;
-	stream_update->out_transfer_func = dc_stream->out_transfer_func;
-
-	for (i = 0; i < new_plane_count; i++) {
-		updates[i].surface = plane_states[i];
-		updates[i].gamma =
-			(struct dc_gamma *)plane_states[i]->gamma_correction;
-		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
-		flip_addr[i].address = plane_states[i]->address;
-		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
-		plane_info[i].color_space = plane_states[i]->color_space;
-		plane_info[i].input_tf = plane_states[i]->input_tf;
-		plane_info[i].format = plane_states[i]->format;
-		plane_info[i].plane_size = plane_states[i]->plane_size;
-		plane_info[i].rotation = plane_states[i]->rotation;
-		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
-		plane_info[i].stereo_format = plane_states[i]->stereo_format;
-		plane_info[i].tiling_info = plane_states[i]->tiling_info;
-		plane_info[i].visible = plane_states[i]->visible;
-		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
-		plane_info[i].dcc = plane_states[i]->dcc;
-		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
-		scaling_info[i].src_rect = plane_states[i]->src_rect;
-		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
-		scaling_info[i].clip_rect = plane_states[i]->clip_rect;
-
-		updates[i].flip_addr = &flip_addr[i];
-		updates[i].plane_info = &plane_info[i];
-		updates[i].scaling_info = &scaling_info[i];
-	}
-
-	dc_commit_updates_for_stream(
-			dc,
-			updates,
-			new_plane_count,
-			dc_stream, stream_update, plane_states, state);
-
-	kfree(flip_addr);
-	kfree(plane_info);
-	kfree(scaling_info);
-	kfree(stream_update);
-	return true;
-}
-
 struct dc_state *dc_create_state(void)
 {
 	struct dc_state *context = kzalloc(sizeof(struct dc_state),
@@ -1107,9 +1018,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
1107 if (u->plane_info->color_space != u->surface->color_space) 1018 if (u->plane_info->color_space != u->surface->color_space)
1108 update_flags->bits.color_space_change = 1; 1019 update_flags->bits.color_space_change = 1;
1109 1020
1110 if (u->plane_info->input_tf != u->surface->input_tf)
1111 update_flags->bits.input_tf_change = 1;
1112
1113 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) 1021 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
1114 update_flags->bits.horizontal_mirror_change = 1; 1022 update_flags->bits.horizontal_mirror_change = 1;
1115 1023
@@ -1243,12 +1151,20 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
1243 if (u->input_csc_color_matrix) 1151 if (u->input_csc_color_matrix)
1244 update_flags->bits.input_csc_change = 1; 1152 update_flags->bits.input_csc_change = 1;
1245 1153
1246 if (update_flags->bits.in_transfer_func_change 1154 if (u->coeff_reduction_factor)
1247 || update_flags->bits.input_csc_change) { 1155 update_flags->bits.coeff_reduction_change = 1;
1156
1157 if (update_flags->bits.in_transfer_func_change) {
1248 type = UPDATE_TYPE_MED; 1158 type = UPDATE_TYPE_MED;
1249 elevate_update_type(&overall_type, type); 1159 elevate_update_type(&overall_type, type);
1250 } 1160 }
1251 1161
1162 if (update_flags->bits.input_csc_change
1163 || update_flags->bits.coeff_reduction_change) {
1164 type = UPDATE_TYPE_FULL;
1165 elevate_update_type(&overall_type, type);
1166 }
1167
1252 return overall_type; 1168 return overall_type;
1253} 1169}
1254 1170
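Editor's note: the two hunks above change how surface updates are classified: an in-transfer-func change stays UPDATE_TYPE_MED, while an input CSC or coefficient-reduction change is now promoted to UPDATE_TYPE_FULL via elevate_update_type(). A minimal standalone sketch of that helper, assuming the enum values are ordered FAST < MED < FULL (names mirror the dc code; this is illustrative, not the exact kernel definition):

#include <stdio.h>

enum surface_update_type {
	UPDATE_TYPE_FAST,
	UPDATE_TYPE_MED,
	UPDATE_TYPE_FULL,
};

/* Keep whichever update type is more expensive. */
static void elevate_update_type(enum surface_update_type *original,
				enum surface_update_type new_type)
{
	if (new_type > *original)
		*original = new_type;
}

int main(void)
{
	enum surface_update_type overall = UPDATE_TYPE_FAST;

	elevate_update_type(&overall, UPDATE_TYPE_MED);
	elevate_update_type(&overall, UPDATE_TYPE_FULL);
	elevate_update_type(&overall, UPDATE_TYPE_MED);   /* no downgrade */
	printf("overall = %d\n", overall);                /* 2 == FULL */
	return 0;
}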
@@ -1297,7 +1213,7 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
1297 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); 1213 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
1298 if (type == UPDATE_TYPE_FULL) 1214 if (type == UPDATE_TYPE_FULL)
1299 for (i = 0; i < surface_count; i++) 1215 for (i = 0; i < surface_count; i++)
1300 updates[i].surface->update_flags.bits.full_update = 1; 1216 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
1301 1217
1302 return type; 1218 return type;
1303} 1219}
@@ -1375,6 +1291,12 @@ static void commit_planes_for_stream(struct dc *dc,
1375 pipe_ctx->stream_res.abm->funcs->set_abm_level( 1291 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1376 pipe_ctx->stream_res.abm, stream->abm_level); 1292 pipe_ctx->stream_res.abm, stream->abm_level);
1377 } 1293 }
1294
1295 if (stream_update && stream_update->periodic_fn_vsync_delta &&
1296 pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
1297 pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
1298 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
1299 pipe_ctx->stream->periodic_fn_vsync_delta);
1378 } 1300 }
1379 } 1301 }
1380 1302
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 5a552cb3f8a7..267c76766dea 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -36,8 +36,9 @@
36#include "hw_sequencer.h" 36#include "hw_sequencer.h"
37 37
38#include "resource.h" 38#include "resource.h"
39#define DC_LOGGER \ 39
40 logger 40#define DC_LOGGER_INIT(logger)
41
41 42
42#define SURFACE_TRACE(...) do {\ 43#define SURFACE_TRACE(...) do {\
43 if (dc->debug.surface_trace) \ 44 if (dc->debug.surface_trace) \
@@ -60,8 +61,7 @@ void pre_surface_trace(
60 int surface_count) 61 int surface_count)
61{ 62{
62 int i; 63 int i;
63 struct dc *core_dc = dc; 64 DC_LOGGER_INIT(dc->ctx->logger);
64 struct dal_logger *logger = core_dc->ctx->logger;
65 65
66 for (i = 0; i < surface_count; i++) { 66 for (i = 0; i < surface_count; i++) {
67 const struct dc_plane_state *plane_state = plane_states[i]; 67 const struct dc_plane_state *plane_state = plane_states[i];
@@ -72,8 +72,8 @@ void pre_surface_trace(
72 "plane_state->visible = %d;\n" 72 "plane_state->visible = %d;\n"
73 "plane_state->flip_immediate = %d;\n" 73 "plane_state->flip_immediate = %d;\n"
74 "plane_state->address.type = %d;\n" 74 "plane_state->address.type = %d;\n"
75 "plane_state->address.grph.addr.quad_part = 0x%X;\n" 75 "plane_state->address.grph.addr.quad_part = 0x%llX;\n"
76 "plane_state->address.grph.meta_addr.quad_part = 0x%X;\n" 76 "plane_state->address.grph.meta_addr.quad_part = 0x%llX;\n"
77 "plane_state->scaling_quality.h_taps = %d;\n" 77 "plane_state->scaling_quality.h_taps = %d;\n"
78 "plane_state->scaling_quality.v_taps = %d;\n" 78 "plane_state->scaling_quality.v_taps = %d;\n"
79 "plane_state->scaling_quality.h_taps_c = %d;\n" 79 "plane_state->scaling_quality.h_taps_c = %d;\n"
@@ -155,7 +155,6 @@ void pre_surface_trace(
155 "plane_state->tiling_info.gfx8.pipe_config = %d;\n" 155 "plane_state->tiling_info.gfx8.pipe_config = %d;\n"
156 "plane_state->tiling_info.gfx8.array_mode = %d;\n" 156 "plane_state->tiling_info.gfx8.array_mode = %d;\n"
157 "plane_state->color_space = %d;\n" 157 "plane_state->color_space = %d;\n"
158 "plane_state->input_tf = %d;\n"
159 "plane_state->dcc.enable = %d;\n" 158 "plane_state->dcc.enable = %d;\n"
160 "plane_state->format = %d;\n" 159 "plane_state->format = %d;\n"
161 "plane_state->rotation = %d;\n" 160 "plane_state->rotation = %d;\n"
@@ -163,7 +162,6 @@ void pre_surface_trace(
163 plane_state->tiling_info.gfx8.pipe_config, 162 plane_state->tiling_info.gfx8.pipe_config,
164 plane_state->tiling_info.gfx8.array_mode, 163 plane_state->tiling_info.gfx8.array_mode,
165 plane_state->color_space, 164 plane_state->color_space,
166 plane_state->input_tf,
167 plane_state->dcc.enable, 165 plane_state->dcc.enable,
168 plane_state->format, 166 plane_state->format,
169 plane_state->rotation, 167 plane_state->rotation,
@@ -183,8 +181,7 @@ void update_surface_trace(
183 int surface_count) 181 int surface_count)
184{ 182{
185 int i; 183 int i;
186 struct dc *core_dc = dc; 184 DC_LOGGER_INIT(dc->ctx->logger);
187 struct dal_logger *logger = core_dc->ctx->logger;
188 185
189 for (i = 0; i < surface_count; i++) { 186 for (i = 0; i < surface_count; i++) {
190 const struct dc_surface_update *update = &updates[i]; 187 const struct dc_surface_update *update = &updates[i];
@@ -192,8 +189,8 @@ void update_surface_trace(
192 SURFACE_TRACE("Update %d\n", i); 189 SURFACE_TRACE("Update %d\n", i);
193 if (update->flip_addr) { 190 if (update->flip_addr) {
194 SURFACE_TRACE("flip_addr->address.type = %d;\n" 191 SURFACE_TRACE("flip_addr->address.type = %d;\n"
195 "flip_addr->address.grph.addr.quad_part = 0x%X;\n" 192 "flip_addr->address.grph.addr.quad_part = 0x%llX;\n"
196 "flip_addr->address.grph.meta_addr.quad_part = 0x%X;\n" 193 "flip_addr->address.grph.meta_addr.quad_part = 0x%llX;\n"
197 "flip_addr->flip_immediate = %d;\n", 194 "flip_addr->flip_immediate = %d;\n",
198 update->flip_addr->address.type, 195 update->flip_addr->address.type,
199 update->flip_addr->address.grph.addr.quad_part, 196 update->flip_addr->address.grph.addr.quad_part,
@@ -204,16 +201,15 @@ void update_surface_trace(
204 if (update->plane_info) { 201 if (update->plane_info) {
205 SURFACE_TRACE( 202 SURFACE_TRACE(
206 "plane_info->color_space = %d;\n" 203 "plane_info->color_space = %d;\n"
207 "plane_info->input_tf = %d;\n"
208 "plane_info->format = %d;\n" 204 "plane_info->format = %d;\n"
209 "plane_info->plane_size.grph.surface_pitch = %d;\n" 205 "plane_info->plane_size.grph.surface_pitch = %d;\n"
210 "plane_info->plane_size.grph.surface_size.height = %d;\n" 206 "plane_info->plane_size.grph.surface_size.height = %d;\n"
211 "plane_info->plane_size.grph.surface_size.width = %d;\n" 207 "plane_info->plane_size.grph.surface_size.width = %d;\n"
212 "plane_info->plane_size.grph.surface_size.x = %d;\n" 208 "plane_info->plane_size.grph.surface_size.x = %d;\n"
213 "plane_info->plane_size.grph.surface_size.y = %d;\n" 209 "plane_info->plane_size.grph.surface_size.y = %d;\n"
214 "plane_info->rotation = %d;\n", 210 "plane_info->rotation = %d;\n"
211 "plane_info->stereo_format = %d;\n",
215 update->plane_info->color_space, 212 update->plane_info->color_space,
216 update->plane_info->input_tf,
217 update->plane_info->format, 213 update->plane_info->format,
218 update->plane_info->plane_size.grph.surface_pitch, 214 update->plane_info->plane_size.grph.surface_pitch,
219 update->plane_info->plane_size.grph.surface_size.height, 215 update->plane_info->plane_size.grph.surface_size.height,
@@ -303,8 +299,7 @@ void update_surface_trace(
303 299
304void post_surface_trace(struct dc *dc) 300void post_surface_trace(struct dc *dc)
305{ 301{
306 struct dc *core_dc = dc; 302 DC_LOGGER_INIT(dc->ctx->logger);
307 struct dal_logger *logger = core_dc->ctx->logger;
308 303
309 SURFACE_TRACE("post surface process.\n"); 304 SURFACE_TRACE("post surface process.\n");
310 305
@@ -316,10 +311,10 @@ void context_timing_trace(
316{ 311{
317 int i; 312 int i;
318 struct dc *core_dc = dc; 313 struct dc *core_dc = dc;
319 struct dal_logger *logger = core_dc->ctx->logger;
320 int h_pos[MAX_PIPES], v_pos[MAX_PIPES]; 314 int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
321 struct crtc_position position; 315 struct crtc_position position;
322 unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; 316 unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
317 DC_LOGGER_INIT(dc->ctx->logger);
323 318
324 319
325 for (i = 0; i < core_dc->res_pool->pipe_count; i++) { 320 for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
@@ -354,9 +349,7 @@ void context_clock_trace(
354 struct dc_state *context) 349 struct dc_state *context)
355{ 350{
356#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 351#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
357 struct dc *core_dc = dc; 352 DC_LOGGER_INIT(dc->ctx->logger);
358 struct dal_logger *logger = core_dc->ctx->logger;
359
360 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" 353 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
361 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", 354 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
362 context->bw.dcn.calc_clk.dispclk_khz, 355 context->bw.dcn.calc_clk.dispclk_khz,
@@ -371,6 +364,7 @@ void context_clock_trace(
371 context->bw.dcn.calc_clk.dppclk_khz, 364 context->bw.dcn.calc_clk.dppclk_khz,
372 context->bw.dcn.calc_clk.dcfclk_khz, 365 context->bw.dcn.calc_clk.dcfclk_khz,
373 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, 366 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
374 context->bw.dcn.calc_clk.fclk_khz); 367 context->bw.dcn.calc_clk.fclk_khz,
368 context->bw.dcn.calc_clk.socclk_khz);
375#endif 369#endif
376} 370}
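Editor's note: this file (and dc_link.c / dc_resource.c below) replaces hand-declared `struct dal_logger *logger` locals with a DC_LOGGER_INIT() macro. In the hunks above the macro body is empty, so the DC_LOG_* helpers resolve the logger elsewhere; the following is a hypothetical variant that does bind a local, only to show the intent of the pattern (struct and macro names are stand-ins, not the real dc headers):

#include <stdio.h>

struct dal_logger { const char *tag; };
struct dc_context { struct dal_logger *logger; };

/* Bind the logger once per function so the log macros below need no
 * hand-written local variable. */
#define DC_LOGGER_INIT(l) struct dal_logger *dc_logger = (l)
#define DC_LOG(fmt, ...) printf("[%s] " fmt, dc_logger->tag, ##__VA_ARGS__)

static void example_trace(struct dc_context *ctx)
{
	DC_LOGGER_INIT(ctx->logger);
	DC_LOG("surface trace start\n");
}

int main(void)
{
	struct dal_logger log = { "dc" };
	struct dc_context ctx = { &log };

	example_trace(&ctx);
	return 0;
}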
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index ebc96b720083..83d121510ef5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -208,6 +208,7 @@ void color_space_to_black_color(
208 case COLOR_SPACE_YCBCR709: 208 case COLOR_SPACE_YCBCR709:
209 case COLOR_SPACE_YCBCR601_LIMITED: 209 case COLOR_SPACE_YCBCR601_LIMITED:
210 case COLOR_SPACE_YCBCR709_LIMITED: 210 case COLOR_SPACE_YCBCR709_LIMITED:
211 case COLOR_SPACE_2020_YCBCR:
211 *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV]; 212 *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
212 break; 213 break;
213 214
@@ -216,7 +217,25 @@ void color_space_to_black_color(
216 black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED]; 217 black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
217 break; 218 break;
218 219
219 default: 220 /**
 221 * Remove the default case and list every color space
 222 * explicitly, so the compiler warns when a new color
 223 * space is added but not handled here.
 224 */
225 case COLOR_SPACE_UNKNOWN:
226 case COLOR_SPACE_SRGB:
227 case COLOR_SPACE_XR_RGB:
228 case COLOR_SPACE_MSREF_SCRGB:
229 case COLOR_SPACE_XV_YCC_709:
230 case COLOR_SPACE_XV_YCC_601:
231 case COLOR_SPACE_2020_RGB_FULLRANGE:
232 case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
233 case COLOR_SPACE_ADOBERGB:
234 case COLOR_SPACE_DCIP3:
235 case COLOR_SPACE_DISPLAYNATIVE:
236 case COLOR_SPACE_DOLBYVISION:
237 case COLOR_SPACE_APPCTRL:
238 case COLOR_SPACE_CUSTOMPOINTS:
220 /* default is sRGB black (full range). */ 239 /* default is sRGB black (full range). */
221 *black_color = 240 *black_color =
222 black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE]; 241 black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];
@@ -230,6 +249,9 @@ bool hwss_wait_for_blank_complete(
230{ 249{
231 int counter; 250 int counter;
232 251
252 /* Not applicable if the pipe is not primary; saves 300ms of boot time */
253 if (!tg->funcs->is_blanked)
254 return true;
233 for (counter = 0; counter < 100; counter++) { 255 for (counter = 0; counter < 100; counter++) {
234 if (tg->funcs->is_blanked(tg)) 256 if (tg->funcs->is_blanked(tg))
235 break; 257 break;
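Editor's note: the color_space_to_black_color() hunk above deliberately drops the default label and enumerates every color space, so that -Wswitch (part of -Wall on GCC and Clang) flags any enumerator added later but not handled; a default label would swallow the warning. A standalone illustration of that trick:

#include <stdio.h>

enum color_space { CS_SRGB, CS_YCBCR709 /* adding CS_NEW here warns below */ };

static const char *black_for(enum color_space cs)
{
	switch (cs) {           /* no default: the compiler can see gaps */
	case CS_SRGB:
		return "rgb-full";
	case CS_YCBCR709:
		return "yuv-cv";
	}
	return "rgb-full";      /* unreachable fallback keeps -Wreturn-type quiet */
}

int main(void)
{
	printf("%s\n", black_for(CS_YCBCR709));
	return 0;
}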
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 6d1c4981a185..b44cf52090a5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,8 +45,9 @@
45#include "dce/dce_11_0_d.h" 45#include "dce/dce_11_0_d.h"
46#include "dce/dce_11_0_enum.h" 46#include "dce/dce_11_0_enum.h"
47#include "dce/dce_11_0_sh_mask.h" 47#include "dce/dce_11_0_sh_mask.h"
48#define DC_LOGGER \ 48
49 dc_ctx->logger 49#define DC_LOGGER_INIT(logger)
50
50 51
51#define LINK_INFO(...) \ 52#define LINK_INFO(...) \
52 DC_LOG_HW_HOTPLUG( \ 53 DC_LOG_HW_HOTPLUG( \
@@ -561,7 +562,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
561 struct dc_context *dc_ctx = link->ctx; 562 struct dc_context *dc_ctx = link->ctx;
562 struct dc_sink *sink = NULL; 563 struct dc_sink *sink = NULL;
563 enum dc_connection_type new_connection_type = dc_connection_none; 564 enum dc_connection_type new_connection_type = dc_connection_none;
564 565 DC_LOGGER_INIT(link->ctx->logger);
565 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) 566 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
566 return false; 567 return false;
567 568
@@ -927,6 +928,7 @@ static bool construct(
927 struct integrated_info info = {{{ 0 }}}; 928 struct integrated_info info = {{{ 0 }}};
928 struct dc_bios *bios = init_params->dc->ctx->dc_bios; 929 struct dc_bios *bios = init_params->dc->ctx->dc_bios;
929 const struct dc_vbios_funcs *bp_funcs = bios->funcs; 930 const struct dc_vbios_funcs *bp_funcs = bios->funcs;
931 DC_LOGGER_INIT(dc_ctx->logger);
930 932
931 link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; 933 link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
932 link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; 934 link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
@@ -1135,7 +1137,8 @@ static void dpcd_configure_panel_mode(
1135{ 1137{
1136 union dpcd_edp_config edp_config_set; 1138 union dpcd_edp_config edp_config_set;
1137 bool panel_mode_edp = false; 1139 bool panel_mode_edp = false;
1138 struct dc_context *dc_ctx = link->ctx; 1140 DC_LOGGER_INIT(link->ctx->logger);
1141
1139 memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); 1142 memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
1140 1143
1141 if (DP_PANEL_MODE_DEFAULT != panel_mode) { 1144 if (DP_PANEL_MODE_DEFAULT != panel_mode) {
@@ -1183,16 +1186,21 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
1183{ 1186{
1184 struct dc_stream_state *stream = pipe_ctx->stream; 1187 struct dc_stream_state *stream = pipe_ctx->stream;
1185 struct dc_link *link = stream->sink->link; 1188 struct dc_link *link = stream->sink->link;
1186 union down_spread_ctrl downspread; 1189 union down_spread_ctrl old_downspread;
1190 union down_spread_ctrl new_downspread;
1187 1191
1188 core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, 1192 core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
1189 &downspread.raw, sizeof(downspread)); 1193 &old_downspread.raw, sizeof(old_downspread));
1190 1194
1191 downspread.bits.IGNORE_MSA_TIMING_PARAM = 1195 new_downspread.raw = old_downspread.raw;
1196
1197 new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
1192 (stream->ignore_msa_timing_param) ? 1 : 0; 1198 (stream->ignore_msa_timing_param) ? 1 : 0;
1193 1199
1194 core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, 1200 if (new_downspread.raw != old_downspread.raw) {
1195 &downspread.raw, sizeof(downspread)); 1201 core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
1202 &new_downspread.raw, sizeof(new_downspread));
1203 }
1196} 1204}
1197 1205
1198static enum dc_status enable_link_dp( 1206static enum dc_status enable_link_dp(
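Editor's note: enable_stream_features() above now reads DP_DOWNSPREAD_CTRL, edits a copy, and writes back only when the value actually changed, avoiding a redundant AUX transaction. The same read-modify-write shape in miniature (the register and bit position here are stand-ins, not the DPCD layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t fake_dpcd;                 /* stand-in for the sink register */
static int writes;

static void dpcd_read(uint8_t *v)  { *v = fake_dpcd; }
static void dpcd_write(uint8_t v)  { fake_dpcd = v; writes++; }

static void set_ignore_msa(bool ignore)
{
	uint8_t old_val, new_val;

	dpcd_read(&old_val);
	new_val = old_val;
	if (ignore)
		new_val |= 0x40;          /* hypothetical bit position */
	else
		new_val &= ~0x40;

	if (new_val != old_val)           /* skip the redundant AUX write */
		dpcd_write(new_val);
}

int main(void)
{
	set_ignore_msa(true);
	set_ignore_msa(true);             /* second call writes nothing */
	printf("reg=0x%02x writes=%d\n", fake_dpcd, writes);
	return 0;
}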
@@ -1843,9 +1851,22 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
1843 1851
1844static bool dp_active_dongle_validate_timing( 1852static bool dp_active_dongle_validate_timing(
1845 const struct dc_crtc_timing *timing, 1853 const struct dc_crtc_timing *timing,
1846 const struct dc_dongle_caps *dongle_caps) 1854 const struct dpcd_caps *dpcd_caps)
1847{ 1855{
1848 unsigned int required_pix_clk = timing->pix_clk_khz; 1856 unsigned int required_pix_clk = timing->pix_clk_khz;
1857 const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
1858
1859 switch (dpcd_caps->dongle_type) {
1860 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
1861 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
1862 case DISPLAY_DONGLE_DP_DVI_DONGLE:
1863 if (timing->pixel_encoding == PIXEL_ENCODING_RGB)
1864 return true;
1865 else
1866 return false;
1867 default:
1868 break;
1869 }
1849 1870
1850 if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || 1871 if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
1851 dongle_caps->extendedCapValid == false) 1872 dongle_caps->extendedCapValid == false)
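Editor's note: the new switch above lets DP-to-VGA/DVI converters pass validation only for RGB timings, presumably because those dongles cannot carry YCbCr; the HDMI-converter path keeps its existing checks below. Reduced to its shape (enum names are stand-ins):

#include <stdbool.h>
#include <stdio.h>

enum dongle { DONGLE_DP_HDMI, DONGLE_DP_DVI, DONGLE_DP_VGA };
enum pixenc { ENC_RGB, ENC_YCBCR422, ENC_YCBCR420 };

static bool dongle_timing_ok(enum dongle d, enum pixenc e)
{
	switch (d) {
	case DONGLE_DP_VGA:
	case DONGLE_DP_DVI:
		return e == ENC_RGB;      /* RGB only on these converters */
	default:
		return true;              /* HDMI converter validated later */
	}
}

int main(void)
{
	printf("dvi+rgb=%d dvi+420=%d\n",
	       dongle_timing_ok(DONGLE_DP_DVI, ENC_RGB),
	       dongle_timing_ok(DONGLE_DP_DVI, ENC_YCBCR420));
	return 0;
}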
@@ -1911,7 +1932,7 @@ enum dc_status dc_link_validate_mode_timing(
1911 const struct dc_crtc_timing *timing) 1932 const struct dc_crtc_timing *timing)
1912{ 1933{
1913 uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk; 1934 uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
1914 struct dc_dongle_caps *dongle_caps = &link->dpcd_caps.dongle_caps; 1935 struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
1915 1936
1916 /* A hack to avoid failing any modes for EDID override feature on 1937 /* A hack to avoid failing any modes for EDID override feature on
1917 * topology change such as lower quality cable for DP or different dongle 1938 * topology change such as lower quality cable for DP or different dongle
@@ -1924,7 +1945,7 @@ enum dc_status dc_link_validate_mode_timing(
1924 return DC_EXCEED_DONGLE_CAP; 1945 return DC_EXCEED_DONGLE_CAP;
1925 1946
1926 /* Active Dongle*/ 1947 /* Active Dongle*/
1927 if (!dp_active_dongle_validate_timing(timing, dongle_caps)) 1948 if (!dp_active_dongle_validate_timing(timing, dpcd_caps))
1928 return DC_EXCEED_DONGLE_CAP; 1949 return DC_EXCEED_DONGLE_CAP;
1929 1950
1930 switch (stream->signal) { 1951 switch (stream->signal) {
@@ -1950,10 +1971,10 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
1950 struct dc *core_dc = link->ctx->dc; 1971 struct dc *core_dc = link->ctx->dc;
1951 struct abm *abm = core_dc->res_pool->abm; 1972 struct abm *abm = core_dc->res_pool->abm;
1952 struct dmcu *dmcu = core_dc->res_pool->dmcu; 1973 struct dmcu *dmcu = core_dc->res_pool->dmcu;
1953 struct dc_context *dc_ctx = link->ctx;
1954 unsigned int controller_id = 0; 1974 unsigned int controller_id = 0;
1955 bool use_smooth_brightness = true; 1975 bool use_smooth_brightness = true;
1956 int i; 1976 int i;
1977 DC_LOGGER_INIT(link->ctx->logger);
1957 1978
1958 if ((dmcu == NULL) || 1979 if ((dmcu == NULL) ||
1959 (abm == NULL) || 1980 (abm == NULL) ||
@@ -1961,7 +1982,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
1961 return false; 1982 return false;
1962 1983
1963 if (stream) { 1984 if (stream) {
1964 if (stream->bl_pwm_level == 0) 1985 if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
1965 frame_ramp = 0; 1986 frame_ramp = 0;
1966 1987
1967 ((struct dc_stream_state *)stream)->bl_pwm_level = level; 1988 ((struct dc_stream_state *)stream)->bl_pwm_level = level;
@@ -2149,8 +2170,8 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
2149 struct fixed31_32 avg_time_slots_per_mtp; 2170 struct fixed31_32 avg_time_slots_per_mtp;
2150 struct fixed31_32 pbn; 2171 struct fixed31_32 pbn;
2151 struct fixed31_32 pbn_per_slot; 2172 struct fixed31_32 pbn_per_slot;
2152 struct dc_context *dc_ctx = link->ctx;
2153 uint8_t i; 2173 uint8_t i;
2174 DC_LOGGER_INIT(link->ctx->logger);
2154 2175
2155 /* enable_link_dp_mst already checks link->enabled_stream_count 2176
2156 * and stream is in link->stream[]. This is called during set mode, 2177 * and stream is in link->stream[]. This is called during set mode,
@@ -2178,11 +2199,11 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
2178 link->mst_stream_alloc_table.stream_count); 2199 link->mst_stream_alloc_table.stream_count);
2179 2200
2180 for (i = 0; i < MAX_CONTROLLER_NUM; i++) { 2201 for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
2181 DC_LOG_MST("stream_enc[%d]: 0x%x " 2202 DC_LOG_MST("stream_enc[%d]: %p "
2182 "stream[%d].vcp_id: %d " 2203 "stream[%d].vcp_id: %d "
2183 "stream[%d].slot_count: %d\n", 2204 "stream[%d].slot_count: %d\n",
2184 i, 2205 i,
2185 link->mst_stream_alloc_table.stream_allocations[i].stream_enc, 2206 (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
2186 i, 2207 i,
2187 link->mst_stream_alloc_table.stream_allocations[i].vcp_id, 2208 link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
2188 i, 2209 i,
@@ -2229,7 +2250,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
2229 struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0); 2250 struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
2230 uint8_t i; 2251 uint8_t i;
2231 bool mst_mode = (link->type == dc_connection_mst_branch); 2252 bool mst_mode = (link->type == dc_connection_mst_branch);
2232 struct dc_context *dc_ctx = link->ctx; 2253 DC_LOGGER_INIT(link->ctx->logger);
2233 2254
2234 /* deallocate_mst_payload is called before disabling the link. When the 2255
2235 * mode changes or a monitor is toggled, a new stream is created which is not in link 2256
@@ -2268,11 +2289,11 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
2268 link->mst_stream_alloc_table.stream_count); 2289 link->mst_stream_alloc_table.stream_count);
2269 2290
2270 for (i = 0; i < MAX_CONTROLLER_NUM; i++) { 2291 for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
2271 DC_LOG_MST("stream_enc[%d]: 0x%x " 2292 DC_LOG_MST("stream_enc[%d]: %p "
2272 "stream[%d].vcp_id: %d " 2293 "stream[%d].vcp_id: %d "
2273 "stream[%d].slot_count: %d\n", 2294 "stream[%d].slot_count: %d\n",
2274 i, 2295 i,
2275 link->mst_stream_alloc_table.stream_allocations[i].stream_enc, 2296 (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
2276 i, 2297 i,
2277 link->mst_stream_alloc_table.stream_allocations[i].vcp_id, 2298 link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
2278 i, 2299 i,
@@ -2302,8 +2323,8 @@ void core_link_enable_stream(
2302 struct pipe_ctx *pipe_ctx) 2323 struct pipe_ctx *pipe_ctx)
2303{ 2324{
2304 struct dc *core_dc = pipe_ctx->stream->ctx->dc; 2325 struct dc *core_dc = pipe_ctx->stream->ctx->dc;
2305 struct dc_context *dc_ctx = pipe_ctx->stream->ctx;
2306 enum dc_status status; 2326 enum dc_status status;
2327 DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
2307 2328
2308 /* eDP lit up by bios already, no need to enable again. */ 2329 /* eDP lit up by bios already, no need to enable again. */
2309 if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && 2330 if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 49c2face1e7a..ae48d603ebd6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data(
629 return ret; 629 return ret;
630} 630}
631 631
632ssize_t dal_ddc_service_read_dpcd_data( 632enum ddc_result dal_ddc_service_read_dpcd_data(
633 struct ddc_service *ddc, 633 struct ddc_service *ddc,
634 bool i2c, 634 bool i2c,
635 enum i2c_mot_mode mot, 635 enum i2c_mot_mode mot,
636 uint32_t address, 636 uint32_t address,
637 uint8_t *data, 637 uint8_t *data,
638 uint32_t len) 638 uint32_t len,
639 uint32_t *read)
639{ 640{
640 struct aux_payload read_payload = { 641 struct aux_payload read_payload = {
641 .i2c_over_aux = i2c, 642 .i2c_over_aux = i2c,
@@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
652 .mot = mot 653 .mot = mot
653 }; 654 };
654 655
656 *read = 0;
657
655 if (len > DEFAULT_AUX_MAX_DATA_SIZE) { 658 if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
656 BREAK_TO_DEBUGGER(); 659 BREAK_TO_DEBUGGER();
657 return DDC_RESULT_FAILED_INVALID_OPERATION; 660 return DDC_RESULT_FAILED_INVALID_OPERATION;
@@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
661 ddc->ctx->i2caux, 664 ddc->ctx->i2caux,
662 ddc->ddc_pin, 665 ddc->ddc_pin,
663 &command)) { 666 &command)) {
664 return (ssize_t)command.payloads->length; 667 *read = command.payloads->length;
668 return DDC_RESULT_SUCESSFULL;
665 } 669 }
666 670
667 return DDC_RESULT_FAILED_OPERATION; 671 return DDC_RESULT_FAILED_OPERATION;
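Editor's note: dal_ddc_service_read_dpcd_data() now returns an enum ddc_result and reports the byte count through the new *read out-parameter instead of overloading one ssize_t return. A compilable stand-in showing the calling convention (the fake DPCD table and helper are illustrative; DDC_RESULT_SUCESSFULL is spelled as in the dc headers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum ddc_result { DDC_RESULT_SUCESSFULL, DDC_RESULT_FAILED_OPERATION };

static enum ddc_result read_dpcd(uint32_t address, uint8_t *data,
				 uint32_t len, uint32_t *read)
{
	static const uint8_t fake_dpcd[16] = { 0x12 /* DPCD rev 1.2 */ };

	*read = 0;                        /* defined even on failure */
	if (address + len > sizeof(fake_dpcd))
		return DDC_RESULT_FAILED_OPERATION;

	memcpy(data, fake_dpcd + address, len);
	*read = len;                      /* byte count via out-param */
	return DDC_RESULT_SUCESSFULL;
}

int main(void)
{
	uint8_t buf[4];
	uint32_t got;

	if (read_dpcd(0, buf, sizeof(buf), &got) == DDC_RESULT_SUCESSFULL)
		printf("read %u bytes, rev=0x%02x\n", got, buf[0]);
	return 0;
}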
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 3b5053570229..7d609c71394b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1378,8 +1378,8 @@ static uint32_t bandwidth_in_kbps_from_timing(
1378{ 1378{
1379 uint32_t bits_per_channel = 0; 1379 uint32_t bits_per_channel = 0;
1380 uint32_t kbps; 1380 uint32_t kbps;
1381 switch (timing->display_color_depth) {
1382 1381
1382 switch (timing->display_color_depth) {
1383 case COLOR_DEPTH_666: 1383 case COLOR_DEPTH_666:
1384 bits_per_channel = 6; 1384 bits_per_channel = 6;
1385 break; 1385 break;
@@ -1401,14 +1401,20 @@ static uint32_t bandwidth_in_kbps_from_timing(
1401 default: 1401 default:
1402 break; 1402 break;
1403 } 1403 }
1404
1404 ASSERT(bits_per_channel != 0); 1405 ASSERT(bits_per_channel != 0);
1405 1406
1406 kbps = timing->pix_clk_khz; 1407 kbps = timing->pix_clk_khz;
1407 kbps *= bits_per_channel; 1408 kbps *= bits_per_channel;
1408 1409
1409 if (timing->flags.Y_ONLY != 1) 1410 if (timing->flags.Y_ONLY != 1) {
1410 /* Only Y-Only reduces bandwidth to 1/3 compared to RGB */ 1411
1411 kbps *= 3; 1412 kbps *= 3;
1413 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
1414 kbps /= 2;
1415 else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
1416 kbps = kbps * 2 / 3;
1417 }
1412 1418
1413 return kbps; 1419 return kbps;
1414 1420
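Editor's note: the hunk above extends bandwidth_in_kbps_from_timing() for chroma subsampling: kbps = pix_clk * bits-per-channel, tripled for three components, then halved for 4:2:0 or scaled by 2/3 for 4:2:2. A standalone restatement with a worked 4K60 example:

#include <stdint.h>
#include <stdio.h>

enum pixenc { ENC_RGB, ENC_YCBCR444, ENC_YCBCR422, ENC_YCBCR420 };

static uint32_t bw_kbps(uint32_t pix_clk_khz, uint32_t bpc,
			enum pixenc enc, int y_only)
{
	uint32_t kbps = pix_clk_khz * bpc;

	if (!y_only) {
		kbps *= 3;                   /* three color components */
		if (enc == ENC_YCBCR420)
			kbps /= 2;           /* chroma at quarter rate */
		else if (enc == ENC_YCBCR422)
			kbps = kbps * 2 / 3; /* chroma at half rate */
	}
	return kbps;
}

int main(void)
{
	/* 4K60 (594,000 kHz), 8 bpc, 4:2:0 -> 7,128,000 kbps */
	printf("%u kbps\n", bw_kbps(594000, 8, ENC_YCBCR420, 0));
	return 0;
}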
@@ -2278,6 +2284,8 @@ static bool retrieve_link_cap(struct dc_link *link)
2278 union edp_configuration_cap edp_config_cap; 2284 union edp_configuration_cap edp_config_cap;
2279 union dp_downstream_port_present ds_port = { 0 }; 2285 union dp_downstream_port_present ds_port = { 0 };
2280 enum dc_status status = DC_ERROR_UNEXPECTED; 2286 enum dc_status status = DC_ERROR_UNEXPECTED;
2287 uint32_t read_dpcd_retry_cnt = 3;
2288 int i;
2281 2289
2282 memset(dpcd_data, '\0', sizeof(dpcd_data)); 2290 memset(dpcd_data, '\0', sizeof(dpcd_data));
2283 memset(&down_strm_port_count, 2291 memset(&down_strm_port_count,
@@ -2285,11 +2293,15 @@ static bool retrieve_link_cap(struct dc_link *link)
2285 memset(&edp_config_cap, '\0', 2293 memset(&edp_config_cap, '\0',
2286 sizeof(union edp_configuration_cap)); 2294 sizeof(union edp_configuration_cap));
2287 2295
2288 status = core_link_read_dpcd( 2296 for (i = 0; i < read_dpcd_retry_cnt; i++) {
2289 link, 2297 status = core_link_read_dpcd(
2290 DP_DPCD_REV, 2298 link,
2291 dpcd_data, 2299 DP_DPCD_REV,
2292 sizeof(dpcd_data)); 2300 dpcd_data,
2301 sizeof(dpcd_data));
2302 if (status == DC_OK)
2303 break;
2304 }
2293 2305
2294 if (status != DC_OK) { 2306 if (status != DC_OK) {
2295 dm_error("%s: Read dpcd data failed.\n", __func__); 2307 dm_error("%s: Read dpcd data failed.\n", __func__);
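Editor's note: retrieve_link_cap() above wraps the initial DPCD read in a small bounded retry; transiently failing early AUX reads after hotplug are the usual motivation for this pattern. The loop shape, standalone:

#include <stdio.h>

enum status { OK, ERR };

static enum status flaky_read(int attempt)
{
	return attempt < 2 ? ERR : OK;    /* fails twice, then succeeds */
}

int main(void)
{
	const int retries = 3;
	enum status s = ERR;
	int i;

	for (i = 0; i < retries; i++) {
		s = flaky_read(i);
		if (s == OK)
			break;            /* stop on first success */
	}
	if (s == OK)
		printf("read ok after %d tries\n", i + 1);
	else
		printf("read failed\n");
	return 0;
}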
@@ -2376,6 +2388,10 @@ bool detect_dp_sink_caps(struct dc_link *link)
2376void detect_edp_sink_caps(struct dc_link *link) 2388void detect_edp_sink_caps(struct dc_link *link)
2377{ 2389{
2378 retrieve_link_cap(link); 2390 retrieve_link_cap(link);
2391
2392 if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)
2393 link->reported_link_cap.link_rate = LINK_RATE_HIGH2;
2394
2379 link->verified_link_cap = link->reported_link_cap; 2395 link->verified_link_cap = link->reported_link_cap;
2380} 2396}
2381 2397
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ba3487e97361..9eb731fb5251 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -45,8 +45,9 @@
45#include "dcn10/dcn10_resource.h" 45#include "dcn10/dcn10_resource.h"
46#endif 46#endif
47#include "dce120/dce120_resource.h" 47#include "dce120/dce120_resource.h"
48#define DC_LOGGER \ 48
49 ctx->logger 49#define DC_LOGGER_INIT(logger)
50
50enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) 51enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
51{ 52{
52 enum dce_version dc_version = DCE_VERSION_UNKNOWN; 53 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -78,6 +79,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
78 ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) { 79 ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
79 dc_version = DCE_VERSION_11_2; 80 dc_version = DCE_VERSION_11_2;
80 } 81 }
82#if defined(CONFIG_DRM_AMD_DC_VEGAM)
83 if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
84 dc_version = DCE_VERSION_11_22;
85#endif
81 break; 86 break;
82 case FAMILY_AI: 87 case FAMILY_AI:
83 dc_version = DCE_VERSION_12_0; 88 dc_version = DCE_VERSION_12_0;
@@ -124,6 +129,9 @@ struct resource_pool *dc_create_resource_pool(
124 num_virtual_links, dc, asic_id); 129 num_virtual_links, dc, asic_id);
125 break; 130 break;
126 case DCE_VERSION_11_2: 131 case DCE_VERSION_11_2:
132#if defined(CONFIG_DRM_AMD_DC_VEGAM)
133 case DCE_VERSION_11_22:
134#endif
127 res_pool = dce112_create_resource_pool( 135 res_pool = dce112_create_resource_pool(
128 num_virtual_links, dc); 136 num_virtual_links, dc);
129 break; 137 break;
@@ -835,7 +843,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
835 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 843 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
836 struct view recout_skip = { 0 }; 844 struct view recout_skip = { 0 };
837 bool res = false; 845 bool res = false;
838 struct dc_context *ctx = pipe_ctx->stream->ctx; 846 DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
839 /* Important: scaling ratio calculation requires pixel format, 847 /* Important: scaling ratio calculation requires pixel format,
840 * lb depth calculation requires recout and taps require scaling ratios. 848 * lb depth calculation requires recout and taps require scaling ratios.
841 * Inits require viewport, taps, ratios and recout of split pipe 849 * Inits require viewport, taps, ratios and recout of split pipe
@@ -843,6 +851,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
843 pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( 851 pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
844 pipe_ctx->plane_state->format); 852 pipe_ctx->plane_state->format);
845 853
854 if (pipe_ctx->stream->timing.flags.INTERLACE)
855 pipe_ctx->stream->dst.height *= 2;
856
846 calculate_scaling_ratios(pipe_ctx); 857 calculate_scaling_ratios(pipe_ctx);
847 858
848 calculate_viewport(pipe_ctx); 859 calculate_viewport(pipe_ctx);
@@ -863,6 +874,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
863 874
864 pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; 875 pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
865 pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; 876 pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
877 if (pipe_ctx->stream->timing.flags.INTERLACE)
878 pipe_ctx->plane_res.scl_data.v_active *= 2;
866 879
867 880
868 /* Taps calculations */ 881 /* Taps calculations */
@@ -908,6 +921,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
908 plane_state->dst_rect.x, 921 plane_state->dst_rect.x,
909 plane_state->dst_rect.y); 922 plane_state->dst_rect.y);
910 923
924 if (pipe_ctx->stream->timing.flags.INTERLACE)
925 pipe_ctx->stream->dst.height /= 2;
926
911 return res; 927 return res;
912} 928}
913 929
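Editor's note: for interlaced timings the scaling hunks above temporarily double stream->dst.height (and v_active) so ratios are computed against frame height, then halve it back before returning, restoring the shared stream state. The scale-around-a-computation pattern in miniature:

#include <stdio.h>

struct rect { int width, height; };

static int vertical_ratio_x1000(struct rect src, struct rect dst)
{
	return src.height * 1000 / dst.height;
}

int main(void)
{
	struct rect src = { 1920, 1080 };
	struct rect dst = { 1920, 540 };  /* interlaced field height */
	int interlaced = 1;
	int ratio;

	if (interlaced)
		dst.height *= 2;          /* compute against frame height */

	ratio = vertical_ratio_x1000(src, dst);

	if (interlaced)
		dst.height /= 2;          /* restore the shared state */

	printf("v ratio x1000 = %d (dst.height back to %d)\n",
	       ratio, dst.height);
	return 0;
}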
@@ -1294,6 +1310,19 @@ bool dc_add_all_planes_for_stream(
1294} 1310}
1295 1311
1296 1312
1313static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
1314 struct dc_stream_state *new_stream)
1315{
1316 if (cur_stream == NULL)
1317 return true;
1318
1319 if (memcmp(&cur_stream->hdr_static_metadata,
1320 &new_stream->hdr_static_metadata,
1321 sizeof(struct dc_info_packet)) != 0)
1322 return true;
1323
1324 return false;
1325}
1297 1326
1298static bool is_timing_changed(struct dc_stream_state *cur_stream, 1327static bool is_timing_changed(struct dc_stream_state *cur_stream,
1299 struct dc_stream_state *new_stream) 1328 struct dc_stream_state *new_stream)
@@ -1329,6 +1358,9 @@ static bool are_stream_backends_same(
1329 if (is_timing_changed(stream_a, stream_b)) 1358 if (is_timing_changed(stream_a, stream_b))
1330 return false; 1359 return false;
1331 1360
1361 if (is_hdr_static_meta_changed(stream_a, stream_b))
1362 return false;
1363
1332 return true; 1364 return true;
1333} 1365}
1334 1366
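Editor's note: is_hdr_static_meta_changed() above detects metadata changes with a raw memcmp of the two packet structs, treating a missing current stream as changed. That comparison is only safe when the struct carries no uninitialized padding; a standalone analogue:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct info_packet { unsigned char hb0, hb1, hb2, hb3, sb[28]; };

static bool meta_changed(const struct info_packet *cur,
			 const struct info_packet *next)
{
	if (!cur)
		return true;              /* no previous stream: changed */
	return memcmp(cur, next, sizeof(*cur)) != 0;
}

int main(void)
{
	struct info_packet a = { 0 }, b = { 0 };

	b.sb[0] = 0x02;                   /* one payload byte differs */
	printf("changed=%d\n", meta_changed(&a, &b));
	return 0;
}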
@@ -1599,18 +1631,6 @@ enum dc_status dc_remove_stream_from_ctx(
1599 return DC_OK; 1631 return DC_OK;
1600} 1632}
1601 1633
1602static void copy_pipe_ctx(
1603 const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
1604{
1605 struct dc_plane_state *plane_state = to_pipe_ctx->plane_state;
1606 struct dc_stream_state *stream = to_pipe_ctx->stream;
1607
1608 *to_pipe_ctx = *from_pipe_ctx;
1609 to_pipe_ctx->stream = stream;
1610 if (plane_state != NULL)
1611 to_pipe_ctx->plane_state = plane_state;
1612}
1613
1614static struct dc_stream_state *find_pll_sharable_stream( 1634static struct dc_stream_state *find_pll_sharable_stream(
1615 struct dc_stream_state *stream_needs_pll, 1635 struct dc_stream_state *stream_needs_pll,
1616 struct dc_state *context) 1636 struct dc_state *context)
@@ -1703,7 +1723,7 @@ enum dc_status resource_map_pool_resources(
1703 pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); 1723 pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
1704#endif 1724#endif
1705 1725
1706 if (pipe_idx < 0) 1726 if (pipe_idx < 0 || context->res_ctx.pipe_ctx[pipe_idx].stream_res.tg == NULL)
1707 return DC_NO_CONTROLLER_RESOURCE; 1727 return DC_NO_CONTROLLER_RESOURCE;
1708 1728
1709 pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; 1729 pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
@@ -1752,26 +1772,6 @@ enum dc_status resource_map_pool_resources(
1752 return DC_ERROR_UNEXPECTED; 1772 return DC_ERROR_UNEXPECTED;
1753} 1773}
1754 1774
1755/* first stream in the context is used to populate the rest */
1756void validate_guaranteed_copy_streams(
1757 struct dc_state *context,
1758 int max_streams)
1759{
1760 int i;
1761
1762 for (i = 1; i < max_streams; i++) {
1763 context->streams[i] = context->streams[0];
1764
1765 copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
1766 &context->res_ctx.pipe_ctx[i]);
1767 context->res_ctx.pipe_ctx[i].stream =
1768 context->res_ctx.pipe_ctx[0].stream;
1769
1770 dc_stream_retain(context->streams[i]);
1771 context->stream_count++;
1772 }
1773}
1774
1775void dc_resource_state_copy_construct_current( 1775void dc_resource_state_copy_construct_current(
1776 const struct dc *dc, 1776 const struct dc *dc,
1777 struct dc_state *dst_ctx) 1777 struct dc_state *dst_ctx)
@@ -1798,9 +1798,9 @@ enum dc_status dc_validate_global_state(
1798 return DC_ERROR_UNEXPECTED; 1798 return DC_ERROR_UNEXPECTED;
1799 1799
1800 if (dc->res_pool->funcs->validate_global) { 1800 if (dc->res_pool->funcs->validate_global) {
1801 result = dc->res_pool->funcs->validate_global(dc, new_ctx); 1801 result = dc->res_pool->funcs->validate_global(dc, new_ctx);
1802 if (result != DC_OK) 1802 if (result != DC_OK)
1803 return result; 1803 return result;
1804 } 1804 }
1805 1805
1806 for (i = 0; i < new_ctx->stream_count; i++) { 1806 for (i = 0; i < new_ctx->stream_count; i++) {
@@ -1843,7 +1843,7 @@ enum dc_status dc_validate_global_state(
1843} 1843}
1844 1844
1845static void patch_gamut_packet_checksum( 1845static void patch_gamut_packet_checksum(
1846 struct encoder_info_packet *gamut_packet) 1846 struct dc_info_packet *gamut_packet)
1847{ 1847{
1848 /* For gamut we recalc checksum */ 1848 /* For gamut we recalc checksum */
1849 if (gamut_packet->valid) { 1849 if (gamut_packet->valid) {
@@ -1862,12 +1862,11 @@ static void patch_gamut_packet_checksum(
1862} 1862}
1863 1863
1864static void set_avi_info_frame( 1864static void set_avi_info_frame(
1865 struct encoder_info_packet *info_packet, 1865 struct dc_info_packet *info_packet,
1866 struct pipe_ctx *pipe_ctx) 1866 struct pipe_ctx *pipe_ctx)
1867{ 1867{
1868 struct dc_stream_state *stream = pipe_ctx->stream; 1868 struct dc_stream_state *stream = pipe_ctx->stream;
1869 enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; 1869 enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
1870 struct info_frame info_frame = { {0} };
1871 uint32_t pixel_encoding = 0; 1870 uint32_t pixel_encoding = 0;
1872 enum scanning_type scan_type = SCANNING_TYPE_NODATA; 1871 enum scanning_type scan_type = SCANNING_TYPE_NODATA;
1873 enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA; 1872 enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
@@ -1877,22 +1876,24 @@ static void set_avi_info_frame(
1877 unsigned int cn0_cn1_value = 0; 1876 unsigned int cn0_cn1_value = 0;
1878 uint8_t *check_sum = NULL; 1877 uint8_t *check_sum = NULL;
1879 uint8_t byte_index = 0; 1878 uint8_t byte_index = 0;
1880 union hdmi_info_packet *hdmi_info = &info_frame.avi_info_packet.info_packet_hdmi; 1879 union hdmi_info_packet hdmi_info;
1881 union display_content_support support = {0}; 1880 union display_content_support support = {0};
1882 unsigned int vic = pipe_ctx->stream->timing.vic; 1881 unsigned int vic = pipe_ctx->stream->timing.vic;
1883 enum dc_timing_3d_format format; 1882 enum dc_timing_3d_format format;
1884 1883
1884 memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));
1885
1885 color_space = pipe_ctx->stream->output_color_space; 1886 color_space = pipe_ctx->stream->output_color_space;
1886 if (color_space == COLOR_SPACE_UNKNOWN) 1887 if (color_space == COLOR_SPACE_UNKNOWN)
1887 color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? 1888 color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
1888 COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709; 1889 COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709;
1889 1890
1890 /* Initialize header */ 1891 /* Initialize header */
1891 hdmi_info->bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI; 1892 hdmi_info.bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
1892 /* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall 1893 /* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
1893 * not be used in HDMI 2.0 (Section 10.1) */ 1894 * not be used in HDMI 2.0 (Section 10.1) */
1894 hdmi_info->bits.header.version = 2; 1895 hdmi_info.bits.header.version = 2;
1895 hdmi_info->bits.header.length = HDMI_AVI_INFOFRAME_SIZE; 1896 hdmi_info.bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
1896 1897
1897 /* 1898 /*
1898 * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built 1899 * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
@@ -1918,39 +1919,39 @@ static void set_avi_info_frame(
1918 1919
1919 /* Y0_Y1_Y2 : The pixel encoding */ 1920 /* Y0_Y1_Y2 : The pixel encoding */
1920 /* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */ 1921 /* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
1921 hdmi_info->bits.Y0_Y1_Y2 = pixel_encoding; 1922 hdmi_info.bits.Y0_Y1_Y2 = pixel_encoding;
1922 1923
1923 /* A0 = 1 Active Format Information valid */ 1924 /* A0 = 1 Active Format Information valid */
1924 hdmi_info->bits.A0 = ACTIVE_FORMAT_VALID; 1925 hdmi_info.bits.A0 = ACTIVE_FORMAT_VALID;
1925 1926
1926 /* B0, B1 = 3; Bar info data is valid */ 1927 /* B0, B1 = 3; Bar info data is valid */
1927 hdmi_info->bits.B0_B1 = BAR_INFO_BOTH_VALID; 1928 hdmi_info.bits.B0_B1 = BAR_INFO_BOTH_VALID;
1928 1929
1929 hdmi_info->bits.SC0_SC1 = PICTURE_SCALING_UNIFORM; 1930 hdmi_info.bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
1930 1931
1931 /* S0, S1 : Underscan / Overscan */ 1932 /* S0, S1 : Underscan / Overscan */
1932 /* TODO: un-hardcode scan type */ 1933 /* TODO: un-hardcode scan type */
1933 scan_type = SCANNING_TYPE_UNDERSCAN; 1934 scan_type = SCANNING_TYPE_UNDERSCAN;
1934 hdmi_info->bits.S0_S1 = scan_type; 1935 hdmi_info.bits.S0_S1 = scan_type;
1935 1936
1936 /* C0, C1 : Colorimetry */ 1937 /* C0, C1 : Colorimetry */
1937 if (color_space == COLOR_SPACE_YCBCR709 || 1938 if (color_space == COLOR_SPACE_YCBCR709 ||
1938 color_space == COLOR_SPACE_YCBCR709_LIMITED) 1939 color_space == COLOR_SPACE_YCBCR709_LIMITED)
1939 hdmi_info->bits.C0_C1 = COLORIMETRY_ITU709; 1940 hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
1940 else if (color_space == COLOR_SPACE_YCBCR601 || 1941 else if (color_space == COLOR_SPACE_YCBCR601 ||
1941 color_space == COLOR_SPACE_YCBCR601_LIMITED) 1942 color_space == COLOR_SPACE_YCBCR601_LIMITED)
1942 hdmi_info->bits.C0_C1 = COLORIMETRY_ITU601; 1943 hdmi_info.bits.C0_C1 = COLORIMETRY_ITU601;
1943 else { 1944 else {
1944 hdmi_info->bits.C0_C1 = COLORIMETRY_NO_DATA; 1945 hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA;
1945 } 1946 }
1946 if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE || 1947 if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
1947 color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE || 1948 color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE ||
1948 color_space == COLOR_SPACE_2020_YCBCR) { 1949 color_space == COLOR_SPACE_2020_YCBCR) {
1949 hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR; 1950 hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
1950 hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED; 1951 hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
1951 } else if (color_space == COLOR_SPACE_ADOBERGB) { 1952 } else if (color_space == COLOR_SPACE_ADOBERGB) {
1952 hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB; 1953 hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
1953 hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED; 1954 hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
1954 } 1955 }
1955 1956
1956 /* TODO: un-hardcode aspect ratio */ 1957 /* TODO: un-hardcode aspect ratio */
@@ -1959,18 +1960,18 @@ static void set_avi_info_frame(
1959 switch (aspect) { 1960 switch (aspect) {
1960 case ASPECT_RATIO_4_3: 1961 case ASPECT_RATIO_4_3:
1961 case ASPECT_RATIO_16_9: 1962 case ASPECT_RATIO_16_9:
1962 hdmi_info->bits.M0_M1 = aspect; 1963 hdmi_info.bits.M0_M1 = aspect;
1963 break; 1964 break;
1964 1965
1965 case ASPECT_RATIO_NO_DATA: 1966 case ASPECT_RATIO_NO_DATA:
1966 case ASPECT_RATIO_64_27: 1967 case ASPECT_RATIO_64_27:
1967 case ASPECT_RATIO_256_135: 1968 case ASPECT_RATIO_256_135:
1968 default: 1969 default:
1969 hdmi_info->bits.M0_M1 = 0; 1970 hdmi_info.bits.M0_M1 = 0;
1970 } 1971 }
1971 1972
1972 /* Active Format Aspect ratio - same as Picture Aspect Ratio. */ 1973 /* Active Format Aspect ratio - same as Picture Aspect Ratio. */
1973 hdmi_info->bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE; 1974 hdmi_info.bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
1974 1975
1975 /* TODO: un-hardcode cn0_cn1 and itc */ 1976 /* TODO: un-hardcode cn0_cn1 and itc */
1976 1977
@@ -2013,8 +2014,8 @@ static void set_avi_info_frame(
2013 } 2014 }
2014 } 2015 }
2015 } 2016 }
2016 hdmi_info->bits.CN0_CN1 = cn0_cn1_value; 2017 hdmi_info.bits.CN0_CN1 = cn0_cn1_value;
2017 hdmi_info->bits.ITC = itc_value; 2018 hdmi_info.bits.ITC = itc_value;
2018 } 2019 }
2019 2020
2020 /* TODO : We should handle YCC quantization */ 2021 /* TODO : We should handle YCC quantization */
@@ -2023,19 +2024,19 @@ static void set_avi_info_frame(
2023 stream->sink->edid_caps.qy_bit == 1) { 2024 stream->sink->edid_caps.qy_bit == 1) {
2024 if (color_space == COLOR_SPACE_SRGB || 2025 if (color_space == COLOR_SPACE_SRGB ||
2025 color_space == COLOR_SPACE_2020_RGB_FULLRANGE) { 2026 color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
2026 hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE; 2027 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
2027 hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE; 2028 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
2028 } else if (color_space == COLOR_SPACE_SRGB_LIMITED || 2029 } else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
2029 color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) { 2030 color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
2030 hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE; 2031 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
2031 hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2032 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
2032 } else { 2033 } else {
2033 hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; 2034 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
2034 hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2035 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
2035 } 2036 }
2036 } else { 2037 } else {
2037 hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; 2038 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
2038 hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2039 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
2039 } 2040 }
2040 2041
2041 ///VIC 2042 ///VIC
@@ -2060,51 +2061,49 @@ static void set_avi_info_frame(
2060 break; 2061 break;
2061 } 2062 }
2062 } 2063 }
2063 hdmi_info->bits.VIC0_VIC7 = vic; 2064 hdmi_info.bits.VIC0_VIC7 = vic;
2064 2065
2065 /* pixel repetition 2066 /* pixel repetition
2066 * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel 2067 * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
2067 * repetition starts from 1 */ 2068
2068 hdmi_info->bits.PR0_PR3 = 0; 2069 hdmi_info.bits.PR0_PR3 = 0;
2069 2070
2070 /* Bar Info 2071 /* Bar Info
2071 * barTop: Line Number of End of Top Bar. 2072 * barTop: Line Number of End of Top Bar.
2072 * barBottom: Line Number of Start of Bottom Bar. 2073 * barBottom: Line Number of Start of Bottom Bar.
2073 * barLeft: Pixel Number of End of Left Bar. 2074 * barLeft: Pixel Number of End of Left Bar.
2074 * barRight: Pixel Number of Start of Right Bar. */ 2075 * barRight: Pixel Number of Start of Right Bar. */
2075 hdmi_info->bits.bar_top = stream->timing.v_border_top; 2076 hdmi_info.bits.bar_top = stream->timing.v_border_top;
2076 hdmi_info->bits.bar_bottom = (stream->timing.v_total 2077 hdmi_info.bits.bar_bottom = (stream->timing.v_total
2077 - stream->timing.v_border_bottom + 1); 2078 - stream->timing.v_border_bottom + 1);
2078 hdmi_info->bits.bar_left = stream->timing.h_border_left; 2079 hdmi_info.bits.bar_left = stream->timing.h_border_left;
2079 hdmi_info->bits.bar_right = (stream->timing.h_total 2080 hdmi_info.bits.bar_right = (stream->timing.h_total
2080 - stream->timing.h_border_right + 1); 2081 - stream->timing.h_border_right + 1);
2081 2082
2082 /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */ 2083 /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
2083 check_sum = &info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0]; 2084 check_sum = &hdmi_info.packet_raw_data.sb[0];
2084 2085
2085 *check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2; 2086 *check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;
2086 2087
2087 for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++) 2088 for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
2088 *check_sum += hdmi_info->packet_raw_data.sb[byte_index]; 2089 *check_sum += hdmi_info.packet_raw_data.sb[byte_index];
2089 2090
2090 /* one byte complement */ 2091 /* one byte complement */
2091 *check_sum = (uint8_t) (0x100 - *check_sum); 2092 *check_sum = (uint8_t) (0x100 - *check_sum);
2092 2093
2093 /* Store in hw_path_mode */ 2094 /* Store in hw_path_mode */
2094 info_packet->hb0 = hdmi_info->packet_raw_data.hb0; 2095 info_packet->hb0 = hdmi_info.packet_raw_data.hb0;
2095 info_packet->hb1 = hdmi_info->packet_raw_data.hb1; 2096 info_packet->hb1 = hdmi_info.packet_raw_data.hb1;
2096 info_packet->hb2 = hdmi_info->packet_raw_data.hb2; 2097 info_packet->hb2 = hdmi_info.packet_raw_data.hb2;
2097 2098
2098 for (byte_index = 0; byte_index < sizeof(info_frame.avi_info_packet. 2099 for (byte_index = 0; byte_index < sizeof(hdmi_info.packet_raw_data.sb); byte_index++)
2099 info_packet_hdmi.packet_raw_data.sb); byte_index++) 2100 info_packet->sb[byte_index] = hdmi_info.packet_raw_data.sb[byte_index];
2100 info_packet->sb[byte_index] = info_frame.avi_info_packet.
2101 info_packet_hdmi.packet_raw_data.sb[byte_index];
2102 2101
2103 info_packet->valid = true; 2102 info_packet->valid = true;
2104} 2103}
2105 2104
2106static void set_vendor_info_packet( 2105static void set_vendor_info_packet(
2107 struct encoder_info_packet *info_packet, 2106 struct dc_info_packet *info_packet,
2108 struct dc_stream_state *stream) 2107 struct dc_stream_state *stream)
2109{ 2108{
2110 uint32_t length = 0; 2109 uint32_t length = 0;
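Editor's note: the checksum logic carried through set_avi_info_frame() above is the usual CEA-861 one-byte complement: sum the three header bytes (type 0x82, version 2, length 13) and the payload, then store 0x100 minus the low byte so the whole packet sums to zero mod 256. Standalone:

#include <stdint.h>
#include <stdio.h>

#define AVI_TYPE    0x82
#define AVI_VERSION 0x02
#define AVI_SIZE    13

int main(void)
{
	uint8_t sb[AVI_SIZE + 1] = { 0 }; /* sb[0] holds the checksum */
	unsigned int sum = AVI_TYPE + AVI_VERSION + AVI_SIZE;
	int i;

	sb[3] = 0x10;                     /* arbitrary payload byte */
	for (i = 1; i <= AVI_SIZE; i++)
		sum += sb[i];
	sb[0] = (uint8_t)(0x100 - (sum & 0xFF));

	/* verify: header + payload + checksum wraps to zero */
	sum = AVI_TYPE + AVI_VERSION + AVI_SIZE;
	for (i = 0; i <= AVI_SIZE; i++)
		sum += sb[i];
	printf("total mod 256 = %u\n", sum & 0xFF);
	return 0;
}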
@@ -2217,7 +2216,7 @@ static void set_vendor_info_packet(
2217} 2216}
2218 2217
2219static void set_spd_info_packet( 2218static void set_spd_info_packet(
2220 struct encoder_info_packet *info_packet, 2219 struct dc_info_packet *info_packet,
2221 struct dc_stream_state *stream) 2220 struct dc_stream_state *stream)
2222{ 2221{
2223 /* SPD info packet for FreeSync */ 2222 /* SPD info packet for FreeSync */
@@ -2338,104 +2337,19 @@ static void set_spd_info_packet(
2338} 2337}
2339 2338
2340static void set_hdr_static_info_packet( 2339static void set_hdr_static_info_packet(
2341 struct encoder_info_packet *info_packet, 2340 struct dc_info_packet *info_packet,
2342 struct dc_stream_state *stream) 2341 struct dc_stream_state *stream)
2343{ 2342{
2344 uint16_t i = 0; 2343 /* HDR Static Metadata info packet for HDR10 */
2345 enum signal_type signal = stream->signal;
2346 uint32_t data;
2347 2344
2348 if (!stream->hdr_static_metadata.hdr_supported) 2345 if (!stream->hdr_static_metadata.valid)
2349 return; 2346 return;
2350 2347
2351 if (dc_is_hdmi_signal(signal)) { 2348 *info_packet = stream->hdr_static_metadata;
2352 info_packet->valid = true;
2353
2354 info_packet->hb0 = 0x87;
2355 info_packet->hb1 = 0x01;
2356 info_packet->hb2 = 0x1A;
2357 i = 1;
2358 } else if (dc_is_dp_signal(signal)) {
2359 info_packet->valid = true;
2360
2361 info_packet->hb0 = 0x00;
2362 info_packet->hb1 = 0x87;
2363 info_packet->hb2 = 0x1D;
2364 info_packet->hb3 = (0x13 << 2);
2365 i = 2;
2366 }
2367
2368 data = stream->hdr_static_metadata.is_hdr;
2369 info_packet->sb[i++] = data ? 0x02 : 0x00;
2370 info_packet->sb[i++] = 0x00;
2371
2372 data = stream->hdr_static_metadata.chromaticity_green_x / 2;
2373 info_packet->sb[i++] = data & 0xFF;
2374 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2375
2376 data = stream->hdr_static_metadata.chromaticity_green_y / 2;
2377 info_packet->sb[i++] = data & 0xFF;
2378 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2379
2380 data = stream->hdr_static_metadata.chromaticity_blue_x / 2;
2381 info_packet->sb[i++] = data & 0xFF;
2382 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2383
2384 data = stream->hdr_static_metadata.chromaticity_blue_y / 2;
2385 info_packet->sb[i++] = data & 0xFF;
2386 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2387
2388 data = stream->hdr_static_metadata.chromaticity_red_x / 2;
2389 info_packet->sb[i++] = data & 0xFF;
2390 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2391
2392 data = stream->hdr_static_metadata.chromaticity_red_y / 2;
2393 info_packet->sb[i++] = data & 0xFF;
2394 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2395
2396 data = stream->hdr_static_metadata.chromaticity_white_point_x / 2;
2397 info_packet->sb[i++] = data & 0xFF;
2398 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2399
2400 data = stream->hdr_static_metadata.chromaticity_white_point_y / 2;
2401 info_packet->sb[i++] = data & 0xFF;
2402 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2403
2404 data = stream->hdr_static_metadata.max_luminance;
2405 info_packet->sb[i++] = data & 0xFF;
2406 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2407
2408 data = stream->hdr_static_metadata.min_luminance;
2409 info_packet->sb[i++] = data & 0xFF;
2410 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2411
2412 data = stream->hdr_static_metadata.maximum_content_light_level;
2413 info_packet->sb[i++] = data & 0xFF;
2414 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2415
2416 data = stream->hdr_static_metadata.maximum_frame_average_light_level;
2417 info_packet->sb[i++] = data & 0xFF;
2418 info_packet->sb[i++] = (data & 0xFF00) >> 8;
2419
2420 if (dc_is_hdmi_signal(signal)) {
2421 uint32_t checksum = 0;
2422
2423 checksum += info_packet->hb0;
2424 checksum += info_packet->hb1;
2425 checksum += info_packet->hb2;
2426
2427 for (i = 1; i <= info_packet->hb2; i++)
2428 checksum += info_packet->sb[i];
2429
2430 info_packet->sb[0] = 0x100 - checksum;
2431 } else if (dc_is_dp_signal(signal)) {
2432 info_packet->sb[0] = 0x01;
2433 info_packet->sb[1] = 0x1A;
2434 }
2435} 2349}
2436 2350
2437static void set_vsc_info_packet( 2351static void set_vsc_info_packet(
2438 struct encoder_info_packet *info_packet, 2352 struct dc_info_packet *info_packet,
2439 struct dc_stream_state *stream) 2353 struct dc_stream_state *stream)
2440{ 2354{
2441 unsigned int vscPacketRevision = 0; 2355 unsigned int vscPacketRevision = 0;
@@ -2650,6 +2564,8 @@ bool pipe_need_reprogram(
2650 if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream)) 2564 if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
2651 return true; 2565 return true;
2652 2566
2567 if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
2568 return true;
2653 2569
2654 return false; 2570 return false;
2655} 2571}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index ce0747ed0f00..3732a1de9d6c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -101,14 +101,16 @@ static void construct(struct dc_stream_state *stream,
101 stream->status.link = stream->sink->link; 101 stream->status.link = stream->sink->link;
102 102
103 update_stream_signal(stream); 103 update_stream_signal(stream);
104
105 stream->out_transfer_func = dc_create_transfer_func();
106 stream->out_transfer_func->type = TF_TYPE_BYPASS;
104} 107}
105 108
106static void destruct(struct dc_stream_state *stream) 109static void destruct(struct dc_stream_state *stream)
107{ 110{
108 dc_sink_release(stream->sink); 111 dc_sink_release(stream->sink);
109 if (stream->out_transfer_func != NULL) { 112 if (stream->out_transfer_func != NULL) {
110 dc_transfer_func_release( 113 dc_transfer_func_release(stream->out_transfer_func);
111 stream->out_transfer_func);
112 stream->out_transfer_func = NULL; 114 stream->out_transfer_func = NULL;
113 } 115 }
114} 116}
@@ -176,6 +178,7 @@ bool dc_stream_set_cursor_attributes(
176 int i; 178 int i;
177 struct dc *core_dc; 179 struct dc *core_dc;
178 struct resource_context *res_ctx; 180 struct resource_context *res_ctx;
181 struct pipe_ctx *pipe_to_program = NULL;
179 182
180 if (NULL == stream) { 183 if (NULL == stream) {
181 dm_error("DC: dc_stream is NULL!\n"); 184 dm_error("DC: dc_stream is NULL!\n");
@@ -203,9 +206,17 @@ bool dc_stream_set_cursor_attributes(
203 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) 206 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
204 continue; 207 continue;
205 208
209 if (!pipe_to_program) {
210 pipe_to_program = pipe_ctx;
211 core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
212 }
206 213
207 core_dc->hwss.set_cursor_attribute(pipe_ctx); 214 core_dc->hwss.set_cursor_attribute(pipe_ctx);
208 } 215 }
216
217 if (pipe_to_program)
218 core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
219
209 return true; 220 return true;
210} 221}
211 222
@@ -216,6 +227,7 @@ bool dc_stream_set_cursor_position(
216 int i; 227 int i;
217 struct dc *core_dc; 228 struct dc *core_dc;
218 struct resource_context *res_ctx; 229 struct resource_context *res_ctx;
230 struct pipe_ctx *pipe_to_program = NULL;
219 231
220 if (NULL == stream) { 232 if (NULL == stream) {
221 dm_error("DC: dc_stream is NULL!\n"); 233 dm_error("DC: dc_stream is NULL!\n");
@@ -241,9 +253,17 @@ bool dc_stream_set_cursor_position(
241 !pipe_ctx->plane_res.ipp) 253 !pipe_ctx->plane_res.ipp)
242 continue; 254 continue;
243 255
256 if (!pipe_to_program) {
257 pipe_to_program = pipe_ctx;
258 core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
259 }
260
244 core_dc->hwss.set_cursor_position(pipe_ctx); 261 core_dc->hwss.set_cursor_position(pipe_ctx);
245 } 262 }
246 263
264 if (pipe_to_program)
265 core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
266
247 return true; 267 return true;
248} 268}
249 269
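
Both cursor paths above introduce the same lock-once/program/unlock shape. A condensed sketch of that pattern (pipe-matching condition simplified; MAX_PIPES assumed to be the driver's pipe bound):

static void program_cursor_under_lock(struct dc *core_dc,
				      struct resource_context *res_ctx,
				      struct dc_stream_state *stream)
{
	struct pipe_ctx *pipe_to_program = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

		if (pipe_ctx->stream != stream)
			continue;

		/* take the pipe lock once, on the first matching pipe */
		if (!pipe_to_program) {
			pipe_to_program = pipe_ctx;
			core_dc->hwss.pipe_control_lock(core_dc,
							pipe_to_program, true);
		}

		core_dc->hwss.set_cursor_position(pipe_ctx);
	}

	/* release only if something was actually programmed */
	if (pipe_to_program)
		core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
}

This keeps all cursor updates for a stream inside one lock window, so the hardware never latches a half-applied cursor state.
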
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ade5b8ee9c3c..68a71adeb12e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -38,6 +38,12 @@
38static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state) 38static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
39{ 39{
40 plane_state->ctx = ctx; 40 plane_state->ctx = ctx;
41
42 plane_state->gamma_correction = dc_create_gamma();
43 plane_state->gamma_correction->is_identity = true;
44
45 plane_state->in_transfer_func = dc_create_transfer_func();
46 plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
41} 47}
42 48
43static void destruct(struct dc_plane_state *plane_state) 49static void destruct(struct dc_plane_state *plane_state)
@@ -66,8 +72,8 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
66{ 72{
67 struct dc *core_dc = dc; 73 struct dc *core_dc = dc;
68 74
69 struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state), 75 struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
70 GFP_KERNEL); 76 GFP_KERNEL);
71 77
72 if (NULL == plane_state) 78 if (NULL == plane_state)
73 return NULL; 79 return NULL;
@@ -120,7 +126,7 @@ static void dc_plane_state_free(struct kref *kref)
120{ 126{
121 struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); 127 struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
122 destruct(plane_state); 128 destruct(plane_state);
123 kfree(plane_state); 129 kvfree(plane_state);
124} 130}
125 131
126void dc_plane_state_release(struct dc_plane_state *plane_state) 132void dc_plane_state_release(struct dc_plane_state *plane_state)
@@ -136,7 +142,7 @@ void dc_gamma_retain(struct dc_gamma *gamma)
136static void dc_gamma_free(struct kref *kref) 142static void dc_gamma_free(struct kref *kref)
137{ 143{
138 struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount); 144 struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
139 kfree(gamma); 145 kvfree(gamma);
140} 146}
141 147
142void dc_gamma_release(struct dc_gamma **gamma) 148void dc_gamma_release(struct dc_gamma **gamma)
@@ -147,7 +153,7 @@ void dc_gamma_release(struct dc_gamma **gamma)
147 153
148struct dc_gamma *dc_create_gamma(void) 154struct dc_gamma *dc_create_gamma(void)
149{ 155{
150 struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL); 156 struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL);
151 157
152 if (gamma == NULL) 158 if (gamma == NULL)
153 goto alloc_fail; 159 goto alloc_fail;
@@ -167,7 +173,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf)
167static void dc_transfer_func_free(struct kref *kref) 173static void dc_transfer_func_free(struct kref *kref)
168{ 174{
169 struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount); 175 struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
170 kfree(tf); 176 kvfree(tf);
171} 177}
172 178
173void dc_transfer_func_release(struct dc_transfer_func *tf) 179void dc_transfer_func_release(struct dc_transfer_func *tf)
@@ -175,9 +181,9 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
175 kref_put(&tf->refcount, dc_transfer_func_free); 181 kref_put(&tf->refcount, dc_transfer_func_free);
176} 182}
177 183
178struct dc_transfer_func *dc_create_transfer_func(void) 184struct dc_transfer_func *dc_create_transfer_func()
179{ 185{
180 struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL); 186 struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
181 187
182 if (tf == NULL) 188 if (tf == NULL)
183 goto alloc_fail; 189 goto alloc_fail;
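
The kzalloc-to-kvzalloc conversions above matter because kvzalloc() may satisfy large allocations from vmalloc space, and such memory must be released with kvfree(), never kfree(); that is why each allocation-site change is paired with a change at the matching free site. A minimal sketch of the pairing, assuming only the dc_gamma definition from this series:

#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/slab.h>

static struct dc_gamma *gamma_alloc_example(void)
{
	/* kvzalloc() tries kmalloc first, then falls back to vmalloc
	 * for sizes that are hard to satisfy contiguously */
	struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL);

	if (!gamma)
		return NULL;

	kref_init(&gamma->refcount);
	return gamma;
}

static void gamma_free_example(struct dc_gamma *gamma)
{
	/* must be kvfree(): kfree() on vmalloc-backed memory is invalid */
	kvfree(gamma);
}
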
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index fa4b3c8b3bb7..cd4f4341cb53 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
38#include "inc/compressor.h" 38#include "inc/compressor.h"
39#include "dml/display_mode_lib.h" 39#include "dml/display_mode_lib.h"
40 40
41#define DC_VER "3.1.38" 41#define DC_VER "3.1.44"
42 42
43#define MAX_SURFACES 3 43#define MAX_SURFACES 3
44#define MAX_STREAMS 6 44#define MAX_STREAMS 6
@@ -202,6 +202,7 @@ struct dc_debug {
202 bool timing_trace; 202 bool timing_trace;
203 bool clock_trace; 203 bool clock_trace;
204 bool validation_trace; 204 bool validation_trace;
205 bool bandwidth_calcs_trace;
205 206
206 /* stutter efficiency related */ 207 /* stutter efficiency related */
207 bool disable_stutter; 208 bool disable_stutter;
@@ -332,20 +333,6 @@ enum {
332 TRANSFER_FUNC_POINTS = 1025 333 TRANSFER_FUNC_POINTS = 1025
333}; 334};
334 335
335// Moved here from color module for linux
336enum color_transfer_func {
337 transfer_func_unknown,
338 transfer_func_srgb,
339 transfer_func_bt709,
340 transfer_func_pq2084,
341 transfer_func_pq2084_interim,
342 transfer_func_linear_0_1,
343 transfer_func_linear_0_125,
344 transfer_func_dolbyvision,
345 transfer_func_gamma_22,
346 transfer_func_gamma_26
347};
348
349struct dc_hdr_static_metadata { 336struct dc_hdr_static_metadata {
350 /* display chromaticities and white point in units of 0.00001 */ 337 /* display chromaticities and white point in units of 0.00001 */
351 unsigned int chromaticity_green_x; 338 unsigned int chromaticity_green_x;
@@ -361,9 +348,6 @@ struct dc_hdr_static_metadata {
361 uint32_t max_luminance; 348 uint32_t max_luminance;
362 uint32_t maximum_content_light_level; 349 uint32_t maximum_content_light_level;
363 uint32_t maximum_frame_average_light_level; 350 uint32_t maximum_frame_average_light_level;
364
365 bool hdr_supported;
366 bool is_hdr;
367}; 351};
368 352
369enum dc_transfer_func_type { 353enum dc_transfer_func_type {
@@ -419,7 +403,6 @@ union surface_update_flags {
419 /* Medium updates */ 403 /* Medium updates */
420 uint32_t dcc_change:1; 404 uint32_t dcc_change:1;
421 uint32_t color_space_change:1; 405 uint32_t color_space_change:1;
422 uint32_t input_tf_change:1;
423 uint32_t horizontal_mirror_change:1; 406 uint32_t horizontal_mirror_change:1;
424 uint32_t per_pixel_alpha_change:1; 407 uint32_t per_pixel_alpha_change:1;
425 uint32_t rotation_change:1; 408 uint32_t rotation_change:1;
@@ -428,6 +411,7 @@ union surface_update_flags {
428 uint32_t position_change:1; 411 uint32_t position_change:1;
429 uint32_t in_transfer_func_change:1; 412 uint32_t in_transfer_func_change:1;
430 uint32_t input_csc_change:1; 413 uint32_t input_csc_change:1;
414 uint32_t coeff_reduction_change:1;
431 uint32_t output_tf_change:1; 415 uint32_t output_tf_change:1;
432 uint32_t pixel_format_change:1; 416 uint32_t pixel_format_change:1;
433 417
@@ -460,7 +444,7 @@ struct dc_plane_state {
460 struct dc_gamma *gamma_correction; 444 struct dc_gamma *gamma_correction;
461 struct dc_transfer_func *in_transfer_func; 445 struct dc_transfer_func *in_transfer_func;
462 struct dc_bias_and_scale *bias_and_scale; 446 struct dc_bias_and_scale *bias_and_scale;
463 struct csc_transform input_csc_color_matrix; 447 struct dc_csc_transform input_csc_color_matrix;
464 struct fixed31_32 coeff_reduction_factor; 448 struct fixed31_32 coeff_reduction_factor;
465 uint32_t sdr_white_level; 449 uint32_t sdr_white_level;
466 450
@@ -468,7 +452,6 @@ struct dc_plane_state {
468 struct dc_hdr_static_metadata hdr_static_ctx; 452 struct dc_hdr_static_metadata hdr_static_ctx;
469 453
470 enum dc_color_space color_space; 454 enum dc_color_space color_space;
471 enum color_transfer_func input_tf;
472 455
473 enum surface_pixel_format format; 456 enum surface_pixel_format format;
474 enum dc_rotation_angle rotation; 457 enum dc_rotation_angle rotation;
@@ -498,7 +481,6 @@ struct dc_plane_info {
498 enum dc_rotation_angle rotation; 481 enum dc_rotation_angle rotation;
499 enum plane_stereo_format stereo_format; 482 enum plane_stereo_format stereo_format;
500 enum dc_color_space color_space; 483 enum dc_color_space color_space;
501 enum color_transfer_func input_tf;
502 unsigned int sdr_white_level; 484 unsigned int sdr_white_level;
503 bool horizontal_mirror; 485 bool horizontal_mirror;
504 bool visible; 486 bool visible;
@@ -525,10 +507,9 @@ struct dc_surface_update {
525 * null means no updates 507 * null means no updates
526 */ 508 */
527 struct dc_gamma *gamma; 509 struct dc_gamma *gamma;
528 enum color_transfer_func color_input_tf;
529 struct dc_transfer_func *in_transfer_func; 510 struct dc_transfer_func *in_transfer_func;
530 511
531 struct csc_transform *input_csc_color_matrix; 512 struct dc_csc_transform *input_csc_color_matrix;
532 struct fixed31_32 *coeff_reduction_factor; 513 struct fixed31_32 *coeff_reduction_factor;
533}; 514};
534 515
@@ -699,6 +680,7 @@ struct dc_cursor {
699 struct dc_cursor_attributes attributes; 680 struct dc_cursor_attributes attributes;
700}; 681};
701 682
683
702/******************************************************************************* 684/*******************************************************************************
703 * Interrupt interfaces 685 * Interrupt interfaces
704 ******************************************************************************/ 686 ******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 48e1fcf53d43..bd0fda0ceb91 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -117,6 +117,65 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
117 return reg_val; 117 return reg_val;
118} 118}
119 119
120uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
121 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
122 uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
123 uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
124 uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
125 uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
126 uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
127{
128 uint32_t reg_val = dm_read_reg(ctx, addr);
129 *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
130 *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
131 *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
132 *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
133 *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
134 *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
135 return reg_val;
136}
137
138uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
139 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
140 uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
141 uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
142 uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
143 uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
144 uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
145 uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
146{
147 uint32_t reg_val = dm_read_reg(ctx, addr);
148 *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
149 *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
150 *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
151 *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
152 *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
153 *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
154 *field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
155 return reg_val;
156}
157
158uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
159 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
160 uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
161 uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
162 uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
163 uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
164 uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
165 uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
166 uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
167{
168 uint32_t reg_val = dm_read_reg(ctx, addr);
169 *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
170 *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
171 *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
172 *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
173 *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
174 *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
175 *field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
176 *field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
177 return reg_val;
178}
120/* note: a va version of this is a pretty bad idea, since there is an output parameter passed by pointer; 179
121 * the compiler won't be able to check for a size match, which is prone to stack-corruption bugs 180
122 181
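
The new generic_reg_get6/7/8 helpers extend the existing pattern: one MMIO read, then N shift/mask field extractions into caller-provided pointers. A hypothetical call site (register name and field layout invented for illustration; each mask is the full in-place mask, matching get_reg_field_value_ex()):

uint32_t f1, f2, f3, f4, f5, f6;
uint32_t raw = generic_reg_get6(ctx, mmEXAMPLE_REG,
		0,  0x0000000f, &f1,	/* bits  3:0  */
		4,  0x000000f0, &f2,	/* bits  7:4  */
		8,  0x00000f00, &f3,	/* bits 11:8  */
		12, 0x0000f000, &f4,	/* bits 15:12 */
		16, 0x000f0000, &f5,	/* bits 19:16 */
		20, 0x00f00000, &f6);	/* bits 23:20 */

In practice these are presumably reached through REG_GET_N-style macros (compare REG_GET_4 elsewhere in this series), which supply the shift/mask pairs from the per-ASIC shift and mask tables.
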
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index b83a7dc2f5a9..b1f70579d61b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -423,6 +423,11 @@ enum dc_gamma_type {
423 GAMMA_CS_TFM_1D = 3, 423 GAMMA_CS_TFM_1D = 3,
424}; 424};
425 425
426struct dc_csc_transform {
427 uint16_t matrix[12];
428 bool enable_adjustment;
429};
430
426struct dc_gamma { 431struct dc_gamma {
427 struct kref refcount; 432 struct kref refcount;
428 enum dc_gamma_type type; 433 enum dc_gamma_type type;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index dc34515ef01f..8a716baa1203 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -51,6 +51,14 @@ struct link_mst_stream_allocation_table {
51 struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM]; 51 struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
52}; 52};
53 53
54struct time_stamp {
55 uint64_t edp_poweroff;
56 uint64_t edp_poweron;
57};
58
59struct link_trace {
60 struct time_stamp time_stamp;
61};
54/* 62/*
55 * A link contains one or more sinks and their connected status. 63 * A link contains one or more sinks and their connected status.
56 * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported. 64 * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -114,6 +122,7 @@ struct dc_link {
114 122
115 struct dc_link_status link_status; 123 struct dc_link_status link_status;
116 124
125 struct link_trace link_trace;
117}; 126};
118 127
119const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link); 128const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index d017df56b2ba..d7e6d53bb383 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -58,18 +58,20 @@ struct dc_stream_state {
58 58
59 struct freesync_context freesync_ctx; 59 struct freesync_context freesync_ctx;
60 60
61 struct dc_hdr_static_metadata hdr_static_metadata; 61 struct dc_info_packet hdr_static_metadata;
62 struct dc_transfer_func *out_transfer_func; 62 struct dc_transfer_func *out_transfer_func;
63 struct colorspace_transform gamut_remap_matrix; 63 struct colorspace_transform gamut_remap_matrix;
64 struct csc_transform csc_color_matrix; 64 struct dc_csc_transform csc_color_matrix;
65 65
66 enum dc_color_space output_color_space; 66 enum dc_color_space output_color_space;
67 enum dc_dither_option dither_option; 67 enum dc_dither_option dither_option;
68 68
69 enum view_3d_format view_format; 69 enum view_3d_format view_format;
70 enum color_transfer_func output_tf;
71 70
72 bool ignore_msa_timing_param; 71 bool ignore_msa_timing_param;
72
73 unsigned long long periodic_fn_vsync_delta;
74
73 /* TODO: custom INFO packets */ 75 /* TODO: custom INFO packets */
74 /* TODO: ABM info (DMCU) */ 76 /* TODO: ABM info (DMCU) */
75 /* PSR info */ 77 /* PSR info */
@@ -110,9 +112,10 @@ struct dc_stream_update {
110 struct rect src; 112 struct rect src;
111 struct rect dst; 113 struct rect dst;
112 struct dc_transfer_func *out_transfer_func; 114 struct dc_transfer_func *out_transfer_func;
113 struct dc_hdr_static_metadata *hdr_static_metadata; 115 struct dc_info_packet *hdr_static_metadata;
114 enum color_transfer_func color_output_tf;
115 unsigned int *abm_level; 116 unsigned int *abm_level;
117
118 unsigned long long *periodic_fn_vsync_delta;
116}; 119};
117 120
118bool dc_is_stream_unchanged( 121bool dc_is_stream_unchanged(
@@ -131,13 +134,6 @@ bool dc_is_stream_scaling_unchanged(
131 * This does not trigger a flip. No surface address is programmed. 134 * This does not trigger a flip. No surface address is programmed.
132 */ 135 */
133 136
134bool dc_commit_planes_to_stream(
135 struct dc *dc,
136 struct dc_plane_state **plane_states,
137 uint8_t new_plane_count,
138 struct dc_stream_state *dc_stream,
139 struct dc_state *state);
140
141void dc_commit_updates_for_stream(struct dc *dc, 137void dc_commit_updates_for_stream(struct dc *dc,
142 struct dc_surface_update *srf_updates, 138 struct dc_surface_update *srf_updates,
143 int surface_count, 139 int surface_count,
@@ -209,14 +205,6 @@ bool dc_add_all_planes_for_stream(
209enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream); 205enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
210 206
211/* 207/*
212 * This function takes a stream and checks if it is guaranteed to be supported.
213 * Guaranteed means that MAX_COFUNC similar streams are supported.
214 *
215 * After this call:
216 * No hardware is programmed for call. Only validation is done.
217 */
218
219/*
220 * Set up streams and links associated to drive sinks 208 * Set up streams and links associated to drive sinks
221 * The streams parameter is an absolute set of all active streams. 209 * The streams parameter is an absolute set of all active streams.
222 * 210 *
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9441305d3ab5..9defe3b17617 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -370,12 +370,6 @@ struct dc_csc_adjustments {
370 struct fixed31_32 hue; 370 struct fixed31_32 hue;
371}; 371};
372 372
373enum {
374 MAX_LANES = 2,
375 MAX_COFUNC_PATH = 6,
376 LAYER_INDEX_PRIMARY = -1,
377};
378
379enum dpcd_downstream_port_max_bpc { 373enum dpcd_downstream_port_max_bpc {
380 DOWN_STREAM_MAX_8BPC = 0, 374 DOWN_STREAM_MAX_8BPC = 0,
381 DOWN_STREAM_MAX_10BPC, 375 DOWN_STREAM_MAX_10BPC,
@@ -530,6 +524,15 @@ struct vrr_params {
530 uint32_t frame_counter; 524 uint32_t frame_counter;
531}; 525};
532 526
527struct dc_info_packet {
528 bool valid;
529 uint8_t hb0;
530 uint8_t hb1;
531 uint8_t hb2;
532 uint8_t hb3;
533 uint8_t sb[32];
534};
535
533#define DC_PLANE_UPDATE_TIMES_MAX 10 536#define DC_PLANE_UPDATE_TIMES_MAX 10
534 537
535struct dc_plane_flip_time { 538struct dc_plane_flip_time {
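
The new struct dc_info_packet replaces the encoder-side encoder_info_packet in the stream interface: four header bytes, a 32-byte payload, and a validity flag. A hypothetical fill, with all byte values chosen purely for illustration:

struct dc_info_packet pkt = {0};

pkt.hb0 = 0x87;		/* infoframe type (illustrative value) */
pkt.hb1 = 0x01;		/* version (illustrative) */
pkt.hb2 = 0x1a;		/* payload length in bytes */
pkt.sb[1] = 0x02;	/* first payload byte, e.g. an EOTF code */
pkt.valid = true;	/* consumers skip packets with valid == false */
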
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 6d5cdcdc8ec9..7f6d724686f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -33,8 +33,9 @@
33 33
34#define CTX \ 34#define CTX \
35 aud->base.ctx 35 aud->base.ctx
36#define DC_LOGGER \ 36
37 aud->base.ctx->logger 37#define DC_LOGGER_INIT()
38
38#define REG(reg)\ 39#define REG(reg)\
39 (aud->regs->reg) 40 (aud->regs->reg)
40 41
@@ -348,8 +349,8 @@ static void set_audio_latency(
348 349
349void dce_aud_az_enable(struct audio *audio) 350void dce_aud_az_enable(struct audio *audio)
350{ 351{
351 struct dce_audio *aud = DCE_AUD(audio);
352 uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); 352 uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
353 DC_LOGGER_INIT();
353 354
354 set_reg_field_value(value, 1, 355 set_reg_field_value(value, 1,
355 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, 356 AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
@@ -371,7 +372,7 @@ void dce_aud_az_enable(struct audio *audio)
371void dce_aud_az_disable(struct audio *audio) 372void dce_aud_az_disable(struct audio *audio)
372{ 373{
373 uint32_t value; 374 uint32_t value;
374 struct dce_audio *aud = DCE_AUD(audio); 375 DC_LOGGER_INIT();
375 376
376 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); 377 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
377 set_reg_field_value(value, 1, 378 set_reg_field_value(value, 1,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 0aa2cda60890..223db98a568a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -41,8 +41,9 @@
41 41
42#define CTX \ 42#define CTX \
43 clk_src->base.ctx 43 clk_src->base.ctx
44#define DC_LOGGER \ 44
45 calc_pll_cs->ctx->logger 45#define DC_LOGGER_INIT()
46
46#undef FN 47#undef FN
47#define FN(reg_name, field_name) \ 48#define FN(reg_name, field_name) \
48 clk_src->cs_shift->field_name, clk_src->cs_mask->field_name 49 clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
@@ -467,7 +468,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
467{ 468{
468 uint32_t field = 0; 469 uint32_t field = 0;
469 uint32_t pll_calc_error = MAX_PLL_CALC_ERROR; 470 uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
470 struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll; 471 DC_LOGGER_INIT();
471 /* Check if reference clock is external (not pcie/xtalin) 472 /* Check if reference clock is external (not pcie/xtalin)
472 * HW Dce80 spec: 473 * HW Dce80 spec:
473 * 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB 474 * 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB
@@ -557,8 +558,8 @@ static uint32_t dce110_get_pix_clk_dividers(
557 struct pll_settings *pll_settings) 558 struct pll_settings *pll_settings)
558{ 559{
559 struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs); 560 struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
560 struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
561 uint32_t pll_calc_error = MAX_PLL_CALC_ERROR; 561 uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
562 DC_LOGGER_INIT();
562 563
563 if (pix_clk_params == NULL || pll_settings == NULL 564 if (pix_clk_params == NULL || pll_settings == NULL
564 || pix_clk_params->requested_pix_clk == 0) { 565 || pix_clk_params->requested_pix_clk == 0) {
@@ -589,6 +590,9 @@ static uint32_t dce110_get_pix_clk_dividers(
589 pll_settings, pix_clk_params); 590 pll_settings, pix_clk_params);
590 break; 591 break;
591 case DCE_VERSION_11_2: 592 case DCE_VERSION_11_2:
593#if defined(CONFIG_DRM_AMD_DC_VEGAM)
594 case DCE_VERSION_11_22:
595#endif
592 case DCE_VERSION_12_0: 596 case DCE_VERSION_12_0:
593#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 597#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
594 case DCN_VERSION_1_0: 598 case DCN_VERSION_1_0:
@@ -978,6 +982,9 @@ static bool dce110_program_pix_clk(
978 982
979 break; 983 break;
980 case DCE_VERSION_11_2: 984 case DCE_VERSION_11_2:
985#if defined(CONFIG_DRM_AMD_DC_VEGAM)
986 case DCE_VERSION_11_22:
987#endif
981 case DCE_VERSION_12_0: 988 case DCE_VERSION_12_0:
982#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 989#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
983 case DCN_VERSION_1_0: 990 case DCN_VERSION_1_0:
@@ -1054,7 +1061,7 @@ static void get_ss_info_from_atombios(
1054 struct spread_spectrum_info *ss_info_cur; 1061 struct spread_spectrum_info *ss_info_cur;
1055 struct spread_spectrum_data *ss_data_cur; 1062 struct spread_spectrum_data *ss_data_cur;
1056 uint32_t i; 1063 uint32_t i;
1057 struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll; 1064 DC_LOGGER_INIT();
1058 if (ss_entries_num == NULL) { 1065 if (ss_entries_num == NULL) {
1059 DC_LOG_SYNC( 1066 DC_LOG_SYNC(
1060 "Invalid entry !!!\n"); 1067 "Invalid entry !!!\n");
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index 487724345d9d..0275d6d60da4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -53,7 +53,8 @@ void dce_pipe_control_lock(struct dc *dc,
53 struct dce_hwseq *hws = dc->hwseq; 53 struct dce_hwseq *hws = dc->hwseq;
54 54
55 /* Don't lock the pipe when it is blanked */ 55
56 if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg)) 56 if (lock && pipe->stream_res.tg->funcs->is_blanked &&
57 pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
57 return; 58 return;
58 59
59 val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], 60 val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst],
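
The dce_pipe_control_lock fix above is a NULL-check on an optional hook: not every timing generator implements is_blanked, so the callback must be tested before it is called. The guard reads naturally as a small helper (a sketch, not part of the patch):

static bool tg_is_blanked(struct timing_generator *tg)
{
	/* treat a missing hook as "not blanked" so the lock is taken */
	return tg->funcs->is_blanked &&
	       tg->funcs->is_blanked(tg);
}
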
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 8167cad7bcf7..dbe3b26b6d9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -113,6 +113,7 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
113 .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe, 113 .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
114 .enable_hpd = dce110_link_encoder_enable_hpd, 114 .enable_hpd = dce110_link_encoder_enable_hpd,
115 .disable_hpd = dce110_link_encoder_disable_hpd, 115 .disable_hpd = dce110_link_encoder_disable_hpd,
116 .is_dig_enabled = dce110_is_dig_enabled,
116 .destroy = dce110_link_encoder_destroy 117 .destroy = dce110_link_encoder_destroy
117}; 118};
118 119
@@ -535,8 +536,9 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc,
535 DP_SEC_GSP0_PRIORITY, 1); 536 DP_SEC_GSP0_PRIORITY, 1);
536} 537}
537 538
538static bool is_dig_enabled(const struct dce110_link_encoder *enc110) 539bool dce110_is_dig_enabled(struct link_encoder *enc)
539{ 540{
541 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
540 uint32_t value; 542 uint32_t value;
541 543
542 REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value); 544 REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
@@ -1031,7 +1033,7 @@ void dce110_link_encoder_disable_output(
1031 struct bp_transmitter_control cntl = { 0 }; 1033 struct bp_transmitter_control cntl = { 0 };
1032 enum bp_result result; 1034 enum bp_result result;
1033 1035
1034 if (!is_dig_enabled(enc110)) { 1036 if (!dce110_is_dig_enabled(enc)) {
1035 /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ 1037 /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
1036 return; 1038 return;
1037 } 1039 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 0ec3433d34b6..347069461a22 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -263,4 +263,6 @@ void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
263void dce110_psr_program_secondary_packet(struct link_encoder *enc, 263void dce110_psr_program_secondary_packet(struct link_encoder *enc,
264 unsigned int sdp_transmit_line_num_deadline); 264 unsigned int sdp_transmit_line_num_deadline);
265 265
266bool dce110_is_dig_enabled(struct link_encoder *enc);
267
266#endif /* __DC_LINK_ENCODER__DCE110_H__ */ 268#endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 0790f25c7b3b..b235a75355b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -174,6 +174,25 @@ static void program_urgency_watermark(
174 URGENCY_HIGH_WATERMARK, urgency_high_wm); 174 URGENCY_HIGH_WATERMARK, urgency_high_wm);
175} 175}
176 176
177static void dce120_program_urgency_watermark(
178 struct dce_mem_input *dce_mi,
179 uint32_t wm_select,
180 uint32_t urgency_low_wm,
181 uint32_t urgency_high_wm)
182{
183 REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
184 URGENCY_WATERMARK_MASK, wm_select);
185
186 REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0,
187 URGENCY_LOW_WATERMARK, urgency_low_wm,
188 URGENCY_HIGH_WATERMARK, urgency_high_wm);
189
190 REG_SET_2(DPG_PIPE_URGENT_LEVEL_CONTROL, 0,
191 URGENT_LEVEL_LOW_WATERMARK, urgency_low_wm,
192 URGENT_LEVEL_HIGH_WATERMARK, urgency_high_wm);
193
194}
195
177static void program_nbp_watermark( 196static void program_nbp_watermark(
178 struct dce_mem_input *dce_mi, 197 struct dce_mem_input *dce_mi,
179 uint32_t wm_select, 198 uint32_t wm_select,
@@ -206,6 +225,25 @@ static void program_nbp_watermark(
206 } 225 }
207} 226}
208 227
228static void dce120_program_stutter_watermark(
229 struct dce_mem_input *dce_mi,
230 uint32_t wm_select,
231 uint32_t stutter_mark,
232 uint32_t stutter_entry)
233{
234 REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
235 STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select);
236
237 if (REG(DPG_PIPE_STUTTER_CONTROL2))
238 REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL2,
239 STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark,
240 STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry);
241 else
242 REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
243 STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark,
244 STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry);
245}
246
209static void program_stutter_watermark( 247static void program_stutter_watermark(
210 struct dce_mem_input *dce_mi, 248 struct dce_mem_input *dce_mi,
211 uint32_t wm_select, 249 uint32_t wm_select,
@@ -225,7 +263,8 @@ static void program_stutter_watermark(
225static void dce_mi_program_display_marks( 263static void dce_mi_program_display_marks(
226 struct mem_input *mi, 264 struct mem_input *mi,
227 struct dce_watermarks nbp, 265 struct dce_watermarks nbp,
228 struct dce_watermarks stutter, 266 struct dce_watermarks stutter_exit,
267 struct dce_watermarks stutter_enter,
229 struct dce_watermarks urgent, 268 struct dce_watermarks urgent,
230 uint32_t total_dest_line_time_ns) 269 uint32_t total_dest_line_time_ns)
231{ 270{
@@ -243,13 +282,14 @@ static void dce_mi_program_display_marks(
243 program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */ 282 program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */
244 program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */ 283 program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */
245 284
246 program_stutter_watermark(dce_mi, 2, stutter.a_mark); /* set a */ 285 program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark); /* set a */
247 program_stutter_watermark(dce_mi, 1, stutter.d_mark); /* set d */ 286 program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark); /* set d */
248} 287}
249 288
250static void dce120_mi_program_display_marks(struct mem_input *mi, 289static void dce112_mi_program_display_marks(struct mem_input *mi,
251 struct dce_watermarks nbp, 290 struct dce_watermarks nbp,
252 struct dce_watermarks stutter, 291 struct dce_watermarks stutter_exit,
292 struct dce_watermarks stutter_entry,
253 struct dce_watermarks urgent, 293 struct dce_watermarks urgent,
254 uint32_t total_dest_line_time_ns) 294 uint32_t total_dest_line_time_ns)
255{ 295{
@@ -273,10 +313,43 @@ static void dce120_mi_program_display_marks(struct mem_input *mi,
273 program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */ 313 program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */
274 program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */ 314 program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */
275 315
276 program_stutter_watermark(dce_mi, 0, stutter.a_mark); /* set a */ 316 program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark); /* set a */
277 program_stutter_watermark(dce_mi, 1, stutter.b_mark); /* set b */ 317 program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark); /* set b */
278 program_stutter_watermark(dce_mi, 2, stutter.c_mark); /* set c */ 318 program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark); /* set c */
279 program_stutter_watermark(dce_mi, 3, stutter.d_mark); /* set d */ 319 program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark); /* set d */
320}
321
322static void dce120_mi_program_display_marks(struct mem_input *mi,
323 struct dce_watermarks nbp,
324 struct dce_watermarks stutter_exit,
325 struct dce_watermarks stutter_entry,
326 struct dce_watermarks urgent,
327 uint32_t total_dest_line_time_ns)
328{
329 struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
330 uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
331
332 dce120_program_urgency_watermark(dce_mi, 0, /* set a */
333 urgent.a_mark, total_dest_line_time_ns);
334 dce120_program_urgency_watermark(dce_mi, 1, /* set b */
335 urgent.b_mark, total_dest_line_time_ns);
336 dce120_program_urgency_watermark(dce_mi, 2, /* set c */
337 urgent.c_mark, total_dest_line_time_ns);
338 dce120_program_urgency_watermark(dce_mi, 3, /* set d */
339 urgent.d_mark, total_dest_line_time_ns);
340
341 REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
342 STUTTER_ENABLE, stutter_en,
343 STUTTER_IGNORE_FBC, 1);
344 program_nbp_watermark(dce_mi, 0, nbp.a_mark); /* set a */
345 program_nbp_watermark(dce_mi, 1, nbp.b_mark); /* set b */
346 program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */
347 program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */
348
349 dce120_program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark, stutter_entry.a_mark); /* set a */
350 dce120_program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark, stutter_entry.b_mark); /* set b */
351 dce120_program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark, stutter_entry.c_mark); /* set c */
352 dce120_program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark, stutter_entry.d_mark); /* set d */
280} 353}
281 354
282static void program_tiling( 355static void program_tiling(
@@ -696,5 +769,17 @@ void dce112_mem_input_construct(
696 const struct dce_mem_input_mask *mi_mask) 769 const struct dce_mem_input_mask *mi_mask)
697{ 770{
698 dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); 771 dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
772 dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
773}
774
775void dce120_mem_input_construct(
776 struct dce_mem_input *dce_mi,
777 struct dc_context *ctx,
778 int inst,
779 const struct dce_mem_input_registers *regs,
780 const struct dce_mem_input_shift *mi_shift,
781 const struct dce_mem_input_mask *mi_mask)
782{
783 dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
699 dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks; 784 dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
700} 785}
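
In dce120_program_stutter_watermark above, the if (REG(DPG_PIPE_STUTTER_CONTROL2)) test relies on the driver convention that REG() resolves to the register offset from the per-ASIC register table, which is zero when the ASIC does not define that register. Schematically (a sketch restating the hunk's control flow):

if (REG(DPG_PIPE_STUTTER_CONTROL2))
	/* newer layout: dedicated register carries both watermarks */
	REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL2,
		     STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark,
		     STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry);
else
	/* fallback: pack both fields into the original control register */
	REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
		     STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark,
		     STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry);
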
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
index 05d39c0cbe87..d15b0d7f47fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -106,6 +106,7 @@ struct dce_mem_input_registers {
106 uint32_t DPG_PIPE_ARBITRATION_CONTROL1; 106 uint32_t DPG_PIPE_ARBITRATION_CONTROL1;
107 uint32_t DPG_WATERMARK_MASK_CONTROL; 107 uint32_t DPG_WATERMARK_MASK_CONTROL;
108 uint32_t DPG_PIPE_URGENCY_CONTROL; 108 uint32_t DPG_PIPE_URGENCY_CONTROL;
109 uint32_t DPG_PIPE_URGENT_LEVEL_CONTROL;
109 uint32_t DPG_PIPE_NB_PSTATE_CHANGE_CONTROL; 110 uint32_t DPG_PIPE_NB_PSTATE_CHANGE_CONTROL;
110 uint32_t DPG_PIPE_LOW_POWER_CONTROL; 111 uint32_t DPG_PIPE_LOW_POWER_CONTROL;
111 uint32_t DPG_PIPE_STUTTER_CONTROL; 112 uint32_t DPG_PIPE_STUTTER_CONTROL;
@@ -213,6 +214,11 @@ struct dce_mem_input_registers {
213 214
214#define MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\ 215#define MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
215 SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\ 216 SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
217 SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_ENTER_SELF_REFRESH_WATERMARK, mask_sh),\
218 SFB(blk, DPG_PIPE_URGENT_LEVEL_CONTROL, URGENT_LEVEL_LOW_WATERMARK, mask_sh),\
219 SFB(blk, DPG_PIPE_URGENT_LEVEL_CONTROL, URGENT_LEVEL_HIGH_WATERMARK, mask_sh),\
220 SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\
221 SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\
216 SFB(blk, DPG_WATERMARK_MASK_CONTROL, PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\ 222 SFB(blk, DPG_WATERMARK_MASK_CONTROL, PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
217 SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_ENABLE, mask_sh),\ 223 SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_ENABLE, mask_sh),\
218 SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\ 224 SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
@@ -286,6 +292,8 @@ struct dce_mem_input_registers {
286 type STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK; \ 292 type STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK; \
287 type URGENCY_LOW_WATERMARK; \ 293 type URGENCY_LOW_WATERMARK; \
288 type URGENCY_HIGH_WATERMARK; \ 294 type URGENCY_HIGH_WATERMARK; \
295 type URGENT_LEVEL_LOW_WATERMARK;\
296 type URGENT_LEVEL_HIGH_WATERMARK;\
289 type NB_PSTATE_CHANGE_ENABLE; \ 297 type NB_PSTATE_CHANGE_ENABLE; \
290 type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST; \ 298 type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST; \
291 type NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \ 299 type NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
@@ -297,6 +305,7 @@ struct dce_mem_input_registers {
297 type STUTTER_ENABLE; \ 305 type STUTTER_ENABLE; \
298 type STUTTER_IGNORE_FBC; \ 306 type STUTTER_IGNORE_FBC; \
299 type STUTTER_EXIT_SELF_REFRESH_WATERMARK; \ 307 type STUTTER_EXIT_SELF_REFRESH_WATERMARK; \
308 type STUTTER_ENTER_SELF_REFRESH_WATERMARK; \
300 type DMIF_BUFFERS_ALLOCATED; \ 309 type DMIF_BUFFERS_ALLOCATED; \
301 type DMIF_BUFFERS_ALLOCATION_COMPLETED; \ 310 type DMIF_BUFFERS_ALLOCATION_COMPLETED; \
302 type ENABLE; /* MC_HUB_RDREQ_DMIF_LIMIT */\ 311 type ENABLE; /* MC_HUB_RDREQ_DMIF_LIMIT */\
@@ -344,4 +353,12 @@ void dce112_mem_input_construct(
344 const struct dce_mem_input_shift *mi_shift, 353 const struct dce_mem_input_shift *mi_shift,
345 const struct dce_mem_input_mask *mi_mask); 354 const struct dce_mem_input_mask *mi_mask);
346 355
356void dce120_mem_input_construct(
357 struct dce_mem_input *dce_mi,
358 struct dc_context *ctx,
359 int inst,
360 const struct dce_mem_input_registers *regs,
361 const struct dce_mem_input_shift *mi_shift,
362 const struct dce_mem_input_mask *mi_mask);
363
347#endif /*__DCE_MEM_INPUT_H__*/ 364#endif /*__DCE_MEM_INPUT_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 162f6a6c4208..e265a0abe361 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -26,27 +26,10 @@
26#include "dc_bios_types.h" 26#include "dc_bios_types.h"
27#include "dce_stream_encoder.h" 27#include "dce_stream_encoder.h"
28#include "reg_helper.h" 28#include "reg_helper.h"
29#include "hw_shared.h"
30
29#define DC_LOGGER \ 31#define DC_LOGGER \
30 enc110->base.ctx->logger 32 enc110->base.ctx->logger
31enum DP_PIXEL_ENCODING {
32DP_PIXEL_ENCODING_RGB444 = 0x00000000,
33DP_PIXEL_ENCODING_YCBCR422 = 0x00000001,
34DP_PIXEL_ENCODING_YCBCR444 = 0x00000002,
35DP_PIXEL_ENCODING_RGB_WIDE_GAMUT = 0x00000003,
36DP_PIXEL_ENCODING_Y_ONLY = 0x00000004,
37DP_PIXEL_ENCODING_YCBCR420 = 0x00000005,
38DP_PIXEL_ENCODING_RESERVED = 0x00000006,
39};
40
41
42enum DP_COMPONENT_DEPTH {
43DP_COMPONENT_DEPTH_6BPC = 0x00000000,
44DP_COMPONENT_DEPTH_8BPC = 0x00000001,
45DP_COMPONENT_DEPTH_10BPC = 0x00000002,
46DP_COMPONENT_DEPTH_12BPC = 0x00000003,
47DP_COMPONENT_DEPTH_16BPC = 0x00000004,
48DP_COMPONENT_DEPTH_RESERVED = 0x00000005,
49};
50 33
51 34
52#define REG(reg)\ 35#define REG(reg)\
@@ -80,7 +63,7 @@ enum {
80static void dce110_update_generic_info_packet( 63static void dce110_update_generic_info_packet(
81 struct dce110_stream_encoder *enc110, 64 struct dce110_stream_encoder *enc110,
82 uint32_t packet_index, 65 uint32_t packet_index,
83 const struct encoder_info_packet *info_packet) 66 const struct dc_info_packet *info_packet)
84{ 67{
85 uint32_t regval; 68 uint32_t regval;
86 /* TODOFPGA Figure out a proper number for max_retries polling for lock 69 /* TODOFPGA Figure out a proper number for max_retries polling for lock
@@ -196,7 +179,7 @@ static void dce110_update_generic_info_packet(
196static void dce110_update_hdmi_info_packet( 179static void dce110_update_hdmi_info_packet(
197 struct dce110_stream_encoder *enc110, 180 struct dce110_stream_encoder *enc110,
198 uint32_t packet_index, 181 uint32_t packet_index,
199 const struct encoder_info_packet *info_packet) 182 const struct dc_info_packet *info_packet)
200{ 183{
201 uint32_t cont, send, line; 184 uint32_t cont, send, line;
202 185
@@ -314,11 +297,11 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
314 switch (crtc_timing->pixel_encoding) { 297 switch (crtc_timing->pixel_encoding) {
315 case PIXEL_ENCODING_YCBCR422: 298 case PIXEL_ENCODING_YCBCR422:
316 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, 299 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
317 DP_PIXEL_ENCODING_YCBCR422); 300 DP_PIXEL_ENCODING_TYPE_YCBCR422);
318 break; 301 break;
319 case PIXEL_ENCODING_YCBCR444: 302 case PIXEL_ENCODING_YCBCR444:
320 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, 303 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
321 DP_PIXEL_ENCODING_YCBCR444); 304 DP_PIXEL_ENCODING_TYPE_YCBCR444);
322 305
323 if (crtc_timing->flags.Y_ONLY) 306 if (crtc_timing->flags.Y_ONLY)
324 if (crtc_timing->display_color_depth != COLOR_DEPTH_666) 307 if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
@@ -326,7 +309,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
326 * Color depth of Y-only could be 309 * Color depth of Y-only could be
327 * 8, 10, 12, 16 bits */ 310 * 8, 10, 12, 16 bits */
328 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, 311 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
329 DP_PIXEL_ENCODING_Y_ONLY); 312 DP_PIXEL_ENCODING_TYPE_Y_ONLY);
330 /* Note: DP_MSA_MISC1 bit 7 is the indicator 313 /* Note: DP_MSA_MISC1 bit 7 is the indicator
331 * of Y-only mode. 314 * of Y-only mode.
332 * This bit is set in HW if register 315 * This bit is set in HW if register
@@ -334,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
334 break; 317 break;
335 case PIXEL_ENCODING_YCBCR420: 318 case PIXEL_ENCODING_YCBCR420:
336 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, 319 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
337 DP_PIXEL_ENCODING_YCBCR420); 320 DP_PIXEL_ENCODING_TYPE_YCBCR420);
338 if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN) 321 if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
339 REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1); 322 REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
340 323
@@ -345,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
345 break; 328 break;
346 default: 329 default:
347 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, 330 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
348 DP_PIXEL_ENCODING_RGB444); 331 DP_PIXEL_ENCODING_TYPE_RGB444);
349 break; 332 break;
350 } 333 }
351 334
@@ -363,20 +346,20 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
363 break; 346 break;
364 case COLOR_DEPTH_888: 347 case COLOR_DEPTH_888:
365 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, 348 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
366 DP_COMPONENT_DEPTH_8BPC); 349 DP_COMPONENT_PIXEL_DEPTH_8BPC);
367 break; 350 break;
368 case COLOR_DEPTH_101010: 351 case COLOR_DEPTH_101010:
369 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, 352 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
370 DP_COMPONENT_DEPTH_10BPC); 353 DP_COMPONENT_PIXEL_DEPTH_10BPC);
371 354
372 break; 355 break;
373 case COLOR_DEPTH_121212: 356 case COLOR_DEPTH_121212:
374 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, 357 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
375 DP_COMPONENT_DEPTH_12BPC); 358 DP_COMPONENT_PIXEL_DEPTH_12BPC);
376 break; 359 break;
377 default: 360 default:
378 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, 361 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
379 DP_COMPONENT_DEPTH_6BPC); 362 DP_COMPONENT_PIXEL_DEPTH_6BPC);
380 break; 363 break;
381 } 364 }
382 365
@@ -836,7 +819,7 @@ static void dce110_stream_encoder_update_dp_info_packets(
836 const struct encoder_info_frame *info_frame) 819 const struct encoder_info_frame *info_frame)
837{ 820{
838 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); 821 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
839 uint32_t value = REG_READ(DP_SEC_CNTL); 822 uint32_t value = 0;
840 823
841 if (info_frame->vsc.valid) 824 if (info_frame->vsc.valid)
842 dce110_update_generic_info_packet( 825 dce110_update_generic_info_packet(
@@ -870,6 +853,7 @@ static void dce110_stream_encoder_update_dp_info_packets(
870 * Therefore we need to enable master bit 853 * Therefore we need to enable master bit
871 * if at least on of the fields is not 0 854 * if at least on of the fields is not 0
872 */ 855 */
856 value = REG_READ(DP_SEC_CNTL);
873 if (value) 857 if (value)
874 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); 858 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
875} 859}
@@ -879,7 +863,7 @@ static void dce110_stream_encoder_stop_dp_info_packets(
879{ 863{
880 /* stop generic packets on DP */ 864 /* stop generic packets on DP */
881 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); 865 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
882 uint32_t value = REG_READ(DP_SEC_CNTL); 866 uint32_t value = 0;
883 867
884 if (enc110->se_mask->DP_SEC_AVI_ENABLE) { 868 if (enc110->se_mask->DP_SEC_AVI_ENABLE) {
885 REG_SET_7(DP_SEC_CNTL, 0, 869 REG_SET_7(DP_SEC_CNTL, 0,
@@ -892,25 +876,10 @@ static void dce110_stream_encoder_stop_dp_info_packets(
892 DP_SEC_STREAM_ENABLE, 0); 876 DP_SEC_STREAM_ENABLE, 0);
893 } 877 }
894 878
895#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
896 if (enc110->se_mask->DP_SEC_GSP7_ENABLE) {
897 REG_SET_10(DP_SEC_CNTL, 0,
898 DP_SEC_GSP0_ENABLE, 0,
899 DP_SEC_GSP1_ENABLE, 0,
900 DP_SEC_GSP2_ENABLE, 0,
901 DP_SEC_GSP3_ENABLE, 0,
902 DP_SEC_GSP4_ENABLE, 0,
903 DP_SEC_GSP5_ENABLE, 0,
904 DP_SEC_GSP6_ENABLE, 0,
905 DP_SEC_GSP7_ENABLE, 0,
906 DP_SEC_MPG_ENABLE, 0,
907 DP_SEC_STREAM_ENABLE, 0);
908 }
909#endif
910 /* this register is shared with the audio info frame; 879
911 * therefore we need to keep master enabled 880 * therefore we need to keep master enabled
912 * if at least one of the fields is not 0 */ 881 * if at least one of the fields is not 0 */
913 882 value = REG_READ(DP_SEC_CNTL);
914 if (value) 883 if (value)
915 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); 884 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
916 885
@@ -1513,7 +1482,7 @@ static void dce110_se_disable_dp_audio(
1513 struct stream_encoder *enc) 1482 struct stream_encoder *enc)
1514{ 1483{
1515 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); 1484 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
1516 uint32_t value = REG_READ(DP_SEC_CNTL); 1485 uint32_t value = 0;
1517 1486
1518 /* Disable Audio packets */ 1487 /* Disable Audio packets */
1519 REG_UPDATE_5(DP_SEC_CNTL, 1488 REG_UPDATE_5(DP_SEC_CNTL,
@@ -1525,6 +1494,7 @@ static void dce110_se_disable_dp_audio(
1525 1494
1526 /* This register is shared with the encoder info frame. Therefore we need to 1495
1527 keep master enabled if at least one of the fields is not 0 */ 1496
1497 value = REG_READ(DP_SEC_CNTL);
1528 if (value != 0) 1498 if (value != 0)
1529 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); 1499 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1530 1500
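
All three dce_stream_encoder.c hunks above make the same fix: DP_SEC_CNTL was read before the enable bits were updated, so the "keep the master enable if any field is still set" check operated on a stale snapshot. The corrected ordering, in schematic form (field names taken from this file; the cleared fields vary per call site):

uint32_t value;

/* 1) clear the per-packet enables for this path */
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, 0);

/* 2) only now snapshot the register ... */
value = REG_READ(DP_SEC_CNTL);

/* 3) ... and keep the shared master enable if anything remains set */
if (value)
	REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
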
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3092f76bdb75..38ec0d609297 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -733,38 +733,6 @@ enum dc_status dce100_add_stream_to_ctx(
733 return result; 733 return result;
734} 734}
735 735
736enum dc_status dce100_validate_guaranteed(
737 struct dc *dc,
738 struct dc_stream_state *dc_stream,
739 struct dc_state *context)
740{
741 enum dc_status result = DC_ERROR_UNEXPECTED;
742
743 context->streams[0] = dc_stream;
744 dc_stream_retain(context->streams[0]);
745 context->stream_count++;
746
747 result = resource_map_pool_resources(dc, context, dc_stream);
748
749 if (result == DC_OK)
750 result = resource_map_clock_resources(dc, context, dc_stream);
751
752 if (result == DC_OK)
753 result = build_mapped_resource(dc, context, dc_stream);
754
755 if (result == DC_OK) {
756 validate_guaranteed_copy_streams(
757 context, dc->caps.max_streams);
758 result = resource_build_scaling_params_for_context(dc, context);
759 }
760
761 if (result == DC_OK)
762 if (!dce100_validate_bandwidth(dc, context))
763 result = DC_FAIL_BANDWIDTH_VALIDATE;
764
765 return result;
766}
767
768static void dce100_destroy_resource_pool(struct resource_pool **pool) 736static void dce100_destroy_resource_pool(struct resource_pool **pool)
769{ 737{
770 struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); 738 struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -786,7 +754,6 @@ enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, s
786static const struct resource_funcs dce100_res_pool_funcs = { 754static const struct resource_funcs dce100_res_pool_funcs = {
787 .destroy = dce100_destroy_resource_pool, 755 .destroy = dce100_destroy_resource_pool,
788 .link_enc_create = dce100_link_encoder_create, 756 .link_enc_create = dce100_link_encoder_create,
789 .validate_guaranteed = dce100_validate_guaranteed,
790 .validate_bandwidth = dce100_validate_bandwidth, 757 .validate_bandwidth = dce100_validate_bandwidth,
791 .validate_plane = dce100_validate_plane, 758 .validate_plane = dce100_validate_plane,
792 .add_stream_to_ctx = dce100_add_stream_to_ctx, 759 .add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index d0575999f172..2288d0aa773b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -70,8 +70,9 @@
70 70
71#define CTX \ 71#define CTX \
72 hws->ctx 72 hws->ctx
73#define DC_LOGGER \ 73
74 ctx->logger 74#define DC_LOGGER_INIT()
75
75#define REG(reg)\ 76#define REG(reg)\
76 hws->regs->reg 77 hws->regs->reg
77 78
@@ -279,7 +280,9 @@ dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
279 build_prescale_params(&prescale_params, plane_state); 280 build_prescale_params(&prescale_params, plane_state);
280 ipp->funcs->ipp_program_prescale(ipp, &prescale_params); 281 ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
281 282
282 if (plane_state->gamma_correction && dce_use_lut(plane_state->format)) 283 if (plane_state->gamma_correction &&
284 !plane_state->gamma_correction->is_identity &&
285 dce_use_lut(plane_state->format))
283 ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction); 286 ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
284 287
285 if (tf == NULL) { 288 if (tf == NULL) {
@@ -851,6 +854,28 @@ void hwss_edp_power_control(
851 854
852 if (power_up != is_panel_powered_on(hwseq)) { 855 if (power_up != is_panel_powered_on(hwseq)) {
853 /* Send VBIOS command to prompt eDP panel power */ 856 /* Send VBIOS command to prompt eDP panel power */
857 if (power_up) {
858 unsigned long long current_ts = dm_get_timestamp(ctx);
859 unsigned long long duration_in_ms =
860 dm_get_elapse_time_in_ns(
861 ctx,
862 current_ts,
863 div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000));
864 unsigned long long wait_time_ms = 0;
865
866 /* max 500ms from LCDVDD off to on */
867 if (link->link_trace.time_stamp.edp_poweroff == 0)
868 wait_time_ms = 500;
869 else if (duration_in_ms < 500)
870 wait_time_ms = 500 - duration_in_ms;
871
872 if (wait_time_ms) {
873 msleep(wait_time_ms);
874 dm_output_to_console("%s: wait %lld ms to power on eDP.\n",
875 __func__, wait_time_ms);
876 }
877
878 }
854 879
855 DC_LOG_HW_RESUME_S3( 880 DC_LOG_HW_RESUME_S3(
856 "%s: Panel Power action: %s\n", 881 "%s: Panel Power action: %s\n",
@@ -864,9 +889,14 @@ void hwss_edp_power_control(
864 cntl.coherent = false; 889 cntl.coherent = false;
865 cntl.lanes_number = LANE_COUNT_FOUR; 890 cntl.lanes_number = LANE_COUNT_FOUR;
866 cntl.hpd_sel = link->link_enc->hpd_source; 891 cntl.hpd_sel = link->link_enc->hpd_source;
867
868 bp_result = link_transmitter_control(ctx->dc_bios, &cntl); 892 bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
869 893
894 if (!power_up)
895 /* save the driver power-off timestamp */
896 link->link_trace.time_stamp.edp_poweroff = dm_get_timestamp(ctx);
897 else
898 link->link_trace.time_stamp.edp_poweron = dm_get_timestamp(ctx);
899
870 if (bp_result != BP_RESULT_OK) 900 if (bp_result != BP_RESULT_OK)
871 DC_LOG_ERROR( 901 DC_LOG_ERROR(
872 "%s: Panel Power bp_result: %d\n", 902 "%s: Panel Power bp_result: %d\n",
@@ -1011,7 +1041,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
1011 1041
1012 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { 1042 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1013 link->dc->hwss.edp_backlight_control(link, true); 1043 link->dc->hwss.edp_backlight_control(link, true);
1014 stream->bl_pwm_level = 0; 1044 stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL;
1015 } 1045 }
1016} 1046}
1017void dce110_blank_stream(struct pipe_ctx *pipe_ctx) 1047void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
@@ -1203,7 +1233,7 @@ static void program_scaler(const struct dc *dc,
1203 &pipe_ctx->plane_res.scl_data); 1233 &pipe_ctx->plane_res.scl_data);
1204} 1234}
1205 1235
1206static enum dc_status dce110_prog_pixclk_crtc_otg( 1236static enum dc_status dce110_enable_stream_timing(
1207 struct pipe_ctx *pipe_ctx, 1237 struct pipe_ctx *pipe_ctx,
1208 struct dc_state *context, 1238 struct dc_state *context,
1209 struct dc *dc) 1239 struct dc *dc)
@@ -1269,7 +1299,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
1269 pipe_ctx[pipe_ctx->pipe_idx]; 1299 pipe_ctx[pipe_ctx->pipe_idx];
1270 1300
1271 /* */ 1301 /* */
1272 dc->hwss.prog_pixclk_crtc_otg(pipe_ctx, context, dc); 1302 dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
1273 1303
1274 /* FPGA does not program backend */ 1304 /* FPGA does not program backend */
1275 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { 1305 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
@@ -1441,6 +1471,17 @@ static void disable_vga_and_power_gate_all_controllers(
1441 } 1471 }
1442} 1472}
1443 1473
1474static struct dc_link *get_link_for_edp(struct dc *dc)
1475{
1476 int i;
1477
1478 for (i = 0; i < dc->link_count; i++) {
1479 if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
1480 return dc->links[i];
1481 }
1482 return NULL;
1483}
1484
1444static struct dc_link *get_link_for_edp_not_in_use( 1485static struct dc_link *get_link_for_edp_not_in_use(
1445 struct dc *dc, 1486 struct dc *dc,
1446 struct dc_state *context) 1487 struct dc_state *context)
@@ -1475,20 +1516,21 @@ static struct dc_link *get_link_for_edp_not_in_use(
1475 */ 1516 */
1476void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) 1517void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
1477{ 1518{
1478 struct dc_bios *dcb = dc->ctx->dc_bios;
1479
1480 /* vbios already light up eDP, so we can leverage vbios and skip eDP
1481 * programming
1482 */
1483 bool can_eDP_fast_boot_optimize =
1484 (dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE);
1485
1486 /* if OS doesn't light up eDP and eDP link is available, we want to disable */
1487 struct dc_link *edp_link_to_turnoff = NULL; 1519 struct dc_link *edp_link_to_turnoff = NULL;
1520 struct dc_link *edp_link = get_link_for_edp(dc);
1521 bool can_eDP_fast_boot_optimize = false;
1522
1523 if (edp_link) {
1524 can_eDP_fast_boot_optimize =
1525 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc);
1526 }
1488 1527
1489 if (can_eDP_fast_boot_optimize) { 1528 if (can_eDP_fast_boot_optimize) {
1490 edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context); 1529 edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
1491 1530
1531 /* if OS doesn't light up eDP and eDP link is available, we want to disable
1532 * If resume from S4/S5, should optimization.
1533 */
1492 if (!edp_link_to_turnoff) 1534 if (!edp_link_to_turnoff)
1493 dc->apply_edp_fast_boot_optimization = true; 1535 dc->apply_edp_fast_boot_optimization = true;
1494 } 1536 }
@@ -1544,6 +1586,7 @@ static void dce110_set_displaymarks(
1544 pipe_ctx->plane_res.mi, 1586 pipe_ctx->plane_res.mi,
1545 context->bw.dce.nbp_state_change_wm_ns[num_pipes], 1587 context->bw.dce.nbp_state_change_wm_ns[num_pipes],
1546 context->bw.dce.stutter_exit_wm_ns[num_pipes], 1588 context->bw.dce.stutter_exit_wm_ns[num_pipes],
1589 context->bw.dce.stutter_entry_wm_ns[num_pipes],
1547 context->bw.dce.urgent_wm_ns[num_pipes], 1590 context->bw.dce.urgent_wm_ns[num_pipes],
1548 total_dest_line_time_ns); 1591 total_dest_line_time_ns);
1549 if (i == underlay_idx) { 1592 if (i == underlay_idx) {
@@ -1569,6 +1612,7 @@ static void set_safe_displaymarks(
1569 MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK }; 1612 MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK };
1570 struct dce_watermarks nbp_marks = { 1613 struct dce_watermarks nbp_marks = {
1571 SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK }; 1614 SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK };
1615 struct dce_watermarks min_marks = { 0, 0, 0, 0};
1572 1616
1573 for (i = 0; i < MAX_PIPES; i++) { 1617 for (i = 0; i < MAX_PIPES; i++) {
1574 if (res_ctx->pipe_ctx[i].stream == NULL || res_ctx->pipe_ctx[i].plane_res.mi == NULL) 1618 if (res_ctx->pipe_ctx[i].stream == NULL || res_ctx->pipe_ctx[i].plane_res.mi == NULL)
@@ -1578,6 +1622,7 @@ static void set_safe_displaymarks(
1578 res_ctx->pipe_ctx[i].plane_res.mi, 1622 res_ctx->pipe_ctx[i].plane_res.mi,
1579 nbp_marks, 1623 nbp_marks,
1580 max_marks, 1624 max_marks,
1625 min_marks,
1581 max_marks, 1626 max_marks,
1582 MAX_WATERMARK); 1627 MAX_WATERMARK);
1583 1628
@@ -1803,6 +1848,9 @@ static bool should_enable_fbc(struct dc *dc,
1803 } 1848 }
1804 } 1849 }
1805 1850
1851 /* Pipe context should be found */
1852 ASSERT(pipe_ctx);
1853
1806 /* Only supports eDP */ 1854 /* Only supports eDP */
1807 if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP) 1855 if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
1808 return false; 1856 return false;
@@ -2699,8 +2747,11 @@ static void dce110_program_front_end_for_pipe(
2699 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2747 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2700 struct xfm_grph_csc_adjustment adjust; 2748 struct xfm_grph_csc_adjustment adjust;
2701 struct out_csc_color_matrix tbl_entry; 2749 struct out_csc_color_matrix tbl_entry;
2750#if defined(CONFIG_DRM_AMD_DC_FBC)
2751 unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
2752#endif
2702 unsigned int i; 2753 unsigned int i;
2703 struct dc_context *ctx = dc->ctx; 2754 DC_LOGGER_INIT();
2704 memset(&tbl_entry, 0, sizeof(tbl_entry)); 2755 memset(&tbl_entry, 0, sizeof(tbl_entry));
2705 2756
2706 if (dc->current_state) 2757 if (dc->current_state)
@@ -2740,7 +2791,9 @@ static void dce110_program_front_end_for_pipe(
2740 program_scaler(dc, pipe_ctx); 2791 program_scaler(dc, pipe_ctx);
2741 2792
2742#if defined(CONFIG_DRM_AMD_DC_FBC) 2793#if defined(CONFIG_DRM_AMD_DC_FBC)
2743 if (dc->fbc_compressor && old_pipe->stream) { 2794 /* fbc not applicable on Underlay pipe */
2795 if (dc->fbc_compressor && old_pipe->stream &&
2796 pipe_ctx->pipe_idx != underlay_idx) {
2744 if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL) 2797 if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
2745 dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor); 2798 dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
2746 else 2799 else
@@ -2776,13 +2829,13 @@ static void dce110_program_front_end_for_pipe(
2776 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); 2829 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
2777 2830
2778 DC_LOG_SURFACE( 2831 DC_LOG_SURFACE(
2779 "Pipe:%d 0x%x: addr hi:0x%x, " 2832 "Pipe:%d %p: addr hi:0x%x, "
2780 "addr low:0x%x, " 2833 "addr low:0x%x, "
2781 "src: %d, %d, %d," 2834 "src: %d, %d, %d,"
2782 " %d; dst: %d, %d, %d, %d;" 2835 " %d; dst: %d, %d, %d, %d;"
2783 "clip: %d, %d, %d, %d\n", 2836 "clip: %d, %d, %d, %d\n",
2784 pipe_ctx->pipe_idx, 2837 pipe_ctx->pipe_idx,
2785 pipe_ctx->plane_state, 2838 (void *) pipe_ctx->plane_state,
2786 pipe_ctx->plane_state->address.grph.addr.high_part, 2839 pipe_ctx->plane_state->address.grph.addr.high_part,
2787 pipe_ctx->plane_state->address.grph.addr.low_part, 2840 pipe_ctx->plane_state->address.grph.addr.low_part,
2788 pipe_ctx->plane_state->src_rect.x, 2841 pipe_ctx->plane_state->src_rect.x,
@@ -2993,7 +3046,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
2993 .get_position = get_position, 3046 .get_position = get_position,
2994 .set_static_screen_control = set_static_screen_control, 3047 .set_static_screen_control = set_static_screen_control,
2995 .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap, 3048 .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
2996 .prog_pixclk_crtc_otg = dce110_prog_pixclk_crtc_otg, 3049 .enable_stream_timing = dce110_enable_stream_timing,
2997 .setup_stereo = NULL, 3050 .setup_stereo = NULL,
2998 .set_avmute = dce110_set_avmute, 3051 .set_avmute = dce110_set_avmute,
2999 .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, 3052 .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
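The hwss_edp_power_control() change above enforces the eDP panel power-sequencing requirement that LCDVDD stay off for at least 500 ms before the next power-on: the driver records a power-off timestamp and, on power-up, sleeps out whatever is left of that window. A minimal sketch of the rule, assuming millisecond inputs (edp_poweron_wait_ms() is illustrative, not a driver function):

	/* Remaining wait before eDP may be powered back on.  A zero
	 * poweroff timestamp means no power-off on record, so assume
	 * the full 500 ms worst case.
	 */
	static unsigned long long edp_poweron_wait_ms(unsigned long long now_ms,
			unsigned long long poweroff_ms)
	{
		unsigned long long elapsed_ms;

		if (poweroff_ms == 0)
			return 500;

		elapsed_ms = now_ms - poweroff_ms;
		return elapsed_ms < 500 ? 500 - elapsed_ms : 0;
	}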
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 7bab8c6d2a73..0564c8e31252 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -923,6 +923,7 @@ void dce_mem_input_v_program_display_marks(
 	struct mem_input *mem_input,
 	struct dce_watermarks nbp,
 	struct dce_watermarks stutter,
+	struct dce_watermarks stutter_enter,
 	struct dce_watermarks urgent,
 	uint32_t total_dest_line_time_ns)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index b1f14be20fdf..ee33786bdef6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -930,38 +930,6 @@ static enum dc_status dce110_add_stream_to_ctx(
 	return result;
 }
 
-static enum dc_status dce110_validate_guaranteed(
-		struct dc *dc,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *context)
-{
-	enum dc_status result = DC_ERROR_UNEXPECTED;
-
-	context->streams[0] = dc_stream;
-	dc_stream_retain(context->streams[0]);
-	context->stream_count++;
-
-	result = resource_map_pool_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = resource_map_clock_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = build_mapped_resource(dc, context, dc_stream);
-
-	if (result == DC_OK) {
-		validate_guaranteed_copy_streams(
-				context, dc->caps.max_streams);
-		result = resource_build_scaling_params_for_context(dc, context);
-	}
-
-	if (result == DC_OK)
-		if (!dce110_validate_bandwidth(dc, context))
-			result = DC_FAIL_BANDWIDTH_VALIDATE;
-
-	return result;
-}
-
 static struct pipe_ctx *dce110_acquire_underlay(
 		struct dc_state *context,
 		const struct resource_pool *pool,
@@ -1036,7 +1004,6 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce110_res_pool_funcs = {
 	.destroy = dce110_destroy_resource_pool,
 	.link_enc_create = dce110_link_encoder_create,
-	.validate_guaranteed = dce110_validate_guaranteed,
 	.validate_bandwidth = dce110_validate_bandwidth,
 	.validate_plane = dce110_validate_plane,
 	.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index be7153924a70..1b2fe0df347f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -431,14 +431,6 @@ void dce110_timing_generator_set_drr(
 				0,
 				CRTC_V_TOTAL_CONTROL,
 				CRTC_SET_V_TOTAL_MIN_MASK);
-		set_reg_field_value(v_total_min,
-				0,
-				CRTC_V_TOTAL_MIN,
-				CRTC_V_TOTAL_MIN);
-		set_reg_field_value(v_total_max,
-				0,
-				CRTC_V_TOTAL_MAX,
-				CRTC_V_TOTAL_MAX);
 		set_reg_field_value(v_total_cntl,
 				0,
 				CRTC_V_TOTAL_CONTROL,
@@ -447,6 +439,14 @@ void dce110_timing_generator_set_drr(
 				0,
 				CRTC_V_TOTAL_CONTROL,
 				CRTC_V_TOTAL_MAX_SEL);
+		set_reg_field_value(v_total_min,
+				0,
+				CRTC_V_TOTAL_MIN,
+				CRTC_V_TOTAL_MIN);
+		set_reg_field_value(v_total_max,
+				0,
+				CRTC_V_TOTAL_MAX,
+				CRTC_V_TOTAL_MAX);
 		set_reg_field_value(v_total_cntl,
 				0,
 				CRTC_V_TOTAL_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index 8ad04816e7d3..a3cef60380ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -648,12 +648,6 @@ static void dce110_timing_generator_v_disable_vga(
 	return;
 }
 
-static bool dce110_tg_v_is_blanked(struct timing_generator *tg)
-{
-	/* Signal comes from the primary pipe, underlay is never blanked. */
-	return false;
-}
-
 /** ********************************************************************************************
  *
  * DCE11 Timing Generator Constructor / Destructor
@@ -670,7 +664,6 @@ static const struct timing_generator_funcs dce110_tg_v_funcs = {
 	.set_early_control = dce110_timing_generator_v_set_early_control,
 	.wait_for_state = dce110_timing_generator_v_wait_for_state,
 	.set_blank = dce110_timing_generator_v_set_blank,
-	.is_blanked = dce110_tg_v_is_blanked,
 	.set_colors = dce110_timing_generator_v_set_colors,
 	.set_overscan_blank_color =
 		dce110_timing_generator_v_set_overscan_color_black,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index cd1e3f72c44e..00c0a1ef15eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -430,7 +430,7 @@ static struct stream_encoder *dce112_stream_encoder_create(
 
 	if (!enc110)
 		return NULL;
-	
+
 	dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
 			&stream_enc_regs[eng_id],
 			&se_shift, &se_mask);
@@ -867,38 +867,6 @@ enum dc_status dce112_add_stream_to_ctx(
 	return result;
 }
 
-enum dc_status dce112_validate_guaranteed(
-		struct dc *dc,
-		struct dc_stream_state *stream,
-		struct dc_state *context)
-{
-	enum dc_status result = DC_ERROR_UNEXPECTED;
-
-	context->streams[0] = stream;
-	dc_stream_retain(context->streams[0]);
-	context->stream_count++;
-
-	result = resource_map_pool_resources(dc, context, stream);
-
-	if (result == DC_OK)
-		result = resource_map_phy_clock_resources(dc, context, stream);
-
-	if (result == DC_OK)
-		result = build_mapped_resource(dc, context, stream);
-
-	if (result == DC_OK) {
-		validate_guaranteed_copy_streams(
-				context, dc->caps.max_streams);
-		result = resource_build_scaling_params_for_context(dc, context);
-	}
-
-	if (result == DC_OK)
-		if (!dce112_validate_bandwidth(dc, context))
-			result = DC_FAIL_BANDWIDTH_VALIDATE;
-
-	return result;
-}
-
 enum dc_status dce112_validate_global(
 		struct dc *dc,
 		struct dc_state *context)
@@ -921,7 +889,6 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce112_res_pool_funcs = {
 	.destroy = dce112_destroy_resource_pool,
 	.link_enc_create = dce112_link_encoder_create,
-	.validate_guaranteed = dce112_validate_guaranteed,
 	.validate_bandwidth = dce112_validate_bandwidth,
 	.validate_plane = dce100_validate_plane,
 	.add_stream_to_ctx = dce112_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
index d5c19d34eb0a..95a403396219 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -42,11 +42,6 @@ enum dc_status dce112_validate_with_context(
 		struct dc_state *context,
 		struct dc_state *old_context);
 
-enum dc_status dce112_validate_guaranteed(
-		struct dc *dc,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *context);
-
 bool dce112_validate_bandwidth(
 		struct dc *dc,
 		struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 4659a4bfabaa..fda01574d1ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -652,7 +652,7 @@ static struct mem_input *dce120_mem_input_create(
 		return NULL;
 	}
 
-	dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+	dce120_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
 	return &dce_mi->base;
 }
 
@@ -684,7 +684,6 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce120_res_pool_funcs = {
 	.destroy = dce120_destroy_resource_pool,
 	.link_enc_create = dce120_link_encoder_create,
-	.validate_guaranteed = dce112_validate_guaranteed,
 	.validate_bandwidth = dce112_validate_bandwidth,
 	.validate_plane = dce100_validate_plane,
 	.add_stream_to_ctx = dce112_add_stream_to_ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 7bee78172d85..2ea490f8482e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -570,12 +570,6 @@ void dce120_timing_generator_set_drr(
 				0x180);
 
 	} else {
-		CRTC_REG_UPDATE(
-				CRTC0_CRTC_V_TOTAL_MIN,
-				CRTC_V_TOTAL_MIN, 0);
-		CRTC_REG_UPDATE(
-				CRTC0_CRTC_V_TOTAL_MAX,
-				CRTC_V_TOTAL_MAX, 0);
 		CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 5,
 				FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 0,
 				FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 0,
@@ -583,6 +577,12 @@ void dce120_timing_generator_set_drr(
 				FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0,
 				FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0);
 		CRTC_REG_UPDATE(
+				CRTC0_CRTC_V_TOTAL_MIN,
+				CRTC_V_TOTAL_MIN, 0);
+		CRTC_REG_UPDATE(
+				CRTC0_CRTC_V_TOTAL_MAX,
+				CRTC_V_TOTAL_MAX, 0);
+		CRTC_REG_UPDATE(
 				CRTC0_CRTC_STATIC_SCREEN_CONTROL,
 				CRTC_STATIC_SCREEN_EVENT_MASK,
 				0);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 5d854a37a978..48a068964722 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -691,23 +691,6 @@ static void destruct(struct dce110_resource_pool *pool)
 	}
 }
 
-static enum dc_status build_mapped_resource(
-		const struct dc *dc,
-		struct dc_state *context,
-		struct dc_stream_state *stream)
-{
-	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
-
-	if (!pipe_ctx)
-		return DC_ERROR_UNEXPECTED;
-
-	dce110_resource_build_pipe_hw_param(pipe_ctx);
-
-	resource_build_info_frame(pipe_ctx);
-
-	return DC_OK;
-}
-
 bool dce80_validate_bandwidth(
 	struct dc *dc,
 	struct dc_state *context)
@@ -749,37 +732,6 @@ enum dc_status dce80_validate_global(
 	return DC_OK;
 }
 
-enum dc_status dce80_validate_guaranteed(
-		struct dc *dc,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *context)
-{
-	enum dc_status result = DC_ERROR_UNEXPECTED;
-
-	context->streams[0] = dc_stream;
-	dc_stream_retain(context->streams[0]);
-	context->stream_count++;
-
-	result = resource_map_pool_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = resource_map_clock_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = build_mapped_resource(dc, context, dc_stream);
-
-	if (result == DC_OK) {
-		validate_guaranteed_copy_streams(
-				context, dc->caps.max_streams);
-		result = resource_build_scaling_params_for_context(dc, context);
-	}
-
-	if (result == DC_OK)
-		result = dce80_validate_bandwidth(dc, context);
-
-	return result;
-}
-
 static void dce80_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -792,7 +744,6 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce80_res_pool_funcs = {
 	.destroy = dce80_destroy_resource_pool,
 	.link_enc_create = dce80_link_encoder_create,
-	.validate_guaranteed = dce80_validate_guaranteed,
 	.validate_bandwidth = dce80_validate_bandwidth,
 	.validate_plane = dce100_validate_plane,
 	.add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 5469bdfe19f3..5c69743a4b4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -26,7 +26,7 @@ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
 	dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
 	dcn10_hubp.o dcn10_mpc.o \
 	dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
-	dcn10_hubbub.o
+	dcn10_hubbub.o dcn10_stream_encoder.o
 
 AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 881a1bff94d2..96d5878e9ccd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -367,15 +367,15 @@ bool cm_helper_translate_curve_to_hw_format(
 
 	lut_params->hw_points_num = hw_points;
 
-	i = 1;
-	for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+	k = 0;
+	for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
 		if (seg_distr[k] != -1) {
 			lut_params->arr_curve_points[k].segments_num =
 					seg_distr[k];
 			lut_params->arr_curve_points[i].offset =
 					lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
 		}
-		i++;
+		k++;
 	}
 
 	if (seg_distr[k] != -1)
@@ -529,15 +529,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
 	lut_params->hw_points_num = hw_points;
 
-	i = 1;
-	for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+	k = 0;
+	for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
 		if (seg_distr[k] != -1) {
 			lut_params->arr_curve_points[k].segments_num =
 					seg_distr[k];
 			lut_params->arr_curve_points[i].offset =
 					lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
 		}
-		i++;
+		k++;
 	}
 
 	if (seg_distr[k] != -1)
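The two cm_helper hunks above are the same cleanup applied to the regamma and degamma paths: the loop bound is now expressed on the offset index i alone, with the region index k advanced in lock-step (i == k + 1 throughout), so the trailing seg_distr[k] access is visibly in range. A stand-alone restatement of the packing logic, with simplified stand-in types (not the driver's):

	struct curve_point { int segments_num; int offset; };

	/* Region k holds 2^seg_distr[k] segments; region k+1 starts where
	 * region k ends.  A seg_distr entry of -1 marks an unused region.
	 */
	static void pack_regions(struct curve_point *pt, const int *seg_distr, int n)
	{
		int i, k = 0;

		for (i = 1; i < n; i++) {
			if (seg_distr[k] != -1) {
				pt[k].segments_num = seg_distr[k];
				pt[i].offset = pt[k].offset + (1 << seg_distr[k]);
			}
			k++;
		}
	}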
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index e305c28c98de..8c4d9e523331 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -98,6 +98,30 @@ enum gamut_remap_select {
 	GAMUT_REMAP_COMB_COEFF
 };
 
+void dpp_read_state(struct dpp *dpp_base,
+		struct dcn_dpp_state *s)
+{
+	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+	REG_GET(CM_IGAM_CONTROL,
+			CM_IGAM_LUT_MODE, &s->igam_lut_mode);
+	REG_GET(CM_IGAM_CONTROL,
+			CM_IGAM_INPUT_FORMAT, &s->igam_input_format);
+	REG_GET(CM_DGAM_CONTROL,
+			CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
+	REG_GET(CM_RGAM_CONTROL,
+			CM_RGAM_LUT_MODE, &s->rgam_lut_mode);
+	REG_GET(CM_GAMUT_REMAP_CONTROL,
+			CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
+
+	s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
+	s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
+	s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
+	s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
+	s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
+	s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
+}
+
 /* Program gamut remap in bypass mode */
 void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
 {
@@ -121,6 +145,13 @@ bool dpp_get_optimal_number_of_taps(
 	else
 		pixel_width = scl_data->viewport.width;
 
+	/* Some ASICs does not support FP16 scaling, so we reject modes require this*/
+	if (scl_data->viewport.width != scl_data->h_active &&
+		scl_data->viewport.height != scl_data->v_active &&
+		dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+		scl_data->format == PIXEL_FORMAT_FP16)
+		return false;
+
 	/* TODO: add lb check */
 
 	/* No support for programming ratio of 4, drop to 3.99999.. */
@@ -257,7 +288,7 @@ void dpp1_cnv_setup (
 	struct dpp *dpp_base,
 	enum surface_pixel_format format,
 	enum expansion_mode mode,
-	struct csc_transform input_csc_color_matrix,
+	struct dc_csc_transform input_csc_color_matrix,
 	enum dc_color_space input_color_space)
 {
 	uint32_t pixel_format;
@@ -416,7 +447,7 @@ void dpp1_set_cursor_position(
 	if (src_x_offset >= (int)param->viewport_width)
 		cur_en = 0;  /* not visible beyond right edge*/
 
-	if (src_x_offset + (int)width < 0)
+	if (src_x_offset + (int)width <= 0)
 		cur_en = 0;  /* not visible beyond left edge*/
 
 	REG_UPDATE(CURSOR0_CONTROL,
@@ -443,6 +474,7 @@ void dpp1_dppclk_control(
 }
 
 static const struct dpp_funcs dcn10_dpp_funcs = {
+		.dpp_read_state = dpp_read_state,
 		.dpp_reset = dpp_reset,
 		.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
 		.dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
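On the dpp1_set_cursor_position() hunk above: with the old strict `<`, a cursor whose right edge fell exactly at x == 0 was still treated as visible even though none of its pixels are on screen; `<=` closes that off-by-one. Written out as a stand-alone predicate for illustration (cursor_visible_x() is not a driver function):

	/* Visible iff [src_x_offset, src_x_offset + width) overlaps
	 * the viewport range [0, viewport_width).
	 */
	static bool cursor_visible_x(int src_x_offset, int width, int viewport_width)
	{
		return src_x_offset + width > 0 && src_x_offset < viewport_width;
	}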
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 17b062a8f88a..5944a3ba0409 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -44,6 +44,10 @@
 #define TF_REG_LIST_DCN(id) \
 	SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\
 	SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\
+	SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\
+	SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\
+	SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\
+	SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\
 	SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\
 	SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
 	SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
@@ -108,6 +112,8 @@
 	SRI(CM_DGAM_LUT_DATA, CM, id), \
 	SRI(CM_CONTROL, CM, id), \
 	SRI(CM_DGAM_CONTROL, CM, id), \
+	SRI(CM_TEST_DEBUG_INDEX, CM, id), \
+	SRI(CM_TEST_DEBUG_DATA, CM, id), \
 	SRI(FORMAT_CONTROL, CNVC_CFG, id), \
 	SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
 	SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
@@ -175,6 +181,14 @@
 	TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\
 	TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\
 	TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\
 	TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\
 	TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\
 	TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\
@@ -300,6 +314,7 @@
 	TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \
 	TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \
 	TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \
+	TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \
 	TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
 	TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
 	TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
@@ -417,6 +432,41 @@
 	TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
 	TF_SF(DPP_TOP0_DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh)
 
+/*
+ *
+	DCN1 CM debug status register definition
+
+	register :ID9_CM_STATUS do
+	implement_ref :cm
+	map to: :cmdebugind, at: j
+	width 32
+	disclosure NEVER
+
+		field :ID9_VUPDATE_CFG, [0], R
+		field :ID9_IGAM_LUT_MODE, [2..1], R
+		field :ID9_BNS_BYPASS, [3], R
+		field :ID9_ICSC_MODE, [5..4], R
+		field :ID9_DGAM_LUT_MODE, [8..6], R
+		field :ID9_HDR_BYPASS, [9], R
+		field :ID9_GAMUT_REMAP_MODE, [11..10], R
+		field :ID9_RGAM_LUT_MODE, [14..12], R
+		#1 free bit
+		field :ID9_OCSC_MODE, [18..16], R
+		field :ID9_DENORM_MODE, [21..19], R
+		field :ID9_ROUND_TRUNC_MODE, [25..22], R
+		field :ID9_DITHER_EN, [26], R
+		field :ID9_DITHER_MODE, [28..27], R
+	end
+*/
+
+#define TF_DEBUG_REG_LIST_SH_DCN10 \
+	.CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 4, \
+	.CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 16
+
+#define TF_DEBUG_REG_LIST_MASK_DCN10 \
+	.CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30, \
+	.CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 0x70000
+
 #define TF_REG_FIELD_LIST(type) \
 	type EXT_OVERSCAN_LEFT; \
 	type EXT_OVERSCAN_RIGHT; \
@@ -486,6 +536,14 @@
 	type CM_GAMUT_REMAP_MODE; \
 	type CM_GAMUT_REMAP_C11; \
 	type CM_GAMUT_REMAP_C12; \
+	type CM_GAMUT_REMAP_C13; \
+	type CM_GAMUT_REMAP_C14; \
+	type CM_GAMUT_REMAP_C21; \
+	type CM_GAMUT_REMAP_C22; \
+	type CM_GAMUT_REMAP_C23; \
+	type CM_GAMUT_REMAP_C24; \
+	type CM_GAMUT_REMAP_C31; \
+	type CM_GAMUT_REMAP_C32; \
 	type CM_GAMUT_REMAP_C33; \
 	type CM_GAMUT_REMAP_C34; \
 	type CM_COMA_C11; \
@@ -1010,6 +1068,9 @@
 	type CUR0_EXPANSION_MODE; \
 	type CUR0_ENABLE; \
 	type CM_BYPASS; \
+	type CM_TEST_DEBUG_INDEX; \
+	type CM_TEST_DEBUG_DATA_ID9_ICSC_MODE; \
+	type CM_TEST_DEBUG_DATA_ID9_OCSC_MODE;\
 	type FORMAT_CONTROL__ALPHA_EN; \
 	type CUR0_COLOR0; \
 	type CUR0_COLOR1; \
@@ -1054,6 +1115,10 @@ struct dcn_dpp_mask {
 	uint32_t RECOUT_SIZE; \
 	uint32_t CM_GAMUT_REMAP_CONTROL; \
 	uint32_t CM_GAMUT_REMAP_C11_C12; \
+	uint32_t CM_GAMUT_REMAP_C13_C14; \
+	uint32_t CM_GAMUT_REMAP_C21_C22; \
+	uint32_t CM_GAMUT_REMAP_C23_C24; \
+	uint32_t CM_GAMUT_REMAP_C31_C32; \
 	uint32_t CM_GAMUT_REMAP_C33_C34; \
 	uint32_t CM_COMA_C11_C12; \
 	uint32_t CM_COMA_C33_C34; \
@@ -1255,6 +1320,8 @@ struct dcn_dpp_mask {
 	uint32_t CM_IGAM_LUT_RW_CONTROL; \
 	uint32_t CM_IGAM_LUT_RW_INDEX; \
 	uint32_t CM_IGAM_LUT_SEQ_COLOR; \
+	uint32_t CM_TEST_DEBUG_INDEX; \
+	uint32_t CM_TEST_DEBUG_DATA; \
 	uint32_t FORMAT_CONTROL; \
 	uint32_t CNVC_SURFACE_PIXEL_FORMAT; \
 	uint32_t CURSOR_CONTROL; \
@@ -1289,8 +1356,8 @@ struct dcn10_dpp {
 
 enum dcn10_input_csc_select {
 	INPUT_CSC_SELECT_BYPASS = 0,
-	INPUT_CSC_SELECT_ICSC,
-	INPUT_CSC_SELECT_COMA
+	INPUT_CSC_SELECT_ICSC = 1,
+	INPUT_CSC_SELECT_COMA = 2
};
 
 void dpp1_set_cursor_attributes(
@@ -1364,6 +1431,9 @@ bool dpp_get_optimal_number_of_taps(
 		struct scaler_data *scl_data,
 		const struct scaling_taps *in_taps);
 
+void dpp_read_state(struct dpp *dpp_base,
+		struct dcn_dpp_state *s);
+
 void dpp_reset(struct dpp *dpp_base);
 
 void dpp1_cm_program_regamma_lut(
@@ -1408,7 +1478,7 @@ void dpp1_cnv_setup (
 	struct dpp *dpp_base,
 	enum surface_pixel_format format,
 	enum expansion_mode mode,
-	struct csc_transform input_csc_color_matrix,
+	struct dc_csc_transform input_csc_color_matrix,
 	enum dc_color_space input_color_space);
 
 void dpp1_full_bypass(struct dpp *dpp_base);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index fb32975e4b67..4f373c97804f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -216,41 +216,55 @@ static void dpp1_cm_program_color_matrix(
 		struct dcn10_dpp *dpp,
 		const uint16_t *regval)
 {
-	uint32_t mode;
+	uint32_t ocsc_mode;
+	uint32_t cur_mode;
 	struct color_matrices_reg gam_regs;
 
-	REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode);
-
 	if (regval == NULL) {
 		BREAK_TO_DEBUGGER();
 		return;
 	}
-	mode = 4;
+
+	/* determine which CSC matrix (ocsc or comb) we are using
+	 * currently. select the alternate set to double buffer
+	 * the CSC update so CSC is updated on frame boundary
+	 */
+	REG_SET(CM_TEST_DEBUG_INDEX, 0,
+			CM_TEST_DEBUG_INDEX, 9);
+
+	REG_GET(CM_TEST_DEBUG_DATA,
+			CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode);
+
+	if (cur_mode != 4)
+		ocsc_mode = 4;
+	else
+		ocsc_mode = 5;
+
+
 	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
 	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
 	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
 	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;
 
-	if (mode == 4) {
+	if (ocsc_mode == 4) {
 
 		gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
 		gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);
 
-		cm_helper_program_color_matrices(
-				dpp->base.ctx,
-				regval,
-				&gam_regs);
-
 	} else {
 
 		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
 		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
 
-		cm_helper_program_color_matrices(
-				dpp->base.ctx,
-				regval,
-				&gam_regs);
 	}
+
+	cm_helper_program_color_matrices(
+			dpp->base.ctx,
+			regval,
+			&gam_regs);
+
+	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
+
 }
 
 void dpp1_cm_set_output_csc_default(
@@ -260,15 +274,14 @@ void dpp1_cm_set_output_csc_default(
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
 	const uint16_t *regval = NULL;
 	int arr_size;
-	uint32_t ocsc_mode = 4;
 
 	regval = find_color_matrix(colorspace, &arr_size);
 	if (regval == NULL) {
 		BREAK_TO_DEBUGGER();
 		return;
 	}
+
 	dpp1_cm_program_color_matrix(dpp, regval);
-	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
 }
 
 static void dpp1_cm_get_reg_field(
@@ -329,9 +342,8 @@ void dpp1_cm_set_output_csc_adjustment(
 		const uint16_t *regval)
 {
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
-	uint32_t ocsc_mode = 4;
+
 	dpp1_cm_program_color_matrix(dpp, regval);
-	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
 }
 
 void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
@@ -437,17 +449,18 @@ void dpp1_cm_program_regamma_lutb_settings(
 void dpp1_program_input_csc(
 		struct dpp *dpp_base,
 		enum dc_color_space color_space,
-		enum dcn10_input_csc_select select,
+		enum dcn10_input_csc_select input_select,
 		const struct out_csc_color_matrix *tbl_entry)
 {
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
 	int i;
 	int arr_size = sizeof(dcn10_input_csc_matrix)/sizeof(struct dcn10_input_csc_matrix);
 	const uint16_t *regval = NULL;
-	uint32_t selection = 1;
+	uint32_t cur_select = 0;
+	enum dcn10_input_csc_select select;
 	struct color_matrices_reg gam_regs;
 
-	if (select == INPUT_CSC_SELECT_BYPASS) {
+	if (input_select == INPUT_CSC_SELECT_BYPASS) {
 		REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
 		return;
 	}
@@ -467,36 +480,45 @@ void dpp1_program_input_csc(
 		regval = tbl_entry->regval;
 	}
 
-	if (select == INPUT_CSC_SELECT_COMA)
-		selection = 2;
-	REG_SET(CM_ICSC_CONTROL, 0,
-			CM_ICSC_MODE, selection);
+	/* determine which CSC matrix (icsc or coma) we are using
+	 * currently. select the alternate set to double buffer
+	 * the CSC update so CSC is updated on frame boundary
+	 */
+	REG_SET(CM_TEST_DEBUG_INDEX, 0,
+			CM_TEST_DEBUG_INDEX, 9);
+
+	REG_GET(CM_TEST_DEBUG_DATA,
+			CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select);
+
+	if (cur_select != INPUT_CSC_SELECT_ICSC)
+		select = INPUT_CSC_SELECT_ICSC;
+	else
+		select = INPUT_CSC_SELECT_COMA;
 
 	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
 	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
 	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
 	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
 
-
 	if (select == INPUT_CSC_SELECT_ICSC) {
 
 		gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
 		gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
 
-		cm_helper_program_color_matrices(
-				dpp->base.ctx,
-				regval,
-				&gam_regs);
 	} else {
 
 		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
 		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
 
-		cm_helper_program_color_matrices(
-				dpp->base.ctx,
-				regval,
-				&gam_regs);
 	}
+
+	cm_helper_program_color_matrices(
+			dpp->base.ctx,
+			regval,
+			&gam_regs);
+
+	REG_SET(CM_ICSC_CONTROL, 0,
+			CM_ICSC_MODE, select);
 }
 
 //keep here for now, decide multi dce support later
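The three dpp1_cm hunks above replace an immediate CM_OCSC_MODE/CM_ICSC_MODE write with a double-buffered update: debug-status index 9 reports which coefficient set the hardware is currently scanning out from (OCSC vs COMB, or ICSC vs COMA), the idle set is programmed, and the mode select is flipped last so the new matrix is latched on a frame boundary rather than mid-scanout. The sequence, reduced to a sketch with hypothetical program_set()/select_set() helpers standing in for the register writes (mode values 4 and 5 are the two output-CSC sets used above):

	static void update_output_csc(uint32_t cur_mode, const uint16_t *regval)
	{
		uint32_t next_mode = (cur_mode != 4) ? 4 : 5;

		program_set(next_mode, regval);	/* write the set not in use */
		select_set(next_mode);		/* takes effect at next frame */
	}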
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 738f67ffd1b4..b9fb14a3224b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -476,8 +476,227 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
 			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
 }
 
+static bool hubbub1_dcc_support_swizzle(
+		enum swizzle_mode_values swizzle,
+		unsigned int bytes_per_element,
+		enum segment_order *segment_order_horz,
+		enum segment_order *segment_order_vert)
+{
+	bool standard_swizzle = false;
+	bool display_swizzle = false;
+
+	switch (swizzle) {
+	case DC_SW_4KB_S:
+	case DC_SW_64KB_S:
+	case DC_SW_VAR_S:
+	case DC_SW_4KB_S_X:
+	case DC_SW_64KB_S_X:
+	case DC_SW_VAR_S_X:
+		standard_swizzle = true;
+		break;
+	case DC_SW_4KB_D:
+	case DC_SW_64KB_D:
+	case DC_SW_VAR_D:
+	case DC_SW_4KB_D_X:
+	case DC_SW_64KB_D_X:
+	case DC_SW_VAR_D_X:
+		display_swizzle = true;
+		break;
+	default:
+		break;
+	}
+
+	if (bytes_per_element == 1 && standard_swizzle) {
+		*segment_order_horz = segment_order__contiguous;
+		*segment_order_vert = segment_order__na;
+		return true;
+	}
+	if (bytes_per_element == 2 && standard_swizzle) {
+		*segment_order_horz = segment_order__non_contiguous;
+		*segment_order_vert = segment_order__contiguous;
+		return true;
+	}
+	if (bytes_per_element == 4 && standard_swizzle) {
+		*segment_order_horz = segment_order__non_contiguous;
+		*segment_order_vert = segment_order__contiguous;
+		return true;
+	}
+	if (bytes_per_element == 8 && standard_swizzle) {
+		*segment_order_horz = segment_order__na;
+		*segment_order_vert = segment_order__contiguous;
+		return true;
+	}
+	if (bytes_per_element == 8 && display_swizzle) {
+		*segment_order_horz = segment_order__contiguous;
+		*segment_order_vert = segment_order__non_contiguous;
+		return true;
+	}
+
+	return false;
+}
+
+static bool hubbub1_dcc_support_pixel_format(
+		enum surface_pixel_format format,
+		unsigned int *bytes_per_element)
+{
+	/* DML: get_bytes_per_element */
+	switch (format) {
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+		*bytes_per_element = 2;
+		return true;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+		*bytes_per_element = 4;
+		return true;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+		*bytes_per_element = 8;
+		return true;
+	default:
+		return false;
+	}
+}
+
+static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+		unsigned int bytes_per_element)
+{
+	/* copied from DML. might want to refactor DML to leverage from DML */
+	/* DML : get_blk256_size */
+	if (bytes_per_element == 1) {
+		*blk256_width = 16;
+		*blk256_height = 16;
+	} else if (bytes_per_element == 2) {
+		*blk256_width = 16;
+		*blk256_height = 8;
+	} else if (bytes_per_element == 4) {
+		*blk256_width = 8;
+		*blk256_height = 8;
+	} else if (bytes_per_element == 8) {
+		*blk256_width = 8;
+		*blk256_height = 4;
+	}
+}
+
+static void hubbub1_det_request_size(
+		unsigned int height,
+		unsigned int width,
+		unsigned int bpe,
+		bool *req128_horz_wc,
+		bool *req128_vert_wc)
+{
+	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */
+
+	unsigned int blk256_height = 0;
+	unsigned int blk256_width = 0;
+	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+	swath_bytes_horz_wc = height * blk256_height * bpe;
+	swath_bytes_vert_wc = width * blk256_width * bpe;
+
+	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+			false : /* full 256B request */
+			true; /* half 128b request */
+
+	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+			false : /* full 256B request */
+			true; /* half 128b request */
+}
+
+static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
+		const struct dc_dcc_surface_param *input,
+		struct dc_surface_dcc_cap *output)
+{
+	struct dc *dc = hubbub->ctx->dc;
+	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+	enum dcc_control dcc_control;
+	unsigned int bpe;
+	enum segment_order segment_order_horz, segment_order_vert;
+	bool req128_horz_wc, req128_vert_wc;
+
+	memset(output, 0, sizeof(*output));
+
+	if (dc->debug.disable_dcc == DCC_DISABLE)
+		return false;
+
+	if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe))
+		return false;
+
+	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
+			&segment_order_horz, &segment_order_vert))
+		return false;
+
+	hubbub1_det_request_size(input->surface_size.height, input->surface_size.width,
+			bpe, &req128_horz_wc, &req128_vert_wc);
+
+	if (!req128_horz_wc && !req128_vert_wc) {
+		dcc_control = dcc_control__256_256_xxx;
+	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+		if (!req128_horz_wc)
+			dcc_control = dcc_control__256_256_xxx;
+		else if (segment_order_horz == segment_order__contiguous)
+			dcc_control = dcc_control__128_128_xxx;
+		else
+			dcc_control = dcc_control__256_64_64;
+	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+		if (!req128_vert_wc)
+			dcc_control = dcc_control__256_256_xxx;
+		else if (segment_order_vert == segment_order__contiguous)
+			dcc_control = dcc_control__128_128_xxx;
+		else
+			dcc_control = dcc_control__256_64_64;
+	} else {
+		if ((req128_horz_wc &&
+			segment_order_horz == segment_order__non_contiguous) ||
+			(req128_vert_wc &&
+			segment_order_vert == segment_order__non_contiguous))
+			/* access_dir not known, must use most constraining */
+			dcc_control = dcc_control__256_64_64;
+		else
+			/* reg128 is true for either horz and vert
+			 * but segment_order is contiguous
+			 */
+			dcc_control = dcc_control__128_128_xxx;
+	}
+
+	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+		dcc_control != dcc_control__256_256_xxx)
+		return false;
+
+	switch (dcc_control) {
+	case dcc_control__256_256_xxx:
+		output->grph.rgb.max_uncompressed_blk_size = 256;
+		output->grph.rgb.max_compressed_blk_size = 256;
+		output->grph.rgb.independent_64b_blks = false;
+		break;
+	case dcc_control__128_128_xxx:
+		output->grph.rgb.max_uncompressed_blk_size = 128;
+		output->grph.rgb.max_compressed_blk_size = 128;
+		output->grph.rgb.independent_64b_blks = false;
+		break;
+	case dcc_control__256_64_64:
+		output->grph.rgb.max_uncompressed_blk_size = 256;
+		output->grph.rgb.max_compressed_blk_size = 64;
+		output->grph.rgb.independent_64b_blks = true;
+		break;
+	}
+
+	output->capable = true;
+	output->const_color_support = false;
+
+	return true;
+}
+
 static const struct hubbub_funcs hubbub1_funcs = {
-	.update_dchub = hubbub1_update_dchub
+	.update_dchub = hubbub1_update_dchub,
+	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
+	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
+	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
 };
 
 void hubbub1_construct(struct hubbub *hubbub,
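In hubbub1_det_request_size() above, the choice between full 256-byte and half 128-byte write-combined requests comes down to whether two swaths fit in the 164 KB DCN1.0 detile buffer. A one-line restatement of that rule (use_128b_requests() is illustrative, not driver API):

	static bool use_128b_requests(unsigned int swath_bytes)
	{
		const unsigned int detile_buf_size = 164 * 1024;	/* DCN1.0 */

		return 2 * swath_bytes > detile_buf_size;
	}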
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index a16e908821a0..f479f54e5bb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -27,6 +27,7 @@
 #define __DC_HUBBUB_DCN10_H__
 
 #include "core_types.h"
+#include "dchubbub.h"
 
 #define HUBHUB_REG_LIST_DCN()\
 	SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
@@ -173,12 +174,6 @@ struct dcn_hubbub_wm {
 	struct dcn_hubbub_wm_set sets[4];
 };
 
-struct hubbub_funcs {
-	void (*update_dchub)(
-			struct hubbub *hubbub,
-			struct dchub_init_data *dh_data);
-};
-
 struct hubbub {
 	const struct hubbub_funcs *funcs;
 	struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 39b72f696ae9..0cbc83edd37f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -146,6 +146,9 @@ void hubp1_program_size_and_rotation(
146 * 444 or 420 luma 146 * 444 or 420 luma
147 */ 147 */
148 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 148 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
149 ASSERT(plane_size->video.chroma_pitch != 0);
150 /* Chroma pitch zero can cause system hang! */
151
149 pitch = plane_size->video.luma_pitch - 1; 152 pitch = plane_size->video.luma_pitch - 1;
150 meta_pitch = dcc->video.meta_pitch_l - 1; 153 meta_pitch = dcc->video.meta_pitch_l - 1;
151 pitch_c = plane_size->video.chroma_pitch - 1; 154 pitch_c = plane_size->video.chroma_pitch - 1;
@@ -535,11 +538,13 @@ void hubp1_program_deadline(
535 REG_SET(VBLANK_PARAMETERS_3, 0, 538 REG_SET(VBLANK_PARAMETERS_3, 0,
536 REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l); 539 REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
537 540
538 REG_SET(NOM_PARAMETERS_0, 0, 541 if (REG(NOM_PARAMETERS_0))
539 DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l); 542 REG_SET(NOM_PARAMETERS_0, 0,
543 DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
540 544
541 REG_SET(NOM_PARAMETERS_1, 0, 545 if (REG(NOM_PARAMETERS_1))
542 REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l); 546 REG_SET(NOM_PARAMETERS_1, 0,
547 REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
543 548
544 REG_SET(NOM_PARAMETERS_4, 0, 549 REG_SET(NOM_PARAMETERS_4, 0,
545 DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l); 550 DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
@@ -568,11 +573,13 @@ void hubp1_program_deadline(
568 REG_SET(VBLANK_PARAMETERS_4, 0, 573 REG_SET(VBLANK_PARAMETERS_4, 0,
569 REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c); 574 REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
570 575
571 REG_SET(NOM_PARAMETERS_2, 0, 576 if (REG(NOM_PARAMETERS_2))
572 DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c); 577 REG_SET(NOM_PARAMETERS_2, 0,
578 DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
573 579
574 REG_SET(NOM_PARAMETERS_3, 0, 580 if (REG(NOM_PARAMETERS_3))
575 REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c); 581 REG_SET(NOM_PARAMETERS_3, 0,
582 REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
576 583
577 REG_SET(NOM_PARAMETERS_6, 0, 584 REG_SET(NOM_PARAMETERS_6, 0,
578 DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c); 585 DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
@@ -609,6 +616,13 @@ void hubp1_program_deadline(
609 REG_SET(DCN_SURF1_TTU_CNTL1, 0, 616 REG_SET(DCN_SURF1_TTU_CNTL1, 0,
610 REFCYC_PER_REQ_DELIVERY_PRE, 617 REFCYC_PER_REQ_DELIVERY_PRE,
611 ttu_attr->refcyc_per_req_delivery_pre_c); 618 ttu_attr->refcyc_per_req_delivery_pre_c);
619
620 REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
621 REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
622 QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
623 QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
624 REG_SET(DCN_CUR0_TTU_CNTL1, 0,
625 REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
612} 626}
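[Annotation] The NOM_PARAMETERS writes in hubp1_program_deadline() are now wrapped in if (REG(...)). Register addresses dropped from the static init list stay zero-initialized, so a zero address appears to double as an "absent on this ASIC" flag, and the guard skips the write on parts without those registers. A minimal sketch of the idiom, with simplified macros and a hypothetical write_reg() stub:

struct hubp_regs_sketch {
        unsigned int NOM_PARAMETERS_0;      /* 0 when not in the init list */
};

static void write_reg(unsigned int addr, unsigned int val)
{
        (void)addr; (void)val;              /* MMIO write stub */
}

#define REG(r)        (regs->r)             /* address doubles as presence flag */
#define REG_SET(r, v) write_reg(regs->r, (v))

static void program_nom_sketch(struct hubp_regs_sketch *regs, unsigned int val)
{
        if (REG(NOM_PARAMETERS_0))          /* skipped where the reg is absent */
                REG_SET(NOM_PARAMETERS_0, val);
}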
613 627
614static void hubp1_setup( 628static void hubp1_setup(
@@ -752,9 +766,159 @@ void min_set_viewport(
752 PRI_VIEWPORT_Y_START_C, viewport_c->y); 766 PRI_VIEWPORT_Y_START_C, viewport_c->y);
753} 767}
754 768
755void hubp1_read_state(struct dcn10_hubp *hubp1, 769void hubp1_read_state(struct hubp *hubp)
756 struct dcn_hubp_state *s)
757{ 770{
771 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
772 struct dcn_hubp_state *s = &hubp1->state;
773 struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
774 struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
775 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
776
777 /* Requester */
778 REG_GET(HUBPRET_CONTROL,
779 DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address);
780 REG_GET_4(DCN_EXPANSION_MODE,
781 DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode,
782 PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
783 MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
784 CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
785 REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
786 CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
787 MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
788 META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
789 MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
790 DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
791 MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
792 SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
793 PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
794 REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
795 CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
796 MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
797 META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
798 MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
799 DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
800 MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
801 SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
802 PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
803
804 /* DLG - Per hubp */
805 REG_GET_2(BLANK_OFFSET_0,
806 REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,
807 DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end);
808
809 REG_GET(BLANK_OFFSET_1,
810 MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start);
811
812 REG_GET(DST_DIMENSIONS,
813 REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal);
814
815 REG_GET_2(DST_AFTER_SCALER,
816 REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler,
817 DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler);
818
819 if (REG(PREFETCH_SETTINS))
820 REG_GET_2(PREFETCH_SETTINS,
821 DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
822 VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
823 else
824 REG_GET_2(PREFETCH_SETTINGS,
825 DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
826 VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
827
828 REG_GET_2(VBLANK_PARAMETERS_0,
829 DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank,
830 DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank);
831
832 REG_GET(REF_FREQ_TO_PIX_FREQ,
833 REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq);
834
835 /* DLG - Per luma/chroma */
836 REG_GET(VBLANK_PARAMETERS_1,
837 REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l);
838
839 REG_GET(VBLANK_PARAMETERS_3,
840 REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l);
841
842 if (REG(NOM_PARAMETERS_0))
843 REG_GET(NOM_PARAMETERS_0,
844 DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l);
845
846 if (REG(NOM_PARAMETERS_1))
847 REG_GET(NOM_PARAMETERS_1,
848 REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l);
849
850 REG_GET(NOM_PARAMETERS_4,
851 DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l);
852
853 REG_GET(NOM_PARAMETERS_5,
854 REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l);
855
856 REG_GET_2(PER_LINE_DELIVERY_PRE,
857 REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l,
858 REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c);
859
860 REG_GET_2(PER_LINE_DELIVERY,
861 REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l,
862 REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c);
863
864 if (REG(PREFETCH_SETTINS_C))
865 REG_GET(PREFETCH_SETTINS_C,
866 VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
867 else
868 REG_GET(PREFETCH_SETTINGS_C,
869 VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
870
871 REG_GET(VBLANK_PARAMETERS_2,
872 REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c);
873
874 REG_GET(VBLANK_PARAMETERS_4,
875 REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c);
876
877 if (REG(NOM_PARAMETERS_2))
878 REG_GET(NOM_PARAMETERS_2,
879 DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c);
880
881 if (REG(NOM_PARAMETERS_3))
882 REG_GET(NOM_PARAMETERS_3,
883 REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c);
884
885 REG_GET(NOM_PARAMETERS_6,
886 DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c);
887
888 REG_GET(NOM_PARAMETERS_7,
889 REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c);
890
891 /* TTU - per hubp */
892 REG_GET_2(DCN_TTU_QOS_WM,
893 QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm,
894 QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm);
895
896 REG_GET_2(DCN_GLOBAL_TTU_CNTL,
897 MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank,
898 QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip);
899
900 /* TTU - per luma/chroma */
901 /* Assumed surf0 is luma and 1 is chroma */
902
903 REG_GET_3(DCN_SURF0_TTU_CNTL0,
904 REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l,
905 QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l,
906 QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l);
907
908 REG_GET(DCN_SURF0_TTU_CNTL1,
909 REFCYC_PER_REQ_DELIVERY_PRE,
910 &ttu_attr->refcyc_per_req_delivery_pre_l);
911
912 REG_GET_3(DCN_SURF1_TTU_CNTL0,
913 REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c,
914 QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c,
915 QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c);
916
917 REG_GET(DCN_SURF1_TTU_CNTL1,
918 REFCYC_PER_REQ_DELIVERY_PRE,
919 &ttu_attr->refcyc_per_req_delivery_pre_c);
920
921 /* Rest of hubp */
758 REG_GET(DCSURF_SURFACE_CONFIG, 922 REG_GET(DCSURF_SURFACE_CONFIG,
759 SURFACE_PIXEL_FORMAT, &s->pixel_format); 923 SURFACE_PIXEL_FORMAT, &s->pixel_format);
760 924
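[Annotation] hubp1_read_state() now takes the abstract struct hubp * and recovers the DCN10 object with TO_DCN10_HUBP(), the usual container_of() downcast, then fills the snapshot embedded in the object instead of a caller-supplied buffer. A minimal sketch of the downcast with stripped-down types (the kernel's container_of adds a type check this sketch omits):

#include <stddef.h>

struct hubp { int inst; };

struct dcn10_hubp_sketch {
        struct hubp base;                   /* embedded, so the offset is fixed */
        int state;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define TO_DCN10_HUBP(h) container_of(h, struct dcn10_hubp_sketch, base)

static void read_state_sketch(struct hubp *hubp)
{
        struct dcn10_hubp_sketch *hubp1 = TO_DCN10_HUBP(hubp);

        hubp1->state = 0;                   /* fill the embedded snapshot */
}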
@@ -897,7 +1061,7 @@ void hubp1_cursor_set_position(
897 if (src_x_offset >= (int)param->viewport_width) 1061 if (src_x_offset >= (int)param->viewport_width)
898 cur_en = 0; /* not visible beyond right edge*/ 1062 cur_en = 0; /* not visible beyond right edge*/
899 1063
900 if (src_x_offset + (int)hubp->curs_attr.width < 0) 1064 if (src_x_offset + (int)hubp->curs_attr.width <= 0)
901 cur_en = 0; /* not visible beyond left edge*/ 1065 cur_en = 0; /* not visible beyond left edge*/
902 1066
903 if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) 1067 if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
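[Annotation] The comparison change above fixes an off-by-one at the left edge: the cursor occupies x in [src_x_offset, src_x_offset + width - 1], so when src_x_offset + width == 0 the last pixel sits at x == -1 and the cursor is already fully off-screen. A sketch of the visibility test:

static int cursor_visible_sketch(int src_x_offset, int width, int viewport_width)
{
        if (src_x_offset >= viewport_width)
                return 0;                   /* fully past the right edge */
        if (src_x_offset + width <= 0)
                return 0;                   /* last pixel at x <= -1 */
        return 1;
}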
@@ -952,6 +1116,7 @@ static struct hubp_funcs dcn10_hubp_funcs = {
952 .hubp_disconnect = hubp1_disconnect, 1116 .hubp_disconnect = hubp1_disconnect,
953 .hubp_clk_cntl = hubp1_clk_cntl, 1117 .hubp_clk_cntl = hubp1_clk_cntl,
954 .hubp_vtg_sel = hubp1_vtg_sel, 1118 .hubp_vtg_sel = hubp1_vtg_sel,
1119 .hubp_read_state = hubp1_read_state,
955}; 1120};
956 1121
957/*****************************************/ 1122/*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 4a3703e12ea1..fe9b8c4a91ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -30,6 +30,7 @@
30#define TO_DCN10_HUBP(hubp)\ 30#define TO_DCN10_HUBP(hubp)\
31 container_of(hubp, struct dcn10_hubp, base) 31 container_of(hubp, struct dcn10_hubp, base)
32 32
33/* Register address initialization macro for all ASICs (including those with reduced functionality) */
33#define HUBP_REG_LIST_DCN(id)\ 34#define HUBP_REG_LIST_DCN(id)\
34 SRI(DCHUBP_CNTL, HUBP, id),\ 35 SRI(DCHUBP_CNTL, HUBP, id),\
35 SRI(HUBPREQ_DEBUG_DB, HUBP, id),\ 36 SRI(HUBPREQ_DEBUG_DB, HUBP, id),\
@@ -78,16 +79,12 @@
78 SRI(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id),\ 79 SRI(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id),\
79 SRI(VBLANK_PARAMETERS_1, HUBPREQ, id),\ 80 SRI(VBLANK_PARAMETERS_1, HUBPREQ, id),\
80 SRI(VBLANK_PARAMETERS_3, HUBPREQ, id),\ 81 SRI(VBLANK_PARAMETERS_3, HUBPREQ, id),\
81 SRI(NOM_PARAMETERS_0, HUBPREQ, id),\
82 SRI(NOM_PARAMETERS_1, HUBPREQ, id),\
83 SRI(NOM_PARAMETERS_4, HUBPREQ, id),\ 82 SRI(NOM_PARAMETERS_4, HUBPREQ, id),\
84 SRI(NOM_PARAMETERS_5, HUBPREQ, id),\ 83 SRI(NOM_PARAMETERS_5, HUBPREQ, id),\
85 SRI(PER_LINE_DELIVERY_PRE, HUBPREQ, id),\ 84 SRI(PER_LINE_DELIVERY_PRE, HUBPREQ, id),\
86 SRI(PER_LINE_DELIVERY, HUBPREQ, id),\ 85 SRI(PER_LINE_DELIVERY, HUBPREQ, id),\
87 SRI(VBLANK_PARAMETERS_2, HUBPREQ, id),\ 86 SRI(VBLANK_PARAMETERS_2, HUBPREQ, id),\
88 SRI(VBLANK_PARAMETERS_4, HUBPREQ, id),\ 87 SRI(VBLANK_PARAMETERS_4, HUBPREQ, id),\
89 SRI(NOM_PARAMETERS_2, HUBPREQ, id),\
90 SRI(NOM_PARAMETERS_3, HUBPREQ, id),\
91 SRI(NOM_PARAMETERS_6, HUBPREQ, id),\ 88 SRI(NOM_PARAMETERS_6, HUBPREQ, id),\
92 SRI(NOM_PARAMETERS_7, HUBPREQ, id),\ 89 SRI(NOM_PARAMETERS_7, HUBPREQ, id),\
93 SRI(DCN_TTU_QOS_WM, HUBPREQ, id),\ 90 SRI(DCN_TTU_QOS_WM, HUBPREQ, id),\
@@ -96,11 +93,21 @@
96 SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\ 93 SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\
97 SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\ 94 SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\
98 SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\ 95 SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
99 SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id),\ 96 SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
97 SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
100 SRI(HUBP_CLK_CNTL, HUBP, id) 98 SRI(HUBP_CLK_CNTL, HUBP, id)
101 99
100/* Register address initialization macro for ASICs with VM */
101#define HUBP_REG_LIST_DCN_VM(id)\
102 SRI(NOM_PARAMETERS_0, HUBPREQ, id),\
103 SRI(NOM_PARAMETERS_1, HUBPREQ, id),\
104 SRI(NOM_PARAMETERS_2, HUBPREQ, id),\
105 SRI(NOM_PARAMETERS_3, HUBPREQ, id),\
106 SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id)
107
102#define HUBP_REG_LIST_DCN10(id)\ 108#define HUBP_REG_LIST_DCN10(id)\
103 HUBP_REG_LIST_DCN(id),\ 109 HUBP_REG_LIST_DCN(id),\
110 HUBP_REG_LIST_DCN_VM(id),\
104 SRI(PREFETCH_SETTINS, HUBPREQ, id),\ 111 SRI(PREFETCH_SETTINS, HUBPREQ, id),\
105 SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\ 112 SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\
106 SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\ 113 SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\
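[Annotation] The register list is layered here: HUBP_REG_LIST_DCN() covers every ASIC, HUBP_REG_LIST_DCN_VM() adds only the registers present when the display VM exists, and HUBP_REG_LIST_DCN10() composes the two. A schematic sketch of the layering (the SRI() body below is a placeholder, not the driver's real address arithmetic):

#define SRI(reg, block, id)  .reg = (0x1000 + (id))   /* placeholder address */

#define REG_LIST_BASE(id)    SRI(DCHUBP_CNTL, HUBP, id)
#define REG_LIST_VM(id)      SRI(NOM_PARAMETERS_0, HUBPREQ, id)
#define REG_LIST_FULL(id)    REG_LIST_BASE(id), REG_LIST_VM(id)

struct hubp_regs_layered {
        unsigned int DCHUBP_CNTL;
        unsigned int NOM_PARAMETERS_0;      /* stays 0 if only BASE is used */
};

static const struct hubp_regs_layered regs_with_vm = { REG_LIST_FULL(0) };
static const struct hubp_regs_layered regs_no_vm   = { REG_LIST_BASE(0) };

A pool built with only the base list leaves the VM-only addresses at zero, which is what the if (REG(...)) guards in dcn10_hubp.c key off.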
@@ -198,6 +205,8 @@
198 uint32_t DCN_SURF0_TTU_CNTL1; \ 205 uint32_t DCN_SURF0_TTU_CNTL1; \
199 uint32_t DCN_SURF1_TTU_CNTL0; \ 206 uint32_t DCN_SURF1_TTU_CNTL0; \
200 uint32_t DCN_SURF1_TTU_CNTL1; \ 207 uint32_t DCN_SURF1_TTU_CNTL1; \
208 uint32_t DCN_CUR0_TTU_CNTL0; \
209 uint32_t DCN_CUR0_TTU_CNTL1; \
201 uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB; \ 210 uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB; \
202 uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB; \ 211 uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB; \
203 uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB; \ 212 uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB; \
@@ -237,6 +246,7 @@
237#define HUBP_SF(reg_name, field_name, post_fix)\ 246#define HUBP_SF(reg_name, field_name, post_fix)\
238 .field_name = reg_name ## __ ## field_name ## post_fix 247 .field_name = reg_name ## __ ## field_name ## post_fix
239 248
249/* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */
240#define HUBP_MASK_SH_LIST_DCN(mask_sh)\ 250#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
241 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\ 251 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
242 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ 252 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
@@ -335,8 +345,6 @@
335 HUBP_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\ 345 HUBP_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
336 HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\ 346 HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
337 HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\ 347 HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
338 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
339 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
340 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\ 348 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
341 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\ 349 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
342 HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\ 350 HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
@@ -345,8 +353,6 @@
345 HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\ 353 HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
346 HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\ 354 HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
347 HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\ 355 HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
348 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
349 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
350 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\ 356 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
351 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\ 357 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
352 HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\ 358 HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
@@ -357,12 +363,24 @@
357 HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\ 363 HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
358 HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\ 364 HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
359 HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\ 365 HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
366 HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
367
368/* Mask/shift struct generation macro for ASICs with VM */
369#define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\
370 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
371 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
372 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
373 HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
360 HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\ 374 HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
361 HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh),\ 375 HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh),\
362 HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh) 376 HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
377 HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
378 HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
379 HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh)
363 380
364#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\ 381#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
365 HUBP_MASK_SH_LIST_DCN(mask_sh),\ 382 HUBP_MASK_SH_LIST_DCN(mask_sh),\
383 HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
366 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\ 384 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
367 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\ 385 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
368 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\ 386 HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
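[Annotation] HUBP_SF(), shown earlier in this hunk as .field_name = reg_name ## __ ## field_name ## post_fix, pastes the register name, field name, and a __SHIFT or _MASK suffix into the symbols generated by the register headers, so one list fills both the shift and mask structs. A minimal sketch (the two #define values are illustrative):

#define HUBPREQ0_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
#define HUBPREQ0_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK   0x0001fffful

#define HUBP_SF(reg_name, field_name, post_fix) \
        .field_name = reg_name ## __ ## field_name ## post_fix

struct dcn_mi_shift_sketch { unsigned char DST_Y_PER_PTE_ROW_NOM_L; };
struct dcn_mi_mask_sketch  { unsigned int  DST_Y_PER_PTE_ROW_NOM_L; };

static const struct dcn_mi_shift_sketch hubp_shift_sketch = {
        HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, __SHIFT)
};
static const struct dcn_mi_mask_sketch hubp_mask_sketch = {
        HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, _MASK)
};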
@@ -601,8 +619,29 @@ struct dcn_mi_mask {
601 DCN_HUBP_REG_FIELD_LIST(uint32_t); 619 DCN_HUBP_REG_FIELD_LIST(uint32_t);
602}; 620};
603 621
622struct dcn_hubp_state {
623 struct _vcs_dpi_display_dlg_regs_st dlg_attr;
624 struct _vcs_dpi_display_ttu_regs_st ttu_attr;
625 struct _vcs_dpi_display_rq_regs_st rq_regs;
626 uint32_t pixel_format;
627 uint32_t inuse_addr_hi;
628 uint32_t viewport_width;
629 uint32_t viewport_height;
630 uint32_t rotation_angle;
631 uint32_t h_mirror_en;
632 uint32_t sw_mode;
633 uint32_t dcc_en;
634 uint32_t blank_en;
635 uint32_t underflow_status;
636 uint32_t ttu_disable;
637 uint32_t min_ttu_vblank;
638 uint32_t qos_level_low_wm;
639 uint32_t qos_level_high_wm;
640};
641
604struct dcn10_hubp { 642struct dcn10_hubp {
605 struct hubp base; 643 struct hubp base;
644 struct dcn_hubp_state state;
606 const struct dcn_mi_registers *hubp_regs; 645 const struct dcn_mi_registers *hubp_regs;
607 const struct dcn_mi_shift *hubp_shift; 646 const struct dcn_mi_shift *hubp_shift;
608 const struct dcn_mi_mask *hubp_mask; 647 const struct dcn_mi_mask *hubp_mask;
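[Annotation] struct dcn_hubp_state gains the dlg/ttu/rq register snapshots and is embedded in struct dcn10_hubp, so one hubp_read_state() call caches everything and the log helpers format the copy without further register reads. An illustrative sketch, with stdio standing in for DTN_INFO:

#include <stdio.h>

struct dcn_hubp_state_sketch { unsigned int blank_en; };
struct dcn10_hubp_obj { struct dcn_hubp_state_sketch state; };

static void read_state(struct dcn10_hubp_obj *h)
{
        h->state.blank_en = 1;              /* would come from REG_GET() */
}

static void log_state(const struct dcn10_hubp_obj *h)
{
        printf("blank_en: %u\n", h->state.blank_en);  /* no MMIO here */
}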
@@ -680,25 +719,7 @@ void dcn10_hubp_construct(
680 const struct dcn_mi_shift *hubp_shift, 719 const struct dcn_mi_shift *hubp_shift,
681 const struct dcn_mi_mask *hubp_mask); 720 const struct dcn_mi_mask *hubp_mask);
682 721
683 722void hubp1_read_state(struct hubp *hubp);
684struct dcn_hubp_state {
685 uint32_t pixel_format;
686 uint32_t inuse_addr_hi;
687 uint32_t viewport_width;
688 uint32_t viewport_height;
689 uint32_t rotation_angle;
690 uint32_t h_mirror_en;
691 uint32_t sw_mode;
692 uint32_t dcc_en;
693 uint32_t blank_en;
694 uint32_t underflow_status;
695 uint32_t ttu_disable;
696 uint32_t min_ttu_vblank;
697 uint32_t qos_level_low_wm;
698 uint32_t qos_level_high_wm;
699};
700void hubp1_read_state(struct dcn10_hubp *hubp1,
701 struct dcn_hubp_state *s);
702 723
703enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch); 724enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
704 725
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 8b0f6b8a5627..572fa601a0eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,8 +45,8 @@
45#include "dcn10_hubbub.h" 45#include "dcn10_hubbub.h"
46#include "dcn10_cm_common.h" 46#include "dcn10_cm_common.h"
47 47
48#define DC_LOGGER \ 48#define DC_LOGGER_INIT(logger)
49 ctx->logger 49
50#define CTX \ 50#define CTX \
51 hws->ctx 51 hws->ctx
52#define REG(reg)\ 52#define REG(reg)\
@@ -56,16 +56,17 @@
56#define FN(reg_name, field_name) \ 56#define FN(reg_name, field_name) \
57 hws->shifts->field_name, hws->masks->field_name 57 hws->shifts->field_name, hws->masks->field_name
58 58
59/*print is 17 wide, first two characters are spaces*/
59#define DTN_INFO_MICRO_SEC(ref_cycle) \ 60#define DTN_INFO_MICRO_SEC(ref_cycle) \
60 print_microsec(dc_ctx, ref_cycle) 61 print_microsec(dc_ctx, ref_cycle)
61 62
62void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle) 63void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
63{ 64{
64 static const uint32_t ref_clk_mhz = 48; 65 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
65 static const unsigned int frac = 10; 66 static const unsigned int frac = 1000;
66 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz; 67 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
67 68
68 DTN_INFO("%d.%d \t ", 69 DTN_INFO(" %11d.%03d",
69 us_x10 / frac, 70 us_x10 / frac,
70 us_x10 % frac); 71 us_x10 % frac);
71} 72}
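[Annotation] print_microsec() now derives ref_clk_mhz from the resource pool instead of hard-coding 48, and frac moves from 10 to 1000 so three fractional digits are printed (the us_x10 variable name is kept from the old code even though the value is now in thousandths). The integer math, sketched:

#include <stdio.h>

static void print_microsec_sketch(unsigned int ref_cycle, unsigned int ref_clk_mhz)
{
        const unsigned int frac = 1000;
        unsigned int scaled = (ref_cycle * frac) / ref_clk_mhz;

        /* 123456 cycles at 100 MHz: 123456000 / 100 = 1234560,
         * printed as "1234.560", i.e. 1234.56 us. */
        printf(" %11u.%03u", scaled / frac, scaled % frac);
}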
@@ -92,14 +93,14 @@ void dcn10_log_hubbub_state(struct dc *dc)
92 93
93 hubbub1_wm_read_state(dc->res_pool->hubbub, &wm); 94 hubbub1_wm_read_state(dc->res_pool->hubbub, &wm);
94 95
95 DTN_INFO("HUBBUB WM: \t data_urgent \t pte_meta_urgent \t " 96 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
96 "sr_enter \t sr_exit \t dram_clk_change \n"); 97 " sr_enter sr_exit dram_clk_change\n");
97 98
98 for (i = 0; i < 4; i++) { 99 for (i = 0; i < 4; i++) {
99 struct dcn_hubbub_wm_set *s; 100 struct dcn_hubbub_wm_set *s;
100 101
101 s = &wm.sets[i]; 102 s = &wm.sets[i];
102 DTN_INFO("WM_Set[%d]:\t ", s->wm_set); 103 DTN_INFO("WM_Set[%d]:", s->wm_set);
103 DTN_INFO_MICRO_SEC(s->data_urgent); 104 DTN_INFO_MICRO_SEC(s->data_urgent);
104 DTN_INFO_MICRO_SEC(s->pte_meta_urgent); 105 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
105 DTN_INFO_MICRO_SEC(s->sr_enter); 106 DTN_INFO_MICRO_SEC(s->sr_enter);
@@ -111,6 +112,116 @@ void dcn10_log_hubbub_state(struct dc *dc)
111 DTN_INFO("\n"); 112 DTN_INFO("\n");
112} 113}
113 114
115static void dcn10_log_hubp_states(struct dc *dc)
116{
117 struct dc_context *dc_ctx = dc->ctx;
118 struct resource_pool *pool = dc->res_pool;
119 int i;
120
121 DTN_INFO("HUBP: format addr_hi width height"
122 " rot mir sw_mode dcc_en blank_en ttu_dis underflow"
123 " min_ttu_vblank qos_low_wm qos_high_wm\n");
124 for (i = 0; i < pool->pipe_count; i++) {
125 struct hubp *hubp = pool->hubps[i];
126 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
127
128 hubp->funcs->hubp_read_state(hubp);
129
130 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
131 " %6d %8d %7d %8xh",
132 hubp->inst,
133 s->pixel_format,
134 s->inuse_addr_hi,
135 s->viewport_width,
136 s->viewport_height,
137 s->rotation_angle,
138 s->h_mirror_en,
139 s->sw_mode,
140 s->dcc_en,
141 s->blank_en,
142 s->ttu_disable,
143 s->underflow_status);
144 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
145 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
146 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
147 DTN_INFO("\n");
148 }
149
150 DTN_INFO("\n=========RQ========\n");
151 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
152 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
153 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
154 for (i = 0; i < pool->pipe_count; i++) {
155 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
156 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
157
158 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
159 i, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
160 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
161 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
162 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
163 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
164 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
165 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
166 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
167 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
168 }
169
170 DTN_INFO("========DLG========\n");
171 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
172 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
173 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
174 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
175 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
176 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
177 " x_rp_dlay x_rr_sfl\n");
178 for (i = 0; i < pool->pipe_count; i++) {
179 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
180 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
181
182 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
183 "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
184 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
185 i, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
186 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
187 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
188 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
189 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
190 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
191 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
192 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
193 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
194 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
195 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
196 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
197 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
198 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
199 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
200 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
201 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
202 dlg_regs->xfc_reg_remote_surface_flip_latency);
203 }
204
205 DTN_INFO("========TTU========\n");
206 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
207 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
208 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
209 for (i = 0; i < pool->pipe_count; i++) {
210 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
211 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
212
213 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
214 i, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
215 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
216 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
217 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
218 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
219 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
220 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
221 }
222 DTN_INFO("\n");
223}
224
114void dcn10_log_hw_state(struct dc *dc) 225void dcn10_log_hw_state(struct dc *dc)
115{ 226{
116 struct dc_context *dc_ctx = dc->ctx; 227 struct dc_context *dc_ctx = dc->ctx;
@@ -121,41 +232,64 @@ void dcn10_log_hw_state(struct dc *dc)
121 232
122 dcn10_log_hubbub_state(dc); 233 dcn10_log_hubbub_state(dc);
123 234
124 DTN_INFO("HUBP:\t format \t addr_hi \t width \t height \t " 235 dcn10_log_hubp_states(dc);
125 "rotation \t mirror \t sw_mode \t "
126 "dcc_en \t blank_en \t ttu_dis \t underflow \t "
127 "min_ttu_vblank \t qos_low_wm \t qos_high_wm \n");
128 236
237 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
238 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
239 "C31 C32 C33 C34\n");
129 for (i = 0; i < pool->pipe_count; i++) { 240 for (i = 0; i < pool->pipe_count; i++) {
130 struct hubp *hubp = pool->hubps[i]; 241 struct dpp *dpp = pool->dpps[i];
131 struct dcn_hubp_state s; 242 struct dcn_dpp_state s;
243
244 dpp->funcs->dpp_read_state(dpp, &s);
245
246 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
247 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
248 dpp->inst,
249 s.igam_input_format,
250 (s.igam_lut_mode == 0) ? "BypassFixed" :
251 ((s.igam_lut_mode == 1) ? "BypassFloat" :
252 ((s.igam_lut_mode == 2) ? "RAM" :
253 ((s.igam_lut_mode == 3) ? "RAM" :
254 "Unknown"))),
255 (s.dgam_lut_mode == 0) ? "Bypass" :
256 ((s.dgam_lut_mode == 1) ? "sRGB" :
257 ((s.dgam_lut_mode == 2) ? "Ycc" :
258 ((s.dgam_lut_mode == 3) ? "RAM" :
259 ((s.dgam_lut_mode == 4) ? "RAM" :
260 "Unknown")))),
261 (s.rgam_lut_mode == 0) ? "Bypass" :
262 ((s.rgam_lut_mode == 1) ? "sRGB" :
263 ((s.rgam_lut_mode == 2) ? "Ycc" :
264 ((s.rgam_lut_mode == 3) ? "RAM" :
265 ((s.rgam_lut_mode == 4) ? "RAM" :
266 "Unknown")))),
267 s.gamut_remap_mode,
268 s.gamut_remap_c11_c12,
269 s.gamut_remap_c13_c14,
270 s.gamut_remap_c21_c22,
271 s.gamut_remap_c23_c24,
272 s.gamut_remap_c31_c32,
273 s.gamut_remap_c33_c34);
274 DTN_INFO("\n");
275 }
276 DTN_INFO("\n");
132 277
133 hubp1_read_state(TO_DCN10_HUBP(hubp), &s); 278 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
279 for (i = 0; i < pool->pipe_count; i++) {
280 struct mpcc_state s = {0};
134 281
135 DTN_INFO("[%d]:\t %xh \t %xh \t %d \t %d \t " 282 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
136 "%xh \t %xh \t %xh \t " 283 if (s.opp_id != 0xf)
137 "%d \t %d \t %d \t %xh \t", 284 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
138 hubp->inst, 285 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
139 s.pixel_format, 286 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
140 s.inuse_addr_hi, 287 s.idle);
141 s.viewport_width,
142 s.viewport_height,
143 s.rotation_angle,
144 s.h_mirror_en,
145 s.sw_mode,
146 s.dcc_en,
147 s.blank_en,
148 s.ttu_disable,
149 s.underflow_status);
150 DTN_INFO_MICRO_SEC(s.min_ttu_vblank);
151 DTN_INFO_MICRO_SEC(s.qos_level_low_wm);
152 DTN_INFO_MICRO_SEC(s.qos_level_high_wm);
153 DTN_INFO("\n");
154 } 288 }
155 DTN_INFO("\n"); 289 DTN_INFO("\n");
156 290
157 DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t " 291 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel"
158 "h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n"); 292 " h_bs h_be h_ss h_se hpol htot vtot underflow\n");
159 293
160 for (i = 0; i < pool->timing_generator_count; i++) { 294 for (i = 0; i < pool->timing_generator_count; i++) {
161 struct timing_generator *tg = pool->timing_generators[i]; 295 struct timing_generator *tg = pool->timing_generators[i];
@@ -167,9 +301,8 @@ void dcn10_log_hw_state(struct dc *dc)
167 if ((s.otg_enabled & 1) == 0) 301 if ((s.otg_enabled & 1) == 0)
168 continue; 302 continue;
169 303
170 DTN_INFO("[%d]:\t %d \t %d \t %d \t %d \t " 304 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d"
171 "%d \t %d \t %d \t %d \t %d \t %d \t " 305 " %5d %5d %5d %5d %9d\n",
172 "%d \t %d \t %d \t %d \t %d \t ",
173 tg->inst, 306 tg->inst,
174 s.v_blank_start, 307 s.v_blank_start,
175 s.v_blank_end, 308 s.v_blank_end,
@@ -178,6 +311,8 @@ void dcn10_log_hw_state(struct dc *dc)
178 s.v_sync_a_pol, 311 s.v_sync_a_pol,
179 s.v_total_max, 312 s.v_total_max,
180 s.v_total_min, 313 s.v_total_min,
314 s.v_total_max_sel,
315 s.v_total_min_sel,
181 s.h_blank_start, 316 s.h_blank_start,
182 s.h_blank_end, 317 s.h_blank_end,
183 s.h_sync_a_start, 318 s.h_sync_a_start,
@@ -186,10 +321,19 @@ void dcn10_log_hw_state(struct dc *dc)
186 s.h_total, 321 s.h_total,
187 s.v_total, 322 s.v_total,
188 s.underflow_occurred_status); 323 s.underflow_occurred_status);
189 DTN_INFO("\n");
190 } 324 }
191 DTN_INFO("\n"); 325 DTN_INFO("\n");
192 326
327 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
328 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
329 dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
330 dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
331 dc->current_state->bw.dcn.calc_clk.dispclk_khz,
332 dc->current_state->bw.dcn.calc_clk.dppclk_khz,
333 dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz,
334 dc->current_state->bw.dcn.calc_clk.fclk_khz,
335 dc->current_state->bw.dcn.calc_clk.socclk_khz);
336
193 log_mpc_crc(dc); 337 log_mpc_crc(dc);
194 338
195 DTN_INFO_END(); 339 DTN_INFO_END();
@@ -354,7 +498,7 @@ static void power_on_plane(
354 struct dce_hwseq *hws, 498 struct dce_hwseq *hws,
355 int plane_id) 499 int plane_id)
356{ 500{
357 struct dc_context *ctx = hws->ctx; 501 DC_LOGGER_INIT(hws->ctx->logger);
358 if (REG(DC_IP_REQUEST_CNTL)) { 502 if (REG(DC_IP_REQUEST_CNTL)) {
359 REG_SET(DC_IP_REQUEST_CNTL, 0, 503 REG_SET(DC_IP_REQUEST_CNTL, 0,
360 IP_REQUEST_EN, 1); 504 IP_REQUEST_EN, 1);
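[Annotation] Here and in the hunks below, the local struct dc_context *ctx (kept only so the DC_LOGGER macro could expand to ctx->logger) is replaced by DC_LOGGER_INIT(), which declares the logger local itself. A sketch of the rewire; the macro bodies are illustrative, the real ones live in the dc logger headers:

struct dal_logger;

#define DC_LOGGER_INIT(l) struct dal_logger *dc_logger_local = (l)
#define DC_LOGGER         dc_logger_local

static void log_hw(struct dal_logger *logger) { (void)logger; }

static void hwseq_func_sketch(struct dal_logger *l)
{
        DC_LOGGER_INIT(l);        /* replaces: struct dc_context *ctx = ... */
        log_hw(DC_LOGGER);
}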
@@ -461,7 +605,7 @@ static void false_optc_underflow_wa(
461 tg->funcs->clear_optc_underflow(tg); 605 tg->funcs->clear_optc_underflow(tg);
462} 606}
463 607
464static enum dc_status dcn10_prog_pixclk_crtc_otg( 608static enum dc_status dcn10_enable_stream_timing(
465 struct pipe_ctx *pipe_ctx, 609 struct pipe_ctx *pipe_ctx,
466 struct dc_state *context, 610 struct dc_state *context,
467 struct dc *dc) 611 struct dc *dc)
@@ -553,7 +697,7 @@ static void reset_back_end_for_pipe(
553 struct dc_state *context) 697 struct dc_state *context)
554{ 698{
555 int i; 699 int i;
556 struct dc_context *ctx = dc->ctx; 700 DC_LOGGER_INIT(dc->ctx->logger);
557 if (pipe_ctx->stream_res.stream_enc == NULL) { 701 if (pipe_ctx->stream_res.stream_enc == NULL) {
558 pipe_ctx->stream = NULL; 702 pipe_ctx->stream = NULL;
559 return; 703 return;
@@ -649,7 +793,7 @@ static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
649{ 793{
650 struct dce_hwseq *hws = dc->hwseq; 794 struct dce_hwseq *hws = dc->hwseq;
651 struct dpp *dpp = pipe_ctx->plane_res.dpp; 795 struct dpp *dpp = pipe_ctx->plane_res.dpp;
652 struct dc_context *ctx = dc->ctx; 796 DC_LOGGER_INIT(dc->ctx->logger);
653 797
654 if (REG(DC_IP_REQUEST_CNTL)) { 798 if (REG(DC_IP_REQUEST_CNTL)) {
655 REG_SET(DC_IP_REQUEST_CNTL, 0, 799 REG_SET(DC_IP_REQUEST_CNTL, 0,
@@ -699,7 +843,7 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
699 843
700static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) 844static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
701{ 845{
702 struct dc_context *ctx = dc->ctx; 846 DC_LOGGER_INIT(dc->ctx->logger);
703 847
704 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) 848 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
705 return; 849 return;
@@ -945,9 +1089,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
945 tf = plane_state->in_transfer_func; 1089 tf = plane_state->in_transfer_func;
946 1090
947 if (plane_state->gamma_correction && 1091 if (plane_state->gamma_correction &&
948 plane_state->gamma_correction->is_identity) 1092 !plane_state->gamma_correction->is_identity
949 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS); 1093 && dce_use_lut(plane_state->format))
950 else if (plane_state->gamma_correction && dce_use_lut(plane_state->format))
951 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction); 1094 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
952 1095
953 if (tf == NULL) 1096 if (tf == NULL)
@@ -1433,7 +1576,7 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
1433 } 1576 }
1434} 1577}
1435 1578
1436static void program_output_csc(struct dc *dc, 1579static void dcn10_program_output_csc(struct dc *dc,
1437 struct pipe_ctx *pipe_ctx, 1580 struct pipe_ctx *pipe_ctx,
1438 enum dc_color_space colorspace, 1581 enum dc_color_space colorspace,
1439 uint16_t *matrix, 1582 uint16_t *matrix,
@@ -1623,6 +1766,8 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
1623 struct mpc *mpc = dc->res_pool->mpc; 1766 struct mpc *mpc = dc->res_pool->mpc;
1624 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); 1767 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
1625 1768
1769
1770
1626 /* TODO: proper fix once fpga works */ 1771 /* TODO: proper fix once fpga works */
1627 1772
1628 if (dc->debug.surface_visual_confirm) 1773 if (dc->debug.surface_visual_confirm)
@@ -1649,6 +1794,7 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
1649 pipe_ctx->stream->output_color_space) 1794 pipe_ctx->stream->output_color_space)
1650 && per_pixel_alpha; 1795 && per_pixel_alpha;
1651 1796
1797
1652 /* 1798 /*
1653 * TODO: remove hack 1799 * TODO: remove hack
1654 * Note: currently there is a bug in init_hw such that 1800 * Note: currently there is a bug in init_hw such that
@@ -1659,6 +1805,12 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
1659 */ 1805 */
1660 mpcc_id = hubp->inst; 1806 mpcc_id = hubp->inst;
1661 1807
1808 /* If there is no full update, don't need to touch MPC tree*/
1809 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
1810 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
1811 return;
1812 }
1813
1662 /* check if this MPCC is already being used */ 1814 /* check if this MPCC is already being used */
1663 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); 1815 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
1664 /* remove MPCC if being used */ 1816 /* remove MPCC if being used */
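[Annotation] The early return above adds a fast path to update_mpcc(): when update_flags says the change is not a full update, only the blending registers of the already-connected MPCC are reprogrammed and the tree walk (get/remove/insert) is skipped. Schematically, with hypothetical stub names:

static void update_blending_stub(int mpcc_id) { (void)mpcc_id; }
static void remove_mpcc_stub(int mpcc_id)     { (void)mpcc_id; }
static void insert_plane_stub(int mpcc_id)    { (void)mpcc_id; }

static void update_mpcc_sketch(int mpcc_id, int full_update)
{
        if (!full_update) {
                update_blending_stub(mpcc_id);  /* topology untouched */
                return;
        }
        remove_mpcc_stub(mpcc_id);              /* full update: relink tree */
        insert_plane_stub(mpcc_id);
}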
@@ -1777,7 +1929,7 @@ static void update_dchubp_dpp(
1777 /*gamut remap*/ 1929 /*gamut remap*/
1778 program_gamut_remap(pipe_ctx); 1930 program_gamut_remap(pipe_ctx);
1779 1931
1780 program_output_csc(dc, 1932 dc->hwss.program_output_csc(dc,
1781 pipe_ctx, 1933 pipe_ctx,
1782 pipe_ctx->stream->output_color_space, 1934 pipe_ctx->stream->output_color_space,
1783 pipe_ctx->stream->csc_color_matrix.matrix, 1935 pipe_ctx->stream->csc_color_matrix.matrix,
@@ -1810,9 +1962,9 @@ static void update_dchubp_dpp(
1810 hubp->funcs->set_blank(hubp, false); 1962 hubp->funcs->set_blank(hubp, false);
1811} 1963}
1812 1964
1813static void dcn10_otg_blank( 1965static void dcn10_blank_pixel_data(
1814 struct dc *dc, 1966 struct dc *dc,
1815 struct stream_resource stream_res, 1967 struct stream_resource *stream_res,
1816 struct dc_stream_state *stream, 1968 struct dc_stream_state *stream,
1817 bool blank) 1969 bool blank)
1818{ 1970{
@@ -1823,21 +1975,21 @@ static void dcn10_otg_blank(
1823 color_space = stream->output_color_space; 1975 color_space = stream->output_color_space;
1824 color_space_to_black_color(dc, color_space, &black_color); 1976 color_space_to_black_color(dc, color_space, &black_color);
1825 1977
1826 if (stream_res.tg->funcs->set_blank_color) 1978 if (stream_res->tg->funcs->set_blank_color)
1827 stream_res.tg->funcs->set_blank_color( 1979 stream_res->tg->funcs->set_blank_color(
1828 stream_res.tg, 1980 stream_res->tg,
1829 &black_color); 1981 &black_color);
1830 1982
1831 if (!blank) { 1983 if (!blank) {
1832 if (stream_res.tg->funcs->set_blank) 1984 if (stream_res->tg->funcs->set_blank)
1833 stream_res.tg->funcs->set_blank(stream_res.tg, blank); 1985 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
1834 if (stream_res.abm) 1986 if (stream_res->abm)
1835 stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level); 1987 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
1836 } else if (blank) { 1988 } else if (blank) {
1837 if (stream_res.abm) 1989 if (stream_res->abm)
1838 stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm); 1990 stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
1839 if (stream_res.tg->funcs->set_blank) 1991 if (stream_res->tg->funcs->set_blank)
1840 stream_res.tg->funcs->set_blank(stream_res.tg, blank); 1992 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
1841 } 1993 }
1842} 1994}
1843 1995
@@ -1876,7 +2028,7 @@ static void program_all_pipe_in_tree(
1876 pipe_ctx->stream_res.tg->funcs->program_global_sync( 2028 pipe_ctx->stream_res.tg->funcs->program_global_sync(
1877 pipe_ctx->stream_res.tg); 2029 pipe_ctx->stream_res.tg);
1878 2030
1879 dcn10_otg_blank(dc, pipe_ctx->stream_res, 2031 dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
1880 pipe_ctx->stream, blank); 2032 pipe_ctx->stream, blank);
1881 } 2033 }
1882 2034
@@ -1983,9 +2135,9 @@ static void dcn10_apply_ctx_for_surface(
1983 bool removed_pipe[4] = { false }; 2135 bool removed_pipe[4] = { false };
1984 unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000; 2136 unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
1985 bool program_water_mark = false; 2137 bool program_water_mark = false;
1986 struct dc_context *ctx = dc->ctx;
1987 struct pipe_ctx *top_pipe_to_program = 2138 struct pipe_ctx *top_pipe_to_program =
1988 find_top_pipe_for_stream(dc, context, stream); 2139 find_top_pipe_for_stream(dc, context, stream);
2140 DC_LOGGER_INIT(dc->ctx->logger);
1989 2141
1990 if (!top_pipe_to_program) 2142 if (!top_pipe_to_program)
1991 return; 2143 return;
@@ -1996,7 +2148,7 @@ static void dcn10_apply_ctx_for_surface(
1996 2148
1997 if (num_planes == 0) { 2149 if (num_planes == 0) {
1998 /* OTG blank before remove all front end */ 2150 /* OTG blank before remove all front end */
1999 dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true); 2151 dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
2000 } 2152 }
2001 2153
2002 /* Disconnect unused mpcc */ 2154 /* Disconnect unused mpcc */
@@ -2527,6 +2679,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2527 .update_pending_status = dcn10_update_pending_status, 2679 .update_pending_status = dcn10_update_pending_status,
2528 .set_input_transfer_func = dcn10_set_input_transfer_func, 2680 .set_input_transfer_func = dcn10_set_input_transfer_func,
2529 .set_output_transfer_func = dcn10_set_output_transfer_func, 2681 .set_output_transfer_func = dcn10_set_output_transfer_func,
2682 .program_output_csc = dcn10_program_output_csc,
2530 .power_down = dce110_power_down, 2683 .power_down = dce110_power_down,
2531 .enable_accelerated_mode = dce110_enable_accelerated_mode, 2684 .enable_accelerated_mode = dce110_enable_accelerated_mode,
2532 .enable_timing_synchronization = dcn10_enable_timing_synchronization, 2685 .enable_timing_synchronization = dcn10_enable_timing_synchronization,
@@ -2538,10 +2691,11 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2538 .blank_stream = dce110_blank_stream, 2691 .blank_stream = dce110_blank_stream,
2539 .enable_display_power_gating = dcn10_dummy_display_power_gating, 2692 .enable_display_power_gating = dcn10_dummy_display_power_gating,
2540 .disable_plane = dcn10_disable_plane, 2693 .disable_plane = dcn10_disable_plane,
2694 .blank_pixel_data = dcn10_blank_pixel_data,
2541 .pipe_control_lock = dcn10_pipe_control_lock, 2695 .pipe_control_lock = dcn10_pipe_control_lock,
2542 .set_bandwidth = dcn10_set_bandwidth, 2696 .set_bandwidth = dcn10_set_bandwidth,
2543 .reset_hw_ctx_wrap = reset_hw_ctx_wrap, 2697 .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
2544 .prog_pixclk_crtc_otg = dcn10_prog_pixclk_crtc_otg, 2698 .enable_stream_timing = dcn10_enable_stream_timing,
2545 .set_drr = set_drr, 2699 .set_drr = set_drr,
2546 .get_position = get_position, 2700 .get_position = get_position,
2547 .set_static_screen_control = set_static_screen_control, 2701 .set_static_screen_control = set_static_screen_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 179890b1a8c4..9ca51ae46de7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -65,6 +65,7 @@ static void mpc1_update_blending(
65 int mpcc_id) 65 int mpcc_id)
66{ 66{
67 struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); 67 struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
68 struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id);
68 69
69 REG_UPDATE_5(MPCC_CONTROL[mpcc_id], 70 REG_UPDATE_5(MPCC_CONTROL[mpcc_id],
70 MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode, 71 MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode,
@@ -74,6 +75,7 @@ static void mpc1_update_blending(
74 MPCC_GLOBAL_GAIN, blnd_cfg->global_gain); 75 MPCC_GLOBAL_GAIN, blnd_cfg->global_gain);
75 76
76 mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id); 77 mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
78 mpcc->blnd_cfg = *blnd_cfg;
77} 79}
78 80
79void mpc1_update_stereo_mix( 81void mpc1_update_stereo_mix(
@@ -235,8 +237,7 @@ struct mpcc *mpc1_insert_plane(
235 } 237 }
236 238
237 /* update the blending configuration */ 239 /* update the blending configuration */
238 new_mpcc->blnd_cfg = *blnd_cfg; 240 mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
239 mpc->funcs->update_blending(mpc, &new_mpcc->blnd_cfg, mpcc_id);
240 241
241 /* update the stereo mix settings, if provided */ 242 /* update the stereo mix settings, if provided */
242 if (sm_cfg != NULL) { 243 if (sm_cfg != NULL) {
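[Annotation] The two hunks above move the software copy of the blend config: mpc1_update_blending() now stores blnd_cfg into the mpcc it just programmed, and mpc1_insert_plane() stops copying it first. Caching at the single point that touches the hardware keeps the shadow consistent on every caller path. A sketch:

struct blnd_cfg_sketch { int alpha_mode; };
struct mpcc_obj        { struct blnd_cfg_sketch blnd_cfg; };

static void hw_program_blending(const struct blnd_cfg_sketch *cfg) { (void)cfg; }

static void update_blending_sketch(struct mpcc_obj *mpcc,
                const struct blnd_cfg_sketch *cfg)
{
        hw_program_blending(cfg);
        mpcc->blnd_cfg = *cfg;    /* shadow updated at the choke point */
}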
@@ -409,7 +410,26 @@ void mpc1_init_mpcc_list_from_hw(
409 } 410 }
410} 411}
411 412
413void mpc1_read_mpcc_state(
414 struct mpc *mpc,
415 int mpcc_inst,
416 struct mpcc_state *s)
417{
418 struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
419
420 REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
421 REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
422 REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
423 REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
424 MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
425 MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
426 MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
427 REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
428 MPCC_BUSY, &s->busy);
429}
430
412const struct mpc_funcs dcn10_mpc_funcs = { 431const struct mpc_funcs dcn10_mpc_funcs = {
432 .read_mpcc_state = mpc1_read_mpcc_state,
413 .insert_plane = mpc1_insert_plane, 433 .insert_plane = mpc1_insert_plane,
414 .remove_mpcc = mpc1_remove_mpcc, 434 .remove_mpcc = mpc1_remove_mpcc,
415 .mpc_init = mpc1_mpc_init, 435 .mpc_init = mpc1_mpc_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index 267a2995ef6e..d3d16c4cbea3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -183,4 +183,9 @@ struct mpcc *mpc1_get_mpcc_for_dpp(
183 struct mpc_tree *tree, 183 struct mpc_tree *tree,
184 int dpp_id); 184 int dpp_id);
185 185
186void mpc1_read_mpcc_state(
187 struct mpc *mpc,
188 int mpcc_inst,
189 struct mpcc_state *s);
190
186#endif 191#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 4bf64d1b2c60..c734b7fa5835 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -93,6 +93,81 @@ static void optc1_disable_stereo(struct timing_generator *optc)
93 OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0); 93 OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
94} 94}
95 95
96static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
97{
98 struct dc_crtc_timing patched_crtc_timing;
99 int vesa_sync_start;
100 int asic_blank_end;
101 int interlace_factor;
102 int vertical_line_start;
103
104 patched_crtc_timing = *dc_crtc_timing;
105 optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
106
107 vesa_sync_start = patched_crtc_timing.h_addressable +
108 patched_crtc_timing.h_border_right +
109 patched_crtc_timing.h_front_porch;
110
111 asic_blank_end = patched_crtc_timing.h_total -
112 vesa_sync_start -
113 patched_crtc_timing.h_border_left;
114
115 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
116
117 vesa_sync_start = patched_crtc_timing.v_addressable +
118 patched_crtc_timing.v_border_bottom +
119 patched_crtc_timing.v_front_porch;
120
121 asic_blank_end = (patched_crtc_timing.v_total -
122 vesa_sync_start -
123 patched_crtc_timing.v_border_top)
124 * interlace_factor;
125
126 vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
127 if (vertical_line_start < 0) {
128 ASSERT(0);
129 vertical_line_start = 0;
130 }
131
132 return vertical_line_start;
133}
134
135void optc1_program_vline_interrupt(
136 struct timing_generator *optc,
137 const struct dc_crtc_timing *dc_crtc_timing,
138 unsigned long long vsync_delta)
139{
140
141 struct optc *optc1 = DCN10TG_FROM_TG(optc);
142
143 unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000);
144 unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_khz + 99), 100);
145 uint32_t req_delta_lines = (uint32_t) div64_u64(
146 (req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1),
147 dc_crtc_timing->h_total);
148
149 uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing);
150 uint32_t start_line = 0;
151 uint32_t endLine = 0;
152
153 if (req_delta_lines != 0)
154 req_delta_lines--;
155
156 if (req_delta_lines > vsync_line)
157 start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1;
158 else
159 start_line = vsync_line - req_delta_lines;
160
161 endLine = start_line + 2;
162
163 if (endLine >= dc_crtc_timing->v_total)
164 endLine = 2;
165
166 REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
167 OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
168 OTG_VERTICAL_INTERRUPT0_LINE_END, endLine);
169}
170
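[Annotation] optc1_program_vline_interrupt() converts the requested vsync_delta into whole lines with a ceiling division at each step (the +9999/10000 and +99/100 roundings), then places a two-line interrupt window ahead of the start vline computed by get_start_vline(), wrapping through v_total on underflow. A worked example, assuming vsync_delta is in nanoseconds (inferred from the /10000 scaling into tens of microseconds) and a hypothetical vsync_line of 1084:

/* vsync_delta = 500000 ns, pix_clk = 148500 kHz, h_total = 2200:
 *   req_delta_tens_of_usec = (500000 + 9999) / 10000 = 50
 *   pix_clk_hundreds_khz   = (148500 + 99) / 100     = 1485
 *   pixels                 = 50 * 1485               = 74250
 *   req_delta_lines        = ceil(74250 / 2200)      = 34, then -- gives 33
 * start_line = 1084 - 33 = 1051, endLine = 1053.
 */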
96/** 171/**
97 * program_timing_generator used by mode timing set 172 * program_timing_generator used by mode timing set
98 * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition. 173 * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
@@ -780,17 +855,17 @@ void optc1_set_drr(
780 OTG_SET_V_TOTAL_MIN_MASK_EN, 0, 855 OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
781 OTG_SET_V_TOTAL_MIN_MASK, 0); 856 OTG_SET_V_TOTAL_MIN_MASK, 0);
782 } else { 857 } else {
783 REG_SET(OTG_V_TOTAL_MIN, 0,
784 OTG_V_TOTAL_MIN, 0);
785
786 REG_SET(OTG_V_TOTAL_MAX, 0,
787 OTG_V_TOTAL_MAX, 0);
788
789 REG_UPDATE_4(OTG_V_TOTAL_CONTROL, 858 REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
790 OTG_SET_V_TOTAL_MIN_MASK, 0, 859 OTG_SET_V_TOTAL_MIN_MASK, 0,
791 OTG_V_TOTAL_MIN_SEL, 0, 860 OTG_V_TOTAL_MIN_SEL, 0,
792 OTG_V_TOTAL_MAX_SEL, 0, 861 OTG_V_TOTAL_MAX_SEL, 0,
793 OTG_FORCE_LOCK_ON_EVENT, 0); 862 OTG_FORCE_LOCK_ON_EVENT, 0);
863
864 REG_SET(OTG_V_TOTAL_MIN, 0,
865 OTG_V_TOTAL_MIN, 0);
866
867 REG_SET(OTG_V_TOTAL_MAX, 0,
868 OTG_V_TOTAL_MAX, 0);
794 } 869 }
795} 870}
796 871
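[Annotation] The else branch of optc1_set_drr() is reordered so OTG_V_TOTAL_CONTROL is programmed (clearing the min/max select and force-lock bits) before OTG_V_TOTAL_MIN and OTG_V_TOTAL_MAX are zeroed. A plausible reading, not stated in the patch, is that this keeps the OTG from latching a zero total while the min/max selects are still active.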
@@ -1154,6 +1229,12 @@ void optc1_read_otg_state(struct optc *optc1,
1154 REG_GET(OTG_V_TOTAL_MIN, 1229 REG_GET(OTG_V_TOTAL_MIN,
1155 OTG_V_TOTAL_MIN, &s->v_total_min); 1230 OTG_V_TOTAL_MIN, &s->v_total_min);
1156 1231
1232 REG_GET(OTG_V_TOTAL_CONTROL,
1233 OTG_V_TOTAL_MAX_SEL, &s->v_total_max_sel);
1234
1235 REG_GET(OTG_V_TOTAL_CONTROL,
1236 OTG_V_TOTAL_MIN_SEL, &s->v_total_min_sel);
1237
1157 REG_GET_2(OTG_V_SYNC_A, 1238 REG_GET_2(OTG_V_SYNC_A,
1158 OTG_V_SYNC_A_START, &s->v_sync_a_start, 1239 OTG_V_SYNC_A_START, &s->v_sync_a_start,
1159 OTG_V_SYNC_A_END, &s->v_sync_a_end); 1240 OTG_V_SYNC_A_END, &s->v_sync_a_end);
@@ -1215,6 +1296,7 @@ static bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
1215static const struct timing_generator_funcs dcn10_tg_funcs = { 1296static const struct timing_generator_funcs dcn10_tg_funcs = {
1216 .validate_timing = optc1_validate_timing, 1297 .validate_timing = optc1_validate_timing,
1217 .program_timing = optc1_program_timing, 1298 .program_timing = optc1_program_timing,
1299 .program_vline_interrupt = optc1_program_vline_interrupt,
1218 .program_global_sync = optc1_program_global_sync, 1300 .program_global_sync = optc1_program_global_sync,
1219 .enable_crtc = optc1_enable_crtc, 1301 .enable_crtc = optc1_enable_crtc,
1220 .disable_crtc = optc1_disable_crtc, 1302 .disable_crtc = optc1_disable_crtc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index d25e7bf0d0d7..89e09e5327a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -65,6 +65,8 @@
65 SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ 65 SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
66 SRI(OTG_BLACK_COLOR, OTG, inst),\ 66 SRI(OTG_BLACK_COLOR, OTG, inst),\
67 SRI(OTG_CLOCK_CONTROL, OTG, inst),\ 67 SRI(OTG_CLOCK_CONTROL, OTG, inst),\
68 SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\
69 SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\
68 SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ 70 SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
69 SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ 71 SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
70 SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ 72 SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
@@ -124,6 +126,8 @@ struct dcn_optc_registers {
124 uint32_t OTG_TEST_PATTERN_CONTROL; 126 uint32_t OTG_TEST_PATTERN_CONTROL;
125 uint32_t OTG_TEST_PATTERN_COLOR; 127 uint32_t OTG_TEST_PATTERN_COLOR;
126 uint32_t OTG_CLOCK_CONTROL; 128 uint32_t OTG_CLOCK_CONTROL;
129 uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL;
130 uint32_t OTG_VERTICAL_INTERRUPT0_POSITION;
127 uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL; 131 uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
128 uint32_t OTG_VERTICAL_INTERRUPT2_POSITION; 132 uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
129 uint32_t OPTC_INPUT_CLOCK_CONTROL; 133 uint32_t OPTC_INPUT_CLOCK_CONTROL;
@@ -206,6 +210,9 @@ struct dcn_optc_registers {
206 SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ 210 SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\
207 SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ 211 SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\
208 SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ 212 SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\
213 SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\
214 SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\
215 SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\
209 SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ 216 SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
210 SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ 217 SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
211 SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ 218 SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
@@ -323,6 +330,9 @@ struct dcn_optc_registers {
323 type OTG_CLOCK_EN;\ 330 type OTG_CLOCK_EN;\
324 type OTG_CLOCK_ON;\ 331 type OTG_CLOCK_ON;\
325 type OTG_CLOCK_GATE_DIS;\ 332 type OTG_CLOCK_GATE_DIS;\
333 type OTG_VERTICAL_INTERRUPT0_INT_ENABLE;\
334 type OTG_VERTICAL_INTERRUPT0_LINE_START;\
335 type OTG_VERTICAL_INTERRUPT0_LINE_END;\
326 type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\ 336 type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\
327 type OTG_VERTICAL_INTERRUPT2_LINE_START;\ 337 type OTG_VERTICAL_INTERRUPT2_LINE_START;\
328 type OPTC_INPUT_CLK_EN;\ 338 type OPTC_INPUT_CLK_EN;\
@@ -396,6 +406,8 @@ struct dcn_otg_state {
396 uint32_t v_total; 406 uint32_t v_total;
397 uint32_t v_total_max; 407 uint32_t v_total_max;
398 uint32_t v_total_min; 408 uint32_t v_total_min;
409 uint32_t v_total_min_sel;
410 uint32_t v_total_max_sel;
399 uint32_t v_sync_a_start; 411 uint32_t v_sync_a_start;
400 uint32_t v_sync_a_end; 412 uint32_t v_sync_a_end;
401 uint32_t h_blank_start; 413 uint32_t h_blank_start;
@@ -420,6 +432,10 @@ void optc1_program_timing(
420 const struct dc_crtc_timing *dc_crtc_timing, 432 const struct dc_crtc_timing *dc_crtc_timing,
421 bool use_vbios); 433 bool use_vbios);
422 434
435void optc1_program_vline_interrupt(struct timing_generator *optc,
436 const struct dc_crtc_timing *dc_crtc_timing,
437 unsigned long long vsync_delta);
438
423void optc1_program_global_sync( 439void optc1_program_global_sync(
424 struct timing_generator *optc); 440 struct timing_generator *optc);
425 441
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 02bd664aed3e..2c0a3150bf2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -39,7 +39,7 @@
39#include "dce110/dce110_hw_sequencer.h" 39#include "dce110/dce110_hw_sequencer.h"
40#include "dcn10/dcn10_opp.h" 40#include "dcn10/dcn10_opp.h"
41#include "dce/dce_link_encoder.h" 41#include "dce/dce_link_encoder.h"
42#include "dce/dce_stream_encoder.h" 42#include "dcn10/dcn10_stream_encoder.h"
43#include "dce/dce_clocks.h" 43#include "dce/dce_clocks.h"
44#include "dce/dce_clock_source.h" 44#include "dce/dce_clock_source.h"
45#include "dce/dce_audio.h" 45#include "dce/dce_audio.h"
@@ -166,36 +166,22 @@ static const struct dce_abm_mask abm_mask = {
166 166
167#define stream_enc_regs(id)\ 167#define stream_enc_regs(id)\
168[id] = {\ 168[id] = {\
169 SE_DCN_REG_LIST(id),\ 169 SE_DCN_REG_LIST(id)\
170 .TMDS_CNTL = 0,\
171 .AFMT_AVI_INFO0 = 0,\
172 .AFMT_AVI_INFO1 = 0,\
173 .AFMT_AVI_INFO2 = 0,\
174 .AFMT_AVI_INFO3 = 0,\
175} 170}
176 171
177static const struct dce110_stream_enc_registers stream_enc_regs[] = { 172static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
178 stream_enc_regs(0), 173 stream_enc_regs(0),
179 stream_enc_regs(1), 174 stream_enc_regs(1),
180 stream_enc_regs(2), 175 stream_enc_regs(2),
181 stream_enc_regs(3), 176 stream_enc_regs(3),
182}; 177};
183 178
184static const struct dce_stream_encoder_shift se_shift = { 179static const struct dcn10_stream_encoder_shift se_shift = {
185 SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT) 180 SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
186}; 181};
187 182
188static const struct dce_stream_encoder_mask se_mask = { 183static const struct dcn10_stream_encoder_mask se_mask = {
189 SE_COMMON_MASK_SH_LIST_DCN10(_MASK), 184 SE_COMMON_MASK_SH_LIST_DCN10(_MASK)
190 .AFMT_GENERIC0_UPDATE = 0,
191 .AFMT_GENERIC2_UPDATE = 0,
192 .DP_DYN_RANGE = 0,
193 .DP_YCBCR_RANGE = 0,
194 .HDMI_AVI_INFO_SEND = 0,
195 .HDMI_AVI_INFO_CONT = 0,
196 .HDMI_AVI_INFO_LINE = 0,
197 .DP_SEC_AVI_ENABLE = 0,
198 .AFMT_AVI_INFO_VERSION = 0
199}; 185};
200 186
201#define audio_regs(id)\ 187#define audio_regs(id)\
@@ -320,11 +306,14 @@ static const struct dcn_dpp_registers tf_regs[] = {
320}; 306};
321 307
322static const struct dcn_dpp_shift tf_shift = { 308static const struct dcn_dpp_shift tf_shift = {
323 TF_REG_LIST_SH_MASK_DCN10(__SHIFT) 309 TF_REG_LIST_SH_MASK_DCN10(__SHIFT),
310 TF_DEBUG_REG_LIST_SH_DCN10
311
324}; 312};
325 313
326static const struct dcn_dpp_mask tf_mask = { 314static const struct dcn_dpp_mask tf_mask = {
327 TF_REG_LIST_SH_MASK_DCN10(_MASK), 315 TF_REG_LIST_SH_MASK_DCN10(_MASK),
316 TF_DEBUG_REG_LIST_MASK_DCN10
328}; 317};
329 318
330static const struct dcn_mpc_registers mpc_regs = { 319static const struct dcn_mpc_registers mpc_regs = {
@@ -650,16 +639,16 @@ static struct stream_encoder *dcn10_stream_encoder_create(
650 enum engine_id eng_id, 639 enum engine_id eng_id,
651 struct dc_context *ctx) 640 struct dc_context *ctx)
652{ 641{
653 struct dce110_stream_encoder *enc110 = 642 struct dcn10_stream_encoder *enc1 =
654 kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); 643 kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
655 644
656 if (!enc110) 645 if (!enc1)
657 return NULL; 646 return NULL;
658 647
659 dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, 648 dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
660 &stream_enc_regs[eng_id], 649 &stream_enc_regs[eng_id],
661 &se_shift, &se_mask); 650 &se_shift, &se_mask);
662 return &enc110->base; 651 return &enc1->base;
663} 652}
664 653
665static const struct dce_hwseq_registers hwseq_reg = { 654static const struct dce_hwseq_registers hwseq_reg = {
@@ -918,36 +907,6 @@ enum dc_status dcn10_add_stream_to_ctx(
918 return result; 907 return result;
919} 908}
920 909
921enum dc_status dcn10_validate_guaranteed(
922 struct dc *dc,
923 struct dc_stream_state *dc_stream,
924 struct dc_state *context)
925{
926 enum dc_status result = DC_ERROR_UNEXPECTED;
927
928 context->streams[0] = dc_stream;
929 dc_stream_retain(context->streams[0]);
930 context->stream_count++;
931
932 result = resource_map_pool_resources(dc, context, dc_stream);
933
934 if (result == DC_OK)
935 result = resource_map_phy_clock_resources(dc, context, dc_stream);
936
937 if (result == DC_OK)
938 result = build_mapped_resource(dc, context, dc_stream);
939
940 if (result == DC_OK) {
941 validate_guaranteed_copy_streams(
942 context, dc->caps.max_streams);
943 result = resource_build_scaling_params_for_context(dc, context);
944 }
945 if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
946 return DC_FAIL_BANDWIDTH_VALIDATE;
947
948 return result;
949}
950
951static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer( 910static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
952 struct dc_state *context, 911 struct dc_state *context,
953 const struct resource_pool *pool, 912 const struct resource_pool *pool,
@@ -978,235 +937,16 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
978 return idle_pipe; 937 return idle_pipe;
979} 938}
980 939
981enum dcc_control { 940static bool dcn10_get_dcc_compression_cap(const struct dc *dc,
982 dcc_control__256_256_xxx,
983 dcc_control__128_128_xxx,
984 dcc_control__256_64_64,
985};
986
987enum segment_order {
988 segment_order__na,
989 segment_order__contiguous,
990 segment_order__non_contiguous,
991};
992
993static bool dcc_support_pixel_format(
994 enum surface_pixel_format format,
995 unsigned int *bytes_per_element)
996{
997 /* DML: get_bytes_per_element */
998 switch (format) {
999 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
1000 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
1001 *bytes_per_element = 2;
1002 return true;
1003 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
1004 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
1005 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
1006 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
1007 *bytes_per_element = 4;
1008 return true;
1009 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1010 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1011 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1012 *bytes_per_element = 8;
1013 return true;
1014 default:
1015 return false;
1016 }
1017}
1018
1019static bool dcc_support_swizzle(
1020 enum swizzle_mode_values swizzle,
1021 unsigned int bytes_per_element,
1022 enum segment_order *segment_order_horz,
1023 enum segment_order *segment_order_vert)
1024{
1025 bool standard_swizzle = false;
1026 bool display_swizzle = false;
1027
1028 switch (swizzle) {
1029 case DC_SW_4KB_S:
1030 case DC_SW_64KB_S:
1031 case DC_SW_VAR_S:
1032 case DC_SW_4KB_S_X:
1033 case DC_SW_64KB_S_X:
1034 case DC_SW_VAR_S_X:
1035 standard_swizzle = true;
1036 break;
1037 case DC_SW_4KB_D:
1038 case DC_SW_64KB_D:
1039 case DC_SW_VAR_D:
1040 case DC_SW_4KB_D_X:
1041 case DC_SW_64KB_D_X:
1042 case DC_SW_VAR_D_X:
1043 display_swizzle = true;
1044 break;
1045 default:
1046 break;
1047 }
1048
1049 if (bytes_per_element == 1 && standard_swizzle) {
1050 *segment_order_horz = segment_order__contiguous;
1051 *segment_order_vert = segment_order__na;
1052 return true;
1053 }
1054 if (bytes_per_element == 2 && standard_swizzle) {
1055 *segment_order_horz = segment_order__non_contiguous;
1056 *segment_order_vert = segment_order__contiguous;
1057 return true;
1058 }
1059 if (bytes_per_element == 4 && standard_swizzle) {
1060 *segment_order_horz = segment_order__non_contiguous;
1061 *segment_order_vert = segment_order__contiguous;
1062 return true;
1063 }
1064 if (bytes_per_element == 8 && standard_swizzle) {
1065 *segment_order_horz = segment_order__na;
1066 *segment_order_vert = segment_order__contiguous;
1067 return true;
1068 }
1069 if (bytes_per_element == 8 && display_swizzle) {
1070 *segment_order_horz = segment_order__contiguous;
1071 *segment_order_vert = segment_order__non_contiguous;
1072 return true;
1073 }
1074
1075 return false;
1076}
1077
1078static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
1079 unsigned int bytes_per_element)
1080{
1081 /* copied from DML. might want to refactor DML to leverage from DML */
1082 /* DML : get_blk256_size */
1083 if (bytes_per_element == 1) {
1084 *blk256_width = 16;
1085 *blk256_height = 16;
1086 } else if (bytes_per_element == 2) {
1087 *blk256_width = 16;
1088 *blk256_height = 8;
1089 } else if (bytes_per_element == 4) {
1090 *blk256_width = 8;
1091 *blk256_height = 8;
1092 } else if (bytes_per_element == 8) {
1093 *blk256_width = 8;
1094 *blk256_height = 4;
1095 }
1096}
1097
1098static void det_request_size(
1099 unsigned int height,
1100 unsigned int width,
1101 unsigned int bpe,
1102 bool *req128_horz_wc,
1103 bool *req128_vert_wc)
1104{
1105 unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
1106
1107 unsigned int blk256_height = 0;
1108 unsigned int blk256_width = 0;
1109 unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
1110
1111 get_blk256_size(&blk256_width, &blk256_height, bpe);
1112
1113 swath_bytes_horz_wc = height * blk256_height * bpe;
1114 swath_bytes_vert_wc = width * blk256_width * bpe;
1115
1116 *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
1117 false : /* full 256B request */
1118 true; /* half 128b request */
1119
1120 *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
1121 false : /* full 256B request */
1122 true; /* half 128b request */
1123}
1124
1125static bool get_dcc_compression_cap(const struct dc *dc,
1126 const struct dc_dcc_surface_param *input, 941 const struct dc_dcc_surface_param *input,
1127 struct dc_surface_dcc_cap *output) 942 struct dc_surface_dcc_cap *output)
1128{ 943{
1129 /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ 944 return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
1130 enum dcc_control dcc_control; 945 dc->res_pool->hubbub,
1131 unsigned int bpe; 946 input,
1132 enum segment_order segment_order_horz, segment_order_vert; 947 output);
1133 bool req128_horz_wc, req128_vert_wc;
1134
1135 memset(output, 0, sizeof(*output));
1136
1137 if (dc->debug.disable_dcc == DCC_DISABLE)
1138 return false;
1139
1140 if (!dcc_support_pixel_format(input->format,
1141 &bpe))
1142 return false;
1143
1144 if (!dcc_support_swizzle(input->swizzle_mode, bpe,
1145 &segment_order_horz, &segment_order_vert))
1146 return false;
1147
1148 det_request_size(input->surface_size.height, input->surface_size.width,
1149 bpe, &req128_horz_wc, &req128_vert_wc);
1150
1151 if (!req128_horz_wc && !req128_vert_wc) {
1152 dcc_control = dcc_control__256_256_xxx;
1153 } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
1154 if (!req128_horz_wc)
1155 dcc_control = dcc_control__256_256_xxx;
1156 else if (segment_order_horz == segment_order__contiguous)
1157 dcc_control = dcc_control__128_128_xxx;
1158 else
1159 dcc_control = dcc_control__256_64_64;
1160 } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
1161 if (!req128_vert_wc)
1162 dcc_control = dcc_control__256_256_xxx;
1163 else if (segment_order_vert == segment_order__contiguous)
1164 dcc_control = dcc_control__128_128_xxx;
1165 else
1166 dcc_control = dcc_control__256_64_64;
1167 } else {
1168 if ((req128_horz_wc &&
1169 segment_order_horz == segment_order__non_contiguous) ||
1170 (req128_vert_wc &&
1171 segment_order_vert == segment_order__non_contiguous))
1172 /* access_dir not known, must use most constraining */
1173 dcc_control = dcc_control__256_64_64;
1174 else
1175 /* reg128 is true for either horz and vert
1176 * but segment_order is contiguous
1177 */
1178 dcc_control = dcc_control__128_128_xxx;
1179 }
1180
1181 if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
1182 dcc_control != dcc_control__256_256_xxx)
1183 return false;
1184
1185 switch (dcc_control) {
1186 case dcc_control__256_256_xxx:
1187 output->grph.rgb.max_uncompressed_blk_size = 256;
1188 output->grph.rgb.max_compressed_blk_size = 256;
1189 output->grph.rgb.independent_64b_blks = false;
1190 break;
1191 case dcc_control__128_128_xxx:
1192 output->grph.rgb.max_uncompressed_blk_size = 128;
1193 output->grph.rgb.max_compressed_blk_size = 128;
1194 output->grph.rgb.independent_64b_blks = false;
1195 break;
1196 case dcc_control__256_64_64:
1197 output->grph.rgb.max_uncompressed_blk_size = 256;
1198 output->grph.rgb.max_compressed_blk_size = 64;
1199 output->grph.rgb.independent_64b_blks = true;
1200 break;
1201 }
1202
1203 output->capable = true;
1204 output->const_color_support = false;
1205
1206 return true;
1207} 948}
1208 949
1209
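The helpers deleted above (dcc_support_pixel_format, dcc_support_swizzle, det_request_size and the dcc_control decision tree) are not lost; the capability query now delegates to hubbub->funcs->get_dcc_compression_cap. As a worked instance of the detile-buffer rule det_request_size() implemented, here is the 4-bytes-per-element case; the numbers are an example, not driver data.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
	unsigned int height = 1080, bpe = 4;   /* e.g. an ARGB8888 surface */
	unsigned int blk256_height = 8;        /* 8x8 256-byte block at 4 bpe */

	/* horizontal-scan swath, as in det_request_size() */
	unsigned int swath_bytes = height * blk256_height * bpe;
	bool req128 = !(2 * swath_bytes <= detile_buf_size);

	printf("swath = %u bytes -> %s requests\n", swath_bytes,
	       req128 ? "half 128B" : "full 256B");  /* 34560 -> full 256B */
	return 0;
}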
1210static void dcn10_destroy_resource_pool(struct resource_pool **pool) 950static void dcn10_destroy_resource_pool(struct resource_pool **pool)
1211{ 951{
1212 struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); 952 struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
@@ -1227,13 +967,12 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
1227} 967}
1228 968
1229static struct dc_cap_funcs cap_funcs = { 969static struct dc_cap_funcs cap_funcs = {
1230 .get_dcc_compression_cap = get_dcc_compression_cap 970 .get_dcc_compression_cap = dcn10_get_dcc_compression_cap
1231}; 971};
1232 972
1233static struct resource_funcs dcn10_res_pool_funcs = { 973static struct resource_funcs dcn10_res_pool_funcs = {
1234 .destroy = dcn10_destroy_resource_pool, 974 .destroy = dcn10_destroy_resource_pool,
1235 .link_enc_create = dcn10_link_encoder_create, 975 .link_enc_create = dcn10_link_encoder_create,
1236 .validate_guaranteed = dcn10_validate_guaranteed,
1237 .validate_bandwidth = dcn_validate_bandwidth, 976 .validate_bandwidth = dcn_validate_bandwidth,
1238 .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, 977 .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
1239 .validate_plane = dcn10_validate_plane, 978 .validate_plane = dcn10_validate_plane,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
new file mode 100644
index 000000000000..befd8639ad55
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -0,0 +1,1490 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26
27#include "dc_bios_types.h"
28#include "dcn10_stream_encoder.h"
29#include "reg_helper.h"
30#include "hw_shared.h"
31
32#define DC_LOGGER \
33 enc1->base.ctx->logger
34
35
36#define REG(reg)\
37 (enc1->regs->reg)
38
39#undef FN
40#define FN(reg_name, field_name) \
41 enc1->se_shift->field_name, enc1->se_mask->field_name
42
43#define VBI_LINE_0 0
44#define DP_BLANK_MAX_RETRY 20
45#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
46
47
48enum {
49 DP_MST_UPDATE_MAX_RETRY = 50
50};
51
52#define CTX \
53 enc1->base.ctx
54
55void enc1_update_generic_info_packet(
56 struct dcn10_stream_encoder *enc1,
57 uint32_t packet_index,
58 const struct dc_info_packet *info_packet)
59{
60 uint32_t regval;
61 /* TODOFPGA Figure out a proper number for max_retries polling for lock
62 * use 50 for now.
63 */
64 uint32_t max_retries = 50;
65
 66	/* we need to turn on the clock before programming the AFMT block */
67 REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
68
69 if (packet_index >= 8)
70 ASSERT(0);
71
 72	/* poll until dig_update_lock is not locked -> ASIC-internal signal;
 73	 * assume the OTG master lock will unlock it
 74	 */
75/* REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
76 0, 10, max_retries);*/
77
78 /* check if HW reading GSP memory */
79 REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
80 0, 10, max_retries);
81
 82	/* if HW is still reading GSP memory after the wait above, something
 83	 * is wrong: clear the GSP memory-access conflict so that
 84	 * SW can safely write to GSP memory
 85	 */
86 REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
87
88 /* choose which generic packet to use */
89 regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
90 REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
91 AFMT_GENERIC_INDEX, packet_index);
92
93 /* write generic packet header
94 * (4th byte is for GENERIC0 only)
95 */
96 REG_SET_4(AFMT_GENERIC_HDR, 0,
97 AFMT_GENERIC_HB0, info_packet->hb0,
98 AFMT_GENERIC_HB1, info_packet->hb1,
99 AFMT_GENERIC_HB2, info_packet->hb2,
100 AFMT_GENERIC_HB3, info_packet->hb3);
101
102 /* write generic packet contents
103 * (we never use last 4 bytes)
104 * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers
105 */
106 {
107 const uint32_t *content =
108 (const uint32_t *) &info_packet->sb[0];
109
110 REG_WRITE(AFMT_GENERIC_0, *content++);
111 REG_WRITE(AFMT_GENERIC_1, *content++);
112 REG_WRITE(AFMT_GENERIC_2, *content++);
113 REG_WRITE(AFMT_GENERIC_3, *content++);
114 REG_WRITE(AFMT_GENERIC_4, *content++);
115 REG_WRITE(AFMT_GENERIC_5, *content++);
116 REG_WRITE(AFMT_GENERIC_6, *content++);
117 REG_WRITE(AFMT_GENERIC_7, *content);
118 }
119
120 switch (packet_index) {
121 case 0:
122 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
123 AFMT_GENERIC0_FRAME_UPDATE, 1);
124 break;
125 case 1:
126 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
127 AFMT_GENERIC1_FRAME_UPDATE, 1);
128 break;
129 case 2:
130 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
131 AFMT_GENERIC2_FRAME_UPDATE, 1);
132 break;
133 case 3:
134 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
135 AFMT_GENERIC3_FRAME_UPDATE, 1);
136 break;
137 case 4:
138 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
139 AFMT_GENERIC4_FRAME_UPDATE, 1);
140 break;
141 case 5:
142 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
143 AFMT_GENERIC5_FRAME_UPDATE, 1);
144 break;
145 case 6:
146 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
147 AFMT_GENERIC6_FRAME_UPDATE, 1);
148 break;
149 case 7:
150 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
151 AFMT_GENERIC7_FRAME_UPDATE, 1);
152 break;
153 default:
154 break;
155 }
156}
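The REG_WAIT() calls above poll a register field until it reads back the expected value, sleeping between reads and giving up after max_retries. A self-contained sketch of that polling pattern follows; reg_read() and sleep_us() are illustrative stand-ins, not dc's reg_helper API.

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_reg = 1;  /* pretend HW clears this bit */

static uint32_t reg_read(void) { return fake_reg--; }
static void sleep_us(unsigned int us) { (void)us; }

static int reg_wait(uint32_t expected, unsigned int delay_us,
		    unsigned int max_try)
{
	for (unsigned int i = 0; i < max_try; i++) {
		if (reg_read() == expected)
			return 0;           /* condition met */
		sleep_us(delay_us);         /* 10 us between polls above */
	}
	return -1;                          /* timed out: ASSERT/log */
}

int main(void)
{
	/* AFMT_GENERIC_CONFLICT == 0, polled every 10 us, up to 50 tries */
	printf("wait result: %d\n", reg_wait(0, 10, 50));
	return 0;
}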
157
158static void enc1_update_hdmi_info_packet(
159 struct dcn10_stream_encoder *enc1,
160 uint32_t packet_index,
161 const struct dc_info_packet *info_packet)
162{
163 uint32_t cont, send, line;
164
165 if (info_packet->valid) {
166 enc1_update_generic_info_packet(
167 enc1,
168 packet_index,
169 info_packet);
170
171 /* enable transmission of packet(s) -
172 * packet transmission begins on the next frame
173 */
174 cont = 1;
175 /* send packet(s) every frame */
176 send = 1;
177 /* select line number to send packets on */
178 line = 2;
179 } else {
180 cont = 0;
181 send = 0;
182 line = 0;
183 }
184
185 /* choose which generic packet control to use */
186 switch (packet_index) {
187 case 0:
188 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
189 HDMI_GENERIC0_CONT, cont,
190 HDMI_GENERIC0_SEND, send,
191 HDMI_GENERIC0_LINE, line);
192 break;
193 case 1:
194 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
195 HDMI_GENERIC1_CONT, cont,
196 HDMI_GENERIC1_SEND, send,
197 HDMI_GENERIC1_LINE, line);
198 break;
199 case 2:
200 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
201 HDMI_GENERIC0_CONT, cont,
202 HDMI_GENERIC0_SEND, send,
203 HDMI_GENERIC0_LINE, line);
204 break;
205 case 3:
206 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
207 HDMI_GENERIC1_CONT, cont,
208 HDMI_GENERIC1_SEND, send,
209 HDMI_GENERIC1_LINE, line);
210 break;
211 case 4:
212 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
213 HDMI_GENERIC0_CONT, cont,
214 HDMI_GENERIC0_SEND, send,
215 HDMI_GENERIC0_LINE, line);
216 break;
217 case 5:
218 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
219 HDMI_GENERIC1_CONT, cont,
220 HDMI_GENERIC1_SEND, send,
221 HDMI_GENERIC1_LINE, line);
222 break;
223 case 6:
224 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
225 HDMI_GENERIC0_CONT, cont,
226 HDMI_GENERIC0_SEND, send,
227 HDMI_GENERIC0_LINE, line);
228 break;
229 case 7:
230 REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
231 HDMI_GENERIC1_CONT, cont,
232 HDMI_GENERIC1_SEND, send,
233 HDMI_GENERIC1_LINE, line);
234 break;
235 default:
236 /* invalid HW packet index */
237 DC_LOG_WARNING(
238 "Invalid HW packet index: %s()\n",
239 __func__);
240 return;
241 }
242}
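The switch above encodes a simple pairing: HDMI generic packets 0-7 map two-per-register onto HDMI_GENERIC_PACKET_CONTROL0..3, alternating between the GENERIC0_* and GENERIC1_* fields. A one-loop sketch of the same mapping:

#include <stdio.h>

int main(void)
{
	for (int idx = 0; idx <= 7; idx++)
		printf("packet %d -> HDMI_GENERIC_PACKET_CONTROL%d, GENERIC%d_*\n",
		       idx, idx / 2, idx % 2);
	return 0;
}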
243
244/* setup stream encoder in dp mode */
245void enc1_stream_encoder_dp_set_stream_attribute(
246 struct stream_encoder *enc,
247 struct dc_crtc_timing *crtc_timing,
248 enum dc_color_space output_color_space)
249{
250 uint32_t h_active_start;
251 uint32_t v_active_start;
252 uint32_t misc0 = 0;
253 uint32_t misc1 = 0;
254 uint32_t h_blank;
255 uint32_t h_back_porch;
256 uint8_t synchronous_clock = 0; /* asynchronous mode */
257 uint8_t colorimetry_bpc;
258 uint8_t dynamic_range_rgb = 0; /*full range*/
259 uint8_t dynamic_range_ycbcr = 1; /*bt709*/
260
261 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
262
263 REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
264
265 /* set pixel encoding */
266 switch (crtc_timing->pixel_encoding) {
267 case PIXEL_ENCODING_YCBCR422:
268 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
269 DP_PIXEL_ENCODING_TYPE_YCBCR422);
270 break;
271 case PIXEL_ENCODING_YCBCR444:
272 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
273 DP_PIXEL_ENCODING_TYPE_YCBCR444);
274
275 if (crtc_timing->flags.Y_ONLY)
276 if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
277 /* HW testing only, no use case yet.
278 * Color depth of Y-only could be
279 * 8, 10, 12, 16 bits
280 */
281 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
282 DP_PIXEL_ENCODING_TYPE_Y_ONLY);
283 /* Note: DP_MSA_MISC1 bit 7 is the indicator
284 * of Y-only mode.
285 * This bit is set in HW if register
286 * DP_PIXEL_ENCODING is programmed to 0x4
287 */
288 break;
289 case PIXEL_ENCODING_YCBCR420:
290 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
291 DP_PIXEL_ENCODING_TYPE_YCBCR420);
292 REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
293 break;
294 default:
295 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
296 DP_PIXEL_ENCODING_TYPE_RGB444);
297 break;
298 }
299
300 misc1 = REG_READ(DP_MSA_MISC);
301
302 /* set color depth */
303
304 switch (crtc_timing->display_color_depth) {
305 case COLOR_DEPTH_666:
306 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
307 0);
308 break;
309 case COLOR_DEPTH_888:
310 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
311 DP_COMPONENT_PIXEL_DEPTH_8BPC);
312 break;
313 case COLOR_DEPTH_101010:
314 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
315 DP_COMPONENT_PIXEL_DEPTH_10BPC);
316
317 break;
318 case COLOR_DEPTH_121212:
319 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
320 DP_COMPONENT_PIXEL_DEPTH_12BPC);
321 break;
322 default:
323 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
324 DP_COMPONENT_PIXEL_DEPTH_6BPC);
325 break;
326 }
327
328 /* set dynamic range and YCbCr range */
329
330 switch (crtc_timing->display_color_depth) {
331 case COLOR_DEPTH_666:
332 colorimetry_bpc = 0;
333 break;
334 case COLOR_DEPTH_888:
335 colorimetry_bpc = 1;
336 break;
337 case COLOR_DEPTH_101010:
338 colorimetry_bpc = 2;
339 break;
340 case COLOR_DEPTH_121212:
341 colorimetry_bpc = 3;
342 break;
343 default:
344 colorimetry_bpc = 0;
345 break;
346 }
347
348 misc0 = misc0 | synchronous_clock;
 349	misc0 |= colorimetry_bpc << 5; /* bits 7:5; OR keeps the sync-clock bit */
350
351 switch (output_color_space) {
352 case COLOR_SPACE_SRGB:
353 misc0 = misc0 | 0x0;
354 misc1 = misc1 & ~0x80; /* bit7 = 0*/
355 dynamic_range_rgb = 0; /*full range*/
356 break;
357 case COLOR_SPACE_SRGB_LIMITED:
358 misc0 = misc0 | 0x8; /* bit3=1 */
359 misc1 = misc1 & ~0x80; /* bit7 = 0*/
360 dynamic_range_rgb = 1; /*limited range*/
361 break;
362 case COLOR_SPACE_YCBCR601:
363 case COLOR_SPACE_YCBCR601_LIMITED:
364 misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
365 misc1 = misc1 & ~0x80; /* bit7 = 0*/
366 dynamic_range_ycbcr = 0; /*bt601*/
367 if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
368 misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
369 else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
370 misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
371 break;
372 case COLOR_SPACE_YCBCR709:
373 case COLOR_SPACE_YCBCR709_LIMITED:
374 misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
375 misc1 = misc1 & ~0x80; /* bit7 = 0*/
376 dynamic_range_ycbcr = 1; /*bt709*/
377 if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
378 misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
379 else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
380 misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
381 break;
382 case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
383 dynamic_range_rgb = 1; /*limited range*/
384 break;
385 case COLOR_SPACE_2020_RGB_FULLRANGE:
386 case COLOR_SPACE_2020_YCBCR:
387 case COLOR_SPACE_XR_RGB:
388 case COLOR_SPACE_MSREF_SCRGB:
389 case COLOR_SPACE_ADOBERGB:
390 case COLOR_SPACE_DCIP3:
391 case COLOR_SPACE_XV_YCC_709:
392 case COLOR_SPACE_XV_YCC_601:
393 case COLOR_SPACE_DISPLAYNATIVE:
394 case COLOR_SPACE_DOLBYVISION:
395 case COLOR_SPACE_APPCTRL:
396 case COLOR_SPACE_CUSTOMPOINTS:
397 case COLOR_SPACE_UNKNOWN:
398 /* do nothing */
399 break;
400 }
401
402 REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
403 REG_WRITE(DP_MSA_MISC, misc1); /* MSA_MISC1 */
404
 405	/* new DCN register;
 406	 * dc_crtc_timing is a VESA DMT struct with data from the EDID
 407	 */
408 REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
409 DP_MSA_HTOTAL, crtc_timing->h_total,
410 DP_MSA_VTOTAL, crtc_timing->v_total);
411
412 /* calculate from vesa timing parameters
413 * h_active_start related to leading edge of sync
414 */
415
416 h_blank = crtc_timing->h_total - crtc_timing->h_border_left -
417 crtc_timing->h_addressable - crtc_timing->h_border_right;
418
419 h_back_porch = h_blank - crtc_timing->h_front_porch -
420 crtc_timing->h_sync_width;
421
422 /* start at beginning of left border */
423 h_active_start = crtc_timing->h_sync_width + h_back_porch;
424
425
426 v_active_start = crtc_timing->v_total - crtc_timing->v_border_top -
427 crtc_timing->v_addressable - crtc_timing->v_border_bottom -
428 crtc_timing->v_front_porch;
429
430
431 /* start at beginning of left border */
432 REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
433 DP_MSA_HSTART, h_active_start,
434 DP_MSA_VSTART, v_active_start);
435
436 REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
437 DP_MSA_HSYNCWIDTH,
438 crtc_timing->h_sync_width,
439 DP_MSA_HSYNCPOLARITY,
440 !crtc_timing->flags.HSYNC_POSITIVE_POLARITY,
441 DP_MSA_VSYNCWIDTH,
442 crtc_timing->v_sync_width,
443 DP_MSA_VSYNCPOLARITY,
444 !crtc_timing->flags.VSYNC_POSITIVE_POLARITY);
445
 446	/* HWIDTH includes border or overscan */
447 REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
448 DP_MSA_HWIDTH, crtc_timing->h_border_left +
449 crtc_timing->h_addressable + crtc_timing->h_border_right,
450 DP_MSA_VHEIGHT, crtc_timing->v_border_top +
451 crtc_timing->v_addressable + crtc_timing->v_border_bottom);
452}
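For reference, here is the MISC0 packing above evaluated for one concrete case, YCbCr 4:4:4 BT.709 at 10 bpc. The bit positions follow the comments in the switch statements; the example is illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t misc0 = 0;
	uint8_t  synchronous_clock = 0;  /* asynchronous mode, as above */
	uint8_t  colorimetry_bpc  = 2;   /* COLOR_DEPTH_101010 */

	misc0 |= synchronous_clock;      /* bit0 */
	misc0 |= colorimetry_bpc << 5;   /* bits 7:5 */
	misc0 |= 0x18;                   /* YCbCr BT.709: bit3=1, bit4=1 */
	misc0 |= 0x4;                    /* 4:4:4 sampling: bit2=1, bit1=0 */

	printf("MISC0 = 0x%02x\n", misc0);   /* prints 0x5c */
	return 0;
}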
453
454static void enc1_stream_encoder_set_stream_attribute_helper(
455 struct dcn10_stream_encoder *enc1,
456 struct dc_crtc_timing *crtc_timing)
457{
458 switch (crtc_timing->pixel_encoding) {
459 case PIXEL_ENCODING_YCBCR422:
460 REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1);
461 break;
462 default:
463 REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0);
464 break;
465 }
466 REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0);
467}
468
469/* setup stream encoder in hdmi mode */
470void enc1_stream_encoder_hdmi_set_stream_attribute(
471 struct stream_encoder *enc,
472 struct dc_crtc_timing *crtc_timing,
473 int actual_pix_clk_khz,
474 bool enable_audio)
475{
476 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
477 struct bp_encoder_control cntl = {0};
478
479 cntl.action = ENCODER_CONTROL_SETUP;
480 cntl.engine_id = enc1->base.id;
481 cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
482 cntl.enable_dp_audio = enable_audio;
483 cntl.pixel_clock = actual_pix_clk_khz;
484 cntl.lanes_number = LANE_COUNT_FOUR;
485
486 if (enc1->base.bp->funcs->encoder_control(
487 enc1->base.bp, &cntl) != BP_RESULT_OK)
488 return;
489
490 enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
491
492 /* setup HDMI engine */
493 REG_UPDATE_5(HDMI_CONTROL,
494 HDMI_PACKET_GEN_VERSION, 1,
495 HDMI_KEEPOUT_MODE, 1,
496 HDMI_DEEP_COLOR_ENABLE, 0,
497 HDMI_DATA_SCRAMBLE_EN, 0,
498 HDMI_CLOCK_CHANNEL_RATE, 0);
499
500
501 switch (crtc_timing->display_color_depth) {
502 case COLOR_DEPTH_888:
503 REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
504 break;
505 case COLOR_DEPTH_101010:
506 if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
507 REG_UPDATE_2(HDMI_CONTROL,
508 HDMI_DEEP_COLOR_DEPTH, 1,
509 HDMI_DEEP_COLOR_ENABLE, 0);
510 } else {
511 REG_UPDATE_2(HDMI_CONTROL,
512 HDMI_DEEP_COLOR_DEPTH, 1,
513 HDMI_DEEP_COLOR_ENABLE, 1);
514 }
515 break;
516 case COLOR_DEPTH_121212:
517 if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
518 REG_UPDATE_2(HDMI_CONTROL,
519 HDMI_DEEP_COLOR_DEPTH, 2,
520 HDMI_DEEP_COLOR_ENABLE, 0);
521 } else {
522 REG_UPDATE_2(HDMI_CONTROL,
523 HDMI_DEEP_COLOR_DEPTH, 2,
524 HDMI_DEEP_COLOR_ENABLE, 1);
525 }
526 break;
527 case COLOR_DEPTH_161616:
528 REG_UPDATE_2(HDMI_CONTROL,
529 HDMI_DEEP_COLOR_DEPTH, 3,
530 HDMI_DEEP_COLOR_ENABLE, 1);
531 break;
532 default:
533 break;
534 }
535
536 if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
537 /* enable HDMI data scrambler
538 * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
539 * Clock channel frequency is 1/4 of character rate.
540 */
541 REG_UPDATE_2(HDMI_CONTROL,
542 HDMI_DATA_SCRAMBLE_EN, 1,
543 HDMI_CLOCK_CHANNEL_RATE, 1);
544 } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
545
546 /* TODO: New feature for DCE11, still need to implement */
547
548 /* enable HDMI data scrambler
549 * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
550 * Clock channel frequency is the same
551 * as character rate
552 */
553 REG_UPDATE_2(HDMI_CONTROL,
554 HDMI_DATA_SCRAMBLE_EN, 1,
555 HDMI_CLOCK_CHANNEL_RATE, 0);
556 }
557
558
559 REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
560 HDMI_GC_CONT, 1,
561 HDMI_GC_SEND, 1,
562 HDMI_NULL_SEND, 1);
563
564 /* following belongs to audio */
565 REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
566
567 REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
568
569 REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
570 VBI_LINE_0 + 2);
571
572 REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
573}
574
575/* setup stream encoder in dvi mode */
576void enc1_stream_encoder_dvi_set_stream_attribute(
577 struct stream_encoder *enc,
578 struct dc_crtc_timing *crtc_timing,
579 bool is_dual_link)
580{
581 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
582 struct bp_encoder_control cntl = {0};
583
584 cntl.action = ENCODER_CONTROL_SETUP;
585 cntl.engine_id = enc1->base.id;
586 cntl.signal = is_dual_link ?
587 SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
588 cntl.enable_dp_audio = false;
589 cntl.pixel_clock = crtc_timing->pix_clk_khz;
590 cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
591
592 if (enc1->base.bp->funcs->encoder_control(
593 enc1->base.bp, &cntl) != BP_RESULT_OK)
594 return;
595
596 ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
597 ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
598 enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
599}
600
601void enc1_stream_encoder_set_mst_bandwidth(
602 struct stream_encoder *enc,
603 struct fixed31_32 avg_time_slots_per_mtp)
604{
605 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
606 uint32_t x = dal_fixed31_32_floor(
607 avg_time_slots_per_mtp);
608 uint32_t y = dal_fixed31_32_ceil(
609 dal_fixed31_32_shl(
610 dal_fixed31_32_sub_int(
611 avg_time_slots_per_mtp,
612 x),
613 26));
614
615 REG_SET_2(DP_MSE_RATE_CNTL, 0,
616 DP_MSE_RATE_X, x,
617 DP_MSE_RATE_Y, y);
618
619 /* wait for update to be completed on the link */
620 /* i.e. DP_MSE_RATE_UPDATE_PENDING field (read only) */
621 /* is reset to 0 (not pending) */
622 REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING,
623 0,
624 10, DP_MST_UPDATE_MAX_RETRY);
625}
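DP_MSE_RATE_X/Y split the average time slots per MTP into an integer part and a 26-bit fraction, which is what the floor/ceil/shl-by-26 sequence above computes. A sketch with plain doubles standing in for dc's fixed31_32 type:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	double avg_time_slots_per_mtp = 3.5;  /* example MST allocation */

	uint32_t x = (uint32_t)floor(avg_time_slots_per_mtp);
	uint32_t y = (uint32_t)ceil((avg_time_slots_per_mtp - x) *
				    (double)(1u << 26));

	/* 3.5 -> X=3, Y=0x2000000 (half of the 26-bit range) */
	printf("DP_MSE_RATE_X=%u DP_MSE_RATE_Y=0x%x\n", x, y);
	return 0;
}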
626
627static void enc1_stream_encoder_update_hdmi_info_packets(
628 struct stream_encoder *enc,
629 const struct encoder_info_frame *info_frame)
630{
631 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
632
 633	/* for bring up, disable double buffering; TODO */
634 REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
635
636 enc1_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
637 enc1_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
638 enc1_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
639 enc1_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
640 enc1_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
641}
642
643static void enc1_stream_encoder_stop_hdmi_info_packets(
644 struct stream_encoder *enc)
645{
646 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
647
648 /* stop generic packets 0 & 1 on HDMI */
649 REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0,
650 HDMI_GENERIC1_CONT, 0,
651 HDMI_GENERIC1_LINE, 0,
652 HDMI_GENERIC1_SEND, 0,
653 HDMI_GENERIC0_CONT, 0,
654 HDMI_GENERIC0_LINE, 0,
655 HDMI_GENERIC0_SEND, 0);
656
657 /* stop generic packets 2 & 3 on HDMI */
658 REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0,
659 HDMI_GENERIC0_CONT, 0,
660 HDMI_GENERIC0_LINE, 0,
661 HDMI_GENERIC0_SEND, 0,
662 HDMI_GENERIC1_CONT, 0,
663 HDMI_GENERIC1_LINE, 0,
664 HDMI_GENERIC1_SEND, 0);
665
 666	/* stop generic packets 4 & 5 on HDMI */
667 REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
668 HDMI_GENERIC0_CONT, 0,
669 HDMI_GENERIC0_LINE, 0,
670 HDMI_GENERIC0_SEND, 0,
671 HDMI_GENERIC1_CONT, 0,
672 HDMI_GENERIC1_LINE, 0,
673 HDMI_GENERIC1_SEND, 0);
674
675 REG_SET_6(HDMI_GENERIC_PACKET_CONTROL3, 0,
676 HDMI_GENERIC0_CONT, 0,
677 HDMI_GENERIC0_LINE, 0,
678 HDMI_GENERIC0_SEND, 0,
679 HDMI_GENERIC1_CONT, 0,
680 HDMI_GENERIC1_LINE, 0,
681 HDMI_GENERIC1_SEND, 0);
682}
683
684void enc1_stream_encoder_update_dp_info_packets(
685 struct stream_encoder *enc,
686 const struct encoder_info_frame *info_frame)
687{
688 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
689 uint32_t value = 0;
690
691 if (info_frame->vsc.valid)
692 enc1_update_generic_info_packet(
693 enc1,
694 0, /* packetIndex */
695 &info_frame->vsc);
696
697 if (info_frame->spd.valid)
698 enc1_update_generic_info_packet(
699 enc1,
700 2, /* packetIndex */
701 &info_frame->spd);
702
703 if (info_frame->hdrsmd.valid)
704 enc1_update_generic_info_packet(
705 enc1,
706 3, /* packetIndex */
707 &info_frame->hdrsmd);
708
709 /* enable/disable transmission of packet(s).
710 * If enabled, packet transmission begins on the next frame
711 */
712 REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
713 REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
714 REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
715
716
 717	/* This bit is the master enable bit.
 718	 * When enabling the secondary stream engine,
 719	 * this master bit must also be set.
 720	 * This register is shared with the audio info frame.
 721	 * Therefore we need to set the master enable bit
 722	 * if at least one of the fields is not 0
 723	 */
724 value = REG_READ(DP_SEC_CNTL);
725 if (value)
726 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
727}
728
729void enc1_stream_encoder_stop_dp_info_packets(
730 struct stream_encoder *enc)
731{
732 /* stop generic packets on DP */
733 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
734 uint32_t value = 0;
735
736 REG_SET_10(DP_SEC_CNTL, 0,
737 DP_SEC_GSP0_ENABLE, 0,
738 DP_SEC_GSP1_ENABLE, 0,
739 DP_SEC_GSP2_ENABLE, 0,
740 DP_SEC_GSP3_ENABLE, 0,
741 DP_SEC_GSP4_ENABLE, 0,
742 DP_SEC_GSP5_ENABLE, 0,
743 DP_SEC_GSP6_ENABLE, 0,
744 DP_SEC_GSP7_ENABLE, 0,
745 DP_SEC_MPG_ENABLE, 0,
746 DP_SEC_STREAM_ENABLE, 0);
747
 748	/* this register is shared with the audio info frame;
 749	 * therefore we need to keep the master enable set
 750	 * if at least one of the fields is not 0 */
751 value = REG_READ(DP_SEC_CNTL);
752 if (value)
753 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
754
755}
756
757void enc1_stream_encoder_dp_blank(
758 struct stream_encoder *enc)
759{
760 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
761 uint32_t retries = 0;
762 uint32_t reg1 = 0;
763 uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
764
765 /* Note: For CZ, we are changing driver default to disable
766 * stream deferred to next VBLANK. If results are positive, we
767 * will make the same change to all DCE versions. There are a
768 * handful of panels that cannot handle disable stream at
769 * HBLANK and will result in a white line flash across the
770 * screen on stream disable.
771 */
772 REG_GET(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, &reg1);
773 if ((reg1 & 0x1) == 0)
774 /*stream not enabled*/
775 return;
776 /* Specify the video stream disable point
777 * (2 = start of the next vertical blank)
778 */
779 REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
780 /* Larger delay to wait until VBLANK - use max retry of
781 * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
782 * a little more because we may not trust delay accuracy.
783 */
784 max_retries = DP_BLANK_MAX_RETRY * 150;
785
786 /* disable DP stream */
787 REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
788
789 /* the encoder stops sending the video stream
790 * at the start of the vertical blanking.
791 * Poll for DP_VID_STREAM_STATUS == 0
792 */
793
794 REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
795 0,
796 10, max_retries);
797
798 ASSERT(retries <= max_retries);
799
800 /* Tell the DP encoder to ignore timing from CRTC, must be done after
801 * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
802 * complete, stream status will be stuck in video stream enabled state,
803 * i.e. DP_VID_STREAM_STATUS stuck at 1.
804 */
805
806 REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
807}
808
809/* output video stream to link encoder */
810void enc1_stream_encoder_dp_unblank(
811 struct stream_encoder *enc,
812 const struct encoder_unblank_param *param)
813{
814 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
815
816 if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
817 uint32_t n_vid = 0x8000;
818 uint32_t m_vid;
819
820 /* M / N = Fstream / Flink
821 * m_vid / n_vid = pixel rate / link rate
822 */
823
824 uint64_t m_vid_l = n_vid;
825
826 m_vid_l *= param->pixel_clk_khz;
827 m_vid_l = div_u64(m_vid_l,
828 param->link_settings.link_rate
829 * LINK_RATE_REF_FREQ_IN_KHZ);
830
831 m_vid = (uint32_t) m_vid_l;
832
833 /* enable auto measurement */
834
835 REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
836
 837	/* auto measurement needs one full 0x8000 symbol cycle to kick in,
 838	 * therefore program initial values for Mvid and Nvid
 839	 */
840
841 REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
842
843 REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
844
845 REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
846 }
847
848 /* set DIG_START to 0x1 to resync FIFO */
849
850 REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
851
852 /* switch DP encoder to CRTC data */
853
854 REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
855
856 /* wait 100us for DIG/DP logic to prime
857 * (i.e. a few video lines)
858 */
859 udelay(100);
860
861 /* the hardware would start sending video at the start of the next DP
862 * frame (i.e. rising edge of the vblank).
863 * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
864 * register has no effect on enable transition! HW always guarantees
865 * VID_STREAM enable at start of next frame, and this is not
866 * programmable
867 */
868
869 REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
870}
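A worked instance of the M/N programming above: with N fixed at 0x8000, M must satisfy M/N = pixel_rate/link_rate. The link-rate encoding (units of the 27 MHz LINK_RATE_REF_FREQ_IN_KHZ) is shown for HBR2 and is an assumption of this sketch; the numbers are an example, not driver data.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t n_vid = 0x8000;
	uint32_t pixel_clk_khz = 148500;     /* 1080p60 */
	uint32_t link_rate = 20;             /* HBR2: 20 * 27 MHz = 5.4 GHz */
	uint32_t link_rate_ref_khz = 27000;  /* LINK_RATE_REF_FREQ_IN_KHZ */

	uint64_t m_vid_l = (uint64_t)n_vid * pixel_clk_khz;
	uint32_t m_vid = (uint32_t)(m_vid_l /
				    ((uint64_t)link_rate * link_rate_ref_khz));

	printf("M=%u N=%u (M/N=%.4f)\n", m_vid, n_vid,
	       (double)m_vid / n_vid);      /* ~0.2750 = 148.5/540 */
	return 0;
}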
871
872void enc1_stream_encoder_set_avmute(
873 struct stream_encoder *enc,
874 bool enable)
875{
876 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
877 unsigned int value = enable ? 1 : 0;
878
879 REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, value);
880}
881
882
883#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
884#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
885
886#include "include/audio_types.h"
887
888/**
889* speakersToChannels
890*
891* @brief
892* translate speakers to channels
893*
894* FL - Front Left
895* FR - Front Right
896* RL - Rear Left
897* RR - Rear Right
898* RC - Rear Center
899* FC - Front Center
900* FLC - Front Left Center
901* FRC - Front Right Center
902* RLC - Rear Left Center
903* RRC - Rear Right Center
904* LFE - Low Freq Effect
905*
906* FC
907* FLC FRC
908* FL FR
909*
910* LFE
911* ()
912*
913*
914* RL RR
915* RLC RRC
916* RC
917*
918* ch 8 7 6 5 4 3 2 1
919* 0b00000011 - - - - - - FR FL
920* 0b00000111 - - - - - LFE FR FL
921* 0b00001011 - - - - FC - FR FL
922* 0b00001111 - - - - FC LFE FR FL
923* 0b00010011 - - - RC - - FR FL
924* 0b00010111 - - - RC - LFE FR FL
925* 0b00011011 - - - RC FC - FR FL
926* 0b00011111 - - - RC FC LFE FR FL
927* 0b00110011 - - RR RL - - FR FL
928* 0b00110111 - - RR RL - LFE FR FL
929* 0b00111011 - - RR RL FC - FR FL
930* 0b00111111 - - RR RL FC LFE FR FL
931* 0b01110011 - RC RR RL - - FR FL
932* 0b01110111 - RC RR RL - LFE FR FL
933* 0b01111011 - RC RR RL FC - FR FL
934* 0b01111111 - RC RR RL FC LFE FR FL
935* 0b11110011 RRC RLC RR RL - - FR FL
936* 0b11110111 RRC RLC RR RL - LFE FR FL
937* 0b11111011 RRC RLC RR RL FC - FR FL
938* 0b11111111 RRC RLC RR RL FC LFE FR FL
939* 0b11000011 FRC FLC - - - - FR FL
940* 0b11000111 FRC FLC - - - LFE FR FL
941* 0b11001011 FRC FLC - - FC - FR FL
942* 0b11001111 FRC FLC - - FC LFE FR FL
943* 0b11010011 FRC FLC - RC - - FR FL
944* 0b11010111 FRC FLC - RC - LFE FR FL
945* 0b11011011 FRC FLC - RC FC - FR FL
946* 0b11011111 FRC FLC - RC FC LFE FR FL
947* 0b11110011 FRC FLC RR RL - - FR FL
948* 0b11110111 FRC FLC RR RL - LFE FR FL
949* 0b11111011 FRC FLC RR RL FC - FR FL
950* 0b11111111 FRC FLC RR RL FC LFE FR FL
951*
952* @param
953* speakers - speaker information as it comes from CEA audio block
954*/
955/* translate speakers to channels */
956
957union audio_cea_channels {
958 uint8_t all;
959 struct audio_cea_channels_bits {
960 uint32_t FL:1;
961 uint32_t FR:1;
962 uint32_t LFE:1;
963 uint32_t FC:1;
964 uint32_t RL_RC:1;
965 uint32_t RR:1;
966 uint32_t RC_RLC_FLC:1;
967 uint32_t RRC_FRC:1;
968 } channels;
969};
970
971struct audio_clock_info {
972 /* pixel clock frequency*/
973 uint32_t pixel_clock_in_10khz;
974 /* N - 32KHz audio */
975 uint32_t n_32khz;
976 /* CTS - 32KHz audio*/
977 uint32_t cts_32khz;
978 uint32_t n_44khz;
979 uint32_t cts_44khz;
980 uint32_t n_48khz;
981 uint32_t cts_48khz;
982};
983
 984/* pixel clocks for the first 12 rows of audio_clock_info_table below,
 985 * in order: 25.2MHz/1.001, 25.2MHz/1.001, 25.2MHz, 27MHz, 27MHz*1.001,
 986 * 27MHz*1.001, 54MHz, 54MHz*1.001, 74.25MHz/1.001, 74.25MHz,
 987 * 148.5MHz/1.001, 148.5MHz
 988 */
996
997static const struct audio_clock_info audio_clock_info_table[16] = {
998 {2517, 4576, 28125, 7007, 31250, 6864, 28125},
999 {2518, 4576, 28125, 7007, 31250, 6864, 28125},
1000 {2520, 4096, 25200, 6272, 28000, 6144, 25200},
1001 {2700, 4096, 27000, 6272, 30000, 6144, 27000},
1002 {2702, 4096, 27027, 6272, 30030, 6144, 27027},
1003 {2703, 4096, 27027, 6272, 30030, 6144, 27027},
1004 {5400, 4096, 54000, 6272, 60000, 6144, 54000},
1005 {5405, 4096, 54054, 6272, 60060, 6144, 54054},
1006 {7417, 11648, 210937, 17836, 234375, 11648, 140625},
1007 {7425, 4096, 74250, 6272, 82500, 6144, 74250},
1008 {14835, 11648, 421875, 8918, 234375, 5824, 140625},
1009 {14850, 4096, 148500, 6272, 165000, 6144, 148500},
1010 {29670, 5824, 421875, 4459, 234375, 5824, 281250},
1011 {29700, 3072, 222750, 4704, 247500, 5120, 247500},
1012 {59340, 5824, 843750, 8918, 937500, 5824, 562500},
1013 {59400, 3072, 445500, 9408, 990000, 6144, 594000}
1014};
1015
1016static const struct audio_clock_info audio_clock_info_table_36bpc[14] = {
1017 {2517, 9152, 84375, 7007, 48875, 9152, 56250},
1018 {2518, 9152, 84375, 7007, 48875, 9152, 56250},
1019 {2520, 4096, 37800, 6272, 42000, 6144, 37800},
1020 {2700, 4096, 40500, 6272, 45000, 6144, 40500},
1021 {2702, 8192, 81081, 6272, 45045, 8192, 54054},
1022 {2703, 8192, 81081, 6272, 45045, 8192, 54054},
1023 {5400, 4096, 81000, 6272, 90000, 6144, 81000},
1024 {5405, 4096, 81081, 6272, 90090, 6144, 81081},
1025 {7417, 11648, 316406, 17836, 351562, 11648, 210937},
1026 {7425, 4096, 111375, 6272, 123750, 6144, 111375},
1027 {14835, 11648, 632812, 17836, 703125, 11648, 421875},
1028 {14850, 4096, 222750, 6272, 247500, 6144, 222750},
1029 {29670, 5824, 632812, 8918, 703125, 5824, 421875},
1030 {29700, 4096, 445500, 4704, 371250, 5120, 371250}
1031};
1032
1033static const struct audio_clock_info audio_clock_info_table_48bpc[14] = {
1034 {2517, 4576, 56250, 7007, 62500, 6864, 56250},
1035 {2518, 4576, 56250, 7007, 62500, 6864, 56250},
1036 {2520, 4096, 50400, 6272, 56000, 6144, 50400},
1037 {2700, 4096, 54000, 6272, 60000, 6144, 54000},
1038 {2702, 4096, 54054, 6267, 60060, 8192, 54054},
1039 {2703, 4096, 54054, 6272, 60060, 8192, 54054},
1040 {5400, 4096, 108000, 6272, 120000, 6144, 108000},
1041 {5405, 4096, 108108, 6272, 120120, 6144, 108108},
1042 {7417, 11648, 421875, 17836, 468750, 11648, 281250},
1043 {7425, 4096, 148500, 6272, 165000, 6144, 148500},
1044 {14835, 11648, 843750, 8918, 468750, 11648, 281250},
1045 {14850, 4096, 297000, 6272, 330000, 6144, 297000},
1046 {29670, 5824, 843750, 4459, 468750, 5824, 562500},
1047 {29700, 3072, 445500, 4704, 495000, 5120, 495000}
1048
1049
1050};
1051
1052static union audio_cea_channels speakers_to_channels(
1053 struct audio_speaker_flags speaker_flags)
1054{
1055 union audio_cea_channels cea_channels = {0};
1056
1057 /* these are one to one */
1058 cea_channels.channels.FL = speaker_flags.FL_FR;
1059 cea_channels.channels.FR = speaker_flags.FL_FR;
1060 cea_channels.channels.LFE = speaker_flags.LFE;
1061 cea_channels.channels.FC = speaker_flags.FC;
1062
1063 /* if Rear Left and Right exist move RC speaker to channel 7
1064 * otherwise to channel 5
1065 */
1066 if (speaker_flags.RL_RR) {
1067 cea_channels.channels.RL_RC = speaker_flags.RL_RR;
1068 cea_channels.channels.RR = speaker_flags.RL_RR;
1069 cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
1070 } else {
1071 cea_channels.channels.RL_RC = speaker_flags.RC;
1072 }
1073
1074 /* FRONT Left Right Center and REAR Left Right Center are exclusive */
1075 if (speaker_flags.FLC_FRC) {
1076 cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
1077 cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
1078 } else {
1079 cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
1080 cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
1081 }
1082
1083 return cea_channels;
1084}
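Usage sketch for the mapping speakers_to_channels() performs: a 5.1 layout (FL/FR, FC, LFE, RL/RR) lands in CEA channel mask 0b00111111. The struct below is a trimmed stand-in for dc's audio_speaker_flags, kept only for illustration.

#include <stdint.h>
#include <stdio.h>

struct flags { unsigned FL_FR:1, LFE:1, FC:1, RL_RR:1, RC:1,
	       FLC_FRC:1, RLC_RRC:1; };

int main(void)
{
	struct flags spk = { .FL_FR = 1, .LFE = 1, .FC = 1, .RL_RR = 1 };
	uint8_t ch = 0;

	ch |= spk.FL_FR << 0;   /* FL */
	ch |= spk.FL_FR << 1;   /* FR */
	ch |= spk.LFE   << 2;
	ch |= spk.FC    << 3;
	ch |= spk.RL_RR << 4;   /* RL (channel 5 carries RC if no RL/RR) */
	ch |= spk.RL_RR << 5;   /* RR */

	printf("CEA channels = 0x%02x\n", ch);   /* 0x3f for 5.1 */
	return 0;
}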
1085
1086static uint32_t calc_max_audio_packets_per_line(
1087 const struct audio_crtc_info *crtc_info)
1088{
1089 uint32_t max_packets_per_line;
1090
1091 max_packets_per_line =
1092 crtc_info->h_total - crtc_info->h_active;
1093
1094 if (crtc_info->pixel_repetition)
1095 max_packets_per_line *= crtc_info->pixel_repetition;
1096
1097 /* for other hdmi features */
1098 max_packets_per_line -= 58;
1099 /* for Control Period */
1100 max_packets_per_line -= 16;
1101 /* Number of Audio Packets per Line */
1102 max_packets_per_line /= 32;
1103
1104 return max_packets_per_line;
1105}
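Worked instance of calc_max_audio_packets_per_line() for a common 1080p timing (h_total 2200, h_active 1920, no pixel repetition); the numbers are an example, not driver data.

#include <stdio.h>

int main(void)
{
	unsigned int h_total = 2200, h_active = 1920;
	unsigned int n = h_total - h_active;  /* 280 blanking pixels */

	n -= 58;                              /* other HDMI features */
	n -= 16;                              /* control period */
	n /= 32;                              /* pixels per audio packet */

	printf("max audio packets per line = %u\n", n);   /* 6 */
	return 0;
}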
1106
1107static void get_audio_clock_info(
1108 enum dc_color_depth color_depth,
1109 uint32_t crtc_pixel_clock_in_khz,
1110 uint32_t actual_pixel_clock_in_khz,
1111 struct audio_clock_info *audio_clock_info)
1112{
1113 const struct audio_clock_info *clock_info;
1114 uint32_t index;
1115 uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10;
1116 uint32_t audio_array_size;
1117
1118 switch (color_depth) {
1119 case COLOR_DEPTH_161616:
1120 clock_info = audio_clock_info_table_48bpc;
1121 audio_array_size = ARRAY_SIZE(
1122 audio_clock_info_table_48bpc);
1123 break;
1124 case COLOR_DEPTH_121212:
1125 clock_info = audio_clock_info_table_36bpc;
1126 audio_array_size = ARRAY_SIZE(
1127 audio_clock_info_table_36bpc);
1128 break;
1129 default:
1130 clock_info = audio_clock_info_table;
1131 audio_array_size = ARRAY_SIZE(
1132 audio_clock_info_table);
1133 break;
1134 }
1135
1136 if (clock_info != NULL) {
1137 /* search for exact pixel clock in table */
1138 for (index = 0; index < audio_array_size; index++) {
1139 if (clock_info[index].pixel_clock_in_10khz >
1140 crtc_pixel_clock_in_10khz)
1141 break; /* not match */
1142 else if (clock_info[index].pixel_clock_in_10khz ==
1143 crtc_pixel_clock_in_10khz) {
1144 /* match found */
1145 *audio_clock_info = clock_info[index];
1146 return;
1147 }
1148 }
1149 }
1150
1151 /* not found */
1152 if (actual_pixel_clock_in_khz == 0)
1153 actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz;
1154
 1155	/* See the HDMI spec: the table entry under the
 1156	 * pixel clock of "Other". */
1157 audio_clock_info->pixel_clock_in_10khz =
1158 actual_pixel_clock_in_khz / 10;
1159 audio_clock_info->cts_32khz = actual_pixel_clock_in_khz;
1160 audio_clock_info->cts_44khz = actual_pixel_clock_in_khz;
1161 audio_clock_info->cts_48khz = actual_pixel_clock_in_khz;
1162
1163 audio_clock_info->n_32khz = 4096;
1164 audio_clock_info->n_44khz = 6272;
1165 audio_clock_info->n_48khz = 6144;
1166}
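Sanity check of the HDMI ACR relationship the N/CTS tables above encode, 128 * audio_sample_rate == tmds_clock * N / CTS (from the HDMI spec), using the "2520" (25.2 MHz) row at 32 kHz as the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tmds_hz = 25200000;   /* pixel_clock_in_10khz = 2520 */
	uint64_t n_32khz = 4096, cts_32khz = 25200;

	uint64_t lhs = 128ull * 32000;               /* 128 * fs */
	uint64_t rhs = tmds_hz * n_32khz / cts_32khz;

	printf("128*fs=%llu  f*N/CTS=%llu -> %s\n",
	       (unsigned long long)lhs, (unsigned long long)rhs,
	       lhs == rhs ? "match" : "mismatch");     /* match: 4096000 */
	return 0;
}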
1167
1168static void enc1_se_audio_setup(
1169 struct stream_encoder *enc,
1170 unsigned int az_inst,
1171 struct audio_info *audio_info)
1172{
1173 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
1174
1175 uint32_t speakers = 0;
1176 uint32_t channels = 0;
1177
1178 ASSERT(audio_info);
1179 if (audio_info == NULL)
 1180		/* This should not happen; guard anyway so we don't crash (BSOD) */
1181 return;
1182
1183 speakers = audio_info->flags.info.ALLSPEAKERS;
1184 channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
1185
1186 /* setup the audio stream source select (audio -> dig mapping) */
1187 REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
1188
1189 /* Channel allocation */
1190 REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
1191}
1192
1193static void enc1_se_setup_hdmi_audio(
1194 struct stream_encoder *enc,
1195 const struct audio_crtc_info *crtc_info)
1196{
1197 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
1198
1199 struct audio_clock_info audio_clock_info = {0};
1200 uint32_t max_packets_per_line;
1201
 1202	/* For now still do the calculation, although this field is ignored
 1203	 * when HDMI_PACKET_GEN_VERSION is set to 1 above
 1204	 */
1205 max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
1206
1207 /* HDMI_AUDIO_PACKET_CONTROL */
1208 REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
1209 HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
1210 HDMI_AUDIO_DELAY_EN, 1);
1211
1212 /* AFMT_AUDIO_PACKET_CONTROL */
1213 REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1214
1215 /* AFMT_AUDIO_PACKET_CONTROL2 */
1216 REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
1217 AFMT_AUDIO_LAYOUT_OVRD, 0,
1218 AFMT_60958_OSF_OVRD, 0);
1219
1220 /* HDMI_ACR_PACKET_CONTROL */
1221 REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
1222 HDMI_ACR_AUTO_SEND, 1,
1223 HDMI_ACR_SOURCE, 0,
1224 HDMI_ACR_AUDIO_PRIORITY, 0);
1225
1226 /* Program audio clock sample/regeneration parameters */
1227 get_audio_clock_info(crtc_info->color_depth,
1228 crtc_info->requested_pixel_clock,
1229 crtc_info->calculated_pixel_clock,
1230 &audio_clock_info);
1231 DC_LOG_HW_AUDIO(
1232 "\n%s:Input::requested_pixel_clock = %d" \
1233 "calculated_pixel_clock = %d \n", __func__, \
1234 crtc_info->requested_pixel_clock, \
1235 crtc_info->calculated_pixel_clock);
1236
1237 /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
1238 REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
1239
1240 /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
1241 REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
1242
1243 /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
1244 REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
1245
1246 /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
1247 REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
1248
1249 /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
1250 REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
1251
1252 /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
1253 REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
1254
 1255	/* The video driver cannot know in advance which sample rate will
 1256	 * be used by the HD Audio driver.
 1257	 * The HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
 1258	 * programmed later, in the interrupt callback.
 1259	 */
1260
1261 /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
1262 * AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK
1263 */
1264 REG_UPDATE_2(AFMT_60958_0,
1265 AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
1266 AFMT_60958_CS_CLOCK_ACCURACY, 0);
1267
 1268	/* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
1269 REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1270
 1271	/* AFMT_60958_2: keep these settings until the
 1272	 * programming guide comes out
 1273	 */
1274 REG_UPDATE_6(AFMT_60958_2,
1275 AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
1276 AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
1277 AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
1278 AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
1279 AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
1280 AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1281}
1282
1283static void enc1_se_setup_dp_audio(
1284 struct stream_encoder *enc)
1285{
1286 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
1287
1288 /* --- DP Audio packet configurations --- */
1289
1290 /* ATP Configuration */
1291 REG_SET(DP_SEC_AUD_N, 0,
1292 DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
1293
1294 /* Async/auto-calc timestamp mode */
1295 REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
1296 DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
1297
1298 /* --- The following are the registers
1299 * copied from the SetupHDMI ---
1300 */
1301
1302 /* AFMT_AUDIO_PACKET_CONTROL */
1303 REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1304
1305 /* AFMT_AUDIO_PACKET_CONTROL2 */
1306 /* Program the ATP and AIP next */
1307 REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
1308 AFMT_AUDIO_LAYOUT_OVRD, 0,
1309 AFMT_60958_OSF_OVRD, 0);
1310
1311 /* AFMT_INFOFRAME_CONTROL0 */
1312 REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1313
1314 /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
1315 REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
1316}
1317
1318static void enc1_se_enable_audio_clock(
1319 struct stream_encoder *enc,
1320 bool enable)
1321{
1322 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
1323
1324 if (REG(AFMT_CNTL) == 0)
1325 return; /* DCE8/10 does not have this register */
1326
1327 REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable);
1328
1329	/* Wait for the AFMT clock to turn on;
1330	 * expectation: this should complete in 1-2 reads
1331	 *
1332	 * REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10);
1333	 *
1334	 * TODO: waiting for clock_on does not work well. It may need a HW
1335	 * programming sequence, but audio appears to work normally even
1336	 * without waiting for the clock_on status change.
1337	 */
1338}
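/* For reference, the commented-out REG_WAIT above would poll roughly like
 * this (the real macro lives in reg_helper.h; names here are illustrative):
 *
 *	for (i = 0; i < max_try; i++) {
 *		if (read_field(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON) == !!enable)
 *			return;			// reached expected value
 *		udelay(delay_between_poll_us);	// busy-wait between polls
 *	}
 *	DC_LOG_WARNING("REG_WAIT timeout\n");	// give up after max_try reads
 */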
1339
1340static void enc1_se_enable_dp_audio(
1341 struct stream_encoder *enc)
1342{
1343 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
1344
1345 /* Enable Audio packets */
1346 REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1347
1348 /* Program the ATP and AIP next */
1349 REG_UPDATE_2(DP_SEC_CNTL,
1350 DP_SEC_ATP_ENABLE, 1,
1351 DP_SEC_AIP_ENABLE, 1);
1352
1353 /* Program STREAM_ENABLE after all the other enables. */
1354 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1355}
1356
1357static void enc1_se_disable_dp_audio(
1358 struct stream_encoder *enc)
1359{
1360 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
1361 uint32_t value = 0;
1362
1363 /* Disable Audio packets */
1364 REG_UPDATE_5(DP_SEC_CNTL,
1365 DP_SEC_ASP_ENABLE, 0,
1366 DP_SEC_ATP_ENABLE, 0,
1367 DP_SEC_AIP_ENABLE, 0,
1368 DP_SEC_ACM_ENABLE, 0,
1369 DP_SEC_STREAM_ENABLE, 0);
1370
1371	/* This register is shared with the encoder info frame, so keep the
1372	 * master enable set if at least one of the fields is not 0.
1373	 */
1374 value = REG_READ(DP_SEC_CNTL);
1375 if (value != 0)
1376 REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1377
1378}
1379
1380void enc1_se_audio_mute_control(
1381 struct stream_encoder *enc,
1382 bool mute)
1383{
1384 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
1385
1386 REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
1387}
1388
1389void enc1_se_dp_audio_setup(
1390 struct stream_encoder *enc,
1391 unsigned int az_inst,
1392 struct audio_info *info)
1393{
1394 enc1_se_audio_setup(enc, az_inst, info);
1395}
1396
1397void enc1_se_dp_audio_enable(
1398 struct stream_encoder *enc)
1399{
1400 enc1_se_enable_audio_clock(enc, true);
1401 enc1_se_setup_dp_audio(enc);
1402 enc1_se_enable_dp_audio(enc);
1403}
1404
1405void enc1_se_dp_audio_disable(
1406 struct stream_encoder *enc)
1407{
1408 enc1_se_disable_dp_audio(enc);
1409 enc1_se_enable_audio_clock(enc, false);
1410}
1411
1412void enc1_se_hdmi_audio_setup(
1413 struct stream_encoder *enc,
1414 unsigned int az_inst,
1415 struct audio_info *info,
1416 struct audio_crtc_info *audio_crtc_info)
1417{
1418 enc1_se_enable_audio_clock(enc, true);
1419 enc1_se_setup_hdmi_audio(enc, audio_crtc_info);
1420 enc1_se_audio_setup(enc, az_inst, info);
1421}
1422
1423void enc1_se_hdmi_audio_disable(
1424 struct stream_encoder *enc)
1425{
1426 enc1_se_enable_audio_clock(enc, false);
1427}
1428
1429
1430void enc1_setup_stereo_sync(
1431 struct stream_encoder *enc,
1432 int tg_inst, bool enable)
1433{
1434 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
1435 REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, tg_inst);
1436	REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable); /* gating disables the sync, hence the inversion */
1437}
1438
1439
1440static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
1441 .dp_set_stream_attribute =
1442 enc1_stream_encoder_dp_set_stream_attribute,
1443 .hdmi_set_stream_attribute =
1444 enc1_stream_encoder_hdmi_set_stream_attribute,
1445 .dvi_set_stream_attribute =
1446 enc1_stream_encoder_dvi_set_stream_attribute,
1447 .set_mst_bandwidth =
1448 enc1_stream_encoder_set_mst_bandwidth,
1449 .update_hdmi_info_packets =
1450 enc1_stream_encoder_update_hdmi_info_packets,
1451 .stop_hdmi_info_packets =
1452 enc1_stream_encoder_stop_hdmi_info_packets,
1453 .update_dp_info_packets =
1454 enc1_stream_encoder_update_dp_info_packets,
1455 .stop_dp_info_packets =
1456 enc1_stream_encoder_stop_dp_info_packets,
1457 .dp_blank =
1458 enc1_stream_encoder_dp_blank,
1459 .dp_unblank =
1460 enc1_stream_encoder_dp_unblank,
1461 .audio_mute_control = enc1_se_audio_mute_control,
1462
1463 .dp_audio_setup = enc1_se_dp_audio_setup,
1464 .dp_audio_enable = enc1_se_dp_audio_enable,
1465 .dp_audio_disable = enc1_se_dp_audio_disable,
1466
1467 .hdmi_audio_setup = enc1_se_hdmi_audio_setup,
1468 .hdmi_audio_disable = enc1_se_hdmi_audio_disable,
1469 .setup_stereo_sync = enc1_setup_stereo_sync,
1470 .set_avmute = enc1_stream_encoder_set_avmute,
1471};
1472
1473void dcn10_stream_encoder_construct(
1474 struct dcn10_stream_encoder *enc1,
1475 struct dc_context *ctx,
1476 struct dc_bios *bp,
1477 enum engine_id eng_id,
1478 const struct dcn10_stream_enc_registers *regs,
1479 const struct dcn10_stream_encoder_shift *se_shift,
1480 const struct dcn10_stream_encoder_mask *se_mask)
1481{
1482 enc1->base.funcs = &dcn10_str_enc_funcs;
1483 enc1->base.ctx = ctx;
1484 enc1->base.id = eng_id;
1485 enc1->base.bp = bp;
1486 enc1->regs = regs;
1487 enc1->se_shift = se_shift;
1488 enc1->se_mask = se_mask;
1489}
1490
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
new file mode 100644
index 000000000000..6b3e4ded155b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -0,0 +1,524 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DC_STREAM_ENCODER_DCN10_H__
27#define __DC_STREAM_ENCODER_DCN10_H__
28
29#include "stream_encoder.h"
30
31#define DCN10STRENC_FROM_STRENC(stream_encoder)\
32 container_of(stream_encoder, struct dcn10_stream_encoder, base)
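/* This is the usual container_of downcast: given a pointer to the embedded
 * 'base' member (declared further down in this header), it recovers the
 * enclosing wrapper by subtracting the member's offset, e.g.:
 *
 *	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 *	// equivalent to:
 *	// container_of(enc, struct dcn10_stream_encoder, base)
 */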
33
34#define SE_COMMON_DCN_REG_LIST(id) \
35 SRI(AFMT_CNTL, DIG, id), \
36 SRI(AFMT_GENERIC_0, DIG, id), \
37 SRI(AFMT_GENERIC_1, DIG, id), \
38 SRI(AFMT_GENERIC_2, DIG, id), \
39 SRI(AFMT_GENERIC_3, DIG, id), \
40 SRI(AFMT_GENERIC_4, DIG, id), \
41 SRI(AFMT_GENERIC_5, DIG, id), \
42 SRI(AFMT_GENERIC_6, DIG, id), \
43 SRI(AFMT_GENERIC_7, DIG, id), \
44 SRI(AFMT_GENERIC_HDR, DIG, id), \
45 SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \
46 SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \
47 SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id), \
48 SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \
49 SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \
50 SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \
51 SRI(AFMT_60958_0, DIG, id), \
52 SRI(AFMT_60958_1, DIG, id), \
53 SRI(AFMT_60958_2, DIG, id), \
54 SRI(DIG_FE_CNTL, DIG, id), \
55 SRI(HDMI_CONTROL, DIG, id), \
56 SRI(HDMI_DB_CONTROL, DIG, id), \
57 SRI(HDMI_GC, DIG, id), \
58 SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
59 SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
60 SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
61 SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
62 SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
63 SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
64 SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
65 SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
66 SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
67 SRI(HDMI_ACR_32_0, DIG, id),\
68 SRI(HDMI_ACR_32_1, DIG, id),\
69 SRI(HDMI_ACR_44_0, DIG, id),\
70 SRI(HDMI_ACR_44_1, DIG, id),\
71 SRI(HDMI_ACR_48_0, DIG, id),\
72 SRI(HDMI_ACR_48_1, DIG, id),\
73 SRI(DP_DB_CNTL, DP, id), \
74 SRI(DP_MSA_MISC, DP, id), \
75 SRI(DP_MSA_COLORIMETRY, DP, id), \
76 SRI(DP_MSA_TIMING_PARAM1, DP, id), \
77 SRI(DP_MSA_TIMING_PARAM2, DP, id), \
78 SRI(DP_MSA_TIMING_PARAM3, DP, id), \
79 SRI(DP_MSA_TIMING_PARAM4, DP, id), \
80 SRI(DP_MSE_RATE_CNTL, DP, id), \
81 SRI(DP_MSE_RATE_UPDATE, DP, id), \
82 SRI(DP_PIXEL_FORMAT, DP, id), \
83 SRI(DP_SEC_CNTL, DP, id), \
84 SRI(DP_STEER_FIFO, DP, id), \
85 SRI(DP_VID_M, DP, id), \
86 SRI(DP_VID_N, DP, id), \
87 SRI(DP_VID_STREAM_CNTL, DP, id), \
88 SRI(DP_VID_TIMING, DP, id), \
89 SRI(DP_SEC_AUD_N, DP, id), \
90 SRI(DP_SEC_TIMESTAMP, DP, id)
91
92#define SE_DCN_REG_LIST(id)\
93 SE_COMMON_DCN_REG_LIST(id)
94
95
96struct dcn10_stream_enc_registers {
97 uint32_t AFMT_CNTL;
98 uint32_t AFMT_AVI_INFO0;
99 uint32_t AFMT_AVI_INFO1;
100 uint32_t AFMT_AVI_INFO2;
101 uint32_t AFMT_AVI_INFO3;
102 uint32_t AFMT_GENERIC_0;
103 uint32_t AFMT_GENERIC_1;
104 uint32_t AFMT_GENERIC_2;
105 uint32_t AFMT_GENERIC_3;
106 uint32_t AFMT_GENERIC_4;
107 uint32_t AFMT_GENERIC_5;
108 uint32_t AFMT_GENERIC_6;
109 uint32_t AFMT_GENERIC_7;
110 uint32_t AFMT_GENERIC_HDR;
111 uint32_t AFMT_INFOFRAME_CONTROL0;
112 uint32_t AFMT_VBI_PACKET_CONTROL;
113 uint32_t AFMT_VBI_PACKET_CONTROL1;
114 uint32_t AFMT_AUDIO_PACKET_CONTROL;
115 uint32_t AFMT_AUDIO_PACKET_CONTROL2;
116 uint32_t AFMT_AUDIO_SRC_CONTROL;
117 uint32_t AFMT_60958_0;
118 uint32_t AFMT_60958_1;
119 uint32_t AFMT_60958_2;
120 uint32_t DIG_FE_CNTL;
121 uint32_t DP_MSE_RATE_CNTL;
122 uint32_t DP_MSE_RATE_UPDATE;
123 uint32_t DP_PIXEL_FORMAT;
124 uint32_t DP_SEC_CNTL;
125 uint32_t DP_STEER_FIFO;
126 uint32_t DP_VID_M;
127 uint32_t DP_VID_N;
128 uint32_t DP_VID_STREAM_CNTL;
129 uint32_t DP_VID_TIMING;
130 uint32_t DP_SEC_AUD_N;
131 uint32_t DP_SEC_TIMESTAMP;
132 uint32_t HDMI_CONTROL;
133 uint32_t HDMI_GC;
134 uint32_t HDMI_GENERIC_PACKET_CONTROL0;
135 uint32_t HDMI_GENERIC_PACKET_CONTROL1;
136 uint32_t HDMI_GENERIC_PACKET_CONTROL2;
137 uint32_t HDMI_GENERIC_PACKET_CONTROL3;
138 uint32_t HDMI_GENERIC_PACKET_CONTROL4;
139 uint32_t HDMI_GENERIC_PACKET_CONTROL5;
140 uint32_t HDMI_INFOFRAME_CONTROL0;
141 uint32_t HDMI_INFOFRAME_CONTROL1;
142 uint32_t HDMI_VBI_PACKET_CONTROL;
143 uint32_t HDMI_AUDIO_PACKET_CONTROL;
144 uint32_t HDMI_ACR_PACKET_CONTROL;
145 uint32_t HDMI_ACR_32_0;
146 uint32_t HDMI_ACR_32_1;
147 uint32_t HDMI_ACR_44_0;
148 uint32_t HDMI_ACR_44_1;
149 uint32_t HDMI_ACR_48_0;
150 uint32_t HDMI_ACR_48_1;
151 uint32_t DP_DB_CNTL;
152 uint32_t DP_MSA_MISC;
153 uint32_t DP_MSA_COLORIMETRY;
154 uint32_t DP_MSA_TIMING_PARAM1;
155 uint32_t DP_MSA_TIMING_PARAM2;
156 uint32_t DP_MSA_TIMING_PARAM3;
157 uint32_t DP_MSA_TIMING_PARAM4;
158 uint32_t HDMI_DB_CONTROL;
159};
160
161
162#define SE_SF(reg_name, field_name, post_fix)\
163 .field_name = reg_name ## __ ## field_name ## post_fix
164
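/* SE_SF pastes tokens into the register headers' naming convention, so one
 * list can seed both the _shift and _mask tables below. For example, with
 * post_fix = __SHIFT,
 *	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, __SHIFT)
 * expands to
 *	.AFMT_GENERIC_INDEX =
 *		DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT
 */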
165#define SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)\
166 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
167 SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
168 SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
169 SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
170 SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
171 SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
172 SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
173 SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
174 SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
175 SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
176 SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
177 SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
178 SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
179 SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
180 SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
181 SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
182 SE_SF(DIG0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
183 SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
184 SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
185 SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
186 SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
187 SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
188 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
189 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
190 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
191 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
192 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
193 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
194 SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
195 SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
196 SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
197 SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
198 SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
199 SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\
200 SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\
201 SE_SF(DIG0_DIG_FE_CNTL, DIG_START, mask_sh),\
202 SE_SF(DIG0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
203 SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
204 SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
205 SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
206 SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
207 SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
208 SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
209 SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
210 SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
211 SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
212 SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
213 SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
214 SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
215 SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
216 SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
217 SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
218 SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
219 SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
220 SE_SF(DIG0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
221 SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
222 SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
223 SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
224 SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
225 SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
226 SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
227 SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
228 SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
229 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
230 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
231 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
232 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
233 SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
234 SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
235 SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
236 SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
237 SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
238 SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
239 SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
240 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\
241 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\
242 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\
243 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE_PENDING, mask_sh),\
244 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE_PENDING, mask_sh),\
245 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE_PENDING, mask_sh),\
246 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE_PENDING, mask_sh),\
247 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE_PENDING, mask_sh),\
248 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE_PENDING, mask_sh),\
249 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE_PENDING, mask_sh),\
250 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE_PENDING, mask_sh),\
251 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE, mask_sh),\
252 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE, mask_sh),\
253 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
254 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
255 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
256 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
257 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
258 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
259 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
260 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
261 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
262 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
263 SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
264 SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
265 SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\
266 SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\
267 SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\
268 SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\
269 SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\
270 SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\
271 SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\
272 SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\
273 SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\
274 SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
275 SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
276 SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh)
277
278#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
279 SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)
280
281#define SE_COMMON_MASK_SH_LIST_DCN10(mask_sh)\
282 SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\
283 SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
284 SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
285 SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
286 SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
287 SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
288 SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh)
289
290
291#define SE_REG_FIELD_LIST_DCN1_0(type) \
292 type AFMT_GENERIC_INDEX;\
293 type AFMT_GENERIC_HB0;\
294 type AFMT_GENERIC_HB1;\
295 type AFMT_GENERIC_HB2;\
296 type AFMT_GENERIC_HB3;\
297 type AFMT_GENERIC_LOCK_STATUS;\
298 type AFMT_GENERIC_CONFLICT;\
299 type AFMT_GENERIC_CONFLICT_CLR;\
300 type AFMT_GENERIC0_FRAME_UPDATE_PENDING;\
301 type AFMT_GENERIC1_FRAME_UPDATE_PENDING;\
302 type AFMT_GENERIC2_FRAME_UPDATE_PENDING;\
303 type AFMT_GENERIC3_FRAME_UPDATE_PENDING;\
304 type AFMT_GENERIC4_FRAME_UPDATE_PENDING;\
305 type AFMT_GENERIC5_FRAME_UPDATE_PENDING;\
306 type AFMT_GENERIC6_FRAME_UPDATE_PENDING;\
307 type AFMT_GENERIC7_FRAME_UPDATE_PENDING;\
308 type AFMT_GENERIC0_FRAME_UPDATE;\
309 type AFMT_GENERIC1_FRAME_UPDATE;\
310 type AFMT_GENERIC2_FRAME_UPDATE;\
311 type AFMT_GENERIC3_FRAME_UPDATE;\
312 type AFMT_GENERIC4_FRAME_UPDATE;\
313 type AFMT_GENERIC5_FRAME_UPDATE;\
314 type AFMT_GENERIC6_FRAME_UPDATE;\
315 type AFMT_GENERIC7_FRAME_UPDATE;\
316 type HDMI_GENERIC0_CONT;\
317 type HDMI_GENERIC0_SEND;\
318 type HDMI_GENERIC0_LINE;\
319 type HDMI_GENERIC1_CONT;\
320 type HDMI_GENERIC1_SEND;\
321 type HDMI_GENERIC1_LINE;\
322 type HDMI_GENERIC2_CONT;\
323 type HDMI_GENERIC2_SEND;\
324 type HDMI_GENERIC2_LINE;\
325 type HDMI_GENERIC3_CONT;\
326 type HDMI_GENERIC3_SEND;\
327 type HDMI_GENERIC3_LINE;\
328 type HDMI_GENERIC4_CONT;\
329 type HDMI_GENERIC4_SEND;\
330 type HDMI_GENERIC4_LINE;\
331 type HDMI_GENERIC5_CONT;\
332 type HDMI_GENERIC5_SEND;\
333 type HDMI_GENERIC5_LINE;\
334 type HDMI_GENERIC6_CONT;\
335 type HDMI_GENERIC6_SEND;\
336 type HDMI_GENERIC6_LINE;\
337 type HDMI_GENERIC7_CONT;\
338 type HDMI_GENERIC7_SEND;\
339 type HDMI_GENERIC7_LINE;\
340 type DP_PIXEL_ENCODING;\
341 type DP_COMPONENT_DEPTH;\
342 type HDMI_PACKET_GEN_VERSION;\
343 type HDMI_KEEPOUT_MODE;\
344 type HDMI_DEEP_COLOR_ENABLE;\
345 type HDMI_CLOCK_CHANNEL_RATE;\
346 type HDMI_DEEP_COLOR_DEPTH;\
347 type HDMI_GC_CONT;\
348 type HDMI_GC_SEND;\
349 type HDMI_NULL_SEND;\
350 type HDMI_DATA_SCRAMBLE_EN;\
351 type HDMI_AUDIO_INFO_SEND;\
352 type AFMT_AUDIO_INFO_UPDATE;\
353 type HDMI_AUDIO_INFO_LINE;\
354 type HDMI_GC_AVMUTE;\
355 type DP_MSE_RATE_X;\
356 type DP_MSE_RATE_Y;\
357 type DP_MSE_RATE_UPDATE_PENDING;\
358 type DP_SEC_GSP0_ENABLE;\
359 type DP_SEC_STREAM_ENABLE;\
360 type DP_SEC_GSP1_ENABLE;\
361 type DP_SEC_GSP2_ENABLE;\
362 type DP_SEC_GSP3_ENABLE;\
363 type DP_SEC_GSP4_ENABLE;\
364 type DP_SEC_GSP5_ENABLE;\
365 type DP_SEC_GSP6_ENABLE;\
366 type DP_SEC_GSP7_ENABLE;\
367 type DP_SEC_MPG_ENABLE;\
368 type DP_VID_STREAM_DIS_DEFER;\
369 type DP_VID_STREAM_ENABLE;\
370 type DP_VID_STREAM_STATUS;\
371 type DP_STEER_FIFO_RESET;\
372 type DP_VID_M_N_GEN_EN;\
373 type DP_VID_N;\
374 type DP_VID_M;\
375 type DIG_START;\
376 type AFMT_AUDIO_SRC_SELECT;\
377 type AFMT_AUDIO_CHANNEL_ENABLE;\
378 type HDMI_AUDIO_PACKETS_PER_LINE;\
379 type HDMI_AUDIO_DELAY_EN;\
380 type AFMT_60958_CS_UPDATE;\
381 type AFMT_AUDIO_LAYOUT_OVRD;\
382 type AFMT_60958_OSF_OVRD;\
383 type HDMI_ACR_AUTO_SEND;\
384 type HDMI_ACR_SOURCE;\
385 type HDMI_ACR_AUDIO_PRIORITY;\
386 type HDMI_ACR_CTS_32;\
387 type HDMI_ACR_N_32;\
388 type HDMI_ACR_CTS_44;\
389 type HDMI_ACR_N_44;\
390 type HDMI_ACR_CTS_48;\
391 type HDMI_ACR_N_48;\
392 type AFMT_60958_CS_CHANNEL_NUMBER_L;\
393 type AFMT_60958_CS_CLOCK_ACCURACY;\
394 type AFMT_60958_CS_CHANNEL_NUMBER_R;\
395 type AFMT_60958_CS_CHANNEL_NUMBER_2;\
396 type AFMT_60958_CS_CHANNEL_NUMBER_3;\
397 type AFMT_60958_CS_CHANNEL_NUMBER_4;\
398 type AFMT_60958_CS_CHANNEL_NUMBER_5;\
399 type AFMT_60958_CS_CHANNEL_NUMBER_6;\
400 type AFMT_60958_CS_CHANNEL_NUMBER_7;\
401 type DP_SEC_AUD_N;\
402 type DP_SEC_TIMESTAMP_MODE;\
403 type DP_SEC_ASP_ENABLE;\
404 type DP_SEC_ATP_ENABLE;\
405 type DP_SEC_AIP_ENABLE;\
406 type DP_SEC_ACM_ENABLE;\
407 type AFMT_AUDIO_SAMPLE_SEND;\
408 type AFMT_AUDIO_CLOCK_EN;\
409 type TMDS_PIXEL_ENCODING;\
410 type TMDS_COLOR_FORMAT;\
411 type DIG_STEREOSYNC_SELECT;\
412 type DIG_STEREOSYNC_GATE_EN;\
413 type DP_DB_DISABLE;\
414 type DP_MSA_MISC0;\
415 type DP_MSA_HTOTAL;\
416 type DP_MSA_VTOTAL;\
417 type DP_MSA_HSTART;\
418 type DP_MSA_VSTART;\
419 type DP_MSA_HSYNCWIDTH;\
420 type DP_MSA_HSYNCPOLARITY;\
421 type DP_MSA_VSYNCWIDTH;\
422 type DP_MSA_VSYNCPOLARITY;\
423 type DP_MSA_HWIDTH;\
424 type DP_MSA_VHEIGHT;\
425 type HDMI_DB_DISABLE;\
426 type DP_VID_N_MUL;\
427 type DP_VID_M_DOUBLE_VALUE_EN
428
429struct dcn10_stream_encoder_shift {
430 SE_REG_FIELD_LIST_DCN1_0(uint8_t);
431};
432
433struct dcn10_stream_encoder_mask {
434 SE_REG_FIELD_LIST_DCN1_0(uint32_t);
435};
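/* A sketch (names assumed) of the read-modify-write these per-field
 * shift/mask tables drive inside REG_UPDATE and friends:
 *
 *	static inline u32 set_reg_field(u32 reg_value, u32 field_value,
 *					u8 shift, u32 mask)
 *	{
 *		return (reg_value & ~mask) | ((field_value << shift) & mask);
 *	}
 */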
436
437struct dcn10_stream_encoder {
438 struct stream_encoder base;
439 const struct dcn10_stream_enc_registers *regs;
440 const struct dcn10_stream_encoder_shift *se_shift;
441 const struct dcn10_stream_encoder_mask *se_mask;
442};
443
444void dcn10_stream_encoder_construct(
445 struct dcn10_stream_encoder *enc1,
446 struct dc_context *ctx,
447 struct dc_bios *bp,
448 enum engine_id eng_id,
449 const struct dcn10_stream_enc_registers *regs,
450 const struct dcn10_stream_encoder_shift *se_shift,
451 const struct dcn10_stream_encoder_mask *se_mask);
452
453void enc1_update_generic_info_packet(
454 struct dcn10_stream_encoder *enc1,
455 uint32_t packet_index,
456 const struct dc_info_packet *info_packet);
457
458void enc1_stream_encoder_dp_set_stream_attribute(
459 struct stream_encoder *enc,
460 struct dc_crtc_timing *crtc_timing,
461 enum dc_color_space output_color_space);
462
463void enc1_stream_encoder_hdmi_set_stream_attribute(
464 struct stream_encoder *enc,
465 struct dc_crtc_timing *crtc_timing,
466 int actual_pix_clk_khz,
467 bool enable_audio);
468
469void enc1_stream_encoder_dvi_set_stream_attribute(
470 struct stream_encoder *enc,
471 struct dc_crtc_timing *crtc_timing,
472 bool is_dual_link);
473
474void enc1_stream_encoder_set_mst_bandwidth(
475 struct stream_encoder *enc,
476 struct fixed31_32 avg_time_slots_per_mtp);
477
478void enc1_stream_encoder_update_dp_info_packets(
479 struct stream_encoder *enc,
480 const struct encoder_info_frame *info_frame);
481
482void enc1_stream_encoder_stop_dp_info_packets(
483 struct stream_encoder *enc);
484
485void enc1_stream_encoder_dp_blank(
486 struct stream_encoder *enc);
487
488void enc1_stream_encoder_dp_unblank(
489 struct stream_encoder *enc,
490 const struct encoder_unblank_param *param);
491
492void enc1_setup_stereo_sync(
493 struct stream_encoder *enc,
494 int tg_inst, bool enable);
495
496void enc1_stream_encoder_set_avmute(
497 struct stream_encoder *enc,
498 bool enable);
499
500void enc1_se_audio_mute_control(
501 struct stream_encoder *enc,
502 bool mute);
503
504void enc1_se_dp_audio_setup(
505 struct stream_encoder *enc,
506 unsigned int az_inst,
507 struct audio_info *info);
508
509void enc1_se_dp_audio_enable(
510 struct stream_encoder *enc);
511
512void enc1_se_dp_audio_disable(
513 struct stream_encoder *enc);
514
515void enc1_se_hdmi_audio_setup(
516 struct stream_encoder *enc,
517 unsigned int az_inst,
518 struct audio_info *info,
519 struct audio_crtc_info *audio_crtc_info);
520
521void enc1_se_hdmi_audio_disable(
522 struct stream_encoder *enc);
523
524#endif /* __DC_STREAM_ENCODER_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 22e7ee7dcd26..8eafe1af8a5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -341,6 +341,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
341 341
342unsigned long long dm_get_timestamp(struct dc_context *ctx); 342unsigned long long dm_get_timestamp(struct dc_context *ctx);
343 343
344unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
345 unsigned long long current_time_stamp,
346 unsigned long long last_time_stamp);
347
344/* 348/*
345 * performance tracing 349 * performance tracing
346 */ 350 */
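/* A hypothetical pairing of the new helper with the existing
 * dm_get_timestamp() (argument order is current, then last):
 *
 *	unsigned long long t0 = dm_get_timestamp(ctx);
 *	... timed work ...
 *	unsigned long long t1 = dm_get_timestamp(ctx);
 *	unsigned long long ns = dm_get_elapse_time_in_ns(ctx, t1, t0);
 */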
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index c109b2c34c8f..fd9d97aab071 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,75 +26,89 @@
26#include "display_mode_lib.h" 26#include "display_mode_lib.h"
27#include "dc_features.h" 27#include "dc_features.h"
28 28
29static const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
30 .rob_buffer_size_kbytes = 64,
31 .det_buffer_size_kbytes = 164,
32 .dpte_buffer_size_in_pte_reqs = 42,
33 .dpp_output_buffer_pixels = 2560,
34 .opp_output_buffer_lines = 1,
35 .pixel_chunk_size_kbytes = 8,
36 .pte_enable = 1,
37 .pte_chunk_size_kbytes = 2,
38 .meta_chunk_size_kbytes = 2,
39 .writeback_chunk_size_kbytes = 2,
40 .line_buffer_size_bits = 589824,
41 .max_line_buffer_lines = 12,
42 .IsLineBufferBppFixed = 0,
43 .LineBufferFixedBpp = -1,
44 .writeback_luma_buffer_size_kbytes = 12,
45 .writeback_chroma_buffer_size_kbytes = 8,
46 .max_num_dpp = 4,
47 .max_num_wb = 2,
48 .max_dchub_pscl_bw_pix_per_clk = 4,
49 .max_pscl_lb_bw_pix_per_clk = 2,
50 .max_lb_vscl_bw_pix_per_clk = 4,
51 .max_vscl_hscl_bw_pix_per_clk = 4,
52 .max_hscl_ratio = 4,
53 .max_vscl_ratio = 4,
54 .hscl_mults = 4,
55 .vscl_mults = 4,
56 .max_hscl_taps = 8,
57 .max_vscl_taps = 8,
58 .dispclk_ramp_margin_percent = 1,
59 .underscan_factor = 1.10,
60 .min_vblank_lines = 14,
61 .dppclk_delay_subtotal = 90,
62 .dispclk_delay_subtotal = 42,
63 .dcfclk_cstate_latency = 10,
64 .max_inter_dcn_tile_repeaters = 8,
65 .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
66 .bug_forcing_LC_req_same_size_fixed = 0,
67};
68
69static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
70 .sr_exit_time_us = 9.0,
71 .sr_enter_plus_exit_time_us = 11.0,
72 .urgent_latency_us = 4.0,
73 .writeback_latency_us = 12.0,
74 .ideal_dram_bw_after_urgent_percent = 80.0,
75 .max_request_size_bytes = 256,
76 .downspread_percent = 0.5,
77 .dram_page_open_time_ns = 50.0,
78 .dram_rw_turnaround_time_ns = 17.5,
79 .dram_return_buffer_per_channel_bytes = 8192,
80 .round_trip_ping_latency_dcfclk_cycles = 128,
81 .urgent_out_of_order_return_per_channel_bytes = 256,
82 .channel_interleave_bytes = 256,
83 .num_banks = 8,
84 .num_chans = 2,
85 .vmm_page_size_bytes = 4096,
86 .dram_clock_change_latency_us = 17.0,
87 .writeback_dram_clock_change_latency_us = 23.0,
88 .return_bus_width_bytes = 64,
89};
90
29static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project) 91static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
30{ 92{
31 if (project == DML_PROJECT_RAVEN1) { 93 switch (project) {
32 soc->sr_exit_time_us = 9.0; 94 case DML_PROJECT_RAVEN1:
33 soc->sr_enter_plus_exit_time_us = 11.0; 95 *soc = dcn1_0_soc;
34 soc->urgent_latency_us = 4.0; 96 break;
35 soc->writeback_latency_us = 12.0; 97 default:
36 soc->ideal_dram_bw_after_urgent_percent = 80.0; 98 ASSERT(0);
37 soc->max_request_size_bytes = 256; 99 break;
38 soc->downspread_percent = 0.5;
39 soc->dram_page_open_time_ns = 50.0;
40 soc->dram_rw_turnaround_time_ns = 17.5;
41 soc->dram_return_buffer_per_channel_bytes = 8192;
42 soc->round_trip_ping_latency_dcfclk_cycles = 128;
43 soc->urgent_out_of_order_return_per_channel_bytes = 256;
44 soc->channel_interleave_bytes = 256;
45 soc->num_banks = 8;
46 soc->num_chans = 2;
47 soc->vmm_page_size_bytes = 4096;
48 soc->dram_clock_change_latency_us = 17.0;
49 soc->writeback_dram_clock_change_latency_us = 23.0;
50 soc->return_bus_width_bytes = 64;
51 } else {
52 BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
53 } 100 }
54} 101}
55 102
56static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project) 103static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
57{ 104{
58 if (project == DML_PROJECT_RAVEN1) { 105 switch (project) {
59 ip->rob_buffer_size_kbytes = 64; 106 case DML_PROJECT_RAVEN1:
60 ip->det_buffer_size_kbytes = 164; 107 *ip = dcn1_0_ip;
61 ip->dpte_buffer_size_in_pte_reqs = 42; 108 break;
62 ip->dpp_output_buffer_pixels = 2560; 109 default:
63 ip->opp_output_buffer_lines = 1; 110 ASSERT(0);
64 ip->pixel_chunk_size_kbytes = 8; 111 break;
65 ip->pte_enable = 1;
66 ip->pte_chunk_size_kbytes = 2;
67 ip->meta_chunk_size_kbytes = 2;
68 ip->writeback_chunk_size_kbytes = 2;
69 ip->line_buffer_size_bits = 589824;
70 ip->max_line_buffer_lines = 12;
71 ip->IsLineBufferBppFixed = 0;
72 ip->LineBufferFixedBpp = -1;
73 ip->writeback_luma_buffer_size_kbytes = 12;
74 ip->writeback_chroma_buffer_size_kbytes = 8;
75 ip->max_num_dpp = 4;
76 ip->max_num_wb = 2;
77 ip->max_dchub_pscl_bw_pix_per_clk = 4;
78 ip->max_pscl_lb_bw_pix_per_clk = 2;
79 ip->max_lb_vscl_bw_pix_per_clk = 4;
80 ip->max_vscl_hscl_bw_pix_per_clk = 4;
81 ip->max_hscl_ratio = 4;
82 ip->max_vscl_ratio = 4;
83 ip->hscl_mults = 4;
84 ip->vscl_mults = 4;
85 ip->max_hscl_taps = 8;
86 ip->max_vscl_taps = 8;
87 ip->dispclk_ramp_margin_percent = 1;
88 ip->underscan_factor = 1.10;
89 ip->min_vblank_lines = 14;
90 ip->dppclk_delay_subtotal = 90;
91 ip->dispclk_delay_subtotal = 42;
92 ip->dcfclk_cstate_latency = 10;
93 ip->max_inter_dcn_tile_repeaters = 8;
94 ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0;
95 ip->bug_forcing_LC_req_same_size_fixed = 0;
96 } else {
97 BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
98 } 112 }
99} 113}
100 114
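/* Design note on the rewrite above: the per-field assignments are replaced
 * by static const templates (dcn1_0_ip, dcn1_0_soc) applied with a single
 * struct assignment per project, so supporting a new project means adding
 * one table and one switch case rather than another assignment block:
 *
 *	case DML_PROJECT_RAVEN1:
 *		*soc = dcn1_0_soc;	// whole-struct copy
 *		break;
 */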
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 09affa16cc43..ce750edc1e5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -215,8 +215,8 @@ struct writeback_st {
215 int wb_vtaps_luma; 215 int wb_vtaps_luma;
216 int wb_htaps_chroma; 216 int wb_htaps_chroma;
217 int wb_vtaps_chroma; 217 int wb_vtaps_chroma;
218 int wb_hratio; 218 double wb_hratio;
219 int wb_vratio; 219 double wb_vratio;
220}; 220};
221 221
222struct _vcs_dpi_display_output_params_st { 222struct _vcs_dpi_display_output_params_st {
@@ -224,6 +224,7 @@ struct _vcs_dpi_display_output_params_st {
224 int output_bpp; 224 int output_bpp;
225 int dsc_enable; 225 int dsc_enable;
226 int wb_enable; 226 int wb_enable;
227 int num_active_wb;
227 int opp_input_bpc; 228 int opp_input_bpc;
228 int output_type; 229 int output_type;
229 int output_format; 230 int output_format;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 87b580fa4bc9..61fe484da1a0 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -75,6 +75,9 @@ bool dal_hw_factory_init(
75 return true; 75 return true;
76 case DCE_VERSION_11_0: 76 case DCE_VERSION_11_0:
77 case DCE_VERSION_11_2: 77 case DCE_VERSION_11_2:
78#if defined(CONFIG_DRM_AMD_DC_VEGAM)
79 case DCE_VERSION_11_22:
80#endif
78 dal_hw_factory_dce110_init(factory); 81 dal_hw_factory_dce110_init(factory);
79 return true; 82 return true;
80 case DCE_VERSION_12_0: 83 case DCE_VERSION_12_0:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 0ae8ace25739..910ae2b7bf64 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -72,6 +72,9 @@ bool dal_hw_translate_init(
72 case DCE_VERSION_10_0: 72 case DCE_VERSION_10_0:
73 case DCE_VERSION_11_0: 73 case DCE_VERSION_11_0:
74 case DCE_VERSION_11_2: 74 case DCE_VERSION_11_2:
75#if defined(CONFIG_DRM_AMD_DC_VEGAM)
76 case DCE_VERSION_11_22:
77#endif
75 dal_hw_translate_dce110_init(translate); 78 dal_hw_translate_dce110_init(translate);
76 return true; 79 return true;
77 case DCE_VERSION_12_0: 80 case DCE_VERSION_12_0:
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
index abd0095ced30..b7256f595052 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -527,7 +527,7 @@ static void construct(
527 REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); 527 REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
528 528
529 if (xtal_ref_div == 0) { 529 if (xtal_ref_div == 0) {
530 DC_LOG_WARNING("Invalid base timer divider\n", 530 DC_LOG_WARNING("Invalid base timer divider [%s]\n",
531 __func__); 531 __func__);
532 xtal_ref_div = 2; 532 xtal_ref_div = 2;
533 } 533 }
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index 5cbf6626b8d4..c3d7c320fdba 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -83,6 +83,9 @@ struct i2caux *dal_i2caux_create(
83 case DCE_VERSION_8_3: 83 case DCE_VERSION_8_3:
84 return dal_i2caux_dce80_create(ctx); 84 return dal_i2caux_dce80_create(ctx);
85 case DCE_VERSION_11_2: 85 case DCE_VERSION_11_2:
86#if defined(CONFIG_DRM_AMD_DC_VEGAM)
87 case DCE_VERSION_11_22:
88#endif
86 return dal_i2caux_dce112_create(ctx); 89 return dal_i2caux_dce112_create(ctx);
87 case DCE_VERSION_11_0: 90 case DCE_VERSION_11_0:
88 return dal_i2caux_dce110_create(ctx); 91 return dal_i2caux_dce110_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 8c51ad70cace..a94942d4e66b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -95,11 +95,6 @@ struct resource_funcs {
95 struct link_encoder *(*link_enc_create)( 95 struct link_encoder *(*link_enc_create)(
96 const struct encoder_init_data *init); 96 const struct encoder_init_data *init);
97 97
98 enum dc_status (*validate_guaranteed)(
99 struct dc *dc,
100 struct dc_stream_state *stream,
101 struct dc_state *context);
102
103 bool (*validate_bandwidth)( 98 bool (*validate_bandwidth)(
104 struct dc *dc, 99 struct dc *dc,
105 struct dc_state *context); 100 struct dc_state *context);
@@ -250,6 +245,7 @@ struct dce_bw_output {
250 bool all_displays_in_sync; 245 bool all_displays_in_sync;
251 struct dce_watermarks urgent_wm_ns[MAX_PIPES]; 246 struct dce_watermarks urgent_wm_ns[MAX_PIPES];
252 struct dce_watermarks stutter_exit_wm_ns[MAX_PIPES]; 247 struct dce_watermarks stutter_exit_wm_ns[MAX_PIPES];
248 struct dce_watermarks stutter_entry_wm_ns[MAX_PIPES];
253 struct dce_watermarks nbp_state_change_wm_ns[MAX_PIPES]; 249 struct dce_watermarks nbp_state_change_wm_ns[MAX_PIPES];
254 int sclk_khz; 250 int sclk_khz;
255 int sclk_deep_sleep_khz; 251 int sclk_deep_sleep_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 090b7a8dd67b..30b3a08b91be 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,13 +102,14 @@ bool dal_ddc_service_query_ddc_data(
102 uint8_t *read_buf, 102 uint8_t *read_buf,
103 uint32_t read_size); 103 uint32_t read_size);
104 104
105ssize_t dal_ddc_service_read_dpcd_data( 105enum ddc_result dal_ddc_service_read_dpcd_data(
106 struct ddc_service *ddc, 106 struct ddc_service *ddc,
107 bool i2c, 107 bool i2c,
108 enum i2c_mot_mode mot, 108 enum i2c_mot_mode mot,
109 uint32_t address, 109 uint32_t address,
110 uint8_t *data, 110 uint8_t *data,
111 uint32_t len); 111 uint32_t len,
112 uint32_t *read);
112 113
113enum ddc_result dal_ddc_service_write_dpcd_data( 114enum ddc_result dal_ddc_service_write_dpcd_data(
114 struct ddc_service *ddc, 115 struct ddc_service *ddc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
index a9bfe9ff8ce6..933ea7a1e18b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -42,6 +42,10 @@ enum bw_calcs_version {
42 BW_CALCS_VERSION_CARRIZO, 42 BW_CALCS_VERSION_CARRIZO,
43 BW_CALCS_VERSION_POLARIS10, 43 BW_CALCS_VERSION_POLARIS10,
44 BW_CALCS_VERSION_POLARIS11, 44 BW_CALCS_VERSION_POLARIS11,
45 BW_CALCS_VERSION_POLARIS12,
46#if defined(CONFIG_DRM_AMD_DC_VEGAM)
47 BW_CALCS_VERSION_VEGAM,
48#endif
45 BW_CALCS_VERSION_STONEY, 49 BW_CALCS_VERSION_STONEY,
46 BW_CALCS_VERSION_VEGA10 50 BW_CALCS_VERSION_VEGA10
47}; 51};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
new file mode 100644
index 000000000000..02f757dd70d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -0,0 +1,64 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_DCHUBBUB_H__
27#define __DAL_DCHUBBUB_H__
28
29
30enum dcc_control {
31 dcc_control__256_256_xxx,
32 dcc_control__128_128_xxx,
33 dcc_control__256_64_64,
34};
35
36enum segment_order {
37 segment_order__na,
38 segment_order__contiguous,
39 segment_order__non_contiguous,
40};
41
42
43struct hubbub_funcs {
44 void (*update_dchub)(
45 struct hubbub *hubbub,
46 struct dchub_init_data *dh_data);
47
48 bool (*get_dcc_compression_cap)(struct hubbub *hubbub,
49 const struct dc_dcc_surface_param *input,
50 struct dc_surface_dcc_cap *output);
51
52 bool (*dcc_support_swizzle)(
53 enum swizzle_mode_values swizzle,
54 unsigned int bytes_per_element,
55 enum segment_order *segment_order_horz,
56 enum segment_order *segment_order_vert);
57
58 bool (*dcc_support_pixel_format)(
59 enum surface_pixel_format format,
60 unsigned int *bytes_per_element);
61};
62
63
64#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 99995608b620..582458f028f8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -44,7 +44,23 @@ struct dpp_grph_csc_adjustment {
44 enum graphics_gamut_adjust_type gamut_adjust_type; 44 enum graphics_gamut_adjust_type gamut_adjust_type;
45}; 45};
46 46
47struct dcn_dpp_state {
48 uint32_t igam_lut_mode;
49 uint32_t igam_input_format;
50 uint32_t dgam_lut_mode;
51 uint32_t rgam_lut_mode;
52 uint32_t gamut_remap_mode;
53 uint32_t gamut_remap_c11_c12;
54 uint32_t gamut_remap_c13_c14;
55 uint32_t gamut_remap_c21_c22;
56 uint32_t gamut_remap_c23_c24;
57 uint32_t gamut_remap_c31_c32;
58 uint32_t gamut_remap_c33_c34;
59};
60
47struct dpp_funcs { 61struct dpp_funcs {
62 void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
63
48 void (*dpp_reset)(struct dpp *dpp); 64 void (*dpp_reset)(struct dpp *dpp);
49 65
50 void (*dpp_set_scaler)(struct dpp *dpp, 66 void (*dpp_set_scaler)(struct dpp *dpp,
@@ -117,7 +133,7 @@ struct dpp_funcs {
117 struct dpp *dpp_base, 133 struct dpp *dpp_base,
118 enum surface_pixel_format format, 134 enum surface_pixel_format format,
119 enum expansion_mode mode, 135 enum expansion_mode mode,
120 struct csc_transform input_csc_color_matrix, 136 struct dc_csc_transform input_csc_color_matrix,
121 enum dc_color_space input_color_space); 137 enum dc_color_space input_color_space);
122 138
123 void (*dpp_full_bypass)(struct dpp *dpp_base); 139 void (*dpp_full_bypass)(struct dpp *dpp_base);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 9ced254e652c..331f8ff57ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -56,7 +56,6 @@ struct hubp {
56 bool power_gated; 56 bool power_gated;
57}; 57};
58 58
59
60struct hubp_funcs { 59struct hubp_funcs {
61 void (*hubp_setup)( 60 void (*hubp_setup)(
62 struct hubp *hubp, 61 struct hubp *hubp,
@@ -121,6 +120,7 @@ struct hubp_funcs {
121 120
122 void (*hubp_clk_cntl)(struct hubp *hubp, bool enable); 121 void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
123 void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst); 122 void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
123 void (*hubp_read_state)(struct hubp *hubp);
124 124
125}; 125};
126 126
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index b22158190262..cf7433ebf91a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -140,11 +140,6 @@ enum opp_regamma {
140 OPP_REGAMMA_USER 140 OPP_REGAMMA_USER
141}; 141};
142 142
143struct csc_transform {
144 uint16_t matrix[12];
145 bool enable_adjustment;
146};
147
148struct dc_bias_and_scale { 143struct dc_bias_and_scale {
149 uint16_t scale_red; 144 uint16_t scale_red;
150 uint16_t bias_red; 145 uint16_t bias_red;
@@ -191,4 +186,9 @@ enum controller_dp_test_pattern {
191 CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA 186 CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
192}; 187};
193 188
189enum dc_lut_mode {
190 LUT_BYPASS,
191 LUT_RAM_A,
192 LUT_RAM_B
193};
194#endif /* __DAL_HW_SHARED_H__ */ 194#endif /* __DAL_HW_SHARED_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
index 2109eac20a3d..b2fa4c4cd920 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
@@ -87,7 +87,7 @@ struct ipp_funcs {
87 struct input_pixel_processor *ipp, 87 struct input_pixel_processor *ipp,
88 enum surface_pixel_format format, 88 enum surface_pixel_format format,
89 enum expansion_mode mode, 89 enum expansion_mode mode,
90 struct csc_transform input_csc_color_matrix, 90 struct dc_csc_transform input_csc_color_matrix,
91 enum dc_color_space input_color_space); 91 enum dc_color_space input_color_space);
92 92
93 /* DCE function to setup IPP. TODO: see if we can consolidate to setup */ 93 /* DCE function to setup IPP. TODO: see if we can consolidate to setup */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 54d8a1386142..cf6df2e7beb2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -149,6 +149,7 @@ struct link_encoder_funcs {
149 bool connect); 149 bool connect);
150 void (*enable_hpd)(struct link_encoder *enc); 150 void (*enable_hpd)(struct link_encoder *enc);
151 void (*disable_hpd)(struct link_encoder *enc); 151 void (*disable_hpd)(struct link_encoder *enc);
152 bool (*is_dig_enabled)(struct link_encoder *enc);
152 void (*destroy)(struct link_encoder **enc); 153 void (*destroy)(struct link_encoder **enc);
153}; 154};
154 155
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 3e1e7e6a8792..47f1dc5a43b7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -104,6 +104,7 @@ struct mem_input_funcs {
104 struct mem_input *mem_input, 104 struct mem_input *mem_input,
105 struct dce_watermarks nbp, 105 struct dce_watermarks nbp,
106 struct dce_watermarks stutter, 106 struct dce_watermarks stutter,
107 struct dce_watermarks stutter_enter,
107 struct dce_watermarks urgent, 108 struct dce_watermarks urgent,
108 uint32_t total_dest_line_time_ns); 109 uint32_t total_dest_line_time_ns);
109 110
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 23a8d5e53a89..caf74e3c836f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -105,7 +105,24 @@ struct mpc {
105 struct mpcc mpcc_array[MAX_MPCC]; 105 struct mpcc mpcc_array[MAX_MPCC];
106}; 106};
107 107
108struct mpcc_state {
109 uint32_t opp_id;
110 uint32_t dpp_id;
111 uint32_t bot_mpcc_id;
112 uint32_t mode;
113 uint32_t alpha_mode;
114 uint32_t pre_multiplied_alpha;
115 uint32_t overlap_only;
116 uint32_t idle;
117 uint32_t busy;
118};
119
108struct mpc_funcs { 120struct mpc_funcs {
121 void (*read_mpcc_state)(
122 struct mpc *mpc,
123 int mpcc_inst,
124 struct mpcc_state *s);
125
109 /* 126 /*
110 * Insert DPP into MPC tree based on specified blending position. 127 * Insert DPP into MPC tree based on specified blending position.
111 * Only used for planes that are part of blending chain for OPP output 128 * Only used for planes that are part of blending chain for OPP output
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index b5db1692393c..cfa7ec9517ae 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -29,31 +29,40 @@
29#define STREAM_ENCODER_H_ 29#define STREAM_ENCODER_H_
30 30
31#include "audio_types.h" 31#include "audio_types.h"
32#include "hw_shared.h"
32 33
33struct dc_bios; 34struct dc_bios;
34struct dc_context; 35struct dc_context;
35struct dc_crtc_timing; 36struct dc_crtc_timing;
36 37
37struct encoder_info_packet { 38enum dp_pixel_encoding_type {
38 bool valid; 39 DP_PIXEL_ENCODING_TYPE_RGB444 = 0x00000000,
39 uint8_t hb0; 40 DP_PIXEL_ENCODING_TYPE_YCBCR422 = 0x00000001,
40 uint8_t hb1; 41 DP_PIXEL_ENCODING_TYPE_YCBCR444 = 0x00000002,
41 uint8_t hb2; 42 DP_PIXEL_ENCODING_TYPE_RGB_WIDE_GAMUT = 0x00000003,
42 uint8_t hb3; 43 DP_PIXEL_ENCODING_TYPE_Y_ONLY = 0x00000004,
43 uint8_t sb[32]; 44 DP_PIXEL_ENCODING_TYPE_YCBCR420 = 0x00000005
45};
46
47enum dp_component_depth {
48 DP_COMPONENT_PIXEL_DEPTH_6BPC = 0x00000000,
49 DP_COMPONENT_PIXEL_DEPTH_8BPC = 0x00000001,
50 DP_COMPONENT_PIXEL_DEPTH_10BPC = 0x00000002,
51 DP_COMPONENT_PIXEL_DEPTH_12BPC = 0x00000003,
52 DP_COMPONENT_PIXEL_DEPTH_16BPC = 0x00000004
44}; 53};
45 54
46struct encoder_info_frame { 55struct encoder_info_frame {
47 /* auxiliary video information */ 56 /* auxiliary video information */
48 struct encoder_info_packet avi; 57 struct dc_info_packet avi;
49 struct encoder_info_packet gamut; 58 struct dc_info_packet gamut;
50 struct encoder_info_packet vendor; 59 struct dc_info_packet vendor;
51 /* source product description */ 60 /* source product description */
52 struct encoder_info_packet spd; 61 struct dc_info_packet spd;
53 /* video stream configuration */ 62 /* video stream configuration */
54 struct encoder_info_packet vsc; 63 struct dc_info_packet vsc;
55 /* HDR Static MetaData */ 64 /* HDR Static MetaData */
56 struct encoder_info_packet hdrsmd; 65 struct dc_info_packet hdrsmd;
57}; 66};
58 67
59struct encoder_unblank_param { 68struct encoder_unblank_param {
@@ -147,6 +156,7 @@ struct stream_encoder_funcs {
147 156
148 void (*set_avmute)( 157 void (*set_avmute)(
149 struct stream_encoder *enc, bool enable); 158 struct stream_encoder *enc, bool enable);
159
150}; 160};
151 161
152#endif /* STREAM_ENCODER_H_ */ 162#endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 3217b5bf6c7a..69cb0a105300 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -140,6 +140,9 @@ struct timing_generator_funcs {
140 void (*program_timing)(struct timing_generator *tg, 140 void (*program_timing)(struct timing_generator *tg,
141 const struct dc_crtc_timing *timing, 141 const struct dc_crtc_timing *timing,
142 bool use_vbios); 142 bool use_vbios);
143 void (*program_vline_interrupt)(struct timing_generator *optc,
144 const struct dc_crtc_timing *dc_crtc_timing,
145 unsigned long long vsync_delta);
143 bool (*enable_crtc)(struct timing_generator *tg); 146 bool (*enable_crtc)(struct timing_generator *tg);
144 bool (*disable_crtc)(struct timing_generator *tg); 147 bool (*disable_crtc)(struct timing_generator *tg);
145 bool (*is_counter_moving)(struct timing_generator *tg); 148 bool (*is_counter_moving)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index c5b3623bcbd9..fecc80c47c26 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -252,7 +252,7 @@ struct transform_funcs {
252 struct transform *xfm_base, 252 struct transform *xfm_base,
253 enum surface_pixel_format format, 253 enum surface_pixel_format format,
254 enum expansion_mode mode, 254 enum expansion_mode mode,
255 struct csc_transform input_csc_color_matrix, 255 struct dc_csc_transform input_csc_color_matrix,
256 enum dc_color_space input_color_space); 256 enum dc_color_space input_color_space);
257 257
258 void (*ipp_full_bypass)(struct transform *xfm_base); 258 void (*ipp_full_bypass)(struct transform *xfm_base);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index e764cbad881b..29abf3ecb39c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,6 +32,8 @@
32#include "inc/hw/link_encoder.h" 32#include "inc/hw/link_encoder.h"
33#include "core_status.h" 33#include "core_status.h"
34 34
35#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF
36
35enum pipe_gating_control { 37enum pipe_gating_control {
36 PIPE_GATING_CONTROL_DISABLE = 0, 38 PIPE_GATING_CONTROL_DISABLE = 0,
37 PIPE_GATING_CONTROL_ENABLE, 39 PIPE_GATING_CONTROL_ENABLE,
@@ -63,6 +65,7 @@ struct dchub_init_data;
63struct dc_static_screen_events; 65struct dc_static_screen_events;
64struct resource_pool; 66struct resource_pool;
65struct resource_context; 67struct resource_context;
68struct stream_resource;
66 69
67struct hw_sequencer_funcs { 70struct hw_sequencer_funcs {
68 71
@@ -93,6 +96,12 @@ struct hw_sequencer_funcs {
93 enum dc_color_space colorspace, 96 enum dc_color_space colorspace,
94 uint16_t *matrix); 97 uint16_t *matrix);
95 98
99 void (*program_output_csc)(struct dc *dc,
100 struct pipe_ctx *pipe_ctx,
101 enum dc_color_space colorspace,
102 uint16_t *matrix,
103 int opp_id);
104
96 void (*update_plane_addr)( 105 void (*update_plane_addr)(
97 const struct dc *dc, 106 const struct dc *dc,
98 struct pipe_ctx *pipe_ctx); 107 struct pipe_ctx *pipe_ctx);
@@ -154,6 +163,11 @@ struct hw_sequencer_funcs {
154 struct dc *dc, 163 struct dc *dc,
155 struct pipe_ctx *pipe, 164 struct pipe_ctx *pipe,
156 bool lock); 165 bool lock);
166 void (*blank_pixel_data)(
167 struct dc *dc,
168 struct stream_resource *stream_res,
169 struct dc_stream_state *stream,
170 bool blank);
157 171
158 void (*set_bandwidth)( 172 void (*set_bandwidth)(
159 struct dc *dc, 173 struct dc *dc,
@@ -169,7 +183,7 @@ struct hw_sequencer_funcs {
169 void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, 183 void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
170 int num_pipes, const struct dc_static_screen_events *events); 184 int num_pipes, const struct dc_static_screen_events *events);
171 185
172 enum dc_status (*prog_pixclk_crtc_otg)( 186 enum dc_status (*enable_stream_timing)(
173 struct pipe_ctx *pipe_ctx, 187 struct pipe_ctx *pipe_ctx,
174 struct dc_state *context, 188 struct dc_state *context,
175 struct dc *dc); 189 struct dc *dc);
@@ -201,6 +215,7 @@ struct hw_sequencer_funcs {
201 215
202 void (*set_cursor_position)(struct pipe_ctx *pipe); 216 void (*set_cursor_position)(struct pipe_ctx *pipe);
203 void (*set_cursor_attribute)(struct pipe_ctx *pipe); 217 void (*set_cursor_attribute)(struct pipe_ctx *pipe);
218
204}; 219};
205 220
206void color_space_to_black_color( 221void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 77eb72874e90..3306e7b0b3e3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -183,6 +183,36 @@
183 FN(reg_name, f4), v4, \ 183 FN(reg_name, f4), v4, \
184 FN(reg_name, f5), v5) 184 FN(reg_name, f5), v5)
185 185
186#define REG_GET_6(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6) \
187 generic_reg_get6(CTX, REG(reg_name), \
188 FN(reg_name, f1), v1, \
189 FN(reg_name, f2), v2, \
190 FN(reg_name, f3), v3, \
191 FN(reg_name, f4), v4, \
192 FN(reg_name, f5), v5, \
193 FN(reg_name, f6), v6)
194
195#define REG_GET_7(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7) \
196 generic_reg_get7(CTX, REG(reg_name), \
197 FN(reg_name, f1), v1, \
198 FN(reg_name, f2), v2, \
199 FN(reg_name, f3), v3, \
200 FN(reg_name, f4), v4, \
201 FN(reg_name, f5), v5, \
202 FN(reg_name, f6), v6, \
203 FN(reg_name, f7), v7)
204
205#define REG_GET_8(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8) \
206 generic_reg_get8(CTX, REG(reg_name), \
207 FN(reg_name, f1), v1, \
208 FN(reg_name, f2), v2, \
209 FN(reg_name, f3), v3, \
210 FN(reg_name, f4), v4, \
211 FN(reg_name, f5), v5, \
212 FN(reg_name, f6), v6, \
213 FN(reg_name, f7), v7, \
214 FN(reg_name, f8), v8)
215
186/* macro to poll and wait for a register field to read back given value */ 216/* macro to poll and wait for a register field to read back given value */
187 217
188#define REG_WAIT(reg_name, field, val, delay_between_poll_us, max_try) \ 218#define REG_WAIT(reg_name, field, val, delay_between_poll_us, max_try) \
@@ -389,4 +419,30 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
389 uint8_t shift4, uint32_t mask4, uint32_t *field_value4, 419 uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
390 uint8_t shift5, uint32_t mask5, uint32_t *field_value5); 420 uint8_t shift5, uint32_t mask5, uint32_t *field_value5);
391 421
422uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
423 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
424 uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
425 uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
426 uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
427 uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
428 uint8_t shift6, uint32_t mask6, uint32_t *field_value6);
429
430uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
431 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
432 uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
433 uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
434 uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
435 uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
436 uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
437 uint8_t shift7, uint32_t mask7, uint32_t *field_value7);
438
439uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
440 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
441 uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
442 uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
443 uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
444 uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
445 uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
446 uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
447 uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
392#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */ 448#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
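REG_GET_6/7/8 extend the existing REG_GET family so one MMIO read can decode up to eight fields of a register. A hedged sketch of a call site, assuming the component's register list provides the usual REG()/FN() shorthands; the register and field names below are purely illustrative:

	uint32_t en, idx, sel, mode, stat, err;

	/* one register read, six field decodes; per-field shifts and masks
	 * come from the FN() expansion, results land in the out pointers */
	REG_GET_6(EXAMPLE_CTRL,
		  FIELD_EN, &en,
		  FIELD_INDEX, &idx,
		  FIELD_SEL, &sel,
		  FIELD_MODE, &mode,
		  FIELD_STATUS, &stat,
		  FIELD_ERROR, &err);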
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5467332faf7b..640a647f4611 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -139,10 +139,6 @@ bool resource_validate_attach_surfaces(
139 struct dc_state *context, 139 struct dc_state *context,
140 const struct resource_pool *pool); 140 const struct resource_pool *pool);
141 141
142void validate_guaranteed_copy_streams(
143 struct dc_state *context,
144 int max_streams);
145
146void resource_validate_ctx_update_pointer_after_copy( 142void resource_validate_ctx_update_pointer_after_copy(
147 const struct dc_state *src_ctx, 143 const struct dc_state *src_ctx,
148 struct dc_state *dst_ctx); 144 struct dc_state *dst_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index a506c2e939f5..cc3b1bc6cedd 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -135,6 +135,13 @@ enum dc_irq_source {
135 DC_IRQ_SOURCE_VBLANK5, 135 DC_IRQ_SOURCE_VBLANK5,
136 DC_IRQ_SOURCE_VBLANK6, 136 DC_IRQ_SOURCE_VBLANK6,
137 137
138 DC_IRQ_SOURCE_DC1_VLINE0,
139 DC_IRQ_SOURCE_DC2_VLINE0,
140 DC_IRQ_SOURCE_DC3_VLINE0,
141 DC_IRQ_SOURCE_DC4_VLINE0,
142 DC_IRQ_SOURCE_DC5_VLINE0,
143 DC_IRQ_SOURCE_DC6_VLINE0,
144
138 DAL_IRQ_SOURCES_NUMBER 145 DAL_IRQ_SOURCES_NUMBER
139}; 146};
140 147
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 9831cb5eaa7c..1b987b6a347d 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -98,7 +98,14 @@
98 (eChipRev < VI_POLARIS11_M_A0)) 98 (eChipRev < VI_POLARIS11_M_A0))
99#define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) && \ 99#define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) && \
100 (eChipRev < VI_POLARIS12_V_A0)) 100 (eChipRev < VI_POLARIS12_V_A0))
101#if defined(CONFIG_DRM_AMD_DC_VEGAM)
102#define VI_VEGAM_A0 110
103#define ASIC_REV_IS_POLARIS12_V(eChipRev) ((eChipRev >= VI_POLARIS12_V_A0) && \
104 (eChipRev < VI_VEGAM_A0))
105#define ASIC_REV_IS_VEGAM(eChipRev) (eChipRev >= VI_VEGAM_A0)
106#else
101#define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0) 107#define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0)
108#endif
102 109
103/* DCE11 */ 110/* DCE11 */
104#define CZ_CARRIZO_A0 0x01 111#define CZ_CARRIZO_A0 0x01
@@ -113,9 +120,14 @@
113 120
114#define AI_GREENLAND_P_A0 1 121#define AI_GREENLAND_P_A0 1
115#define AI_GREENLAND_P_A1 2 122#define AI_GREENLAND_P_A1 2
123#define AI_UNKNOWN 0xFF
116 124
117#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_UNKNOWN) 125#define AI_VEGA12_P_A0 20
118#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_UNKNOWN) 126#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_VEGA12_P_A0)
127#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_VEGA12_P_A0)
128
129#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
130#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
119 131
120/* DCN1_0 */ 132/* DCN1_0 */
121#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */ 133#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
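The revised macros partition the Vega10 ("Greenland") internal revision space at AI_VEGA12_P_A0 = 20: anything below classifies as Greenland, [20, 0xFF) as Vega12. A standalone illustration of the cutover:

	#include <stdio.h>

	#define AI_VEGA12_P_A0 20
	#define AI_UNKNOWN 0xFF
	#define ASICREV_IS_GREENLAND_P(rev) ((rev) < AI_VEGA12_P_A0)
	#define ASICREV_IS_VEGA12_P(rev) (((rev) >= AI_VEGA12_P_A0) && ((rev) < AI_UNKNOWN))

	int main(void)
	{
		unsigned int rev = 21; /* sample internal chip revision */
		printf("greenland=%d vega12=%d\n",
		       ASICREV_IS_GREENLAND_P(rev), ASICREV_IS_VEGA12_P(rev));
		return 0; /* prints greenland=0 vega12=1 */
	}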
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index fa543965feb5..5b1f8cef0c22 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -40,6 +40,9 @@ enum dce_version {
40 DCE_VERSION_10_0, 40 DCE_VERSION_10_0,
41 DCE_VERSION_11_0, 41 DCE_VERSION_11_0,
42 DCE_VERSION_11_2, 42 DCE_VERSION_11_2,
43#if defined(CONFIG_DRM_AMD_DC_VEGAM)
44 DCE_VERSION_11_22,
45#endif
43 DCE_VERSION_12_0, 46 DCE_VERSION_12_0,
44 DCE_VERSION_MAX, 47 DCE_VERSION_MAX,
45 DCN_VERSION_1_0, 48 DCN_VERSION_1_0,
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 0de258622c12..16cbdb43d856 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -26,8 +26,6 @@
26#ifndef __DAL_FIXED31_32_H__ 26#ifndef __DAL_FIXED31_32_H__
27#define __DAL_FIXED31_32_H__ 27#define __DAL_FIXED31_32_H__
28 28
29#include "os_types.h"
30
31#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32 29#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
32 30
33/* 31/*
@@ -44,7 +42,7 @@
44 */ 42 */
45 43
46struct fixed31_32 { 44struct fixed31_32 {
47 int64_t value; 45 long long value;
48}; 46};
49 47
50/* 48/*
@@ -73,15 +71,15 @@ static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
73 * result = numerator / denominator 71 * result = numerator / denominator
74 */ 72 */
75struct fixed31_32 dal_fixed31_32_from_fraction( 73struct fixed31_32 dal_fixed31_32_from_fraction(
76 int64_t numerator, 74 long long numerator,
77 int64_t denominator); 75 long long denominator);
78 76
79/* 77/*
80 * @brief 78 * @brief
81 * result = arg 79 * result = arg
82 */ 80 */
83struct fixed31_32 dal_fixed31_32_from_int_nonconst(int64_t arg); 81struct fixed31_32 dal_fixed31_32_from_int_nonconst(long long arg);
84static inline struct fixed31_32 dal_fixed31_32_from_int(int64_t arg) 82static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg)
85{ 83{
86 if (__builtin_constant_p(arg)) { 84 if (__builtin_constant_p(arg)) {
87 struct fixed31_32 res; 85 struct fixed31_32 res;
@@ -213,7 +211,7 @@ static inline struct fixed31_32 dal_fixed31_32_clamp(
213 */ 211 */
214struct fixed31_32 dal_fixed31_32_shl( 212struct fixed31_32 dal_fixed31_32_shl(
215 struct fixed31_32 arg, 213 struct fixed31_32 arg,
216 uint8_t shift); 214 unsigned char shift);
217 215
218/* 216/*
219 * @brief 217 * @brief
@@ -221,7 +219,7 @@ struct fixed31_32 dal_fixed31_32_shl(
221 */ 219 */
222static inline struct fixed31_32 dal_fixed31_32_shr( 220static inline struct fixed31_32 dal_fixed31_32_shr(
223 struct fixed31_32 arg, 221 struct fixed31_32 arg,
224 uint8_t shift) 222 unsigned char shift)
225{ 223{
226 struct fixed31_32 res; 224 struct fixed31_32 res;
227 res.value = arg.value >> shift; 225 res.value = arg.value >> shift;
@@ -246,7 +244,7 @@ struct fixed31_32 dal_fixed31_32_add(
246 * result = arg1 + arg2 244 * result = arg1 + arg2
247 */ 245 */
248static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1, 246static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1,
249 int32_t arg2) 247 int arg2)
250{ 248{
251 return dal_fixed31_32_add(arg1, 249 return dal_fixed31_32_add(arg1,
252 dal_fixed31_32_from_int(arg2)); 250 dal_fixed31_32_from_int(arg2));
@@ -265,7 +263,7 @@ struct fixed31_32 dal_fixed31_32_sub(
265 * result = arg1 - arg2 263 * result = arg1 - arg2
266 */ 264 */
267static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1, 265static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1,
268 int32_t arg2) 266 int arg2)
269{ 267{
270 return dal_fixed31_32_sub(arg1, 268 return dal_fixed31_32_sub(arg1,
271 dal_fixed31_32_from_int(arg2)); 269 dal_fixed31_32_from_int(arg2));
@@ -291,7 +289,7 @@ struct fixed31_32 dal_fixed31_32_mul(
291 * result = arg1 * arg2 289 * result = arg1 * arg2
292 */ 290 */
293static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1, 291static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1,
294 int32_t arg2) 292 int arg2)
295{ 293{
296 return dal_fixed31_32_mul(arg1, 294 return dal_fixed31_32_mul(arg1,
297 dal_fixed31_32_from_int(arg2)); 295 dal_fixed31_32_from_int(arg2));
@@ -309,7 +307,7 @@ struct fixed31_32 dal_fixed31_32_sqr(
309 * result = arg1 / arg2 307 * result = arg1 / arg2
310 */ 308 */
311static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1, 309static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1,
312 int64_t arg2) 310 long long arg2)
313{ 311{
314 return dal_fixed31_32_from_fraction(arg1.value, 312 return dal_fixed31_32_from_fraction(arg1.value,
315 dal_fixed31_32_from_int(arg2).value); 313 dal_fixed31_32_from_int(arg2).value);
@@ -434,21 +432,21 @@ struct fixed31_32 dal_fixed31_32_pow(
434 * @brief 432 * @brief
435 * result = floor(arg) := greatest integer lower than or equal to arg 433 * result = floor(arg) := greatest integer lower than or equal to arg
436 */ 434 */
437int32_t dal_fixed31_32_floor( 435int dal_fixed31_32_floor(
438 struct fixed31_32 arg); 436 struct fixed31_32 arg);
439 437
440/* 438/*
441 * @brief 439 * @brief
442 * result = round(arg) := integer nearest to arg 440 * result = round(arg) := integer nearest to arg
443 */ 441 */
444int32_t dal_fixed31_32_round( 442int dal_fixed31_32_round(
445 struct fixed31_32 arg); 443 struct fixed31_32 arg);
446 444
447/* 445/*
448 * @brief 446 * @brief
449 * result = ceil(arg) := lowest integer greater than or equal to arg 447 * result = ceil(arg) := lowest integer greater than or equal to arg
450 */ 448 */
451int32_t dal_fixed31_32_ceil( 449int dal_fixed31_32_ceil(
452 struct fixed31_32 arg); 450 struct fixed31_32 arg);
453 451
454/* the following two functions are used in scaler hw programming to convert fixed 452
@@ -457,20 +455,20 @@ int32_t dal_fixed31_32_ceil(
457 * fractional 455 * fractional
458 */ 456 */
459 457
460uint32_t dal_fixed31_32_u2d19( 458unsigned int dal_fixed31_32_u2d19(
461 struct fixed31_32 arg); 459 struct fixed31_32 arg);
462 460
463uint32_t dal_fixed31_32_u0d19( 461unsigned int dal_fixed31_32_u0d19(
464 struct fixed31_32 arg); 462 struct fixed31_32 arg);
465 463
466 464
467uint32_t dal_fixed31_32_clamp_u0d14( 465unsigned int dal_fixed31_32_clamp_u0d14(
468 struct fixed31_32 arg); 466 struct fixed31_32 arg);
469 467
470uint32_t dal_fixed31_32_clamp_u0d10( 468unsigned int dal_fixed31_32_clamp_u0d10(
471 struct fixed31_32 arg); 469 struct fixed31_32 arg);
472 470
473int32_t dal_fixed31_32_s4d19( 471int dal_fixed31_32_s4d19(
474 struct fixed31_32 arg); 472 struct fixed31_32 arg);
475 473
476#endif 474#endif
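These changes only swap the explicit intN_t spellings for plain C types (long long is 64 bits on all kernel targets); the Q31.32 format itself is untouched: a real number r is stored as value = r * 2^32. A tiny userspace check of that encoding:

	#include <stdio.h>

	struct fixed31_32 { long long value; };

	int main(void)
	{
		/* 1.5 in Q31.32: 1.5 * 2^32 = 0x180000000 = 3 << 31 */
		struct fixed31_32 x = { 3LL << 31 };
		printf("%.4f\n", (double)x.value / 4294967296.0); /* 1.5000 */
		return 0;
	}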
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 427796bdc14a..b608a0830801 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -29,39 +29,39 @@
29#include "os_types.h" 29#include "os_types.h"
30 30
31#define MAX_NAME_LEN 32 31#define MAX_NAME_LEN 32
32#define DC_LOG_ERROR(a, ...) dm_logger_write(DC_LOGGER, LOG_ERROR, a, ## __VA_ARGS__)
33#define DC_LOG_WARNING(a, ...) dm_logger_write(DC_LOGGER, LOG_WARNING, a, ## __VA_ARGS__)
34#define DC_LOG_DEBUG(a, ...) dm_logger_write(DC_LOGGER, LOG_DEBUG, a, ## __VA_ARGS__)
35#define DC_LOG_DC(a, ...) dm_logger_write(DC_LOGGER, LOG_DC, a, ## __VA_ARGS__)
36#define DC_LOG_DTN(a, ...) dm_logger_write(DC_LOGGER, LOG_DTN, a, ## __VA_ARGS__)
37#define DC_LOG_SURFACE(a, ...) dm_logger_write(DC_LOGGER, LOG_SURFACE, a, ## __VA_ARGS__)
38#define DC_LOG_HW_HOTPLUG(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HOTPLUG, a, ## __VA_ARGS__)
39#define DC_LOG_HW_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_LINK_TRAINING, a, ## __VA_ARGS__)
40#define DC_LOG_HW_SET_MODE(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_SET_MODE, a, ## __VA_ARGS__)
41#define DC_LOG_HW_RESUME_S3(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_RESUME_S3, a, ## __VA_ARGS__)
42#define DC_LOG_HW_AUDIO(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_AUDIO, a, ## __VA_ARGS__)
43#define DC_LOG_HW_HPD_IRQ(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HPD_IRQ, a, ## __VA_ARGS__)
44#define DC_LOG_MST(a, ...) dm_logger_write(DC_LOGGER, LOG_MST, a, ## __VA_ARGS__)
45#define DC_LOG_SCALER(a, ...) dm_logger_write(DC_LOGGER, LOG_SCALER, a, ## __VA_ARGS__)
46#define DC_LOG_BIOS(a, ...) dm_logger_write(DC_LOGGER, LOG_BIOS, a, ## __VA_ARGS__)
47#define DC_LOG_BANDWIDTH_CALCS(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_CALCS, a, ## __VA_ARGS__)
48#define DC_LOG_BANDWIDTH_VALIDATION(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_VALIDATION, a, ## __VA_ARGS__)
49#define DC_LOG_I2C_AUX(a, ...) dm_logger_write(DC_LOGGER, LOG_I2C_AUX, a, ## __VA_ARGS__)
50#define DC_LOG_SYNC(a, ...) dm_logger_write(DC_LOGGER, LOG_SYNC, a, ## __VA_ARGS__)
51#define DC_LOG_BACKLIGHT(a, ...) dm_logger_write(DC_LOGGER, LOG_BACKLIGHT, a, ## __VA_ARGS__)
52#define DC_LOG_FEATURE_OVERRIDE(a, ...) dm_logger_write(DC_LOGGER, LOG_FEATURE_OVERRIDE, a, ## __VA_ARGS__)
53#define DC_LOG_DETECTION_EDID_PARSER(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_EDID_PARSER, a, ## __VA_ARGS__)
54#define DC_LOG_DETECTION_DP_CAPS(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_DP_CAPS, a, ## __VA_ARGS__)
55#define DC_LOG_RESOURCE(a, ...) dm_logger_write(DC_LOGGER, LOG_RESOURCE, a, ## __VA_ARGS__)
56#define DC_LOG_DML(a, ...) dm_logger_write(DC_LOGGER, LOG_DML, a, ## __VA_ARGS__)
57#define DC_LOG_EVENT_MODE_SET(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_MODE_SET, a, ## __VA_ARGS__)
58#define DC_LOG_EVENT_DETECTION(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_DETECTION, a, ## __VA_ARGS__)
59#define DC_LOG_EVENT_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_TRAINING, a, ## __VA_ARGS__)
60#define DC_LOG_EVENT_LINK_LOSS(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_LOSS, a, ## __VA_ARGS__)
61#define DC_LOG_EVENT_UNDERFLOW(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_UNDERFLOW, a, ## __VA_ARGS__)
62#define DC_LOG_IF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_IF_TRACE, a, ## __VA_ARGS__)
63#define DC_LOG_PERF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_PERF_TRACE, a, ## __VA_ARGS__)
64 32
33#define DC_LOG_ERROR(...) DRM_ERROR(__VA_ARGS__)
34#define DC_LOG_WARNING(...) DRM_WARN(__VA_ARGS__)
35#define DC_LOG_DEBUG(...) DRM_DEBUG_KMS(__VA_ARGS__)
36#define DC_LOG_DC(...) DRM_DEBUG_KMS(__VA_ARGS__)
37#define DC_LOG_DTN(...) DRM_DEBUG_KMS(__VA_ARGS__)
38#define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__)
39#define DC_LOG_HW_HOTPLUG(...) DRM_DEBUG_KMS(__VA_ARGS__)
40#define DC_LOG_HW_LINK_TRAINING(...) pr_debug("[HW_LINK_TRAINING]:"__VA_ARGS__)
41#define DC_LOG_HW_SET_MODE(...) DRM_DEBUG_KMS(__VA_ARGS__)
42#define DC_LOG_HW_RESUME_S3(...) DRM_DEBUG_KMS(__VA_ARGS__)
43#define DC_LOG_HW_AUDIO(...) pr_debug("[HW_AUDIO]:"__VA_ARGS__)
44#define DC_LOG_HW_HPD_IRQ(...) DRM_DEBUG_KMS(__VA_ARGS__)
45#define DC_LOG_MST(...) DRM_DEBUG_KMS(__VA_ARGS__)
46#define DC_LOG_SCALER(...) pr_debug("[SCALER]:"__VA_ARGS__)
47#define DC_LOG_BIOS(...) pr_debug("[BIOS]:"__VA_ARGS__)
48#define DC_LOG_BANDWIDTH_CALCS(...) pr_debug("[BANDWIDTH_CALCS]:"__VA_ARGS__)
49#define DC_LOG_BANDWIDTH_VALIDATION(...) DRM_DEBUG_KMS(__VA_ARGS__)
50#define DC_LOG_I2C_AUX(...) DRM_DEBUG_KMS(__VA_ARGS__)
51#define DC_LOG_SYNC(...) DRM_DEBUG_KMS(__VA_ARGS__)
52#define DC_LOG_BACKLIGHT(...) DRM_DEBUG_KMS(__VA_ARGS__)
53#define DC_LOG_FEATURE_OVERRIDE(...) DRM_DEBUG_KMS(__VA_ARGS__)
54#define DC_LOG_DETECTION_EDID_PARSER(...) DRM_DEBUG_KMS(__VA_ARGS__)
55#define DC_LOG_DETECTION_DP_CAPS(...) DRM_DEBUG_KMS(__VA_ARGS__)
56#define DC_LOG_RESOURCE(...) DRM_DEBUG_KMS(__VA_ARGS__)
57#define DC_LOG_DML(...) pr_debug("[DML]:"__VA_ARGS__)
58#define DC_LOG_EVENT_MODE_SET(...) DRM_DEBUG_KMS(__VA_ARGS__)
59#define DC_LOG_EVENT_DETECTION(...) DRM_DEBUG_KMS(__VA_ARGS__)
60#define DC_LOG_EVENT_LINK_TRAINING(...) DRM_DEBUG_KMS(__VA_ARGS__)
61#define DC_LOG_EVENT_LINK_LOSS(...) DRM_DEBUG_KMS(__VA_ARGS__)
62#define DC_LOG_EVENT_UNDERFLOW(...) DRM_DEBUG_KMS(__VA_ARGS__)
63#define DC_LOG_IF_TRACE(...) pr_debug("[IF_TRACE]:"__VA_ARGS__)
64#define DC_LOG_PERF_TRACE(...) DRM_DEBUG_KMS(__VA_ARGS__)
65 65
66struct dal_logger; 66struct dal_logger;
67 67
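The rewrite retires dm_logger_write() in favor of the standard DRM helpers, with the chattier categories routed to pr_debug() under a bracketed tag. Call sites are unchanged; after the patch the first line below expands to DRM_DEBUG_KMS(). The dynamic-debug command is a hedged example and assumes CONFIG_DYNAMIC_DEBUG with debugfs mounted:

	DC_LOG_HW_HOTPLUG("connector %d: hpd asserted\n", 3);

	/* tagged pr_debug categories can then be toggled at runtime, e.g.
	 * echo 'format "[HW_LINK_TRAINING]" +p' > /sys/kernel/debug/dynamic_debug/control
	 */
	DC_LOG_HW_LINK_TRAINING("voltage swing %u\n", 2);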
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index e7e374f56864..15e5b72e6e00 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -185,14 +185,14 @@ struct dividers {
185 185
186static void build_coefficients(struct gamma_coefficients *coefficients, bool is_2_4) 186static void build_coefficients(struct gamma_coefficients *coefficients, bool is_2_4)
187{ 187{
188 static const int32_t numerator01[] = { 31308, 180000}; 188 static const int32_t numerator01[] = { 31308, 180000};
189 static const int32_t numerator02[] = { 12920, 4500}; 189 static const int32_t numerator02[] = { 12920, 4500};
190 static const int32_t numerator03[] = { 55, 99}; 190 static const int32_t numerator03[] = { 55, 99};
191 static const int32_t numerator04[] = { 55, 99}; 191 static const int32_t numerator04[] = { 55, 99};
192 static const int32_t numerator05[] = { 2400, 2200}; 192 static const int32_t numerator05[] = { 2400, 2200};
193 193
194 uint32_t i = 0; 194 uint32_t i = 0;
195 uint32_t index = is_2_4 == true ? 0:1; 195 uint32_t index = is_2_4 == true ? 0:1;
196 196
197 do { 197 do {
198 coefficients->a0[i] = dal_fixed31_32_from_fraction( 198 coefficients->a0[i] = dal_fixed31_32_from_fraction(
@@ -691,7 +691,7 @@ static void build_degamma(struct pwl_float_data_ex *curve,
691 } 691 }
692} 692}
693 693
694static bool scale_gamma(struct pwl_float_data *pwl_rgb, 694static void scale_gamma(struct pwl_float_data *pwl_rgb,
695 const struct dc_gamma *ramp, 695 const struct dc_gamma *ramp,
696 struct dividers dividers) 696 struct dividers dividers)
697{ 697{
@@ -752,11 +752,9 @@ static bool scale_gamma(struct pwl_float_data *pwl_rgb,
752 dividers.divider3); 752 dividers.divider3);
753 rgb->b = dal_fixed31_32_mul(rgb_last->b, 753 rgb->b = dal_fixed31_32_mul(rgb_last->b,
754 dividers.divider3); 754 dividers.divider3);
755
756 return true;
757} 755}
758 756
759static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb, 757static void scale_gamma_dx(struct pwl_float_data *pwl_rgb,
760 const struct dc_gamma *ramp, 758 const struct dc_gamma *ramp,
761 struct dividers dividers) 759 struct dividers dividers)
762{ 760{
@@ -818,8 +816,71 @@ static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb,
818 pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g); 816 pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
819 pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int( 817 pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
820 pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b); 818 pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
819}
821 820
822 return true; 821/* todo: all these scale_gamma functions are inherently the same but
822 * take different structures as params or a different format for ramp
823 * values. We could probably implement it in a more generic fashion
824 */
825static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
826 const struct regamma_ramp *ramp,
827 struct dividers dividers)
828{
829 unsigned short max_driver = 0xFFFF;
830 unsigned short max_os = 0xFF00;
831 unsigned short scaler = max_os;
832 uint32_t i;
833 struct pwl_float_data *rgb = pwl_rgb;
834 struct pwl_float_data *rgb_last = rgb + GAMMA_RGB_256_ENTRIES - 1;
835
836 i = 0;
837 do {
838 if (ramp->gamma[i] > max_os ||
839 ramp->gamma[i + 256] > max_os ||
840 ramp->gamma[i + 512] > max_os) {
841 scaler = max_driver;
842 break;
843 }
844 i++;
845 } while (i != GAMMA_RGB_256_ENTRIES);
846
847 i = 0;
848 do {
849 rgb->r = dal_fixed31_32_from_fraction(
850 ramp->gamma[i], scaler);
851 rgb->g = dal_fixed31_32_from_fraction(
852 ramp->gamma[i + 256], scaler);
853 rgb->b = dal_fixed31_32_from_fraction(
854 ramp->gamma[i + 512], scaler);
855
856 ++rgb;
857 ++i;
858 } while (i != GAMMA_RGB_256_ENTRIES);
859
860 rgb->r = dal_fixed31_32_mul(rgb_last->r,
861 dividers.divider1);
862 rgb->g = dal_fixed31_32_mul(rgb_last->g,
863 dividers.divider1);
864 rgb->b = dal_fixed31_32_mul(rgb_last->b,
865 dividers.divider1);
866
867 ++rgb;
868
869 rgb->r = dal_fixed31_32_mul(rgb_last->r,
870 dividers.divider2);
871 rgb->g = dal_fixed31_32_mul(rgb_last->g,
872 dividers.divider2);
873 rgb->b = dal_fixed31_32_mul(rgb_last->b,
874 dividers.divider2);
875
876 ++rgb;
877
878 rgb->r = dal_fixed31_32_mul(rgb_last->r,
879 dividers.divider3);
880 rgb->g = dal_fixed31_32_mul(rgb_last->g,
881 dividers.divider3);
882 rgb->b = dal_fixed31_32_mul(rgb_last->b,
883 dividers.divider3);
823} 884}
824 885
825/* 886/*
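scale_user_regamma_ramp() normalizes the 256-entry OS ramp to [0, 1]: Windows-style ramps top out at 0xFF00, so that is the default divisor, and the full 16-bit 0xFFFF is used only if some sample exceeds it. A small standalone check of the two cases:

	#include <stdio.h>

	int main(void)
	{
		unsigned short max_os = 0xFF00, max_driver = 0xFFFF;
		unsigned short sample = 0xFF00; /* top of an OS-range ramp */

		printf("%.4f\n", (double)sample / max_os);     /* 1.0000  */
		printf("%.4f\n", (double)sample / max_driver); /* ~0.9961 */
		return 0;
	}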
@@ -949,7 +1010,7 @@ static inline void copy_rgb_regamma_to_coordinates_x(
949 uint32_t i = 0; 1010 uint32_t i = 0;
950 const struct pwl_float_data_ex *rgb_regamma = rgb_ex; 1011 const struct pwl_float_data_ex *rgb_regamma = rgb_ex;
951 1012
952 while (i <= hw_points_num) { 1013 while (i <= hw_points_num + 1) {
953 coords->regamma_y_red = rgb_regamma->r; 1014 coords->regamma_y_red = rgb_regamma->r;
954 coords->regamma_y_green = rgb_regamma->g; 1015 coords->regamma_y_green = rgb_regamma->g;
955 coords->regamma_y_blue = rgb_regamma->b; 1016 coords->regamma_y_blue = rgb_regamma->b;
@@ -1002,6 +1063,102 @@ static bool calculate_interpolated_hardware_curve(
1002 return true; 1063 return true;
1003} 1064}
1004 1065
1066/* The "old" interpolation uses a complicated scheme to build an array of
1067 * coefficients while also using an array of 0-255 normalized to 0-1
1068 * Then there's another loop using both of the above + new scaled user ramp
1069 * and we concatenate them. It also searches for points of interpolation and
1070 * uses enums for positions.
1071 *
1072 * This function uses a different approach:
1073 * user ramp is always applied on X with 0/255, 1/255, 2/255, ..., 255/255
1074 * To find the index for hwX, we notice the following:
1075 * i/255 <= hwX < (i+1)/255 <=> i <= 255*hwX < i+1
1076 * See apply_lut_1d which is the same principle, but on 4K entry 1D LUT
1077 *
1078 * Once the index is known, combined Y is simply:
1079 * user_ramp(index) + (hwX - index/255) * (user_ramp(index+1) - user_ramp(index))
1080 *
1081 * We should switch to this method in all cases, it's simpler and faster
1082 * ToDo one day - for now this only applies to ADL regamma to avoid regression
1083 * for regular use cases (sRGB and PQ)
1084 */
1085static void interpolate_user_regamma(uint32_t hw_points_num,
1086 struct pwl_float_data *rgb_user,
1087 bool apply_degamma,
1088 struct dc_transfer_func_distributed_points *tf_pts)
1089{
1090 uint32_t i;
1091 uint32_t color = 0;
1092 int32_t index;
1093 int32_t index_next;
1094 struct fixed31_32 *tf_point;
1095 struct fixed31_32 hw_x;
1096 struct fixed31_32 norm_factor =
1097 dal_fixed31_32_from_int_nonconst(255);
1098 struct fixed31_32 norm_x;
1099 struct fixed31_32 index_f;
1100 struct fixed31_32 lut1;
1101 struct fixed31_32 lut2;
1102 struct fixed31_32 delta_lut;
1103 struct fixed31_32 delta_index;
1104
1105 i = 0;
1106 /* fixed_pt library has problems handling too small values */
1107 while (i != 32) {
1108 tf_pts->red[i] = dal_fixed31_32_zero;
1109 tf_pts->green[i] = dal_fixed31_32_zero;
1110 tf_pts->blue[i] = dal_fixed31_32_zero;
1111 ++i;
1112 }
1113 while (i <= hw_points_num + 1) {
1114 for (color = 0; color < 3; color++) {
1115 if (color == 0)
1116 tf_point = &tf_pts->red[i];
1117 else if (color == 1)
1118 tf_point = &tf_pts->green[i];
1119 else
1120 tf_point = &tf_pts->blue[i];
1121
1122 if (apply_degamma) {
1123 if (color == 0)
1124 hw_x = coordinates_x[i].regamma_y_red;
1125 else if (color == 1)
1126 hw_x = coordinates_x[i].regamma_y_green;
1127 else
1128 hw_x = coordinates_x[i].regamma_y_blue;
1129 } else
1130 hw_x = coordinates_x[i].x;
1131
1132 norm_x = dal_fixed31_32_mul(norm_factor, hw_x);
1133 index = dal_fixed31_32_floor(norm_x);
1134 if (index < 0 || index > 255)
1135 continue;
1136
1137 index_f = dal_fixed31_32_from_int_nonconst(index);
1138 index_next = (index == 255) ? index : index + 1;
1139
1140 if (color == 0) {
1141 lut1 = rgb_user[index].r;
1142 lut2 = rgb_user[index_next].r;
1143 } else if (color == 1) {
1144 lut1 = rgb_user[index].g;
1145 lut2 = rgb_user[index_next].g;
1146 } else {
1147 lut1 = rgb_user[index].b;
1148 lut2 = rgb_user[index_next].b;
1149 }
1150
1151 // we have everything now, so interpolate
1152 delta_lut = dal_fixed31_32_sub(lut2, lut1);
1153 delta_index = dal_fixed31_32_sub(norm_x, index_f);
1154
1155 *tf_point = dal_fixed31_32_add(lut1,
1156 dal_fixed31_32_mul(delta_index, delta_lut));
1157 }
1158 ++i;
1159 }
1160}
1161
1005static void build_new_custom_resulted_curve( 1162static void build_new_custom_resulted_curve(
1006 uint32_t hw_points_num, 1163 uint32_t hw_points_num,
1007 struct dc_transfer_func_distributed_points *tf_pts) 1164 struct dc_transfer_func_distributed_points *tf_pts)
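Concretely, for a hardware X of 0.5 the code computes norm_x = 255 * 0.5 = 127.5, floors it to index 127, and blends the neighboring user-ramp entries with weight 0.5. The same arithmetic in doubles (the driver performs it in fixed31_32):

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		double lut[256];
		int i;

		for (i = 0; i < 256; i++)
			lut[i] = (double)i / 255.0; /* identity ramp for the example */

		double hw_x = 0.5;
		double norm_x = 255.0 * hw_x;          /* 127.5 */
		int idx = (int)floor(norm_x);          /* 127 */
		int next = idx == 255 ? idx : idx + 1; /* 128; clamped at the top */
		double y = lut[idx] + (norm_x - idx) * (lut[next] - lut[idx]);

		printf("%.6f\n", y); /* 0.500000 for the identity ramp */
		return 0;
	}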
@@ -1025,6 +1182,29 @@ static void build_new_custom_resulted_curve(
1025 } 1182 }
1026} 1183}
1027 1184
1185static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma,
1186 uint32_t hw_points_num)
1187{
1188 uint32_t i;
1189
1190 struct gamma_coefficients coeff;
1191 struct pwl_float_data_ex *rgb = rgb_regamma;
1192 const struct hw_x_point *coord_x = coordinates_x;
1193
1194 build_coefficients(&coeff, true);
1195
1196 i = 0;
1197 while (i != hw_points_num + 1) {
1198 rgb->r = translate_from_linear_space_ex(
1199 coord_x->x, &coeff, 0);
1200 rgb->g = rgb->r;
1201 rgb->b = rgb->r;
1202 ++coord_x;
1203 ++rgb;
1204 ++i;
1205 }
1206}
1207
1028static bool map_regamma_hw_to_x_user( 1208static bool map_regamma_hw_to_x_user(
1029 const struct dc_gamma *ramp, 1209 const struct dc_gamma *ramp,
1030 struct pixel_gamma_point *coeff128, 1210 struct pixel_gamma_point *coeff128,
@@ -1062,6 +1242,7 @@ static bool map_regamma_hw_to_x_user(
1062 } 1242 }
1063 } 1243 }
1064 1244
1245 /* this should be named differently, all it does is clamp to 0-1 */
1065 build_new_custom_resulted_curve(hw_points_num, tf_pts); 1246 build_new_custom_resulted_curve(hw_points_num, tf_pts);
1066 1247
1067 return true; 1248 return true;
@@ -1093,19 +1274,19 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
1093 1274
1094 output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; 1275 output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
1095 1276
1096 rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), 1277 rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
1097 GFP_KERNEL); 1278 GFP_KERNEL);
1098 if (!rgb_user) 1279 if (!rgb_user)
1099 goto rgb_user_alloc_fail; 1280 goto rgb_user_alloc_fail;
1100 rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), 1281 rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
1101 GFP_KERNEL); 1282 GFP_KERNEL);
1102 if (!rgb_regamma) 1283 if (!rgb_regamma)
1103 goto rgb_regamma_alloc_fail; 1284 goto rgb_regamma_alloc_fail;
1104 axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3), 1285 axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
1105 GFP_KERNEL); 1286 GFP_KERNEL);
1106 if (!axix_x) 1287 if (!axix_x)
1107 goto axix_x_alloc_fail; 1288 goto axix_x_alloc_fail;
1108 coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); 1289 coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
1109 if (!coeff) 1290 if (!coeff)
1110 goto coeff_alloc_fail; 1291 goto coeff_alloc_fail;
1111 1292
@@ -1157,10 +1338,117 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
1157 1338
1158 ret = true; 1339 ret = true;
1159 1340
1160 kfree(coeff); 1341 kvfree(coeff);
1161coeff_alloc_fail: 1342coeff_alloc_fail:
1162 kfree(axix_x); 1343 kvfree(axix_x);
1163axix_x_alloc_fail: 1344axix_x_alloc_fail:
1345 kvfree(rgb_regamma);
1346rgb_regamma_alloc_fail:
1347 kvfree(rgb_user);
1348rgb_user_alloc_fail:
1349 return ret;
1350}
1351
1352bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
1353 const struct regamma_lut *regamma)
1354{
1355 struct gamma_coefficients coeff;
1356 const struct hw_x_point *coord_x = coordinates_x;
1357 uint32_t i = 0;
1358
1359 do {
1360 coeff.a0[i] = dal_fixed31_32_from_fraction(
1361 regamma->coeff.A0[i], 10000000);
1362 coeff.a1[i] = dal_fixed31_32_from_fraction(
1363 regamma->coeff.A1[i], 1000);
1364 coeff.a2[i] = dal_fixed31_32_from_fraction(
1365 regamma->coeff.A2[i], 1000);
1366 coeff.a3[i] = dal_fixed31_32_from_fraction(
1367 regamma->coeff.A3[i], 1000);
1368 coeff.user_gamma[i] = dal_fixed31_32_from_fraction(
1369 regamma->coeff.gamma[i], 1000);
1370
1371 ++i;
1372 } while (i != 3);
1373
1374 i = 0;
1375 /* fixed_pt library has problems handling too small values */
1376 while (i != 32) {
1377 output_tf->tf_pts.red[i] = dal_fixed31_32_zero;
1378 output_tf->tf_pts.green[i] = dal_fixed31_32_zero;
1379 output_tf->tf_pts.blue[i] = dal_fixed31_32_zero;
1380 ++coord_x;
1381 ++i;
1382 }
1383 while (i != MAX_HW_POINTS + 1) {
1384 output_tf->tf_pts.red[i] = translate_from_linear_space_ex(
1385 coord_x->x, &coeff, 0);
1386 output_tf->tf_pts.green[i] = translate_from_linear_space_ex(
1387 coord_x->x, &coeff, 1);
1388 output_tf->tf_pts.blue[i] = translate_from_linear_space_ex(
1389 coord_x->x, &coeff, 2);
1390 ++coord_x;
1391 ++i;
1392 }
1393
1394 // this function just clamps output to 0-1
1395 build_new_custom_resulted_curve(MAX_HW_POINTS, &output_tf->tf_pts);
1396 output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
1397
1398 return true;
1399}
1400
1401bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
1402 const struct regamma_lut *regamma)
1403{
1404 struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
1405 struct dividers dividers;
1406
1407 struct pwl_float_data *rgb_user = NULL;
1408 struct pwl_float_data_ex *rgb_regamma = NULL;
1409 bool ret = false;
1410
1411 if (regamma == NULL)
1412 return false;
1413
1414 output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
1415
1416 rgb_user = kzalloc(sizeof(*rgb_user) * (GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS),
1417 GFP_KERNEL);
1418 if (!rgb_user)
1419 goto rgb_user_alloc_fail;
1420
1421 rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
1422 GFP_KERNEL);
1423 if (!rgb_regamma)
1424 goto rgb_regamma_alloc_fail;
1425
1426 dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
1427 dividers.divider2 = dal_fixed31_32_from_int(2);
1428 dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
1429
1430 scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers);
1431
1432 if (regamma->flags.bits.applyDegamma == 1) {
1433 apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS);
1434 copy_rgb_regamma_to_coordinates_x(coordinates_x,
1435 MAX_HW_POINTS, rgb_regamma);
1436 }
1437
1438 interpolate_user_regamma(MAX_HW_POINTS, rgb_user,
1439 regamma->flags.bits.applyDegamma, tf_pts);
1440
1441 // no custom HDR curves!
1442 tf_pts->end_exponent = 0;
1443 tf_pts->x_point_at_y1_red = 1;
1444 tf_pts->x_point_at_y1_green = 1;
1445 tf_pts->x_point_at_y1_blue = 1;
1446
1447 // this function just clamps output to 0-1
1448 build_new_custom_resulted_curve(MAX_HW_POINTS, tf_pts);
1449
1450 ret = true;
1451
1164 kfree(rgb_regamma); 1452 kfree(rgb_regamma);
1165rgb_regamma_alloc_fail: 1453rgb_regamma_alloc_fail:
1166 kfree(rgb_user); 1454 kfree(rgb_user);
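calculate_user_regamma_coeff() takes integer coefficients with fixed denominators: A0 in units of 1e-7, A1-A3 and gamma in units of 1e-3. Decoding the sRGB-style values used elsewhere in this file:

	#include <stdio.h>

	int main(void)
	{
		int A0 = 31308, A1 = 12920, gamma = 2400; /* sample inputs */

		printf("A0    = %.7f\n", A0 / 10000000.0); /* 0.0031308 */
		printf("A1    = %.3f\n", A1 / 1000.0);     /* 12.920 */
		printf("gamma = %.3f\n", gamma / 1000.0);  /* 2.400 */
		return 0;
	}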
@@ -1192,19 +1480,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
1192 1480
1193 input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; 1481 input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
1194 1482
1195 rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), 1483 rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
1196 GFP_KERNEL); 1484 GFP_KERNEL);
1197 if (!rgb_user) 1485 if (!rgb_user)
1198 goto rgb_user_alloc_fail; 1486 goto rgb_user_alloc_fail;
1199 curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS), 1487 curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
1200 GFP_KERNEL); 1488 GFP_KERNEL);
1201 if (!curve) 1489 if (!curve)
1202 goto curve_alloc_fail; 1490 goto curve_alloc_fail;
1203 axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS), 1491 axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
1204 GFP_KERNEL); 1492 GFP_KERNEL);
1205 if (!axix_x) 1493 if (!axix_x)
1206 goto axix_x_alloc_fail; 1494 goto axix_x_alloc_fail;
1207 coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); 1495 coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
1208 if (!coeff) 1496 if (!coeff)
1209 goto coeff_alloc_fail; 1497 goto coeff_alloc_fail;
1210 1498
@@ -1246,13 +1534,13 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
1246 1534
1247 ret = true; 1535 ret = true;
1248 1536
1249 kfree(coeff); 1537 kvfree(coeff);
1250coeff_alloc_fail: 1538coeff_alloc_fail:
1251 kfree(axix_x); 1539 kvfree(axix_x);
1252axix_x_alloc_fail: 1540axix_x_alloc_fail:
1253 kfree(curve); 1541 kvfree(curve);
1254curve_alloc_fail: 1542curve_alloc_fail:
1255 kfree(rgb_user); 1543 kvfree(rgb_user);
1256rgb_user_alloc_fail: 1544rgb_user_alloc_fail:
1257 1545
1258 return ret; 1546 return ret;
@@ -1281,8 +1569,9 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
1281 } 1569 }
1282 ret = true; 1570 ret = true;
1283 } else if (trans == TRANSFER_FUNCTION_PQ) { 1571 } else if (trans == TRANSFER_FUNCTION_PQ) {
1284 rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + 1572 rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
1285 _EXTRA_POINTS), GFP_KERNEL); 1573 (MAX_HW_POINTS + _EXTRA_POINTS),
1574 GFP_KERNEL);
1286 if (!rgb_regamma) 1575 if (!rgb_regamma)
1287 goto rgb_regamma_alloc_fail; 1576 goto rgb_regamma_alloc_fail;
1288 points->end_exponent = 7; 1577 points->end_exponent = 7;
@@ -1302,11 +1591,12 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
1302 } 1591 }
1303 ret = true; 1592 ret = true;
1304 1593
1305 kfree(rgb_regamma); 1594 kvfree(rgb_regamma);
1306 } else if (trans == TRANSFER_FUNCTION_SRGB || 1595 } else if (trans == TRANSFER_FUNCTION_SRGB ||
1307 trans == TRANSFER_FUNCTION_BT709) { 1596 trans == TRANSFER_FUNCTION_BT709) {
1308 rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + 1597 rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
1309 _EXTRA_POINTS), GFP_KERNEL); 1598 (MAX_HW_POINTS + _EXTRA_POINTS),
1599 GFP_KERNEL);
1310 if (!rgb_regamma) 1600 if (!rgb_regamma)
1311 goto rgb_regamma_alloc_fail; 1601 goto rgb_regamma_alloc_fail;
1312 points->end_exponent = 0; 1602 points->end_exponent = 0;
@@ -1324,7 +1614,7 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
1324 } 1614 }
1325 ret = true; 1615 ret = true;
1326 1616
1327 kfree(rgb_regamma); 1617 kvfree(rgb_regamma);
1328 } 1618 }
1329rgb_regamma_alloc_fail: 1619rgb_regamma_alloc_fail:
1330 return ret; 1620 return ret;
@@ -1348,8 +1638,9 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
1348 } 1638 }
1349 ret = true; 1639 ret = true;
1350 } else if (trans == TRANSFER_FUNCTION_PQ) { 1640 } else if (trans == TRANSFER_FUNCTION_PQ) {
1351 rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS + 1641 rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
1352 _EXTRA_POINTS), GFP_KERNEL); 1642 (MAX_HW_POINTS + _EXTRA_POINTS),
1643 GFP_KERNEL);
1353 if (!rgb_degamma) 1644 if (!rgb_degamma)
1354 goto rgb_degamma_alloc_fail; 1645 goto rgb_degamma_alloc_fail;
1355 1646
@@ -1364,11 +1655,12 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
1364 } 1655 }
1365 ret = true; 1656 ret = true;
1366 1657
1367 kfree(rgb_degamma); 1658 kvfree(rgb_degamma);
1368 } else if (trans == TRANSFER_FUNCTION_SRGB || 1659 } else if (trans == TRANSFER_FUNCTION_SRGB ||
1369 trans == TRANSFER_FUNCTION_BT709) { 1660 trans == TRANSFER_FUNCTION_BT709) {
1370 rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS + 1661 rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
1371 _EXTRA_POINTS), GFP_KERNEL); 1662 (MAX_HW_POINTS + _EXTRA_POINTS),
1663 GFP_KERNEL);
1372 if (!rgb_degamma) 1664 if (!rgb_degamma)
1373 goto rgb_degamma_alloc_fail; 1665 goto rgb_degamma_alloc_fail;
1374 1666
@@ -1382,7 +1674,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
1382 } 1674 }
1383 ret = true; 1675 ret = true;
1384 1676
1385 kfree(rgb_degamma); 1677 kvfree(rgb_degamma);
1386 } 1678 }
1387 points->end_exponent = 0; 1679 points->end_exponent = 0;
1388 points->x_point_at_y1_red = 1; 1680 points->x_point_at_y1_red = 1;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index b7f9bc27d101..b64048991a95 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -32,6 +32,47 @@ struct dc_transfer_func_distributed_points;
32struct dc_rgb_fixed; 32struct dc_rgb_fixed;
33enum dc_transfer_func_predefined; 33enum dc_transfer_func_predefined;
34 34
35/* For SetRegamma ADL interface support
36 * Must match escape type
37 */
38union regamma_flags {
39 unsigned int raw;
40 struct {
41 unsigned int gammaRampArray :1; // RegammaRamp is in use
42 unsigned int gammaFromEdid :1; //gamma from edid is in use
42 unsigned int gammaFromEdid :1; // gamma from EDID is in use
43 unsigned int gammaFromEdidEx :1; // gamma from EDID is in use, but only for DisplayID 1.2
44 unsigned int gammaFromUser :1; // user custom gamma is used
45 unsigned int coeffFromUser :1; // coeff. A0-A3 from user is in use
46 unsigned int coeffFromEdid :1; // coeff. A0-A3 from EDID is in use
47 unsigned int applyDegamma :1; // flag for additional degamma correction in driver
48 unsigned int gammaPredefinedSRGB :1; // flag for sRGB gamma
49 unsigned int gammaPredefinedPQ :1; // flag for PQ gamma
50 unsigned int gammaPredefinedPQ2084Interim :1; // flag for PQ gamma, lower max nits
51 unsigned int gammaPredefined36 :1; // flag for 3.6 gamma
52 unsigned int gammaPredefinedReset :1; // flag to return to previous gamma
54};
55
56struct regamma_ramp {
56 unsigned short gamma[256*3]; // gamma ramp packed the same way as the OS (Windows): r, g & b
58};
59
60struct regamma_coeff {
61 int gamma[3];
62 int A0[3];
63 int A1[3];
64 int A2[3];
65 int A3[3];
66};
67
68struct regamma_lut {
69 union regamma_flags flags;
70 union {
71 struct regamma_ramp ramp;
72 struct regamma_coeff coeff;
73 };
74};
75
35void setup_x_points_distribution(void); 76void setup_x_points_distribution(void);
36void precompute_pq(void); 77void precompute_pq(void);
37void precompute_de_pq(void); 78void precompute_de_pq(void);
@@ -45,9 +86,14 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
45bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, 86bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
46 struct dc_transfer_func_distributed_points *points); 87 struct dc_transfer_func_distributed_points *points);
47 88
48bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, 89bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
49 struct dc_transfer_func_distributed_points *points); 90 struct dc_transfer_func_distributed_points *points);
50 91
92bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
93 const struct regamma_lut *regamma);
94
95bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
96 const struct regamma_lut *regamma);
51 97
52 98
53#endif /* COLOR_MOD_COLOR_GAMMA_H_ */ 99#endif /* COLOR_MOD_COLOR_GAMMA_H_ */
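regamma_lut carries either a packed 256-entries-per-channel ramp or per-channel coefficients, discriminated by the flag bits. A hedged sketch of a coefficient-path caller; the sRGB-like values are illustrative and output_tf is assumed to be allocated elsewhere:

	struct regamma_lut regamma = { 0 };
	int ch;

	regamma.flags.bits.coeffFromUser = 1;
	for (ch = 0; ch < 3; ch++) {
		regamma.coeff.gamma[ch] = 2400; /* 2.4       (units of 1e-3) */
		regamma.coeff.A0[ch] = 31308;   /* 0.0031308 (units of 1e-7) */
		regamma.coeff.A1[ch] = 12920;   /* 12.92     (units of 1e-3) */
		regamma.coeff.A2[ch] = 55;      /* 0.055     (units of 1e-3) */
		regamma.coeff.A3[ch] = 55;      /* 0.055     (units of 1e-3) */
	}

	if (!calculate_user_regamma_coeff(output_tf, &regamma))
		DRM_ERROR("user regamma coefficients rejected\n");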
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 041f87b73d5f..48e02197919f 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -115,18 +115,22 @@ struct mod_stats *mod_stats_create(struct dc *dc)
115 &reg_data, sizeof(unsigned int), &flag)) 115 &reg_data, sizeof(unsigned int), &flag))
116 core_stats->enabled = reg_data; 116 core_stats->enabled = reg_data;
117 117
118 core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT; 118 if (core_stats->enabled) {
119 if (dm_read_persistent_data(dc->ctx, NULL, NULL, 119 core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
120 DAL_STATS_ENTRIES_REGKEY, 120 if (dm_read_persistent_data(dc->ctx, NULL, NULL,
121 &reg_data, sizeof(unsigned int), &flag)) { 121 DAL_STATS_ENTRIES_REGKEY,
122 if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX) 122 &reg_data, sizeof(unsigned int), &flag)) {
123 core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX; 123 if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
124 else 124 core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
125 core_stats->entries = reg_data; 125 else
126 } 126 core_stats->entries = reg_data;
127 }
127 128
128 core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries, 129 core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
129 GFP_KERNEL); 130 GFP_KERNEL);
131 } else {
132 core_stats->entries = 0;
133 }
130 134
131 if (core_stats->time == NULL) 135 if (core_stats->time == NULL)
132 goto fail_construct; 136 goto fail_construct;
@@ -187,7 +191,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
187 191
188 for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) { 192 for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
189 dm_logger_write(logger, LOG_PROFILING, 193 dm_logger_write(logger, LOG_PROFILING,
190 "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n", 194 "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
191 time[i].render_time_in_us, 195 time[i].render_time_in_us,
192 time[i].avg_render_time_in_us_last_ten, 196 time[i].avg_render_time_in_us_last_ten,
193 time[i].min_window, 197 time[i].min_window,
@@ -227,7 +231,7 @@ void mod_stats_reset_data(struct mod_stats *mod_stats)
227 memset(core_stats->time, 0, 231 memset(core_stats->time, 0,
228 sizeof(struct stats_time_cache) * core_stats->entries); 232 sizeof(struct stats_time_cache) * core_stats->entries);
229 233
230 core_stats->index = 0; 234 core_stats->index = 1;
231} 235}
232 236
233void mod_stats_update_flip(struct mod_stats *mod_stats, 237void mod_stats_update_flip(struct mod_stats *mod_stats,
@@ -250,7 +254,7 @@ void mod_stats_update_flip(struct mod_stats *mod_stats,
250 254
251 time[index].flip_timestamp_in_ns = timestamp_in_ns; 255 time[index].flip_timestamp_in_ns = timestamp_in_ns;
252 time[index].render_time_in_us = 256 time[index].render_time_in_us =
253 timestamp_in_ns - time[index - 1].flip_timestamp_in_ns; 257 (timestamp_in_ns - time[index - 1].flip_timestamp_in_ns) / 1000;
254 258
255 if (index >= 10) { 259 if (index >= 10) {
256 for (unsigned int i = 0; i < 10; i++) 260 for (unsigned int i = 0; i < 10; i++)
@@ -261,10 +265,12 @@ void mod_stats_update_flip(struct mod_stats *mod_stats,
261 265
262 if (time[index].num_vsync_between_flips > 0) 266 if (time[index].num_vsync_between_flips > 0)
263 time[index].vsync_to_flip_time_in_us = 267 time[index].vsync_to_flip_time_in_us =
264 timestamp_in_ns - time[index].vupdate_timestamp_in_ns; 268 (timestamp_in_ns -
269 time[index].vupdate_timestamp_in_ns) / 1000;
265 else 270 else
266 time[index].vsync_to_flip_time_in_us = 271 time[index].vsync_to_flip_time_in_us =
267 timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns; 272 (timestamp_in_ns -
273 time[index - 1].vupdate_timestamp_in_ns) / 1000;
268 274
269 core_stats->index++; 275 core_stats->index++;
270} 276}
@@ -275,6 +281,8 @@ void mod_stats_update_vupdate(struct mod_stats *mod_stats,
275 struct core_stats *core_stats = NULL; 281 struct core_stats *core_stats = NULL;
276 struct stats_time_cache *time = NULL; 282 struct stats_time_cache *time = NULL;
277 unsigned int index = 0; 283 unsigned int index = 0;
284 unsigned int num_vsyncs = 0;
285 unsigned int prev_vsync_in_ns = 0;
278 286
279 if (mod_stats == NULL) 287 if (mod_stats == NULL)
280 return; 288 return;
@@ -286,14 +294,27 @@ void mod_stats_update_vupdate(struct mod_stats *mod_stats,
286 294
287 time = core_stats->time; 295 time = core_stats->time;
288 index = core_stats->index; 296 index = core_stats->index;
297 num_vsyncs = time[index].num_vsync_between_flips;
298
299 if (num_vsyncs < MOD_STATS_NUM_VSYNCS) {
300 if (num_vsyncs == 0) {
301 prev_vsync_in_ns =
302 time[index - 1].vupdate_timestamp_in_ns;
303
304 time[index].flip_to_vsync_time_in_us =
305 (timestamp_in_ns -
306 time[index - 1].flip_timestamp_in_ns) /
307 1000;
308 } else {
309 prev_vsync_in_ns =
310 time[index].vupdate_timestamp_in_ns;
311 }
289 312
290 time[index].vupdate_timestamp_in_ns = timestamp_in_ns; 313 time[index].v_sync_time_in_us[num_vsyncs] =
291 if (time[index].num_vsync_between_flips < MOD_STATS_NUM_VSYNCS) 314 (timestamp_in_ns - prev_vsync_in_ns) / 1000;
292 time[index].v_sync_time_in_us[time[index].num_vsync_between_flips] = 315 }
293 timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
294 time[index].flip_to_vsync_time_in_us =
295 timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
296 316
317 time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
297 time[index].num_vsync_between_flips++; 318 time[index].num_vsync_between_flips++;
298} 319}
299 320
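The stats rework keeps raw timestamps in nanoseconds but now reports every derived interval in microseconds, hence the / 1000 on each delta. At 60 Hz a flip-to-flip delta of 16,666,667 ns records as 16,666 us:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long prev_ns = 100000000ULL;
		unsigned long long now_ns = prev_ns + 16666667ULL; /* one 60 Hz frame */
		unsigned int render_time_in_us =
			(unsigned int)((now_ns - prev_ns) / 1000);

		printf("%u us\n", render_time_in_us); /* 16666 us */
		return 0;
	}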
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 9fa3aaef3f33..33de33016bda 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -109,6 +109,26 @@ enum amd_powergating_state {
109#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12) 109#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
110#define AMD_PG_SUPPORT_MMHUB (1 << 13) 110#define AMD_PG_SUPPORT_MMHUB (1 << 13)
111 111
112enum PP_FEATURE_MASK {
113 PP_SCLK_DPM_MASK = 0x1,
114 PP_MCLK_DPM_MASK = 0x2,
115 PP_PCIE_DPM_MASK = 0x4,
116 PP_SCLK_DEEP_SLEEP_MASK = 0x8,
117 PP_POWER_CONTAINMENT_MASK = 0x10,
118 PP_UVD_HANDSHAKE_MASK = 0x20,
119 PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
120 PP_VBI_TIME_SUPPORT_MASK = 0x80,
121 PP_ULV_MASK = 0x100,
122 PP_ENABLE_GFX_CG_THRU_SMU = 0x200,
123 PP_CLOCK_STRETCH_MASK = 0x400,
124 PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800,
125 PP_SOCCLK_DPM_MASK = 0x1000,
126 PP_DCEFCLK_DPM_MASK = 0x2000,
127 PP_OVERDRIVE_MASK = 0x4000,
128 PP_GFXOFF_MASK = 0x8000,
129 PP_ACG_MASK = 0x10000,
130};
131
112struct amd_ip_funcs { 132struct amd_ip_funcs {
113 /* Name of IP block */ 133 /* Name of IP block */
114 char *name; 134 char *name;
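PP_FEATURE_MASK names the bits of the powerplay feature bitmask (the same bits amdgpu exposes through its ppfeaturemask module parameter). Bits are tested and cleared in the usual way; a brief sketch with an illustrative feature_mask variable:

	unsigned int feature_mask = 0xffffffff; /* illustrative: everything on */

	/* gate gfxoff setup on its feature bit */
	if (feature_mask & PP_GFXOFF_MASK)
		pr_info("gfxoff enabled by feature mask\n");

	/* force overdrive off by clearing its bit */
	feature_mask &= ~PP_OVERDRIVE_MASK;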
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
index 4ccf9681c45d..721c61171045 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
@@ -3895,6 +3895,10 @@
3895#define mmCM0_CM_MEM_PWR_CTRL_BASE_IDX 2 3895#define mmCM0_CM_MEM_PWR_CTRL_BASE_IDX 2
3896#define mmCM0_CM_MEM_PWR_STATUS 0x0d33 3896#define mmCM0_CM_MEM_PWR_STATUS 0x0d33
3897#define mmCM0_CM_MEM_PWR_STATUS_BASE_IDX 2 3897#define mmCM0_CM_MEM_PWR_STATUS_BASE_IDX 2
3898#define mmCM0_CM_TEST_DEBUG_INDEX 0x0d35
3899#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
3900#define mmCM0_CM_TEST_DEBUG_DATA 0x0d36
3901#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
3898 3902
3899 3903
3900// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec 3904// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -4367,7 +4371,10 @@
4367#define mmCM1_CM_MEM_PWR_CTRL_BASE_IDX 2 4371#define mmCM1_CM_MEM_PWR_CTRL_BASE_IDX 2
4368#define mmCM1_CM_MEM_PWR_STATUS 0x0e4e 4372#define mmCM1_CM_MEM_PWR_STATUS 0x0e4e
4369#define mmCM1_CM_MEM_PWR_STATUS_BASE_IDX 2 4373#define mmCM1_CM_MEM_PWR_STATUS_BASE_IDX 2
4370 4374#define mmCM1_CM_TEST_DEBUG_INDEX 0x0e50
4375#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
4376#define mmCM1_CM_TEST_DEBUG_DATA 0x0e51
4377#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
4371 4378
4372// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec 4379// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
4373// base address: 0x399c 4380// base address: 0x399c
@@ -4839,7 +4846,10 @@
4839#define mmCM2_CM_MEM_PWR_CTRL_BASE_IDX 2 4846#define mmCM2_CM_MEM_PWR_CTRL_BASE_IDX 2
4840#define mmCM2_CM_MEM_PWR_STATUS 0x0f69 4847#define mmCM2_CM_MEM_PWR_STATUS 0x0f69
4841#define mmCM2_CM_MEM_PWR_STATUS_BASE_IDX 2 4848#define mmCM2_CM_MEM_PWR_STATUS_BASE_IDX 2
4842 4849#define mmCM2_CM_TEST_DEBUG_INDEX 0x0f6b
4850#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
4851#define mmCM2_CM_TEST_DEBUG_DATA 0x0f6c
4852#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
4843 4853
4844// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec 4854// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
4845// base address: 0x3e08 4855// base address: 0x3e08
@@ -5311,7 +5321,10 @@
5311#define mmCM3_CM_MEM_PWR_CTRL_BASE_IDX 2 5321#define mmCM3_CM_MEM_PWR_CTRL_BASE_IDX 2
5312#define mmCM3_CM_MEM_PWR_STATUS 0x1084 5322#define mmCM3_CM_MEM_PWR_STATUS 0x1084
5313#define mmCM3_CM_MEM_PWR_STATUS_BASE_IDX 2 5323#define mmCM3_CM_MEM_PWR_STATUS_BASE_IDX 2
5314 5324#define mmCM3_CM_TEST_DEBUG_INDEX 0x1086
5325#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
5326#define mmCM3_CM_TEST_DEBUG_DATA 0x1087
5327#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
5315 5328
5316// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec 5329// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
5317// base address: 0x4274 5330// base address: 0x4274
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
index e2a2f114bd8e..e7c0cad41081 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
@@ -14049,6 +14049,14 @@
14049#define CM0_CM_MEM_PWR_STATUS__RGAM_MEM_PWR_STATE__SHIFT 0x2 14049#define CM0_CM_MEM_PWR_STATUS__RGAM_MEM_PWR_STATE__SHIFT 0x2
14050#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L 14050#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
14051#define CM0_CM_MEM_PWR_STATUS__RGAM_MEM_PWR_STATE_MASK 0x0000000CL 14051#define CM0_CM_MEM_PWR_STATUS__RGAM_MEM_PWR_STATE_MASK 0x0000000CL
14052//CM0_CM_TEST_DEBUG_INDEX
14053#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
14054#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
14055#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
14056#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
14057//CM0_CM_TEST_DEBUG_DATA
14058#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
14059#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
14052 14060
14053 14061
14054// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec 14062// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h
new file mode 100644
index 000000000000..9e19e723081b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */
21#ifndef _df_1_7_DEFAULT_HEADER
22#define _df_1_7_DEFAULT_HEADER
23
24#define mmFabricConfigAccessControl_DEFAULT 0x00000000
25
26#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
new file mode 100644
index 000000000000..2b305dd021e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */
21#ifndef _df_1_7_OFFSET_HEADER
22#define _df_1_7_OFFSET_HEADER
23
24#define mmFabricConfigAccessControl 0x0410
25#define mmFabricConfigAccessControl_BASE_IDX 0
26
27#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
28#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0
29
30#define mmDF_CS_AON0_DramBaseAddress0 0x0044
31#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
32
33#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
new file mode 100644
index 000000000000..2ba849798924
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright (C) 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */
21#ifndef _df_1_7_SH_MASK_HEADER
22#define _df_1_7_SH_MASK_HEADER
23
24/* FabricConfigAccessControl */
25#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0
26#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1
27#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10
28#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L
29#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L
30#define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L
31
32/* DF_PIE_AON0_DfGlobalClkGater */
33#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
34#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL
35
36/* DF_CS_AON0_DramBaseAddress0 */
37#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
38#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
39#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
40#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
41#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
42#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
43#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
44#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
45#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
46#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
47
48#endif
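
For context, the new df_1_7_sh_mask.h entries follow the usual asic_reg header convention: every register field gets a __SHIFT/_MASK pair, and a field is decoded by masking the raw register value and shifting it down. A minimal sketch, illustrative only and not part of the patch:

static uint32_t df_v1_7_intlv_num_chan(uint32_t reg_val)
{
	/* Decode IntLvNumChan from a raw DramBaseAddress0 read:
	 * mask out the field, then shift it to bit 0. */
	return (reg_val & DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK)
		>> DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
}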
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index f696bbb643ef..7931502fa54f 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -632,6 +632,13 @@ typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2
632 ULONG ulReserved; 632 ULONG ulReserved;
633}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2; 633}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2;
634 634
635typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3
636{
637 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock;
638 USHORT usMclk_fcw_frac; //fractional divider of fcw = usMclk_fcw_frac/65536
639 USHORT usMclk_fcw_int; //integer divider of fcw
640}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3;
641
635//Input parameter of DynamicMemorySettingsTable 642//Input parameter of DynamicMemorySettingsTable
636//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag = COMPUTE_MEMORY_PLL_PARAM 643//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag = COMPUTE_MEMORY_PLL_PARAM
637typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER 644typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
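
The new V2_3 parameter block splits the memory-clock feedback word (fcw) into an integer part and a fractional part in units of 1/65536. The combined value is effectively 16.16 fixed point; a sketch, illustrative only:

static ULONG mclk_fcw_16_16(USHORT fcw_int, USHORT fcw_frac)
{
	/* Integer part in the high half, 1/65536 fractional units in
	 * the low half (ULONG/USHORT are the atombios 32/16-bit types). */
	return ((ULONG)fcw_int << 16) | fcw_frac;
}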
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 0f5ad54d3fd3..de177ce8ca80 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -501,6 +501,32 @@ enum atom_cooling_solution_id{
501 LIQUID_COOLING = 0x01 501 LIQUID_COOLING = 0x01
502}; 502};
503 503
504struct atom_firmware_info_v3_2 {
505 struct atom_common_table_header table_header;
506 uint32_t firmware_revision;
507 uint32_t bootup_sclk_in10khz;
508 uint32_t bootup_mclk_in10khz;
509 uint32_t firmware_capability; // enum atombios_firmware_capability
510 uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */
511 uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address
512 uint16_t bootup_vddc_mv;
513 uint16_t bootup_vddci_mv;
514 uint16_t bootup_mvddc_mv;
515 uint16_t bootup_vddgfx_mv;
516 uint8_t mem_module_id;
517 uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */
518 uint8_t reserved1[2];
519 uint32_t mc_baseaddr_high;
520 uint32_t mc_baseaddr_low;
521 uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def
522 uint8_t board_i2c_feature_gpio_id; // i2c id found in gpio_lut data table gpio_id
523 uint8_t board_i2c_feature_slave_addr;
524 uint8_t reserved3;
525 uint16_t bootup_mvddq_mv;
526 uint16_t bootup_mvpp_mv;
527 uint32_t zfbstartaddrin16mb;
528 uint32_t reserved2[3];
529};
504 530
505/* 531/*
506 *************************************************************************** 532 ***************************************************************************
@@ -1169,7 +1195,29 @@ struct atom_gfx_info_v2_2
1169 uint32_t rlc_gpu_timer_refclk; 1195 uint32_t rlc_gpu_timer_refclk;
1170}; 1196};
1171 1197
1172 1198struct atom_gfx_info_v2_3 {
1199 struct atom_common_table_header table_header;
1200 uint8_t gfxip_min_ver;
1201 uint8_t gfxip_max_ver;
1202 uint8_t max_shader_engines;
1203 uint8_t max_tile_pipes;
1204 uint8_t max_cu_per_sh;
1205 uint8_t max_sh_per_se;
1206 uint8_t max_backends_per_se;
1207 uint8_t max_texture_channel_caches;
1208 uint32_t regaddr_cp_dma_src_addr;
1209 uint32_t regaddr_cp_dma_src_addr_hi;
1210 uint32_t regaddr_cp_dma_dst_addr;
1211 uint32_t regaddr_cp_dma_dst_addr_hi;
1212 uint32_t regaddr_cp_dma_command;
1213 uint32_t regaddr_cp_status;
1214 uint32_t regaddr_rlc_gpu_clock_32;
1215 uint32_t rlc_gpu_timer_refclk;
1216 uint8_t active_cu_per_sh;
1217 uint8_t active_rb_per_se;
1218 uint16_t gcgoldenoffset;
1219 uint32_t rm21_sram_vmin_value;
1220};
1173 1221
1174/* 1222/*
1175 *************************************************************************** 1223 ***************************************************************************
@@ -1198,6 +1246,76 @@ struct atom_smu_info_v3_1
1198 uint8_t fw_ctf_polarity; // GPIO polarity for CTF 1246 uint8_t fw_ctf_polarity; // GPIO polarity for CTF
1199}; 1247};
1200 1248
1249struct atom_smu_info_v3_2 {
1250 struct atom_common_table_header table_header;
1251 uint8_t smuip_min_ver;
1252 uint8_t smuip_max_ver;
1253 uint8_t smu_rsd1;
1254 uint8_t gpuclk_ss_mode;
1255 uint16_t sclk_ss_percentage;
1256 uint16_t sclk_ss_rate_10hz;
1257 uint16_t gpuclk_ss_percentage; // in unit of 0.001%
1258 uint16_t gpuclk_ss_rate_10hz;
1259 uint32_t core_refclk_10khz;
1260 uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid
1261 uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching
1262 uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid
1263 uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event
1264 uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event, =0xff means invalid
1265 uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event
1266 uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
1267 uint8_t fw_ctf_polarity; // GPIO polarity for CTF
1268 uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
1269 uint8_t pcc_gpio_polarity; // GPIO polarity for PCC
1270 uint16_t smugoldenoffset;
1271 uint32_t gpupll_vco_freq_10khz;
1272 uint32_t bootup_smnclk_10khz;
1273 uint32_t bootup_socclk_10khz;
1274 uint32_t bootup_mp0clk_10khz;
1275 uint32_t bootup_mp1clk_10khz;
1276 uint32_t bootup_lclk_10khz;
1277 uint32_t bootup_dcefclk_10khz;
1278 uint32_t ctf_threshold_override_value;
1279 uint32_t reserved[5];
1280};
1281
1282struct atom_smu_info_v3_3 {
1283 struct atom_common_table_header table_header;
1284 uint8_t smuip_min_ver;
1285 uint8_t smuip_max_ver;
1286 uint8_t smu_rsd1;
1287 uint8_t gpuclk_ss_mode;
1288 uint16_t sclk_ss_percentage;
1289 uint16_t sclk_ss_rate_10hz;
1290 uint16_t gpuclk_ss_percentage; // in unit of 0.001%
1291 uint16_t gpuclk_ss_rate_10hz;
1292 uint32_t core_refclk_10khz;
1293 uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid
1294 uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching
1295 uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid
1296 uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event
1297 uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event, =0xff means invalid
1298 uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event
1299 uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
1300 uint8_t fw_ctf_polarity; // GPIO polarity for CTF
1301 uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
1302 uint8_t pcc_gpio_polarity; // GPIO polarity for PCC
1303 uint16_t smugoldenoffset;
1304 uint32_t gpupll_vco_freq_10khz;
1305 uint32_t bootup_smnclk_10khz;
1306 uint32_t bootup_socclk_10khz;
1307 uint32_t bootup_mp0clk_10khz;
1308 uint32_t bootup_mp1clk_10khz;
1309 uint32_t bootup_lclk_10khz;
1310 uint32_t bootup_dcefclk_10khz;
1311 uint32_t ctf_threshold_override_value;
1312 uint32_t syspll3_0_vco_freq_10khz;
1313 uint32_t syspll3_1_vco_freq_10khz;
1314 uint32_t bootup_fclk_10khz;
1315 uint32_t bootup_waflclk_10khz;
1316 uint32_t reserved[3];
1317};
1318
1201/* 1319/*
1202 *************************************************************************** 1320 ***************************************************************************
1203 Data Table smc_dpm_info structure 1321 Data Table smc_dpm_info structure
@@ -1283,7 +1401,6 @@ struct atom_smc_dpm_info_v4_1
1283 uint32_t boardreserved[10]; 1401 uint32_t boardreserved[10];
1284}; 1402};
1285 1403
1286
1287/* 1404/*
1288 *************************************************************************** 1405 ***************************************************************************
1289 Data Table asic_profiling_info structure 1406 Data Table asic_profiling_info structure
@@ -1864,6 +1981,55 @@ enum atom_smu9_syspll0_clock_id
1864 SMU9_SYSPLL0_DISPCLK_ID = 11, // DISPCLK 1981 SMU9_SYSPLL0_DISPCLK_ID = 11, // DISPCLK
1865}; 1982};
1866 1983
1984enum atom_smu11_syspll_id {
1985 SMU11_SYSPLL0_ID = 0,
1986 SMU11_SYSPLL1_0_ID = 1,
1987 SMU11_SYSPLL1_1_ID = 2,
1988 SMU11_SYSPLL1_2_ID = 3,
1989 SMU11_SYSPLL2_ID = 4,
1990 SMU11_SYSPLL3_0_ID = 5,
1991 SMU11_SYSPLL3_1_ID = 6,
1992};
1993
1994
1995enum atom_smu11_syspll0_clock_id {
1996 SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK
1997 SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK
1998 SMU11_SYSPLL0_DCLK_ID = 2, // DCLK
1999 SMU11_SYSPLL0_VCLK_ID = 3, // VCLK
2000 SMU11_SYSPLL0_ECLK_ID = 4, // ECLK
2001 SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK
2002};
2003
2004
2005enum atom_smu11_syspll1_0_clock_id {
2006 SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a
2007};
2008
2009enum atom_smu11_syspll1_1_clock_id {
2010 SMU11_SYSPLL1_0_UCLKB_ID = 0, // UCLK_b
2011};
2012
2013enum atom_smu11_syspll1_2_clock_id {
2014 SMU11_SYSPLL1_0_FCLK_ID = 0, // FCLK
2015};
2016
2017enum atom_smu11_syspll2_clock_id {
2018 SMU11_SYSPLL2_GFXCLK_ID = 0, // GFXCLK
2019};
2020
2021enum atom_smu11_syspll3_0_clock_id {
2022 SMU11_SYSPLL3_0_WAFCLK_ID = 0, // WAFCLK
2023 SMU11_SYSPLL3_0_DISPCLK_ID = 1, // DISPCLK
2024 SMU11_SYSPLL3_0_DPREFCLK_ID = 2, // DPREFCLK
2025};
2026
2027enum atom_smu11_syspll3_1_clock_id {
2028 SMU11_SYSPLL3_1_MP1CLK_ID = 0, // MP1CLK
2029 SMU11_SYSPLL3_1_SMNCLK_ID = 1, // SMNCLK
2030 SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK
2031};
2032
1867struct atom_get_smu_clock_info_output_parameters_v3_1 2033struct atom_get_smu_clock_info_output_parameters_v3_1
1868{ 2034{
1869 union { 2035 union {
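
Several of the new atom_smu_info fields above encode a GPIO bit position where 0xff means the pin is not wired up, so consumers are expected to validate before use. A hedged sketch, illustrative only:

static bool smu_info_gpio_valid(uint8_t gpio_bit)
{
	/* 0xff is the "invalid / not wired up" sentinel used by
	 * atom_smu_info_v3_2 and v3_3. */
	return gpio_bit != 0xff;
}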
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index f2814ae7ecdd..a69deb3a2ac0 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -42,20 +42,6 @@ enum cgs_ind_reg {
42 CGS_IND_REG__AUDIO_ENDPT 42 CGS_IND_REG__AUDIO_ENDPT
43}; 43};
44 44
45/**
46 * enum cgs_engine - Engines that can be statically power-gated
47 */
48enum cgs_engine {
49 CGS_ENGINE__UVD,
50 CGS_ENGINE__VCE,
51 CGS_ENGINE__VP8,
52 CGS_ENGINE__ACP_DMA,
53 CGS_ENGINE__ACP_DSP0,
54 CGS_ENGINE__ACP_DSP1,
55 CGS_ENGINE__ISP,
56 /* ... */
57};
58
59/* 45/*
60 * enum cgs_ucode_id - Firmware types for different IPs 46 * enum cgs_ucode_id - Firmware types for different IPs
61 */ 47 */
@@ -76,17 +62,6 @@ enum cgs_ucode_id {
76 CGS_UCODE_ID_MAXIMUM, 62 CGS_UCODE_ID_MAXIMUM,
77}; 63};
78 64
79/*
80 * enum cgs_resource_type - GPU resource type
81 */
82enum cgs_resource_type {
83 CGS_RESOURCE_TYPE_MMIO = 0,
84 CGS_RESOURCE_TYPE_FB,
85 CGS_RESOURCE_TYPE_IO,
86 CGS_RESOURCE_TYPE_DOORBELL,
87 CGS_RESOURCE_TYPE_ROM,
88};
89
90/** 65/**
91 * struct cgs_firmware_info - Firmware information 66 * struct cgs_firmware_info - Firmware information
92 */ 67 */
@@ -104,17 +79,6 @@ struct cgs_firmware_info {
104 bool is_kicker; 79 bool is_kicker;
105}; 80};
106 81
107struct cgs_mode_info {
108 uint32_t refresh_rate;
109 uint32_t vblank_time_us;
110};
111
112struct cgs_display_info {
113 uint32_t display_count;
114 uint32_t active_display_mask;
115 struct cgs_mode_info *mode_info;
116};
117
118typedef unsigned long cgs_handle_t; 82typedef unsigned long cgs_handle_t;
119 83
120/** 84/**
@@ -170,119 +134,18 @@ typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs
170#define CGS_WREG32_FIELD_IND(device, space, reg, field, val) \ 134#define CGS_WREG32_FIELD_IND(device, space, reg, field, val) \
171 cgs_write_ind_register(device, space, ix##reg, (cgs_read_ind_register(device, space, ix##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field)) 135 cgs_write_ind_register(device, space, ix##reg, (cgs_read_ind_register(device, space, ix##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
172 136
173/**
174 * cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
175 * @cgs_device: opaque device handle
176 * @resource_type: Type of Resource (MMIO, IO, ROM, FB, DOORBELL)
177 * @size: size of the region
178 * @offset: offset from the start of the region
179 * @resource_base: base address (not including offset) returned
180 *
181 * Return: 0 on success, -errno otherwise
182 */
183typedef int (*cgs_get_pci_resource_t)(struct cgs_device *cgs_device,
184 enum cgs_resource_type resource_type,
185 uint64_t size,
186 uint64_t offset,
187 uint64_t *resource_base);
188
189/**
190 * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
191 * @cgs_device: opaque device handle
192 * @table: data table index
193 * @size: size of the table (output, may be NULL)
194 * @frev: table format revision (output, may be NULL)
195 * @crev: table content revision (output, may be NULL)
196 *
197 * Return: Pointer to start of the table, or NULL on failure
198 */
199typedef const void *(*cgs_atom_get_data_table_t)(
200 struct cgs_device *cgs_device, unsigned table,
201 uint16_t *size, uint8_t *frev, uint8_t *crev);
202
203/**
204 * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions
205 * @cgs_device: opaque device handle
206 * @table: data table index
207 * @frev: table format revision (output, may be NULL)
208 * @crev: table content revision (output, may be NULL)
209 *
210 * Return: 0 on success, -errno otherwise
211 */
212typedef int (*cgs_atom_get_cmd_table_revs_t)(struct cgs_device *cgs_device, unsigned table,
213 uint8_t *frev, uint8_t *crev);
214
215/**
216 * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table
217 * @cgs_device: opaque device handle
218 * @table: command table index
219 * @args: arguments
220 *
221 * Return: 0 on success, -errno otherwise
222 */
223typedef int (*cgs_atom_exec_cmd_table_t)(struct cgs_device *cgs_device,
224 unsigned table, void *args);
225
226/**
227 * cgs_get_firmware_info - Get the firmware information from core driver
228 * @cgs_device: opaque device handle
229 * @type: the firmware type
230 * @info: returned firmware information
231 *
232 * Return: 0 on success, -errno otherwise
233 */
234typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device, 137typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
235 enum cgs_ucode_id type, 138 enum cgs_ucode_id type,
236 struct cgs_firmware_info *info); 139 struct cgs_firmware_info *info);
237 140
238typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
239 enum cgs_ucode_id type);
240
241typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
242 enum amd_ip_block_type block_type,
243 enum amd_powergating_state state);
244
245typedef int(*cgs_set_clockgating_state)(struct cgs_device *cgs_device,
246 enum amd_ip_block_type block_type,
247 enum amd_clockgating_state state);
248
249typedef int(*cgs_get_active_displays_info)(
250 struct cgs_device *cgs_device,
251 struct cgs_display_info *info);
252
253typedef int (*cgs_notify_dpm_enabled)(struct cgs_device *cgs_device, bool enabled);
254
255typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
256
257typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
258
259typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
260
261struct cgs_ops { 141struct cgs_ops {
262 /* MMIO access */ 142 /* MMIO access */
263 cgs_read_register_t read_register; 143 cgs_read_register_t read_register;
264 cgs_write_register_t write_register; 144 cgs_write_register_t write_register;
265 cgs_read_ind_register_t read_ind_register; 145 cgs_read_ind_register_t read_ind_register;
266 cgs_write_ind_register_t write_ind_register; 146 cgs_write_ind_register_t write_ind_register;
267 /* PCI resources */
268 cgs_get_pci_resource_t get_pci_resource;
269 /* ATOM BIOS */
270 cgs_atom_get_data_table_t atom_get_data_table;
271 cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
272 cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
273 /* Firmware Info */ 147 /* Firmware Info */
274 cgs_get_firmware_info get_firmware_info; 148 cgs_get_firmware_info get_firmware_info;
275 cgs_rel_firmware rel_firmware;
276 /* cg pg interface*/
277 cgs_set_powergating_state set_powergating_state;
278 cgs_set_clockgating_state set_clockgating_state;
279 /* display manager */
280 cgs_get_active_displays_info get_active_displays_info;
281 /* notify dpm enabled */
282 cgs_notify_dpm_enabled notify_dpm_enabled;
283 cgs_is_virtualization_enabled_t is_virtualization_enabled;
284 cgs_enter_safe_mode enter_safe_mode;
285 cgs_lock_grbm_idx lock_grbm_idx;
286}; 149};
287 150
288struct cgs_os_ops; /* To be defined in OS-specific CGS header */ 151
@@ -309,40 +172,7 @@ struct cgs_device
309#define cgs_write_ind_register(dev,space,index,value) \ 172#define cgs_write_ind_register(dev,space,index,value) \
310 CGS_CALL(write_ind_register,dev,space,index,value) 173 CGS_CALL(write_ind_register,dev,space,index,value)
311 174
312#define cgs_atom_get_data_table(dev,table,size,frev,crev) \
313 CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
314#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev) \
315 CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev)
316#define cgs_atom_exec_cmd_table(dev,table,args) \
317 CGS_CALL(atom_exec_cmd_table,dev,table,args)
318
319#define cgs_get_firmware_info(dev, type, info) \ 175#define cgs_get_firmware_info(dev, type, info) \
320 CGS_CALL(get_firmware_info, dev, type, info) 176 CGS_CALL(get_firmware_info, dev, type, info)
321#define cgs_rel_firmware(dev, type) \
322 CGS_CALL(rel_firmware, dev, type)
323#define cgs_set_powergating_state(dev, block_type, state) \
324 CGS_CALL(set_powergating_state, dev, block_type, state)
325#define cgs_set_clockgating_state(dev, block_type, state) \
326 CGS_CALL(set_clockgating_state, dev, block_type, state)
327#define cgs_notify_dpm_enabled(dev, enabled) \
328 CGS_CALL(notify_dpm_enabled, dev, enabled)
329
330#define cgs_get_active_displays_info(dev, info) \
331 CGS_CALL(get_active_displays_info, dev, info)
332
333#define cgs_get_pci_resource(cgs_device, resource_type, size, offset, \
334 resource_base) \
335 CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
336 resource_base)
337
338#define cgs_is_virtualization_enabled(cgs_device) \
339 CGS_CALL(is_virtualization_enabled, cgs_device)
340
341#define cgs_enter_safe_mode(cgs_device, en) \
342 CGS_CALL(enter_safe_mode, cgs_device, en)
343
344#define cgs_lock_grbm_idx(cgs_device, lock) \
345 CGS_CALL(lock_grbm_idx, cgs_device, lock)
346
347 177
348#endif /* _CGS_COMMON_H */ 178#endif /* _CGS_COMMON_H */
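
After this trim, CGS is reduced to the MMIO accessors plus firmware info; every dropped service is now reached directly through amdgpu (see the powerplay changes below, e.g. amdgpu_device_ip_set_clockgating_state). A minimal usage sketch of the surviving service, assuming a valid cgs_device handle and the CGS_UCODE_ID_SMU enumerator:

struct cgs_firmware_info info = {0};
int err;

/* CGS_CALL dispatches through the remaining cgs_ops vtable entry. */
err = cgs_get_firmware_info(cgs_device, CGS_UCODE_ID_SMU, &info);
if (err)
	return err;
/* On success the fields of info describe the loaded SMU firmware. */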
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 5c840c022b52..06f08f34a110 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -94,6 +94,7 @@ enum pp_clock_type {
94 PP_PCIE, 94 PP_PCIE,
95 OD_SCLK, 95 OD_SCLK,
96 OD_MCLK, 96 OD_MCLK,
97 OD_RANGE,
97}; 98};
98 99
99enum amd_pp_sensors { 100enum amd_pp_sensors {
@@ -149,13 +150,6 @@ struct pp_states_info {
149 uint32_t states[16]; 150 uint32_t states[16];
150}; 151};
151 152
152struct pp_gpu_power {
153 uint32_t vddc_power;
154 uint32_t vddci_power;
155 uint32_t max_gpu_power;
156 uint32_t average_gpu_power;
157};
158
159#define PP_GROUP_MASK 0xF0000000 153#define PP_GROUP_MASK 0xF0000000
160#define PP_GROUP_SHIFT 28 154#define PP_GROUP_SHIFT 28
161 155
@@ -246,11 +240,6 @@ struct amd_pm_funcs {
246 int (*load_firmware)(void *handle); 240 int (*load_firmware)(void *handle);
247 int (*wait_for_fw_loading_complete)(void *handle); 241 int (*wait_for_fw_loading_complete)(void *handle);
248 int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id); 242 int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
249 int (*notify_smu_memory_info)(void *handle, uint32_t virtual_addr_low,
250 uint32_t virtual_addr_hi,
251 uint32_t mc_addr_low,
252 uint32_t mc_addr_hi,
253 uint32_t size);
254 int (*set_power_limit)(void *handle, uint32_t n); 243 int (*set_power_limit)(void *handle, uint32_t n);
255 int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit); 244 int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
256/* export to DC */ 245/* export to DC */
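
OD_RANGE extends pp_clock_type so that print_clock_levels() implementations can report the allowed overdrive ranges alongside the existing OD_SCLK/OD_MCLK tables. A hedged usage sketch, assuming the hwmgr print_clock_levels signature and a caller-provided buffer:

static int print_od_range(struct pp_hwmgr *hwmgr, char *buf)
{
	/* Ask a hwmgr to print its overdrive range table into buf,
	 * as the pp_od_clk_voltage sysfs path might. */
	if (!hwmgr->hwmgr_func->print_clock_levels)
		return -EINVAL;
	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, OD_RANGE, buf);
}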
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7e8ad30d98e2..b493369e6d0f 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -25,30 +25,16 @@
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/gfp.h> 26#include <linux/gfp.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/firmware.h>
28#include "amd_shared.h" 29#include "amd_shared.h"
29#include "amd_powerplay.h" 30#include "amd_powerplay.h"
30#include "power_state.h" 31#include "power_state.h"
31#include "amdgpu.h" 32#include "amdgpu.h"
32#include "hwmgr.h" 33#include "hwmgr.h"
33 34
34#define PP_DPM_DISABLED 0xCCCC
35
36static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
37 enum amd_pm_state_type *user_state);
38 35
39static const struct amd_pm_funcs pp_dpm_funcs; 36static const struct amd_pm_funcs pp_dpm_funcs;
40 37
41static inline int pp_check(struct pp_hwmgr *hwmgr)
42{
43 if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
44 return -EINVAL;
45
46 if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
47 return PP_DPM_DISABLED;
48
49 return 0;
50}
51
52static int amd_powerplay_create(struct amdgpu_device *adev) 38static int amd_powerplay_create(struct amdgpu_device *adev)
53{ 39{
54 struct pp_hwmgr *hwmgr; 40 struct pp_hwmgr *hwmgr;
@@ -61,19 +47,21 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
61 return -ENOMEM; 47 return -ENOMEM;
62 48
63 hwmgr->adev = adev; 49 hwmgr->adev = adev;
64 hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false; 50 hwmgr->not_vf = !amdgpu_sriov_vf(adev);
51 hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
65 hwmgr->device = amdgpu_cgs_create_device(adev); 52 hwmgr->device = amdgpu_cgs_create_device(adev);
66 mutex_init(&hwmgr->smu_lock); 53 mutex_init(&hwmgr->smu_lock);
67 hwmgr->chip_family = adev->family; 54 hwmgr->chip_family = adev->family;
68 hwmgr->chip_id = adev->asic_type; 55 hwmgr->chip_id = adev->asic_type;
69 hwmgr->feature_mask = amdgpu_pp_feature_mask; 56 hwmgr->feature_mask = adev->powerplay.pp_feature;
57 hwmgr->display_config = &adev->pm.pm_display_cfg;
70 adev->powerplay.pp_handle = hwmgr; 58 adev->powerplay.pp_handle = hwmgr;
71 adev->powerplay.pp_funcs = &pp_dpm_funcs; 59 adev->powerplay.pp_funcs = &pp_dpm_funcs;
72 return 0; 60 return 0;
73} 61}
74 62
75 63
76static int amd_powerplay_destroy(struct amdgpu_device *adev) 64static void amd_powerplay_destroy(struct amdgpu_device *adev)
77{ 65{
78 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 66 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
79 67
@@ -82,8 +70,6 @@ static int amd_powerplay_destroy(struct amdgpu_device *adev)
82 70
83 kfree(hwmgr); 71 kfree(hwmgr);
84 hwmgr = NULL; 72 hwmgr = NULL;
85
86 return 0;
87} 73}
88 74
89static int pp_early_init(void *handle) 75static int pp_early_init(void *handle)
@@ -109,18 +95,9 @@ static int pp_sw_init(void *handle)
109 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 95 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
110 int ret = 0; 96 int ret = 0;
111 97
112 ret = pp_check(hwmgr); 98 ret = hwmgr_sw_init(hwmgr);
113
114 if (ret >= 0) {
115 if (hwmgr->smumgr_funcs->smu_init == NULL)
116 return -EINVAL;
117
118 ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
119 99
120 phm_register_irq_handlers(hwmgr); 100 pr_debug("powerplay sw init %s\n", ret ? "failed" : "succeeded");
121
122 pr_debug("amdgpu: powerplay sw initialized\n");
123 }
124 101
125 return ret; 102 return ret;
126} 103}
@@ -129,16 +106,14 @@ static int pp_sw_fini(void *handle)
129{ 106{
130 struct amdgpu_device *adev = handle; 107 struct amdgpu_device *adev = handle;
131 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 108 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
132 int ret = 0;
133 109
134 ret = pp_check(hwmgr); 110 hwmgr_sw_fini(hwmgr);
135 if (ret >= 0) {
136 if (hwmgr->smumgr_funcs->smu_fini != NULL)
137 hwmgr->smumgr_funcs->smu_fini(hwmgr);
138 }
139 111
140 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) 112 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
113 release_firmware(adev->pm.fw);
114 adev->pm.fw = NULL;
141 amdgpu_ucode_fini_bo(adev); 115 amdgpu_ucode_fini_bo(adev);
116 }
142 117
143 return 0; 118 return 0;
144} 119}
@@ -152,55 +127,76 @@ static int pp_hw_init(void *handle)
152 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) 127 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
153 amdgpu_ucode_init_bo(adev); 128 amdgpu_ucode_init_bo(adev);
154 129
155 ret = pp_check(hwmgr); 130 ret = hwmgr_hw_init(hwmgr);
156 131
157 if (ret >= 0) { 132 if (ret)
158 if (hwmgr->smumgr_funcs->start_smu == NULL) 133 pr_err("powerplay hw init failed\n");
159 return -EINVAL;
160 134
161 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
162 pr_err("smc start failed\n");
163 hwmgr->smumgr_funcs->smu_fini(hwmgr);
164 return -EINVAL;
165 }
166 if (ret == PP_DPM_DISABLED)
167 goto exit;
168 ret = hwmgr_hw_init(hwmgr);
169 if (ret)
170 goto exit;
171 }
172 return ret; 135 return ret;
173exit:
174 hwmgr->pm_en = 0;
175 cgs_notify_dpm_enabled(hwmgr->device, false);
176 return 0;
177
178} 136}
179 137
180static int pp_hw_fini(void *handle) 138static int pp_hw_fini(void *handle)
181{ 139{
182 struct amdgpu_device *adev = handle; 140 struct amdgpu_device *adev = handle;
183 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 141 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
184 int ret = 0;
185 142
186 ret = pp_check(hwmgr); 143 hwmgr_hw_fini(hwmgr);
187 if (ret == 0)
188 hwmgr_hw_fini(hwmgr);
189 144
190 return 0; 145 return 0;
191} 146}
192 147
148static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
149{
150 int r = -EINVAL;
151 void *cpu_ptr = NULL;
152 uint64_t gpu_addr;
153 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
154
155 if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
156 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
157 &adev->pm.smu_prv_buffer,
158 &gpu_addr,
159 &cpu_ptr)) {
160 DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
161 return;
162 }
163
164 if (hwmgr->hwmgr_func->notify_cac_buffer_info)
165 r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
166 lower_32_bits((unsigned long)cpu_ptr),
167 upper_32_bits((unsigned long)cpu_ptr),
168 lower_32_bits(gpu_addr),
169 upper_32_bits(gpu_addr),
170 adev->pm.smu_prv_buffer_size);
171
172 if (r) {
173 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
174 adev->pm.smu_prv_buffer = NULL;
175 DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
176 }
177}
178
193static int pp_late_init(void *handle) 179static int pp_late_init(void *handle)
194{ 180{
195 struct amdgpu_device *adev = handle; 181 struct amdgpu_device *adev = handle;
196 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 182 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
197 int ret = 0; 183 int ret;
198
199 ret = pp_check(hwmgr);
200 184
201 if (ret == 0) 185 if (hwmgr && hwmgr->pm_en) {
202 pp_dpm_dispatch_tasks(hwmgr, 186 mutex_lock(&hwmgr->smu_lock);
187 hwmgr_handle_task(hwmgr,
203 AMD_PP_TASK_COMPLETE_INIT, NULL); 188 AMD_PP_TASK_COMPLETE_INIT, NULL);
189 mutex_unlock(&hwmgr->smu_lock);
190 }
191 if (adev->pm.smu_prv_buffer_size != 0)
192 pp_reserve_vram_for_smu(adev);
193
194 if (hwmgr->hwmgr_func->gfx_off_control &&
195 (hwmgr->feature_mask & PP_GFXOFF_MASK)) {
196 ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true);
197 if (ret)
198 pr_err("gfx off enabling failed!\n");
199 }
204 200
205 return 0; 201 return 0;
206} 202}
@@ -209,6 +205,8 @@ static void pp_late_fini(void *handle)
209{ 205{
210 struct amdgpu_device *adev = handle; 206 struct amdgpu_device *adev = handle;
211 207
208 if (adev->pm.smu_prv_buffer)
209 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
212 amd_powerplay_destroy(adev); 210 amd_powerplay_destroy(adev);
213} 211}
214 212
@@ -233,12 +231,18 @@ static int pp_set_powergating_state(void *handle,
233{ 231{
234 struct amdgpu_device *adev = handle; 232 struct amdgpu_device *adev = handle;
235 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 233 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
236 int ret = 0; 234 int ret;
237 235
238 ret = pp_check(hwmgr); 236 if (!hwmgr || !hwmgr->pm_en)
237 return 0;
239 238
240 if (ret) 239 if (hwmgr->hwmgr_func->gfx_off_control) {
241 return ret; 240 /* Enable/disable GFX off through SMU */
241 ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
242 state == AMD_PG_STATE_GATE);
243 if (ret)
244 pr_err("gfx off control failed!\n");
245 }
242 246
243 if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { 247 if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
244 pr_info("%s was not implemented.\n", __func__); 248 pr_info("%s was not implemented.\n", __func__);
@@ -254,38 +258,16 @@ static int pp_suspend(void *handle)
254{ 258{
255 struct amdgpu_device *adev = handle; 259 struct amdgpu_device *adev = handle;
256 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 260 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
257 int ret = 0;
258 261
259 ret = pp_check(hwmgr); 262 return hwmgr_suspend(hwmgr);
260 if (ret == 0)
261 hwmgr_hw_suspend(hwmgr);
262 return 0;
263} 263}
264 264
265static int pp_resume(void *handle) 265static int pp_resume(void *handle)
266{ 266{
267 struct amdgpu_device *adev = handle; 267 struct amdgpu_device *adev = handle;
268 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 268 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
269 int ret;
270
271 ret = pp_check(hwmgr);
272
273 if (ret < 0)
274 return ret;
275
276 if (hwmgr->smumgr_funcs->start_smu == NULL)
277 return -EINVAL;
278
279 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
280 pr_err("smc start failed\n");
281 hwmgr->smumgr_funcs->smu_fini(hwmgr);
282 return -EINVAL;
283 }
284
285 if (ret == PP_DPM_DISABLED)
286 return 0;
287 269
288 return hwmgr_hw_resume(hwmgr); 270 return hwmgr_resume(hwmgr);
289} 271}
290 272
291static int pp_set_clockgating_state(void *handle, 273static int pp_set_clockgating_state(void *handle,
@@ -334,12 +316,9 @@ static int pp_dpm_fw_loading_complete(void *handle)
334static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id) 316static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
335{ 317{
336 struct pp_hwmgr *hwmgr = handle; 318 struct pp_hwmgr *hwmgr = handle;
337 int ret = 0;
338
339 ret = pp_check(hwmgr);
340 319
341 if (ret) 320 if (!hwmgr || !hwmgr->pm_en)
342 return ret; 321 return -EINVAL;
343 322
344 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { 323 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
345 pr_info("%s was not implemented.\n", __func__); 324 pr_info("%s was not implemented.\n", __func__);
@@ -362,10 +341,10 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
362 if (*level & profile_mode_mask) { 341 if (*level & profile_mode_mask) {
363 hwmgr->saved_dpm_level = hwmgr->dpm_level; 342 hwmgr->saved_dpm_level = hwmgr->dpm_level;
364 hwmgr->en_umd_pstate = true; 343 hwmgr->en_umd_pstate = true;
365 cgs_set_clockgating_state(hwmgr->device, 344 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
366 AMD_IP_BLOCK_TYPE_GFX, 345 AMD_IP_BLOCK_TYPE_GFX,
367 AMD_CG_STATE_UNGATE); 346 AMD_CG_STATE_UNGATE);
368 cgs_set_powergating_state(hwmgr->device, 347 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
369 AMD_IP_BLOCK_TYPE_GFX, 348 AMD_IP_BLOCK_TYPE_GFX,
370 AMD_PG_STATE_UNGATE); 349 AMD_PG_STATE_UNGATE);
371 } 350 }
@@ -375,10 +354,10 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
375 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) 354 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
376 *level = hwmgr->saved_dpm_level; 355 *level = hwmgr->saved_dpm_level;
377 hwmgr->en_umd_pstate = false; 356 hwmgr->en_umd_pstate = false;
378 cgs_set_clockgating_state(hwmgr->device, 357 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
379 AMD_IP_BLOCK_TYPE_GFX, 358 AMD_IP_BLOCK_TYPE_GFX,
380 AMD_CG_STATE_GATE); 359 AMD_CG_STATE_GATE);
381 cgs_set_powergating_state(hwmgr->device, 360 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
382 AMD_IP_BLOCK_TYPE_GFX, 361 AMD_IP_BLOCK_TYPE_GFX,
383 AMD_PG_STATE_GATE); 362 AMD_PG_STATE_GATE);
384 } 363 }
@@ -389,12 +368,9 @@ static int pp_dpm_force_performance_level(void *handle,
389 enum amd_dpm_forced_level level) 368 enum amd_dpm_forced_level level)
390{ 369{
391 struct pp_hwmgr *hwmgr = handle; 370 struct pp_hwmgr *hwmgr = handle;
392 int ret = 0;
393 371
394 ret = pp_check(hwmgr); 372 if (!hwmgr || !hwmgr->pm_en)
395 373 return -EINVAL;
396 if (ret)
397 return ret;
398 374
399 if (level == hwmgr->dpm_level) 375 if (level == hwmgr->dpm_level)
400 return 0; 376 return 0;
@@ -412,13 +388,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
412 void *handle) 388 void *handle)
413{ 389{
414 struct pp_hwmgr *hwmgr = handle; 390 struct pp_hwmgr *hwmgr = handle;
415 int ret = 0;
416 enum amd_dpm_forced_level level; 391 enum amd_dpm_forced_level level;
417 392
418 ret = pp_check(hwmgr); 393 if (!hwmgr || !hwmgr->pm_en)
419 394 return -EINVAL;
420 if (ret)
421 return ret;
422 395
423 mutex_lock(&hwmgr->smu_lock); 396 mutex_lock(&hwmgr->smu_lock);
424 level = hwmgr->dpm_level; 397 level = hwmgr->dpm_level;
@@ -429,13 +402,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
429static uint32_t pp_dpm_get_sclk(void *handle, bool low) 402static uint32_t pp_dpm_get_sclk(void *handle, bool low)
430{ 403{
431 struct pp_hwmgr *hwmgr = handle; 404 struct pp_hwmgr *hwmgr = handle;
432 int ret = 0;
433 uint32_t clk = 0; 405 uint32_t clk = 0;
434 406
435 ret = pp_check(hwmgr); 407 if (!hwmgr || !hwmgr->pm_en)
436 408 return 0;
437 if (ret)
438 return ret;
439 409
440 if (hwmgr->hwmgr_func->get_sclk == NULL) { 410 if (hwmgr->hwmgr_func->get_sclk == NULL) {
441 pr_info("%s was not implemented.\n", __func__); 411 pr_info("%s was not implemented.\n", __func__);
@@ -450,13 +420,10 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
450static uint32_t pp_dpm_get_mclk(void *handle, bool low) 420static uint32_t pp_dpm_get_mclk(void *handle, bool low)
451{ 421{
452 struct pp_hwmgr *hwmgr = handle; 422 struct pp_hwmgr *hwmgr = handle;
453 int ret = 0;
454 uint32_t clk = 0; 423 uint32_t clk = 0;
455 424
456 ret = pp_check(hwmgr); 425 if (!hwmgr || !hwmgr->pm_en)
457 426 return 0;
458 if (ret)
459 return ret;
460 427
461 if (hwmgr->hwmgr_func->get_mclk == NULL) { 428 if (hwmgr->hwmgr_func->get_mclk == NULL) {
462 pr_info("%s was not implemented.\n", __func__); 429 pr_info("%s was not implemented.\n", __func__);
@@ -471,11 +438,8 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
471static void pp_dpm_powergate_vce(void *handle, bool gate) 438static void pp_dpm_powergate_vce(void *handle, bool gate)
472{ 439{
473 struct pp_hwmgr *hwmgr = handle; 440 struct pp_hwmgr *hwmgr = handle;
474 int ret = 0;
475 441
476 ret = pp_check(hwmgr); 442 if (!hwmgr || !hwmgr->pm_en)
477
478 if (ret)
479 return; 443 return;
480 444
481 if (hwmgr->hwmgr_func->powergate_vce == NULL) { 445 if (hwmgr->hwmgr_func->powergate_vce == NULL) {
@@ -490,11 +454,8 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
490static void pp_dpm_powergate_uvd(void *handle, bool gate) 454static void pp_dpm_powergate_uvd(void *handle, bool gate)
491{ 455{
492 struct pp_hwmgr *hwmgr = handle; 456 struct pp_hwmgr *hwmgr = handle;
493 int ret = 0;
494
495 ret = pp_check(hwmgr);
496 457
497 if (ret) 458 if (!hwmgr || !hwmgr->pm_en)
498 return; 459 return;
499 460
500 if (hwmgr->hwmgr_func->powergate_uvd == NULL) { 461 if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
@@ -512,10 +473,8 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
512 int ret = 0; 473 int ret = 0;
513 struct pp_hwmgr *hwmgr = handle; 474 struct pp_hwmgr *hwmgr = handle;
514 475
515 ret = pp_check(hwmgr); 476 if (!hwmgr || !hwmgr->pm_en)
516 477 return -EINVAL;
517 if (ret)
518 return ret;
519 478
520 mutex_lock(&hwmgr->smu_lock); 479 mutex_lock(&hwmgr->smu_lock);
521 ret = hwmgr_handle_task(hwmgr, task_id, user_state); 480 ret = hwmgr_handle_task(hwmgr, task_id, user_state);
@@ -528,15 +487,9 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
528{ 487{
529 struct pp_hwmgr *hwmgr = handle; 488 struct pp_hwmgr *hwmgr = handle;
530 struct pp_power_state *state; 489 struct pp_power_state *state;
531 int ret = 0;
532 enum amd_pm_state_type pm_type; 490 enum amd_pm_state_type pm_type;
533 491
534 ret = pp_check(hwmgr); 492 if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
535
536 if (ret)
537 return ret;
538
539 if (hwmgr->current_ps == NULL)
540 return -EINVAL; 493 return -EINVAL;
541 494
542 mutex_lock(&hwmgr->smu_lock); 495 mutex_lock(&hwmgr->smu_lock);
@@ -568,11 +521,8 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
568static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) 521static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
569{ 522{
570 struct pp_hwmgr *hwmgr = handle; 523 struct pp_hwmgr *hwmgr = handle;
571 int ret = 0;
572
573 ret = pp_check(hwmgr);
574 524
575 if (ret) 525 if (!hwmgr || !hwmgr->pm_en)
576 return; 526 return;
577 527
578 if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { 528 if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
@@ -587,13 +537,10 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
587static uint32_t pp_dpm_get_fan_control_mode(void *handle) 537static uint32_t pp_dpm_get_fan_control_mode(void *handle)
588{ 538{
589 struct pp_hwmgr *hwmgr = handle; 539 struct pp_hwmgr *hwmgr = handle;
590 int ret = 0;
591 uint32_t mode = 0; 540 uint32_t mode = 0;
592 541
593 ret = pp_check(hwmgr); 542 if (!hwmgr || !hwmgr->pm_en)
594 543 return 0;
595 if (ret)
596 return ret;
597 544
598 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { 545 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
599 pr_info("%s was not implemented.\n", __func__); 546 pr_info("%s was not implemented.\n", __func__);
@@ -610,10 +557,8 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
610 struct pp_hwmgr *hwmgr = handle; 557 struct pp_hwmgr *hwmgr = handle;
611 int ret = 0; 558 int ret = 0;
612 559
613 ret = pp_check(hwmgr); 560 if (!hwmgr || !hwmgr->pm_en)
614 561 return -EINVAL;
615 if (ret)
616 return ret;
617 562
618 if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) { 563 if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
619 pr_info("%s was not implemented.\n", __func__); 564 pr_info("%s was not implemented.\n", __func__);
@@ -630,10 +575,8 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
630 struct pp_hwmgr *hwmgr = handle; 575 struct pp_hwmgr *hwmgr = handle;
631 int ret = 0; 576 int ret = 0;
632 577
633 ret = pp_check(hwmgr); 578 if (!hwmgr || !hwmgr->pm_en)
634 579 return -EINVAL;
635 if (ret)
636 return ret;
637 580
638 if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) { 581 if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
639 pr_info("%s was not implemented.\n", __func__); 582 pr_info("%s was not implemented.\n", __func__);
@@ -651,10 +594,8 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
651 struct pp_hwmgr *hwmgr = handle; 594 struct pp_hwmgr *hwmgr = handle;
652 int ret = 0; 595 int ret = 0;
653 596
654 ret = pp_check(hwmgr); 597 if (!hwmgr || !hwmgr->pm_en)
655 598 return -EINVAL;
656 if (ret)
657 return ret;
658 599
659 if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL) 600 if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
660 return -EINVAL; 601 return -EINVAL;
@@ -670,16 +611,10 @@ static int pp_dpm_get_pp_num_states(void *handle,
670{ 611{
671 struct pp_hwmgr *hwmgr = handle; 612 struct pp_hwmgr *hwmgr = handle;
672 int i; 613 int i;
673 int ret = 0;
674 614
675 memset(data, 0, sizeof(*data)); 615 memset(data, 0, sizeof(*data));
676 616
677 ret = pp_check(hwmgr); 617 if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
678
679 if (ret)
680 return ret;
681
682 if (hwmgr->ps == NULL)
683 return -EINVAL; 618 return -EINVAL;
684 619
685 mutex_lock(&hwmgr->smu_lock); 620 mutex_lock(&hwmgr->smu_lock);
@@ -713,15 +648,9 @@ static int pp_dpm_get_pp_num_states(void *handle,
713static int pp_dpm_get_pp_table(void *handle, char **table) 648static int pp_dpm_get_pp_table(void *handle, char **table)
714{ 649{
715 struct pp_hwmgr *hwmgr = handle; 650 struct pp_hwmgr *hwmgr = handle;
716 int ret = 0;
717 int size = 0; 651 int size = 0;
718 652
719 ret = pp_check(hwmgr); 653 if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
720
721 if (ret)
722 return ret;
723
724 if (!hwmgr->soft_pp_table)
725 return -EINVAL; 654 return -EINVAL;
726 655
727 mutex_lock(&hwmgr->smu_lock); 656 mutex_lock(&hwmgr->smu_lock);
@@ -736,10 +665,6 @@ static int amd_powerplay_reset(void *handle)
736 struct pp_hwmgr *hwmgr = handle; 665 struct pp_hwmgr *hwmgr = handle;
737 int ret; 666 int ret;
738 667
739 ret = pp_check(hwmgr);
740 if (ret)
741 return ret;
742
743 ret = hwmgr_hw_fini(hwmgr); 668 ret = hwmgr_hw_fini(hwmgr);
744 if (ret) 669 if (ret)
745 return ret; 670 return ret;
@@ -754,40 +679,38 @@ static int amd_powerplay_reset(void *handle)
754static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) 679static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
755{ 680{
756 struct pp_hwmgr *hwmgr = handle; 681 struct pp_hwmgr *hwmgr = handle;
757 int ret = 0; 682 int ret = -ENOMEM;
758 683
759 ret = pp_check(hwmgr); 684 if (!hwmgr || !hwmgr->pm_en)
760 685 return -EINVAL;
761 if (ret)
762 return ret;
763 686
764 mutex_lock(&hwmgr->smu_lock); 687 mutex_lock(&hwmgr->smu_lock);
765 if (!hwmgr->hardcode_pp_table) { 688 if (!hwmgr->hardcode_pp_table) {
766 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table, 689 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
767 hwmgr->soft_pp_table_size, 690 hwmgr->soft_pp_table_size,
768 GFP_KERNEL); 691 GFP_KERNEL);
769 if (!hwmgr->hardcode_pp_table) { 692 if (!hwmgr->hardcode_pp_table)
770 mutex_unlock(&hwmgr->smu_lock); 693 goto err;
771 return -ENOMEM;
772 }
773 } 694 }
774 695
775 memcpy(hwmgr->hardcode_pp_table, buf, size); 696 memcpy(hwmgr->hardcode_pp_table, buf, size);
776 697
777 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table; 698 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
778 mutex_unlock(&hwmgr->smu_lock);
779 699
780 ret = amd_powerplay_reset(handle); 700 ret = amd_powerplay_reset(handle);
781 if (ret) 701 if (ret)
782 return ret; 702 goto err;
783 703
784 if (hwmgr->hwmgr_func->avfs_control) { 704 if (hwmgr->hwmgr_func->avfs_control) {
785 ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false); 705 ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
786 if (ret) 706 if (ret)
787 return ret; 707 goto err;
788 } 708 }
789 709 mutex_unlock(&hwmgr->smu_lock);
790 return 0; 710 return 0;
711err:
712 mutex_unlock(&hwmgr->smu_lock);
713 return ret;
791} 714}
792 715
793static int pp_dpm_force_clock_level(void *handle, 716static int pp_dpm_force_clock_level(void *handle,
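
The pp_dpm_set_pp_table rework above also changes the locking shape: smu_lock is now held across the reset and every failure funnels through a single unlock site. The pattern in isolation, with do_step() as a hypothetical stand-in for each fallible stage:

	mutex_lock(&hwmgr->smu_lock);
	ret = do_step(hwmgr);	/* hypothetical fallible stage */
	if (ret)
		goto err;
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
err:
	mutex_unlock(&hwmgr->smu_lock);
	return ret;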
@@ -796,10 +719,8 @@ static int pp_dpm_force_clock_level(void *handle,
796 struct pp_hwmgr *hwmgr = handle; 719 struct pp_hwmgr *hwmgr = handle;
797 int ret = 0; 720 int ret = 0;
798 721
799 ret = pp_check(hwmgr); 722 if (!hwmgr || !hwmgr->pm_en)
800 723 return -EINVAL;
801 if (ret)
802 return ret;
803 724
804 if (hwmgr->hwmgr_func->force_clock_level == NULL) { 725 if (hwmgr->hwmgr_func->force_clock_level == NULL) {
805 pr_info("%s was not implemented.\n", __func__); 726 pr_info("%s was not implemented.\n", __func__);
@@ -820,10 +741,8 @@ static int pp_dpm_print_clock_levels(void *handle,
820 struct pp_hwmgr *hwmgr = handle; 741 struct pp_hwmgr *hwmgr = handle;
821 int ret = 0; 742 int ret = 0;
822 743
823 ret = pp_check(hwmgr); 744 if (!hwmgr || !hwmgr->pm_en)
824 745 return -EINVAL;
825 if (ret)
826 return ret;
827 746
828 if (hwmgr->hwmgr_func->print_clock_levels == NULL) { 747 if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
829 pr_info("%s was not implemented.\n", __func__); 748 pr_info("%s was not implemented.\n", __func__);
@@ -840,10 +759,8 @@ static int pp_dpm_get_sclk_od(void *handle)
840 struct pp_hwmgr *hwmgr = handle; 759 struct pp_hwmgr *hwmgr = handle;
841 int ret = 0; 760 int ret = 0;
842 761
843 ret = pp_check(hwmgr); 762 if (!hwmgr || !hwmgr->pm_en)
844 763 return -EINVAL;
845 if (ret)
846 return ret;
847 764
848 if (hwmgr->hwmgr_func->get_sclk_od == NULL) { 765 if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
849 pr_info("%s was not implemented.\n", __func__); 766 pr_info("%s was not implemented.\n", __func__);
@@ -860,10 +777,8 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
860 struct pp_hwmgr *hwmgr = handle; 777 struct pp_hwmgr *hwmgr = handle;
861 int ret = 0; 778 int ret = 0;
862 779
863 ret = pp_check(hwmgr); 780 if (!hwmgr || !hwmgr->pm_en)
864 781 return -EINVAL;
865 if (ret)
866 return ret;
867 782
868 if (hwmgr->hwmgr_func->set_sclk_od == NULL) { 783 if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
869 pr_info("%s was not implemented.\n", __func__); 784 pr_info("%s was not implemented.\n", __func__);
@@ -881,10 +796,8 @@ static int pp_dpm_get_mclk_od(void *handle)
881 struct pp_hwmgr *hwmgr = handle; 796 struct pp_hwmgr *hwmgr = handle;
882 int ret = 0; 797 int ret = 0;
883 798
884 ret = pp_check(hwmgr); 799 if (!hwmgr || !hwmgr->pm_en)
885 800 return -EINVAL;
886 if (ret)
887 return ret;
888 801
889 if (hwmgr->hwmgr_func->get_mclk_od == NULL) { 802 if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
890 pr_info("%s was not implemented.\n", __func__); 803 pr_info("%s was not implemented.\n", __func__);
@@ -901,10 +814,8 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
901 struct pp_hwmgr *hwmgr = handle; 814 struct pp_hwmgr *hwmgr = handle;
902 int ret = 0; 815 int ret = 0;
903 816
904 ret = pp_check(hwmgr); 817 if (!hwmgr || !hwmgr->pm_en)
905 818 return -EINVAL;
906 if (ret)
907 return ret;
908 819
909 if (hwmgr->hwmgr_func->set_mclk_od == NULL) { 820 if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
910 pr_info("%s was not implemented.\n", __func__); 821 pr_info("%s was not implemented.\n", __func__);
@@ -922,11 +833,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
922 struct pp_hwmgr *hwmgr = handle; 833 struct pp_hwmgr *hwmgr = handle;
923 int ret = 0; 834 int ret = 0;
924 835
925 ret = pp_check(hwmgr); 836 if (!hwmgr || !hwmgr->pm_en || !value)
926 if (ret)
927 return ret;
928
929 if (value == NULL)
930 return -EINVAL; 837 return -EINVAL;
931 838
932 switch (idx) { 839 switch (idx) {
@@ -948,14 +855,11 @@ static struct amd_vce_state*
948pp_dpm_get_vce_clock_state(void *handle, unsigned idx) 855pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
949{ 856{
950 struct pp_hwmgr *hwmgr = handle; 857 struct pp_hwmgr *hwmgr = handle;
951 int ret = 0;
952
953 ret = pp_check(hwmgr);
954 858
955 if (ret) 859 if (!hwmgr || !hwmgr->pm_en)
956 return NULL; 860 return NULL;
957 861
958 if (hwmgr && idx < hwmgr->num_vce_state_tables) 862 if (idx < hwmgr->num_vce_state_tables)
959 return &hwmgr->vce_states[idx]; 863 return &hwmgr->vce_states[idx];
960 return NULL; 864 return NULL;
961} 865}
@@ -964,7 +868,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
964{ 868{
965 struct pp_hwmgr *hwmgr = handle; 869 struct pp_hwmgr *hwmgr = handle;
966 870
967 if (!buf || pp_check(hwmgr)) 871 if (!hwmgr || !hwmgr->pm_en || !buf)
968 return -EINVAL; 872 return -EINVAL;
969 873
970 if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { 874 if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
@@ -980,12 +884,12 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
980 struct pp_hwmgr *hwmgr = handle; 884 struct pp_hwmgr *hwmgr = handle;
981 int ret = -EINVAL; 885 int ret = -EINVAL;
982 886
983 if (pp_check(hwmgr)) 887 if (!hwmgr || !hwmgr->pm_en)
984 return -EINVAL; 888 return ret;
985 889
986 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { 890 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
987 pr_info("%s was not implemented.\n", __func__); 891 pr_info("%s was not implemented.\n", __func__);
988 return -EINVAL; 892 return ret;
989 } 893 }
990 mutex_lock(&hwmgr->smu_lock); 894 mutex_lock(&hwmgr->smu_lock);
991 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) 895 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
@@ -998,7 +902,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
998{ 902{
999 struct pp_hwmgr *hwmgr = handle; 903 struct pp_hwmgr *hwmgr = handle;
1000 904
1001 if (pp_check(hwmgr)) 905 if (!hwmgr || !hwmgr->pm_en)
1002 return -EINVAL; 906 return -EINVAL;
1003 907
1004 if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) { 908 if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
@@ -1016,7 +920,7 @@ static int pp_dpm_switch_power_profile(void *handle,
1016 long workload; 920 long workload;
1017 uint32_t index; 921 uint32_t index;
1018 922
1019 if (pp_check(hwmgr)) 923 if (!hwmgr || !hwmgr->pm_en)
1020 return -EINVAL; 924 return -EINVAL;
1021 925
1022 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { 926 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
@@ -1048,46 +952,12 @@ static int pp_dpm_switch_power_profile(void *handle,
1048 return 0; 952 return 0;
1049} 953}
1050 954
1051static int pp_dpm_notify_smu_memory_info(void *handle,
1052 uint32_t virtual_addr_low,
1053 uint32_t virtual_addr_hi,
1054 uint32_t mc_addr_low,
1055 uint32_t mc_addr_hi,
1056 uint32_t size)
1057{
1058 struct pp_hwmgr *hwmgr = handle;
1059 int ret = 0;
1060
1061 ret = pp_check(hwmgr);
1062
1063 if (ret)
1064 return ret;
1065
1066 if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
1067 pr_info("%s was not implemented.\n", __func__);
1068 return -EINVAL;
1069 }
1070
1071 mutex_lock(&hwmgr->smu_lock);
1072
1073 ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
1074 virtual_addr_hi, mc_addr_low, mc_addr_hi,
1075 size);
1076
1077 mutex_unlock(&hwmgr->smu_lock);
1078
1079 return ret;
1080}
1081
1082static int pp_set_power_limit(void *handle, uint32_t limit) 955static int pp_set_power_limit(void *handle, uint32_t limit)
1083{ 956{
1084 struct pp_hwmgr *hwmgr = handle; 957 struct pp_hwmgr *hwmgr = handle;
1085 int ret = 0;
1086 958
1087 ret = pp_check(hwmgr); 959 if (!hwmgr || !hwmgr->pm_en)
1088 960 return -EINVAL;
1089 if (ret)
1090 return ret;
1091 961
1092 if (hwmgr->hwmgr_func->set_power_limit == NULL) { 962 if (hwmgr->hwmgr_func->set_power_limit == NULL) {
1093 pr_info("%s was not implemented.\n", __func__); 963 pr_info("%s was not implemented.\n", __func__);
@@ -1104,20 +974,14 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
1104 hwmgr->hwmgr_func->set_power_limit(hwmgr, limit); 974 hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1105 hwmgr->power_limit = limit; 975 hwmgr->power_limit = limit;
1106 mutex_unlock(&hwmgr->smu_lock); 976 mutex_unlock(&hwmgr->smu_lock);
1107 return ret; 977 return 0;
1108} 978}
1109 979
1110static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) 980static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1111{ 981{
1112 struct pp_hwmgr *hwmgr = handle; 982 struct pp_hwmgr *hwmgr = handle;
1113 int ret = 0;
1114
1115 ret = pp_check(hwmgr);
1116 983
1117 if (ret) 984 if (!hwmgr || !hwmgr->pm_en || !limit)
1118 return ret;
1119
1120 if (limit == NULL)
1121 return -EINVAL; 985 return -EINVAL;
1122 986
1123 mutex_lock(&hwmgr->smu_lock); 987 mutex_lock(&hwmgr->smu_lock);
@@ -1129,19 +993,16 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1129 993
1130 mutex_unlock(&hwmgr->smu_lock); 994 mutex_unlock(&hwmgr->smu_lock);
1131 995
1132 return ret; 996 return 0;
1133} 997}
1134 998
1135static int pp_display_configuration_change(void *handle, 999static int pp_display_configuration_change(void *handle,
1136 const struct amd_pp_display_configuration *display_config) 1000 const struct amd_pp_display_configuration *display_config)
1137{ 1001{
1138 struct pp_hwmgr *hwmgr = handle; 1002 struct pp_hwmgr *hwmgr = handle;
1139 int ret = 0;
1140 1003
1141 ret = pp_check(hwmgr); 1004 if (!hwmgr || !hwmgr->pm_en)
1142 1005 return -EINVAL;
1143 if (ret)
1144 return ret;
1145 1006
1146 mutex_lock(&hwmgr->smu_lock); 1007 mutex_lock(&hwmgr->smu_lock);
1147 phm_store_dal_configuration_data(hwmgr, display_config); 1008 phm_store_dal_configuration_data(hwmgr, display_config);
@@ -1155,12 +1016,7 @@ static int pp_get_display_power_level(void *handle,
1155 struct pp_hwmgr *hwmgr = handle; 1016 struct pp_hwmgr *hwmgr = handle;
1156 int ret = 0; 1017 int ret = 0;
1157 1018
1158 ret = pp_check(hwmgr); 1019 if (!hwmgr || !hwmgr->pm_en || !output)
1159
1160 if (ret)
1161 return ret;
1162
1163 if (output == NULL)
1164 return -EINVAL; 1020 return -EINVAL;
1165 1021
1166 mutex_lock(&hwmgr->smu_lock); 1022 mutex_lock(&hwmgr->smu_lock);
@@ -1177,10 +1033,8 @@ static int pp_get_current_clocks(void *handle,
1177 struct pp_hwmgr *hwmgr = handle; 1033 struct pp_hwmgr *hwmgr = handle;
1178 int ret = 0; 1034 int ret = 0;
1179 1035
1180 ret = pp_check(hwmgr); 1036 if (!hwmgr || !hwmgr->pm_en)
1181 1037 return -EINVAL;
1182 if (ret)
1183 return ret;
1184 1038
1185 mutex_lock(&hwmgr->smu_lock); 1039 mutex_lock(&hwmgr->smu_lock);
1186 1040
@@ -1225,10 +1079,8 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
1225 struct pp_hwmgr *hwmgr = handle; 1079 struct pp_hwmgr *hwmgr = handle;
1226 int ret = 0; 1080 int ret = 0;
1227 1081
1228 ret = pp_check(hwmgr); 1082 if (!hwmgr || !hwmgr->pm_en)
1229 1083 return -EINVAL;
1230 if (ret)
1231 return ret;
1232 1084
1233 if (clocks == NULL) 1085 if (clocks == NULL)
1234 return -EINVAL; 1086 return -EINVAL;
@@ -1246,11 +1098,7 @@ static int pp_get_clock_by_type_with_latency(void *handle,
1246 struct pp_hwmgr *hwmgr = handle; 1098 struct pp_hwmgr *hwmgr = handle;
1247 int ret = 0; 1099 int ret = 0;
1248 1100
1249 ret = pp_check(hwmgr); 1101 if (!hwmgr || !hwmgr->pm_en || !clocks)
1250 if (ret)
1251 return ret;
1252
1253 if (!clocks)
1254 return -EINVAL; 1102 return -EINVAL;
1255 1103
1256 mutex_lock(&hwmgr->smu_lock); 1104 mutex_lock(&hwmgr->smu_lock);
@@ -1266,11 +1114,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
1266 struct pp_hwmgr *hwmgr = handle; 1114 struct pp_hwmgr *hwmgr = handle;
1267 int ret = 0; 1115 int ret = 0;
1268 1116
1269 ret = pp_check(hwmgr); 1117 if (!hwmgr || !hwmgr->pm_en || !clocks)
1270 if (ret)
1271 return ret;
1272
1273 if (!clocks)
1274 return -EINVAL; 1118 return -EINVAL;
1275 1119
1276 mutex_lock(&hwmgr->smu_lock); 1120 mutex_lock(&hwmgr->smu_lock);
@@ -1287,11 +1131,7 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
1287 struct pp_hwmgr *hwmgr = handle; 1131 struct pp_hwmgr *hwmgr = handle;
1288 int ret = 0; 1132 int ret = 0;
1289 1133
1290 ret = pp_check(hwmgr); 1134 if (!hwmgr || !hwmgr->pm_en || !wm_with_clock_ranges)
1291 if (ret)
1292 return ret;
1293
1294 if (!wm_with_clock_ranges)
1295 return -EINVAL; 1135 return -EINVAL;
1296 1136
1297 mutex_lock(&hwmgr->smu_lock); 1137 mutex_lock(&hwmgr->smu_lock);
@@ -1308,11 +1148,7 @@ static int pp_display_clock_voltage_request(void *handle,
1308 struct pp_hwmgr *hwmgr = handle; 1148 struct pp_hwmgr *hwmgr = handle;
1309 int ret = 0; 1149 int ret = 0;
1310 1150
1311 ret = pp_check(hwmgr); 1151 if (!hwmgr || !hwmgr->pm_en || !clock)
1312 if (ret)
1313 return ret;
1314
1315 if (!clock)
1316 return -EINVAL; 1152 return -EINVAL;
1317 1153
1318 mutex_lock(&hwmgr->smu_lock); 1154 mutex_lock(&hwmgr->smu_lock);
@@ -1328,12 +1164,7 @@ static int pp_get_display_mode_validation_clocks(void *handle,
1328 struct pp_hwmgr *hwmgr = handle; 1164 struct pp_hwmgr *hwmgr = handle;
1329 int ret = 0; 1165 int ret = 0;
1330 1166
1331 ret = pp_check(hwmgr); 1167 if (!hwmgr || !hwmgr->pm_en || !clocks)
1332
1333 if (ret)
1334 return ret;
1335
1336 if (clocks == NULL)
1337 return -EINVAL; 1168 return -EINVAL;
1338 1169
1339 mutex_lock(&hwmgr->smu_lock); 1170 mutex_lock(&hwmgr->smu_lock);
@@ -1348,12 +1179,9 @@ static int pp_get_display_mode_validation_clocks(void *handle,
1348static int pp_set_mmhub_powergating_by_smu(void *handle) 1179static int pp_set_mmhub_powergating_by_smu(void *handle)
1349{ 1180{
1350 struct pp_hwmgr *hwmgr = handle; 1181 struct pp_hwmgr *hwmgr = handle;
1351 int ret = 0;
1352 1182
1353 ret = pp_check(hwmgr); 1183 if (!hwmgr || !hwmgr->pm_en)
1354 1184 return -EINVAL;
1355 if (ret)
1356 return ret;
1357 1185
1358 if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) { 1186 if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
1359 pr_info("%s was not implemented.\n", __func__); 1187 pr_info("%s was not implemented.\n", __func__);
@@ -1390,7 +1218,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
1390 .get_vce_clock_state = pp_dpm_get_vce_clock_state, 1218 .get_vce_clock_state = pp_dpm_get_vce_clock_state,
1391 .switch_power_profile = pp_dpm_switch_power_profile, 1219 .switch_power_profile = pp_dpm_switch_power_profile,
1392 .set_clockgating_by_smu = pp_set_clockgating_by_smu, 1220 .set_clockgating_by_smu = pp_set_clockgating_by_smu,
1393 .notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
1394 .get_power_profile_mode = pp_get_power_profile_mode, 1221 .get_power_profile_mode = pp_get_power_profile_mode,
1395 .set_power_profile_mode = pp_set_power_profile_mode, 1222 .set_power_profile_mode = pp_set_power_profile_mode,
1396 .odn_edit_dpm_table = pp_odn_edit_dpm_table, 1223 .odn_edit_dpm_table = pp_odn_edit_dpm_table,
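
A note on the refactor above: every pp_* entry point in amd_powerplay.c used to call the pp_check() helper and thread its status through a local ret. The new code validates the handle inline (!hwmgr || !hwmgr->pm_en, plus any output-pointer check) and returns a literal 0 once the locked work is done. A minimal user-space sketch of the resulting shape, with pthread standing in for the kernel mutex and all names hypothetical rather than the driver's:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct hwmgr {
        bool pm_en;                 /* power management enabled */
        pthread_mutex_t smu_lock;   /* stands in for hwmgr->smu_lock */
        uint32_t power_limit;
};

/* Guard-clause style: validate the handle and the output pointer up
 * front, hold the lock only for the real work, and return a literal 0
 * because nothing after the guard can fail. */
static int get_power_limit(void *handle, uint32_t *limit)
{
        struct hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en || !limit)
                return -EINVAL;

        pthread_mutex_lock(&hwmgr->smu_lock);
        *limit = hwmgr->power_limit;
        pthread_mutex_unlock(&hwmgr->smu_lock);

        return 0;
}

int main(void)
{
        struct hwmgr hw = { .pm_en = true,
                            .smu_lock = PTHREAD_MUTEX_INITIALIZER,
                            .power_limit = 180 };
        uint32_t limit;

        if (!get_power_limit(&hw, &limit))
                printf("limit: %u W\n", (unsigned)limit);
        return 0;
}

Returning the literal 0 instead of a leftover ret makes it obvious these paths cannot fail after the guard, which is exactly the change the hunks above make to pp_set_power_limit() and pp_get_power_limit().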
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index ae2e9339dd6b..e411012b3dcb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -75,8 +75,7 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
75 75
76int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) 76int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
77{ 77{
78 int ret = 1; 78 int ret = -EINVAL;
79 bool enabled;
80 PHM_FUNC_CHECK(hwmgr); 79 PHM_FUNC_CHECK(hwmgr);
81 80
82 if (smum_is_dpm_running(hwmgr)) { 81 if (smum_is_dpm_running(hwmgr)) {
@@ -87,17 +86,12 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
87 if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) 86 if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
88 ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); 87 ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
89 88
90 enabled = ret == 0;
91
92 cgs_notify_dpm_enabled(hwmgr->device, enabled);
93
94 return ret; 89 return ret;
95} 90}
96 91
97int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr) 92int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
98{ 93{
99 int ret = -1; 94 int ret = -EINVAL;
100 bool enabled;
101 95
102 PHM_FUNC_CHECK(hwmgr); 96 PHM_FUNC_CHECK(hwmgr);
103 97
@@ -109,10 +103,6 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
109 if (hwmgr->hwmgr_func->dynamic_state_management_disable) 103 if (hwmgr->hwmgr_func->dynamic_state_management_disable)
110 ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr); 104 ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
111 105
112 enabled = ret == 0 ? false : true;
113
114 cgs_notify_dpm_enabled(hwmgr->device, enabled);
115
116 return ret; 106 return ret;
117} 107}
118 108
@@ -275,13 +265,11 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
275 if (display_config == NULL) 265 if (display_config == NULL)
276 return -EINVAL; 266 return -EINVAL;
277 267
278 hwmgr->display_config = *display_config;
279
280 if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk) 268 if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
281 hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, hwmgr->display_config.min_dcef_deep_sleep_set_clk); 269 hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
282 270
283 for (index = 0; index < hwmgr->display_config.num_path_including_non_display; index++) { 271 for (index = 0; index < display_config->num_path_including_non_display; index++) {
284 if (hwmgr->display_config.displays[index].controller_id != 0) 272 if (display_config->displays[index].controller_id != 0)
285 number_of_active_display++; 273 number_of_active_display++;
286 } 274 }
287 275
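
Two things happen in hardwaremanager.c above: ret is now seeded with -EINVAL instead of the magic 1/-1, so a missing callback surfaces as a real errno, and the cgs_notify_dpm_enabled round trip disappears along with the cached hwmgr->display_config copy (phm_store_dal_configuration_data reads the caller's structure directly). A self-contained sketch of the errno-seeding idiom, names hypothetical:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct hwmgr_ops {
        /* may be NULL on ASICs that lack the feature */
        int (*dynamic_state_management_enable)(void);
};

/* Seeding ret with -EINVAL means "no callback present" surfaces as a
 * proper errno rather than a bare -1 or, worse, a positive 1. */
static int enable_dynamic_state_management(const struct hwmgr_ops *ops)
{
        int ret = -EINVAL;

        if (ops && ops->dynamic_state_management_enable)
                ret = ops->dynamic_state_management_enable();

        return ret;
}

static int enable_ok(void)
{
        return 0;
}

int main(void)
{
        struct hwmgr_ops ops = {
                .dynamic_state_management_enable = enable_ok,
        };

        printf("with callback: %d, without: %d\n",
               enable_dynamic_state_management(&ops),
               enable_dynamic_state_management(NULL));
        return 0;
}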
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 42982055b161..71b42331f185 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -40,6 +40,7 @@ extern const struct pp_smumgr_func iceland_smu_funcs;
40extern const struct pp_smumgr_func tonga_smu_funcs; 40extern const struct pp_smumgr_func tonga_smu_funcs;
41extern const struct pp_smumgr_func fiji_smu_funcs; 41extern const struct pp_smumgr_func fiji_smu_funcs;
42extern const struct pp_smumgr_func polaris10_smu_funcs; 42extern const struct pp_smumgr_func polaris10_smu_funcs;
43extern const struct pp_smumgr_func vegam_smu_funcs;
43extern const struct pp_smumgr_func vega10_smu_funcs; 44extern const struct pp_smumgr_func vega10_smu_funcs;
44extern const struct pp_smumgr_func vega12_smu_funcs; 45extern const struct pp_smumgr_func vega12_smu_funcs;
45extern const struct pp_smumgr_func smu10_smu_funcs; 46extern const struct pp_smumgr_func smu10_smu_funcs;
@@ -76,7 +77,7 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
76 77
77int hwmgr_early_init(struct pp_hwmgr *hwmgr) 78int hwmgr_early_init(struct pp_hwmgr *hwmgr)
78{ 79{
79 if (hwmgr == NULL) 80 if (!hwmgr)
80 return -EINVAL; 81 return -EINVAL;
81 82
82 hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; 83 hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
@@ -95,7 +96,8 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
95 hwmgr->smumgr_funcs = &ci_smu_funcs; 96 hwmgr->smumgr_funcs = &ci_smu_funcs;
96 ci_set_asic_special_caps(hwmgr); 97 ci_set_asic_special_caps(hwmgr);
97 hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | 98 hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
98 PP_ENABLE_GFX_CG_THRU_SMU); 99 PP_ENABLE_GFX_CG_THRU_SMU |
100 PP_GFXOFF_MASK);
99 hwmgr->pp_table_version = PP_TABLE_V0; 101 hwmgr->pp_table_version = PP_TABLE_V0;
100 hwmgr->od_enabled = false; 102 hwmgr->od_enabled = false;
101 smu7_init_function_pointers(hwmgr); 103 smu7_init_function_pointers(hwmgr);
@@ -103,9 +105,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
103 case AMDGPU_FAMILY_CZ: 105 case AMDGPU_FAMILY_CZ:
104 hwmgr->od_enabled = false; 106 hwmgr->od_enabled = false;
105 hwmgr->smumgr_funcs = &smu8_smu_funcs; 107 hwmgr->smumgr_funcs = &smu8_smu_funcs;
108 hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
106 smu8_init_function_pointers(hwmgr); 109 smu8_init_function_pointers(hwmgr);
107 break; 110 break;
108 case AMDGPU_FAMILY_VI: 111 case AMDGPU_FAMILY_VI:
112 hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
109 switch (hwmgr->chip_id) { 113 switch (hwmgr->chip_id) {
110 case CHIP_TOPAZ: 114 case CHIP_TOPAZ:
111 hwmgr->smumgr_funcs = &iceland_smu_funcs; 115 hwmgr->smumgr_funcs = &iceland_smu_funcs;
@@ -133,12 +137,18 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
133 polaris_set_asic_special_caps(hwmgr); 137 polaris_set_asic_special_caps(hwmgr);
134 hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); 138 hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
135 break; 139 break;
140 case CHIP_VEGAM:
141 hwmgr->smumgr_funcs = &vegam_smu_funcs;
142 polaris_set_asic_special_caps(hwmgr);
143 hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
144 break;
136 default: 145 default:
137 return -EINVAL; 146 return -EINVAL;
138 } 147 }
139 smu7_init_function_pointers(hwmgr); 148 smu7_init_function_pointers(hwmgr);
140 break; 149 break;
141 case AMDGPU_FAMILY_AI: 150 case AMDGPU_FAMILY_AI:
151 hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
142 switch (hwmgr->chip_id) { 152 switch (hwmgr->chip_id) {
143 case CHIP_VEGA10: 153 case CHIP_VEGA10:
144 hwmgr->smumgr_funcs = &vega10_smu_funcs; 154 hwmgr->smumgr_funcs = &vega10_smu_funcs;
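
hwmgr_early_init above strips PP_GFXOFF_MASK from the feature mask on the CZ, VI, and AI families, so only ASICs that keep the bit (Raven, per this merge's gfxoff highlight) can ever enable the feature. A compact sketch of the feature-mask idiom; the bit values and family names here are made up, only the shape matches the kernel code:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical feature bits; the kernel defines its own PP_*_MASK set. */
#define PP_GFXOFF_MASK        (1u << 0)
#define PP_UVD_HANDSHAKE_MASK (1u << 1)

enum family { FAMILY_CZ, FAMILY_VI, FAMILY_AI, FAMILY_RV };

/* Start from "everything enabled" and strip what a family cannot do;
 * later feature checks reduce to a single bitwise test. */
static uint32_t init_feature_mask(enum family f)
{
        uint32_t mask = PP_GFXOFF_MASK | PP_UVD_HANDSHAKE_MASK;

        switch (f) {
        case FAMILY_CZ:
        case FAMILY_VI:
        case FAMILY_AI:
                mask &= ~PP_GFXOFF_MASK;  /* no gfxoff on these families */
                break;
        case FAMILY_RV:
                break;                    /* keeps gfxoff */
        }
        return mask;
}

int main(void)
{
        printf("VI gfxoff: %d, RV gfxoff: %d\n",
               !!(init_feature_mask(FAMILY_VI) & PP_GFXOFF_MASK),
               !!(init_feature_mask(FAMILY_RV) & PP_GFXOFF_MASK));
        return 0;
}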
@@ -170,22 +180,58 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
170 return 0; 180 return 0;
171} 181}
172 182
183int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
184{
185 if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
186 return -EINVAL;
187
188 phm_register_irq_handlers(hwmgr);
189
190 return hwmgr->smumgr_funcs->smu_init(hwmgr);
191}
192
193
194int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
195{
196 if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
197 hwmgr->smumgr_funcs->smu_fini(hwmgr);
198
199 return 0;
200}
201
173int hwmgr_hw_init(struct pp_hwmgr *hwmgr) 202int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
174{ 203{
175 int ret = 0; 204 int ret = 0;
176 205
177 if (hwmgr == NULL) 206 if (!hwmgr || !hwmgr->smumgr_funcs)
178 return -EINVAL; 207 return -EINVAL;
179 208
180 if (hwmgr->pptable_func == NULL || 209 if (hwmgr->smumgr_funcs->start_smu) {
181 hwmgr->pptable_func->pptable_init == NULL || 210 ret = hwmgr->smumgr_funcs->start_smu(hwmgr);
182 hwmgr->hwmgr_func->backend_init == NULL) 211 if (ret) {
183 return -EINVAL; 212 pr_err("smc start failed\n");
213 return -EINVAL;
214 }
215 }
216
217 if (!hwmgr->pm_en)
218 return 0;
219
220 if (!hwmgr->pptable_func ||
221 !hwmgr->pptable_func->pptable_init ||
222 !hwmgr->hwmgr_func->backend_init) {
223 hwmgr->pm_en = false;
224 pr_info("dpm not supported\n");
225 return 0;
226 }
184 227
185 ret = hwmgr->pptable_func->pptable_init(hwmgr); 228 ret = hwmgr->pptable_func->pptable_init(hwmgr);
186 if (ret) 229 if (ret)
187 goto err; 230 goto err;
188 231
232 ((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
233 hwmgr->thermal_controller.fanInfo.bNoFan;
234
189 ret = hwmgr->hwmgr_func->backend_init(hwmgr); 235 ret = hwmgr->hwmgr_func->backend_init(hwmgr);
190 if (ret) 236 if (ret)
191 goto err1; 237 goto err1;
@@ -206,6 +252,8 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
206 if (ret) 252 if (ret)
207 goto err2; 253 goto err2;
208 254
255 ((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;
256
209 return 0; 257 return 0;
210err2: 258err2:
211 if (hwmgr->hwmgr_func->backend_fini) 259 if (hwmgr->hwmgr_func->backend_fini)
@@ -214,14 +262,13 @@ err1:
214 if (hwmgr->pptable_func->pptable_fini) 262 if (hwmgr->pptable_func->pptable_fini)
215 hwmgr->pptable_func->pptable_fini(hwmgr); 263 hwmgr->pptable_func->pptable_fini(hwmgr);
216err: 264err:
217 pr_err("amdgpu: powerplay initialization failed\n");
218 return ret; 265 return ret;
219} 266}
220 267
221int hwmgr_hw_fini(struct pp_hwmgr *hwmgr) 268int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
222{ 269{
223 if (hwmgr == NULL) 270 if (!hwmgr || !hwmgr->pm_en)
224 return -EINVAL; 271 return 0;
225 272
226 phm_stop_thermal_controller(hwmgr); 273 phm_stop_thermal_controller(hwmgr);
227 psm_set_boot_states(hwmgr); 274 psm_set_boot_states(hwmgr);
@@ -236,12 +283,12 @@ int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
236 return psm_fini_power_state_table(hwmgr); 283 return psm_fini_power_state_table(hwmgr);
237} 284}
238 285
239int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr) 286int hwmgr_suspend(struct pp_hwmgr *hwmgr)
240{ 287{
241 int ret = 0; 288 int ret = 0;
242 289
243 if (hwmgr == NULL) 290 if (!hwmgr || !hwmgr->pm_en)
244 return -EINVAL; 291 return 0;
245 292
246 phm_disable_smc_firmware_ctf(hwmgr); 293 phm_disable_smc_firmware_ctf(hwmgr);
247 ret = psm_set_boot_states(hwmgr); 294 ret = psm_set_boot_states(hwmgr);
@@ -255,13 +302,23 @@ int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
255 return ret; 302 return ret;
256} 303}
257 304
258int hwmgr_hw_resume(struct pp_hwmgr *hwmgr) 305int hwmgr_resume(struct pp_hwmgr *hwmgr)
259{ 306{
260 int ret = 0; 307 int ret = 0;
261 308
262 if (hwmgr == NULL) 309 if (!hwmgr)
263 return -EINVAL; 310 return -EINVAL;
264 311
312 if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) {
313 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
314 pr_err("smc start failed\n");
315 return -EINVAL;
316 }
317 }
318
319 if (!hwmgr->pm_en)
320 return 0;
321
265 ret = phm_setup_asic(hwmgr); 322 ret = phm_setup_asic(hwmgr);
266 if (ret) 323 if (ret)
267 return ret; 324 return ret;
@@ -270,9 +327,6 @@ int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
270 if (ret) 327 if (ret)
271 return ret; 328 return ret;
272 ret = phm_start_thermal_controller(hwmgr); 329 ret = phm_start_thermal_controller(hwmgr);
273 if (ret)
274 return ret;
275
276 ret |= psm_set_performance_states(hwmgr); 330 ret |= psm_set_performance_states(hwmgr);
277 if (ret) 331 if (ret)
278 return ret; 332 return ret;
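
The hwmgr_hw_init rework above changes the failure semantics: the SMC is started before anything else, and a missing pptable/backend now means "DPM unsupported" (pm_en cleared, return 0) instead of a fatal -EINVAL, so the driver still loads without power management; hwmgr_hw_fini and the renamed hwmgr_suspend/hwmgr_resume likewise return 0 early when pm_en is off. A runnable sketch of that ordering, with hypothetical hooks standing in for the smumgr/pptable function pointers:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct hwmgr {
        bool pm_en;
        int (*start_smu)(void);    /* firmware bring-up, may be NULL */
        int (*pptable_init)(void); /* NULL when DPM tables are absent */
};

static int hw_init(struct hwmgr *hw)
{
        if (!hw)
                return -EINVAL;

        /* 1. The SMC firmware must come up first, DPM or not. */
        if (hw->start_smu && hw->start_smu()) {
                fprintf(stderr, "smc start failed\n");
                return -EINVAL;
        }

        /* 2. Power management not requested: done, and not an error. */
        if (!hw->pm_en)
                return 0;

        /* 3. DPM requested but unsupported: degrade instead of failing. */
        if (!hw->pptable_init) {
                hw->pm_en = false;
                fprintf(stderr, "dpm not supported\n");
                return 0;
        }

        return hw->pptable_init();
}

static int smu_ok(void)
{
        return 0;
}

int main(void)
{
        struct hwmgr hw = { .pm_en = true, .start_smu = smu_ok };

        printf("hw_init: %d, pm_en after: %d\n", hw_init(&hw), hw.pm_en);
        return 0;
}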
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 0f2851b5b368..308bff2b5d1d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -46,7 +46,7 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
46 sizeof(struct pp_power_state); 46 sizeof(struct pp_power_state);
47 47
48 if (table_entries == 0 || size == 0) { 48 if (table_entries == 0 || size == 0) {
49 pr_warn("Please check whether power state management is suppported on this asic\n"); 49 pr_warn("Please check whether power state management is supported on this asic\n");
50 return 0; 50 return 0;
51 } 51 }
52 52
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index c6febbf0bf69..cf99c5eaf080 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -23,7 +23,8 @@
23#include "pp_debug.h" 23#include "pp_debug.h"
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26 26#include <linux/delay.h>
27#include "atom.h"
27#include "ppatomctrl.h" 28#include "ppatomctrl.h"
28#include "atombios.h" 29#include "atombios.h"
29#include "cgs_common.h" 30#include "cgs_common.h"
@@ -128,7 +129,6 @@ static int atomctrl_set_mc_reg_address_table(
128 return 0; 129 return 0;
129} 130}
130 131
131
132int atomctrl_initialize_mc_reg_table( 132int atomctrl_initialize_mc_reg_table(
133 struct pp_hwmgr *hwmgr, 133 struct pp_hwmgr *hwmgr,
134 uint8_t module_index, 134 uint8_t module_index,
@@ -141,7 +141,7 @@ int atomctrl_initialize_mc_reg_table(
141 u16 size; 141 u16 size;
142 142
143 vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *) 143 vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
144 cgs_atom_get_data_table(hwmgr->device, 144 smu_atom_get_data_table(hwmgr->adev,
145 GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); 145 GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
146 146
147 if (module_index >= vram_info->ucNumOfVRAMModule) { 147 if (module_index >= vram_info->ucNumOfVRAMModule) {
@@ -174,6 +174,8 @@ int atomctrl_set_engine_dram_timings_rv770(
174 uint32_t engine_clock, 174 uint32_t engine_clock,
175 uint32_t memory_clock) 175 uint32_t memory_clock)
176{ 176{
177 struct amdgpu_device *adev = hwmgr->adev;
178
177 SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters; 179 SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters;
178 180
179 /* They are both in 10KHz Units. */ 181 /* They are both in 10KHz Units. */
@@ -184,9 +186,10 @@ int atomctrl_set_engine_dram_timings_rv770(
184 /* in 10 khz units.*/ 186 /* in 10 khz units.*/
185 engine_clock_parameters.sReserved.ulClock = 187 engine_clock_parameters.sReserved.ulClock =
186 cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); 188 cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
187 return cgs_atom_exec_cmd_table(hwmgr->device, 189
190 return amdgpu_atom_execute_table(adev->mode_info.atom_context,
188 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), 191 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
189 &engine_clock_parameters); 192 (uint32_t *)&engine_clock_parameters);
190} 193}
191 194
192/** 195/**
@@ -203,7 +206,7 @@ static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
203 union voltage_object_info *voltage_info; 206 union voltage_object_info *voltage_info;
204 207
205 voltage_info = (union voltage_object_info *) 208 voltage_info = (union voltage_object_info *)
206 cgs_atom_get_data_table(device, index, 209 smu_atom_get_data_table(device, index,
207 &size, &frev, &crev); 210 &size, &frev, &crev);
208 211
209 if (voltage_info != NULL) 212 if (voltage_info != NULL)
@@ -247,16 +250,16 @@ int atomctrl_get_memory_pll_dividers_si(
247 pp_atomctrl_memory_clock_param *mpll_param, 250 pp_atomctrl_memory_clock_param *mpll_param,
248 bool strobe_mode) 251 bool strobe_mode)
249{ 252{
253 struct amdgpu_device *adev = hwmgr->adev;
250 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; 254 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
251 int result; 255 int result;
252 256
253 mpll_parameters.ulClock = cpu_to_le32(clock_value); 257 mpll_parameters.ulClock = cpu_to_le32(clock_value);
254 mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0); 258 mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
255 259
256 result = cgs_atom_exec_cmd_table 260 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
257 (hwmgr->device,
258 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), 261 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
259 &mpll_parameters); 262 (uint32_t *)&mpll_parameters);
260 263
261 if (0 == result) { 264 if (0 == result) {
262 mpll_param->mpll_fb_divider.clk_frac = 265 mpll_param->mpll_fb_divider.clk_frac =
@@ -295,14 +298,15 @@ int atomctrl_get_memory_pll_dividers_si(
295int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, 298int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
296 uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param) 299 uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
297{ 300{
301 struct amdgpu_device *adev = hwmgr->adev;
298 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; 302 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
299 int result; 303 int result;
300 304
301 mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); 305 mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
302 306
303 result = cgs_atom_exec_cmd_table(hwmgr->device, 307 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
304 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), 308 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
305 &mpll_parameters); 309 (uint32_t *)&mpll_parameters);
306 310
307 if (!result) 311 if (!result)
308 mpll_param->mpll_post_divider = 312 mpll_param->mpll_post_divider =
@@ -311,19 +315,49 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
311 return result; 315 return result;
312} 316}
313 317
318int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
319 uint32_t clock_value,
320 pp_atomctrl_memory_clock_param_ai *mpll_param)
321{
322 struct amdgpu_device *adev = hwmgr->adev;
323 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {0};
324 int result;
325
326 mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
327
328 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
329 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
330 (uint32_t *)&mpll_parameters);
331
332 /* VEGAM's mpll takes some time to finish computing */
333 udelay(10);
334
335 if (!result) {
336 mpll_param->ulMclk_fcw_int =
337 le16_to_cpu(mpll_parameters.usMclk_fcw_int);
338 mpll_param->ulMclk_fcw_frac =
339 le16_to_cpu(mpll_parameters.usMclk_fcw_frac);
340 mpll_param->ulClock =
341 le32_to_cpu(mpll_parameters.ulClock.ulClock);
342 mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv;
343 }
344
345 return result;
346}
347
314int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, 348int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
315 uint32_t clock_value, 349 uint32_t clock_value,
316 pp_atomctrl_clock_dividers_kong *dividers) 350 pp_atomctrl_clock_dividers_kong *dividers)
317{ 351{
352 struct amdgpu_device *adev = hwmgr->adev;
318 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; 353 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
319 int result; 354 int result;
320 355
321 pll_parameters.ulClock = cpu_to_le32(clock_value); 356 pll_parameters.ulClock = cpu_to_le32(clock_value);
322 357
323 result = cgs_atom_exec_cmd_table 358 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
324 (hwmgr->device,
325 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), 359 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
326 &pll_parameters); 360 (uint32_t *)&pll_parameters);
327 361
328 if (0 == result) { 362 if (0 == result) {
329 dividers->pll_post_divider = pll_parameters.ucPostDiv; 363 dividers->pll_post_divider = pll_parameters.ucPostDiv;
@@ -338,16 +372,16 @@ int atomctrl_get_engine_pll_dividers_vi(
338 uint32_t clock_value, 372 uint32_t clock_value,
339 pp_atomctrl_clock_dividers_vi *dividers) 373 pp_atomctrl_clock_dividers_vi *dividers)
340{ 374{
375 struct amdgpu_device *adev = hwmgr->adev;
341 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; 376 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
342 int result; 377 int result;
343 378
344 pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); 379 pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
345 pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; 380 pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
346 381
347 result = cgs_atom_exec_cmd_table 382 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
348 (hwmgr->device,
349 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), 383 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
350 &pll_patameters); 384 (uint32_t *)&pll_patameters);
351 385
352 if (0 == result) { 386 if (0 == result) {
353 dividers->pll_post_divider = 387 dividers->pll_post_divider =
@@ -375,16 +409,16 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
375 uint32_t clock_value, 409 uint32_t clock_value,
376 pp_atomctrl_clock_dividers_ai *dividers) 410 pp_atomctrl_clock_dividers_ai *dividers)
377{ 411{
412 struct amdgpu_device *adev = hwmgr->adev;
378 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters; 413 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
379 int result; 414 int result;
380 415
381 pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); 416 pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
382 pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; 417 pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
383 418
384 result = cgs_atom_exec_cmd_table 419 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
385 (hwmgr->device,
386 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), 420 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
387 &pll_patameters); 421 (uint32_t *)&pll_patameters);
388 422
389 if (0 == result) { 423 if (0 == result) {
390 dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac); 424 dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac);
@@ -407,6 +441,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
407 uint32_t clock_value, 441 uint32_t clock_value,
408 pp_atomctrl_clock_dividers_vi *dividers) 442 pp_atomctrl_clock_dividers_vi *dividers)
409{ 443{
444 struct amdgpu_device *adev = hwmgr->adev;
410 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; 445 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
411 int result; 446 int result;
412 447
@@ -414,10 +449,9 @@ int atomctrl_get_dfs_pll_dividers_vi(
414 pll_patameters.ulClock.ucPostDiv = 449 pll_patameters.ulClock.ucPostDiv =
415 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; 450 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
416 451
417 result = cgs_atom_exec_cmd_table 452 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
418 (hwmgr->device,
419 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), 453 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
420 &pll_patameters); 454 (uint32_t *)&pll_patameters);
421 455
422 if (0 == result) { 456 if (0 == result) {
423 dividers->pll_post_divider = 457 dividers->pll_post_divider =
@@ -452,7 +486,7 @@ uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
452 uint32_t clock; 486 uint32_t clock;
453 487
454 fw_info = (ATOM_FIRMWARE_INFO *) 488 fw_info = (ATOM_FIRMWARE_INFO *)
455 cgs_atom_get_data_table(hwmgr->device, 489 smu_atom_get_data_table(hwmgr->adev,
456 GetIndexIntoMasterTable(DATA, FirmwareInfo), 490 GetIndexIntoMasterTable(DATA, FirmwareInfo),
457 &size, &frev, &crev); 491 &size, &frev, &crev);
458 492
@@ -476,7 +510,7 @@ bool atomctrl_is_voltage_controlled_by_gpio_v3(
476 uint8_t voltage_mode) 510 uint8_t voltage_mode)
477{ 511{
478 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = 512 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
479 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); 513 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
480 bool ret; 514 bool ret;
481 515
482 PP_ASSERT_WITH_CODE((NULL != voltage_info), 516 PP_ASSERT_WITH_CODE((NULL != voltage_info),
@@ -495,7 +529,7 @@ int atomctrl_get_voltage_table_v3(
495 pp_atomctrl_voltage_table *voltage_table) 529 pp_atomctrl_voltage_table *voltage_table)
496{ 530{
497 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = 531 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
498 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); 532 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
499 const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; 533 const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
500 unsigned int i; 534 unsigned int i;
501 535
@@ -572,7 +606,7 @@ static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
572 void *table_address; 606 void *table_address;
573 607
574 table_address = (ATOM_GPIO_PIN_LUT *) 608 table_address = (ATOM_GPIO_PIN_LUT *)
575 cgs_atom_get_data_table(device, 609 smu_atom_get_data_table(device,
576 GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT), 610 GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
577 &size, &frev, &crev); 611 &size, &frev, &crev);
578 612
@@ -592,7 +626,7 @@ bool atomctrl_get_pp_assign_pin(
592{ 626{
593 bool bRet = false; 627 bool bRet = false;
594 ATOM_GPIO_PIN_LUT *gpio_lookup_table = 628 ATOM_GPIO_PIN_LUT *gpio_lookup_table =
595 get_gpio_lookup_table(hwmgr->device); 629 get_gpio_lookup_table(hwmgr->adev);
596 630
597 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), 631 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
598 "Could not find GPIO lookup Table in BIOS.", return false); 632 "Could not find GPIO lookup Table in BIOS.", return false);
@@ -613,7 +647,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
613 bool debug) 647 bool debug)
614{ 648{
615 ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo; 649 ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
616 650 struct amdgpu_device *adev = hwmgr->adev;
617 EFUSE_LINEAR_FUNC_PARAM sRO_fuse; 651 EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
618 EFUSE_LINEAR_FUNC_PARAM sCACm_fuse; 652 EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
619 EFUSE_LINEAR_FUNC_PARAM sCACb_fuse; 653 EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
@@ -640,7 +674,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
640 int result; 674 int result;
641 675
642 getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *) 676 getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
643 cgs_atom_get_data_table(hwmgr->device, 677 smu_atom_get_data_table(hwmgr->adev,
644 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), 678 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
645 NULL, NULL, NULL); 679 NULL, NULL, NULL);
646 680
@@ -706,9 +740,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
706 740
707 sOutput_FuseValues.sEfuse = sInput_FuseValues; 741 sOutput_FuseValues.sEfuse = sInput_FuseValues;
708 742
709 result = cgs_atom_exec_cmd_table(hwmgr->device, 743 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
710 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 744 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
711 &sOutput_FuseValues); 745 (uint32_t *)&sOutput_FuseValues);
712 746
713 if (result) 747 if (result)
714 return result; 748 return result;
@@ -727,9 +761,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
727 761
728 sOutput_FuseValues.sEfuse = sInput_FuseValues; 762 sOutput_FuseValues.sEfuse = sInput_FuseValues;
729 763
730 result = cgs_atom_exec_cmd_table(hwmgr->device, 764 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
731 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 765 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
732 &sOutput_FuseValues); 766 (uint32_t *)&sOutput_FuseValues);
733 767
734 if (result) 768 if (result)
735 return result; 769 return result;
@@ -747,9 +781,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
747 sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength; 781 sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
748 sOutput_FuseValues.sEfuse = sInput_FuseValues; 782 sOutput_FuseValues.sEfuse = sInput_FuseValues;
749 783
750 result = cgs_atom_exec_cmd_table(hwmgr->device, 784 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
751 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 785 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
752 &sOutput_FuseValues); 786 (uint32_t *)&sOutput_FuseValues);
753 787
754 if (result) 788 if (result)
755 return result; 789 return result;
@@ -768,9 +802,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
768 802
769 sOutput_FuseValues.sEfuse = sInput_FuseValues; 803 sOutput_FuseValues.sEfuse = sInput_FuseValues;
770 804
771 result = cgs_atom_exec_cmd_table(hwmgr->device, 805 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
772 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 806 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
773 &sOutput_FuseValues); 807 (uint32_t *)&sOutput_FuseValues);
774 808
775 if (result) 809 if (result)
776 return result; 810 return result;
@@ -790,9 +824,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
790 824
791 sOutput_FuseValues.sEfuse = sInput_FuseValues; 825 sOutput_FuseValues.sEfuse = sInput_FuseValues;
792 826
793 result = cgs_atom_exec_cmd_table(hwmgr->device, 827 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
794 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 828 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
795 &sOutput_FuseValues); 829 (uint32_t *)&sOutput_FuseValues);
796 if (result) 830 if (result)
797 return result; 831 return result;
798 832
@@ -811,9 +845,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
811 sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength; 845 sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
812 sOutput_FuseValues.sEfuse = sInput_FuseValues; 846 sOutput_FuseValues.sEfuse = sInput_FuseValues;
813 847
814 result = cgs_atom_exec_cmd_table(hwmgr->device, 848 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
815 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 849 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
816 &sOutput_FuseValues); 850 (uint32_t *)&sOutput_FuseValues);
817 851
818 if (result) 852 if (result)
819 return result; 853 return result;
@@ -842,9 +876,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
842 876
843 sOutput_FuseValues.sEfuse = sInput_FuseValues; 877 sOutput_FuseValues.sEfuse = sInput_FuseValues;
844 878
845 result = cgs_atom_exec_cmd_table(hwmgr->device, 879 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
846 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 880 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
847 &sOutput_FuseValues); 881 (uint32_t *)&sOutput_FuseValues);
848 882
849 if (result) 883 if (result)
850 return result; 884 return result;
@@ -1053,8 +1087,9 @@ int atomctrl_get_voltage_evv_on_sclk(
1053 uint32_t sclk, uint16_t virtual_voltage_Id, 1087 uint32_t sclk, uint16_t virtual_voltage_Id,
1054 uint16_t *voltage) 1088 uint16_t *voltage)
1055{ 1089{
1056 int result; 1090 struct amdgpu_device *adev = hwmgr->adev;
1057 GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; 1091 GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
1092 int result;
1058 1093
1059 get_voltage_info_param_space.ucVoltageType = 1094 get_voltage_info_param_space.ucVoltageType =
1060 voltage_type; 1095 voltage_type;
@@ -1065,9 +1100,9 @@ int atomctrl_get_voltage_evv_on_sclk(
1065 get_voltage_info_param_space.ulSCLKFreq = 1100 get_voltage_info_param_space.ulSCLKFreq =
1066 cpu_to_le32(sclk); 1101 cpu_to_le32(sclk);
1067 1102
1068 result = cgs_atom_exec_cmd_table(hwmgr->device, 1103 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1069 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), 1104 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1070 &get_voltage_info_param_space); 1105 (uint32_t *)&get_voltage_info_param_space);
1071 1106
1072 if (0 != result) 1107 if (0 != result)
1073 return result; 1108 return result;
@@ -1088,9 +1123,10 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
1088 uint16_t virtual_voltage_id, 1123 uint16_t virtual_voltage_id,
1089 uint16_t *voltage) 1124 uint16_t *voltage)
1090{ 1125{
1126 struct amdgpu_device *adev = hwmgr->adev;
1127 GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
1091 int result; 1128 int result;
1092 int entry_id; 1129 int entry_id;
1093 GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
1094 1130
1095 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */ 1131 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
1096 for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) { 1132 for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
@@ -1111,9 +1147,9 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
1111 get_voltage_info_param_space.ulSCLKFreq = 1147 get_voltage_info_param_space.ulSCLKFreq =
1112 cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk); 1148 cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
1113 1149
1114 result = cgs_atom_exec_cmd_table(hwmgr->device, 1150 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1115 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), 1151 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1116 &get_voltage_info_param_space); 1152 (uint32_t *)&get_voltage_info_param_space);
1117 1153
1118 if (0 != result) 1154 if (0 != result)
1119 return result; 1155 return result;
@@ -1135,7 +1171,7 @@ uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
1135 u16 size; 1171 u16 size;
1136 1172
1137 fw_info = (ATOM_COMMON_TABLE_HEADER *) 1173 fw_info = (ATOM_COMMON_TABLE_HEADER *)
1138 cgs_atom_get_data_table(hwmgr->device, 1174 smu_atom_get_data_table(hwmgr->adev,
1139 GetIndexIntoMasterTable(DATA, FirmwareInfo), 1175 GetIndexIntoMasterTable(DATA, FirmwareInfo),
1140 &size, &frev, &crev); 1176 &size, &frev, &crev);
1141 1177
@@ -1167,7 +1203,7 @@ static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
1167 u16 size; 1203 u16 size;
1168 1204
1169 table = (ATOM_ASIC_INTERNAL_SS_INFO *) 1205 table = (ATOM_ASIC_INTERNAL_SS_INFO *)
1170 cgs_atom_get_data_table(device, 1206 smu_atom_get_data_table(device,
1171 GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info), 1207 GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
1172 &size, &frev, &crev); 1208 &size, &frev, &crev);
1173 1209
@@ -1188,7 +1224,7 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
1188 1224
1189 memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info)); 1225 memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));
1190 1226
1191 table = asic_internal_ss_get_ss_table(hwmgr->device); 1227 table = asic_internal_ss_get_ss_table(hwmgr->adev);
1192 1228
1193 if (NULL == table) 1229 if (NULL == table)
1194 return -1; 1230 return -1;
@@ -1260,9 +1296,10 @@ int atomctrl_get_engine_clock_spread_spectrum(
1260 ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo); 1296 ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo);
1261} 1297}
1262 1298
1263int atomctrl_read_efuse(void *device, uint16_t start_index, 1299int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
1264 uint16_t end_index, uint32_t mask, uint32_t *efuse) 1300 uint16_t end_index, uint32_t mask, uint32_t *efuse)
1265{ 1301{
1302 struct amdgpu_device *adev = hwmgr->adev;
1266 int result; 1303 int result;
1267 READ_EFUSE_VALUE_PARAMETER efuse_param; 1304 READ_EFUSE_VALUE_PARAMETER efuse_param;
1268 1305
@@ -1272,9 +1309,9 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
1272 efuse_param.sEfuse.ucBitLength = (uint8_t) 1309 efuse_param.sEfuse.ucBitLength = (uint8_t)
1273 ((end_index - start_index) + 1); 1310 ((end_index - start_index) + 1);
1274 1311
1275 result = cgs_atom_exec_cmd_table(device, 1312 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1276 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 1313 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
1277 &efuse_param); 1314 (uint32_t *)&efuse_param);
1278 if (!result) 1315 if (!result)
1279 *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask; 1316 *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
1280 1317
@@ -1284,6 +1321,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
1284int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, 1321int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
1285 uint8_t level) 1322 uint8_t level)
1286{ 1323{
1324 struct amdgpu_device *adev = hwmgr->adev;
1287 DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters; 1325 DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
1288 int result; 1326 int result;
1289 1327
@@ -1293,10 +1331,9 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
1293 ADJUST_MC_SETTING_PARAM; 1331 ADJUST_MC_SETTING_PARAM;
1294 memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level; 1332 memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
1295 1333
1296 result = cgs_atom_exec_cmd_table 1334 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1297 (hwmgr->device,
1298 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), 1335 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
1299 &memory_clock_parameters); 1336 (uint32_t *)&memory_clock_parameters);
1300 1337
1301 return result; 1338 return result;
1302} 1339}
@@ -1304,7 +1341,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
1304int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, 1341int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
1305 uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage) 1342 uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
1306{ 1343{
1307 1344 struct amdgpu_device *adev = hwmgr->adev;
1308 int result; 1345 int result;
1309 GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space; 1346 GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space;
1310 1347
@@ -1313,9 +1350,9 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
1313 get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id); 1350 get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
1314 get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk); 1351 get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
1315 1352
1316 result = cgs_atom_exec_cmd_table(hwmgr->device, 1353 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1317 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), 1354 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1318 &get_voltage_info_param_space); 1355 (uint32_t *)&get_voltage_info_param_space);
1319 1356
1320 if (0 != result) 1357 if (0 != result)
1321 return result; 1358 return result;
@@ -1334,7 +1371,7 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
1334 u16 size; 1371 u16 size;
1335 1372
1336 ATOM_SMU_INFO_V2_1 *psmu_info = 1373 ATOM_SMU_INFO_V2_1 *psmu_info =
1337 (ATOM_SMU_INFO_V2_1 *)cgs_atom_get_data_table(hwmgr->device, 1374 (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
1338 GetIndexIntoMasterTable(DATA, SMU_Info), 1375 GetIndexIntoMasterTable(DATA, SMU_Info),
1339 &size, &frev, &crev); 1376 &size, &frev, &crev);
1340 1377
@@ -1362,7 +1399,7 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
1362 return -EINVAL; 1399 return -EINVAL;
1363 1400
1364 profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *) 1401 profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
1365 cgs_atom_get_data_table(hwmgr->device, 1402 smu_atom_get_data_table(hwmgr->adev,
1366 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), 1403 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
1367 NULL, NULL, NULL); 1404 NULL, NULL, NULL);
1368 if (!profile) 1405 if (!profile)
@@ -1402,7 +1439,7 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
1402 uint16_t *load_line) 1439 uint16_t *load_line)
1403{ 1440{
1404 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = 1441 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
1405 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); 1442 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
1406 1443
1407 const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; 1444 const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
1408 1445
@@ -1421,16 +1458,17 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
1421 1458
1422int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id) 1459int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
1423{ 1460{
1424 int result; 1461 struct amdgpu_device *adev = hwmgr->adev;
1425 SET_VOLTAGE_PS_ALLOCATION allocation; 1462 SET_VOLTAGE_PS_ALLOCATION allocation;
1426 SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters = 1463 SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
1427 (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage; 1464 (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
1465 int result;
1428 1466
1429 voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID; 1467 voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;
1430 1468
1431 result = cgs_atom_exec_cmd_table(hwmgr->device, 1469 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1432 GetIndexIntoMasterTable(COMMAND, SetVoltage), 1470 GetIndexIntoMasterTable(COMMAND, SetVoltage),
1433 voltage_parameters); 1471 (uint32_t *)voltage_parameters);
1434 1472
1435 *virtual_voltage_id = voltage_parameters->usVoltageLevel; 1473 *virtual_voltage_id = voltage_parameters->usVoltageLevel;
1436 1474
@@ -1453,7 +1491,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
1453 ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); 1491 ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
1454 1492
1455 profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) 1493 profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
1456 cgs_atom_get_data_table(hwmgr->device, 1494 smu_atom_get_data_table(hwmgr->adev,
1457 ix, 1495 ix,
1458 NULL, NULL, NULL); 1496 NULL, NULL, NULL);
1459 if (!profile) 1497 if (!profile)
@@ -1498,3 +1536,31 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
1498 1536
1499 return 0; 1537 return 0;
1500} 1538}
1539
1540void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
1541 uint32_t *min_vddc)
1542{
1543 void *profile;
1544
1545 profile = smu_atom_get_data_table(hwmgr->adev,
1546 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
1547 NULL, NULL, NULL);
1548
1549 if (profile) {
1550 switch (hwmgr->chip_id) {
1551 case CHIP_TONGA:
1552 case CHIP_FIJI:
1553 *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
1554 *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
1555 break;
1556 case CHIP_POLARIS11:
1557 case CHIP_POLARIS10:
1558 case CHIP_POLARIS12:
1559 *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
1560 *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
1561 break;
1562 default:
1563 return;
1564 }
1565 }
1566}
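
The theme of the ppatomctrl.c hunks: every cgs_atom_exec_cmd_table(hwmgr->device, ...) becomes amdgpu_atom_execute_table(adev->mode_info.atom_context, ...) with the parameter block cast to uint32_t *, since the AtomBIOS interpreter treats its argument as a raw dword workspace and writes results back into it; data-table reads move from cgs_atom_get_data_table to smu_atom_get_data_table(hwmgr->adev, ...) the same way. A tiny mock (not the amdgpu API, just the call shape) of that in/out dword-buffer convention:

#include <stdint.h>
#include <stdio.h>

/* Mock of the interpreter's view: it only sees a dword workspace and
 * writes its results back into the same buffer. Hypothetical, not the
 * amdgpu_atom_execute_table() implementation. */
static int mock_execute_table(int index, uint32_t *params)
{
        (void)index;
        params[0] *= 2;     /* pretend the table transforms the clock */
        return 0;           /* 0 == success, nonzero == failure */
}

/* Caller-side parameter block; the real ATOM structs are packed
 * little-endian records, hence the cpu_to_le32()/le32_to_cpu() pairs
 * around every field in the driver. */
struct clock_params {
        uint32_t clock_10khz;
};

int main(void)
{
        struct clock_params p = { .clock_10khz = 30000 };  /* 300 MHz */

        if (mock_execute_table(42, (uint32_t *)&p))
                return 1;

        printf("returned clock: %u x10kHz\n", (unsigned)p.clock_10khz);
        return 0;
}

The cast is benign for these parameter blocks because, as the surrounding code suggests, they are laid out as packed little-endian dword records that double as both input and output.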
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index c44a92064cf1..3ee54f182943 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -146,6 +146,14 @@ struct pp_atomctrl_memory_clock_param {
146}; 146};
147typedef struct pp_atomctrl_memory_clock_param pp_atomctrl_memory_clock_param; 147typedef struct pp_atomctrl_memory_clock_param pp_atomctrl_memory_clock_param;
148 148
149struct pp_atomctrl_memory_clock_param_ai {
150 uint32_t ulClock;
151 uint32_t ulPostDiv;
152 uint16_t ulMclk_fcw_frac;
153 uint16_t ulMclk_fcw_int;
154};
155typedef struct pp_atomctrl_memory_clock_param_ai pp_atomctrl_memory_clock_param_ai;
156
149struct pp_atomctrl_internal_ss_info { 157struct pp_atomctrl_internal_ss_info {
150 uint32_t speed_spectrum_percentage; /* in 1/100 percentage */ 158 uint32_t speed_spectrum_percentage; /* in 1/100 percentage */
151 uint32_t speed_spectrum_rate; /* in KHz */ 159 uint32_t speed_spectrum_rate; /* in KHz */
@@ -295,10 +303,12 @@ extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, ui
295extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table); 303extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table);
296extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, 304extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
297 uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param); 305 uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param);
306extern int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
307 uint32_t clock_value, pp_atomctrl_memory_clock_param_ai *mpll_param);
298extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, 308extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
299 uint32_t clock_value, 309 uint32_t clock_value,
300 pp_atomctrl_clock_dividers_kong *dividers); 310 pp_atomctrl_clock_dividers_kong *dividers);
301extern int atomctrl_read_efuse(void *device, uint16_t start_index, 311extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
302 uint16_t end_index, uint32_t mask, uint32_t *efuse); 312 uint16_t end_index, uint32_t mask, uint32_t *efuse);
303extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, 313extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
304 uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug); 314 uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug);
@@ -320,5 +330,8 @@ extern int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
320 uint16_t virtual_voltage_id, 330 uint16_t virtual_voltage_id,
321 uint16_t efuse_voltage_id); 331 uint16_t efuse_voltage_id);
322extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id); 332extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id);
333
334extern void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
335 uint32_t *min_vddc);
323#endif 336#endif
324 337
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index ad42caac033e..c97b0e5ba43b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -23,9 +23,9 @@
23 23
24#include "ppatomfwctrl.h" 24#include "ppatomfwctrl.h"
25#include "atomfirmware.h" 25#include "atomfirmware.h"
26#include "atom.h"
26#include "pp_debug.h" 27#include "pp_debug.h"
27 28
28
29static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4( 29static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4(
30 const struct atom_voltage_objects_info_v4_1 *voltage_object_info_table, 30 const struct atom_voltage_objects_info_v4_1 *voltage_object_info_table,
31 uint8_t voltage_type, uint8_t voltage_mode) 31 uint8_t voltage_type, uint8_t voltage_mode)
@@ -38,35 +38,34 @@ static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4(
38 38
39 while (offset < size) { 39 while (offset < size) {
40 const union atom_voltage_object_v4 *voltage_object = 40 const union atom_voltage_object_v4 *voltage_object =
41 (const union atom_voltage_object_v4 *)(start + offset); 41 (const union atom_voltage_object_v4 *)(start + offset);
42 42
43 if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type && 43 if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type &&
44 voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode) 44 voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode)
45 return voltage_object; 45 return voltage_object;
46 46
47 offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size); 47 offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size);
48 48
49 } 49 }
50 50
51 return NULL; 51 return NULL;
52} 52}
53 53
54static struct atom_voltage_objects_info_v4_1 *pp_atomfwctrl_get_voltage_info_table( 54static struct atom_voltage_objects_info_v4_1 *pp_atomfwctrl_get_voltage_info_table(
55 struct pp_hwmgr *hwmgr) 55 struct pp_hwmgr *hwmgr)
56{ 56{
57 const void *table_address; 57 const void *table_address;
58 uint16_t idx; 58 uint16_t idx;
59 59
60 idx = GetIndexIntoMasterDataTable(voltageobject_info); 60 idx = GetIndexIntoMasterDataTable(voltageobject_info);
61 table_address = cgs_atom_get_data_table(hwmgr->device, 61 table_address = smu_atom_get_data_table(hwmgr->adev,
62 idx, NULL, NULL, NULL); 62 idx, NULL, NULL, NULL);
63 63
64 PP_ASSERT_WITH_CODE( 64 PP_ASSERT_WITH_CODE(table_address,
65 table_address, 65 "Error retrieving BIOS Table Address!",
66 "Error retrieving BIOS Table Address!", 66 return NULL);
67 return NULL);
68 67
69 return (struct atom_voltage_objects_info_v4_1 *)table_address; 68 return (struct atom_voltage_objects_info_v4_1 *)table_address;
70} 69}
71 70
72/** 71/**
@@ -167,7 +166,7 @@ static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table(
167 uint16_t idx; 166 uint16_t idx;
168 167
169 idx = GetIndexIntoMasterDataTable(gpio_pin_lut); 168 idx = GetIndexIntoMasterDataTable(gpio_pin_lut);
170 table_address = cgs_atom_get_data_table(hwmgr->device, 169 table_address = smu_atom_get_data_table(hwmgr->adev,
171 idx, NULL, NULL, NULL); 170 idx, NULL, NULL, NULL);
172 PP_ASSERT_WITH_CODE(table_address, 171 PP_ASSERT_WITH_CODE(table_address,
173 "Error retrieving BIOS Table Address!", 172 "Error retrieving BIOS Table Address!",
@@ -248,28 +247,30 @@ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
248 uint32_t clock_type, uint32_t clock_value, 247 uint32_t clock_type, uint32_t clock_value,
 		struct pp_atomfwctrl_clock_dividers_soc15 *dividers)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	struct compute_gpu_clock_input_parameter_v1_8 pll_parameters;
 	struct compute_gpu_clock_output_parameter_v1_8 *pll_output;
-	int result;
 	uint32_t idx;
 
 	pll_parameters.gpuclock_10khz = (uint32_t)clock_value;
 	pll_parameters.gpu_clock_type = clock_type;
 
 	idx = GetIndexIntoMasterCmdTable(computegpuclockparam);
-	result = cgs_atom_exec_cmd_table(hwmgr->device, idx, &pll_parameters);
-
-	if (!result) {
-		pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
-				&pll_parameters;
-		dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz);
-		dividers->ulDid = le32_to_cpu(pll_output->dfs_did);
-		dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult);
-		dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult);
-		dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac);
-		dividers->ucPll_ss_enable = pll_output->pll_ss_enable;
-	}
-	return result;
+
+	if (amdgpu_atom_execute_table(
+		adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters))
+		return -EINVAL;
+
+	pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
+			&pll_parameters;
+	dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz);
+	dividers->ulDid = le32_to_cpu(pll_output->dfs_did);
+	dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult);
+	dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult);
+	dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac);
+	dividers->ucPll_ss_enable = pll_output->pll_ss_enable;
+
+	return 0;
 }
 
 int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
@@ -283,7 +284,7 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
 
 	idx = GetIndexIntoMasterDataTable(asic_profiling_info);
 	profile = (struct atom_asic_profiling_info_v4_1 *)
-			cgs_atom_get_data_table(hwmgr->device,
+			smu_atom_get_data_table(hwmgr->adev,
 			idx, NULL, NULL, NULL);
 
 	if (!profile)
@@ -467,7 +468,7 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
 
 	idx = GetIndexIntoMasterDataTable(smu_info);
 	info = (struct atom_smu_info_v3_1 *)
-		cgs_atom_get_data_table(hwmgr->device,
+		smu_atom_get_data_table(hwmgr->adev,
 		idx, NULL, NULL, NULL);
 
 	if (!info) {
@@ -487,8 +488,9 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency)
+int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	struct atom_get_smu_clock_info_parameters_v3_1 parameters;
 	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
 	uint32_t ix;
@@ -497,13 +499,13 @@ int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLK
 	parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
 
 	ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);
-	if (!cgs_atom_exec_cmd_table(hwmgr->device, ix, &parameters)) {
-		output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
-		*frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000;
-	} else {
-		pr_info("Error execute_table getsmuclockinfo!");
-		return -1;
-	}
+
+	if (amdgpu_atom_execute_table(
+		adev->mode_info.atom_context, ix, (uint32_t *)&parameters))
+		return -EINVAL;
+
+	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
+	*frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000;
 
 	return 0;
 }
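Both converted functions share one calling convention: the parameter structure is handed to the VBIOS interpreter in place and, on success, the very same buffer is reinterpreted as the output structure, with the reported Hz divided by 10000 into the driver's 10 kHz units. A minimal userspace sketch of that in-place pattern follows; the struct layout, names, and the 600 MHz value are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

struct clk_info_in  { uint8_t clk_id; uint8_t command; uint16_t pad; };
struct clk_info_out { uint32_t clock_freq_hz; };
union  clk_info     { struct clk_info_in in; struct clk_info_out out; };

/* Stand-in for the VBIOS interpreter: on success it overwrites the
 * parameter buffer with the output payload and returns 0. */
static int exec_cmd_table(union clk_info *buf)
{
	buf->out.clock_freq_hz = 600000000; /* pretend the PLL reports 600 MHz */
	return 0;
}

static int get_clk_freq_10khz(uint8_t id, uint32_t *freq)
{
	union clk_info buf = { .in = { .clk_id = id, .command = 0 } };

	if (exec_cmd_table(&buf))
		return -1;			/* the driver returns -EINVAL */

	*freq = buf.out.clock_freq_hz / 10000;	/* Hz -> 10 kHz units */
	return 0;
}

int main(void)
{
	uint32_t f;

	if (!get_clk_freq_10khz(0, &f))
		printf("%u x 10 kHz\n", f);	/* prints "60000 x 10 kHz" */
	return 0;
}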
@@ -513,11 +515,10 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 {
 	struct atom_firmware_info_v3_1 *info = NULL;
 	uint16_t ix;
-	uint32_t frequency = 0;
 
 	ix = GetIndexIntoMasterDataTable(firmwareinfo);
 	info = (struct atom_firmware_info_v3_1 *)
-		cgs_atom_get_data_table(hwmgr->device,
+		smu_atom_get_data_table(hwmgr->adev,
 		ix, NULL, NULL, NULL);
 
 	if (!info) {
@@ -536,12 +537,6 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 	boot_values->ulSocClk = 0;
 	boot_values->ulDCEFClk = 0;
 
-	if (!pp_atomfwctrl__get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
-		boot_values->ulSocClk = frequency;
-
-	if (!pp_atomfwctrl__get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
-		boot_values->ulDCEFClk = frequency;
-
 	return 0;
 }
 
@@ -553,7 +548,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
 
 	ix = GetIndexIntoMasterDataTable(smc_dpm_info);
 	info = (struct atom_smc_dpm_info_v4_1 *)
-		cgs_atom_get_data_table(hwmgr->device,
+		smu_atom_get_data_table(hwmgr->adev,
 		ix, NULL, NULL, NULL);
 	if (!info) {
 		pr_info("Error retrieving BIOS Table Address!");
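Every data-table lookup in this file now goes through the adev-based getter, which hands back a pointer into the VBIOS image plus optional size/frev/crev out-parameters, and every caller NULL-checks the result. A small sketch of that calling shape, assuming a toy in-memory table in place of the VBIOS:

#include <stdint.h>
#include <stdio.h>

/* Toy table; in the driver the blob lives inside the VBIOS image. */
static uint8_t smu_info_blob[] = { 3, 1, 0x42 }; /* frev, crev, payload */

/* Each out-parameter may be NULL, mirroring the
 * smu_atom_get_data_table(adev, idx, &size, &frev, &crev) shape. */
static void *get_data_table(int idx, uint16_t *size,
			    uint8_t *frev, uint8_t *crev)
{
	if (idx != 0)
		return NULL;		/* unknown table index */
	if (size)
		*size = sizeof(smu_info_blob);
	if (frev)
		*frev = smu_info_blob[0];
	if (crev)
		*crev = smu_info_blob[1];
	return smu_info_blob;
}

int main(void)
{
	uint8_t frev, crev;
	void *t = get_data_table(0, NULL, &frev, &crev);

	if (!t) {			/* every caller above checks this */
		fprintf(stderr, "Error retrieving BIOS Table Address!\n");
		return 1;
	}
	printf("table v%u.%u\n", frev, crev);
	return 0;
}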
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index 8df1e84f27c9..fe10aa4db5e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -230,6 +230,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 			struct pp_atomfwctrl_bios_boot_up_values *boot_values);
 int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
 			struct pp_atomfwctrl_smc_dpm_parameters *param);
+int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
+		BIOS_CLKID id, uint32_t *frequency);
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index c9eecce5683f..f0d48b183d22 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -141,7 +141,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
 
 	if (!table_address) {
 		table_address = (ATOM_Tonga_POWERPLAYTABLE *)
-				cgs_atom_get_data_table(hwmgr->device,
+				smu_atom_get_data_table(hwmgr->adev,
 				index, &size, &frev, &crev);
 		hwmgr->soft_pp_table = table_address;	/*Cache the result in RAM.*/
 		hwmgr->soft_pp_table_size = size;
@@ -728,6 +728,32 @@ static int get_mm_clock_voltage_table(
 	return 0;
 }
 
+static int get_gpio_table(struct pp_hwmgr *hwmgr,
+		struct phm_ppt_v1_gpio_table **pp_tonga_gpio_table,
+		const ATOM_Tonga_GPIO_Table *atom_gpio_table)
+{
+	uint32_t table_size;
+	struct phm_ppt_v1_gpio_table *pp_gpio_table;
+	struct phm_ppt_v1_information *pp_table_information =
+		(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	table_size = sizeof(struct phm_ppt_v1_gpio_table);
+	pp_gpio_table = kzalloc(table_size, GFP_KERNEL);
+	if (!pp_gpio_table)
+		return -ENOMEM;
+
+	if (pp_table_information->vdd_dep_on_sclk->count <
+			atom_gpio_table->ucVRHotTriggeredSclkDpmIndex)
+		PP_ASSERT_WITH_CODE(false,
+			"SCLK DPM index for VRHot cannot exceed the total sclk level count!",);
+	else
+		pp_gpio_table->vrhot_triggered_sclk_dpm_index =
+				atom_gpio_table->ucVRHotTriggeredSclkDpmIndex;
+
+	*pp_tonga_gpio_table = pp_gpio_table;
+
+	return 0;
+}
 /**
  * Private Function used during initialization.
  * Initialize clock voltage dependency
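The new get_gpio_table() allocates zeroed storage and only accepts the VBIOS-supplied VRHot index after checking it against the SCLK DPM level count; out-of-range input leaves the index at its zeroed default. A compact userspace model of that validate-then-store step (the assert message comes from the hunk, the rest is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct gpio_table { uint8_t vrhot_triggered_sclk_dpm_index; };

/* Allocate zeroed storage, then only accept the index if it fits. */
static struct gpio_table *make_gpio_table(uint32_t sclk_level_count,
					  uint8_t vrhot_index)
{
	struct gpio_table *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;			/* -ENOMEM in the driver */

	if (sclk_level_count < vrhot_index)
		fprintf(stderr,
			"SCLK DPM index for VRHot cannot exceed the total sclk level count!\n");
	else
		t->vrhot_triggered_sclk_dpm_index = vrhot_index;

	return t;	/* index stays 0 (from calloc) when out of range */
}

int main(void)
{
	struct gpio_table *t = make_gpio_table(8, 3);

	if (t)
		printf("vrhot index = %u\n", t->vrhot_triggered_sclk_dpm_index);
	free(t);
	return 0;
}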
@@ -761,11 +787,15 @@ static int init_clock_voltage_dependency(
 	const PPTable_Generic_SubTable_Header *pcie_table =
 		(const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
 		le16_to_cpu(powerplay_table->usPCIETableOffset));
+	const ATOM_Tonga_GPIO_Table *gpio_table =
+		(const ATOM_Tonga_GPIO_Table *)(((unsigned long) powerplay_table) +
+		le16_to_cpu(powerplay_table->usGPIOTableOffset));
 
 	pp_table_information->vdd_dep_on_sclk = NULL;
 	pp_table_information->vdd_dep_on_mclk = NULL;
 	pp_table_information->mm_dep_table = NULL;
 	pp_table_information->pcie_table = NULL;
+	pp_table_information->gpio_table = NULL;
 
 	if (powerplay_table->usMMDependencyTableOffset != 0)
 		result = get_mm_clock_voltage_table(hwmgr,
@@ -810,6 +840,10 @@ static int init_clock_voltage_dependency(
 	result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values,
 			pp_table_information->vdd_dep_on_sclk);
 
+	if (!result && gpio_table)
+		result = get_gpio_table(hwmgr, &pp_table_information->gpio_table,
+				gpio_table);
+
 	return result;
 }
 
@@ -1116,6 +1150,9 @@ static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
 	kfree(pp_table_information->pcie_table);
 	pp_table_information->pcie_table = NULL;
 
+	kfree(pp_table_information->gpio_table);
+	pp_table_information->gpio_table = NULL;
+
 	kfree(hwmgr->pptable);
 	hwmgr->pptable = NULL;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 36ca7c419c90..ce64dfabd34b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -837,7 +837,7 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
 		hwmgr->soft_pp_table = &soft_dummy_pp_table[0];
 		hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table);
 	} else {
-		table_addr = cgs_atom_get_data_table(hwmgr->device,
+		table_addr = smu_atom_get_data_table(hwmgr->adev,
 				GetIndexIntoMasterTable(DATA, PowerPlayInfo),
 				&size, &frev, &crev);
 		hwmgr->soft_pp_table = table_addr;
@@ -1058,7 +1058,7 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
 		return 0;
 
 	/* We assume here that fw_info is unchanged if this call fails.*/
-	fw_info = cgs_atom_get_data_table(hwmgr->device,
+	fw_info = smu_atom_get_data_table(hwmgr->adev,
 			GetIndexIntoMasterTable(DATA, FirmwareInfo),
 			&size, &frev, &crev);
 
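Both pptable paths cache what they fetch: the first call resolves the table address and size, later calls serve the cached pointer. A sketch of that memoization, with a dummy in-memory blob standing in for the VBIOS table:

#include <stdint.h>
#include <stdio.h>

static const uint8_t vbios_pp_table[64];	/* pretend VBIOS payload */
static const void *soft_pp_table;		/* cached pointer */
static uint16_t soft_pp_table_size;

/* Mirrors get_powerplay_table(): hit the (slow) lookup only once. */
static const void *get_powerplay_table(void)
{
	if (!soft_pp_table) {
		/* the real code calls smu_atom_get_data_table() here */
		soft_pp_table = vbios_pp_table;
		soft_pp_table_size = sizeof(vbios_pp_table);
	}
	return soft_pp_table;
}

int main(void)
{
	const void *a = get_powerplay_table();
	const void *b = get_powerplay_table();

	printf("same table: %s, %u bytes\n",
	       a == b ? "yes" : "no", soft_pp_table_size);
	return 0;
}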
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 10253b89b3d8..2f69bfa478a7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -34,7 +34,7 @@
 #include "rv_ppsmc.h"
 #include "smu10_hwmgr.h"
 #include "power_state.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 
 #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID     5
 #define SMU10_MINIMUM_ENGINE_CLOCK         800   /* 8Mhz, the low boundary of engine clock allowed on this chip */
@@ -42,6 +42,13 @@
 #define SMU10_DISPCLK_BYPASS_THRESHOLD     10000 /* 100Mhz */
 #define SMC_RAM_END                        0x40000
 
+#define mmPWR_MISC_CNTL_STATUS					0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L
+
 static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
 
 
@@ -74,11 +81,15 @@ static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 	smu10_data->thermal_auto_throttling_treshold = 0;
 	smu10_data->is_nb_dpm_enabled = 1;
 	smu10_data->dpm_flags = 1;
-	smu10_data->gfx_off_controled_by_driver = false;
 	smu10_data->need_min_deep_sleep_dcefclk = true;
 	smu10_data->num_active_display = 0;
 	smu10_data->deep_sleep_dcefclk = 0;
 
+	if (hwmgr->feature_mask & PP_GFXOFF_MASK)
+		smu10_data->gfx_off_controled_by_driver = true;
+	else
+		smu10_data->gfx_off_controled_by_driver = false;
+
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SclkDeepSleep);
 
@@ -161,7 +172,7 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
 	struct PP_Clocks clocks = {0};
 	struct pp_display_clock_request clock_req;
 
-	clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
+	clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
 	clock_req.clock_type = amd_pp_dcf_clock;
 	clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
 
@@ -206,12 +217,18 @@ static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input
 static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
 {
 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	smu10_data->vcn_power_gated = true;
 	smu10_data->isp_tileA_power_gated = true;
 	smu10_data->isp_tileB_power_gated = true;
 
-	return 0;
+	if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
+		return smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetGfxCGPG,
+						true);
+	else
+		return 0;
 }
 
 
@@ -237,13 +254,31 @@ static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
 	return smu10_reset_cc6_data(hwmgr);
 }
 
+static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
+	if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
+	    (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
+		return true;
+
+	return false;
+}
+
 static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
 {
 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
 
-	if (smu10_data->gfx_off_controled_by_driver)
+	if (smu10_data->gfx_off_controled_by_driver) {
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
 
+		/* confirm gfx is back to "on" state */
+		while (!smu10_is_gfx_on(hwmgr))
+			msleep(1);
+	}
+
 	return 0;
 }
 
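smu10_is_gfx_on() masks a 2-bit status field out of PWR_MISC_CNTL_STATUS and treats the value 0x2 as "GFX on"; the disable path then polls once per millisecond until the block is confirmed back on. A userspace sketch of that masked-field poll, with a fake register that flips after a few reads:

#include <stdint.h>
#include <stdio.h>

#define GFXOFF_STATUS_MASK  0x00000006u	/* 2-bit field, bits [2:1] */
#define GFXOFF_STATUS_SHIFT 1

static uint32_t pwr_misc_cntl_status;	/* stand-in for the MMIO register */
static int polls;

static uint32_t read_status_reg(void)
{
	/* model the hardware flipping the field to 0x2 ("GFX on")
	 * a few polls after GFXOFF was disabled */
	if (++polls >= 3)
		pwr_misc_cntl_status |= 0x2u << GFXOFF_STATUS_SHIFT;
	return pwr_misc_cntl_status;
}

static int gfx_is_on(void)
{
	uint32_t reg = read_status_reg();

	return (reg & GFXOFF_STATUS_MASK) == (0x2u << GFXOFF_STATUS_SHIFT);
}

int main(void)
{
	while (!gfx_is_on())	/* the driver sleeps 1 ms per iteration */
		;
	printf("GFX back on after %d polls\n", polls);
	return 0;
}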
@@ -267,6 +302,14 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	return smu10_enable_gfx_off(hwmgr);
 }
 
+static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
+{
+	if (enable)
+		return smu10_enable_gfx_off(hwmgr);
+	else
+		return smu10_disable_gfx_off(hwmgr);
+}
+
 static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 				struct pp_power_state *prequest_ps,
 			const struct pp_power_state *pcurrent_ps)
@@ -340,7 +383,7 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
 
 static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
 {
-	int result;
+	uint32_t result;
 
 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
 	DpmClocks_t *table = &(smu10_data->clock_table);
@@ -386,11 +429,11 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
 
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
 	result = smum_get_argument(hwmgr);
-	smu10_data->gfx_min_freq_limit = result * 100;
+	smu10_data->gfx_min_freq_limit = result / 10 * 1000;
 
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
 	result = smum_get_argument(hwmgr);
-	smu10_data->gfx_max_freq_limit = result * 100;
+	smu10_data->gfx_max_freq_limit = result / 10 * 1000;
 
 	return 0;
 }
@@ -436,8 +479,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
 	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
 
-	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
-	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
+	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
+	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
 
 	return result;
 }
@@ -472,6 +515,8 @@ static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 				enum amd_dpm_forced_level level)
 {
+	struct smu10_hwmgr *data = hwmgr->backend;
+
 	if (hwmgr->smu_version < 0x1E3700) {
 		pr_info("smu firmware version too old, can not set dpm level\n");
 		return 0;
@@ -482,7 +527,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinGfxClk,
-						SMU10_UMD_PSTATE_PEAK_GFXCLK);
+						data->gfx_max_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinFclkByFreq,
 						SMU10_UMD_PSTATE_PEAK_FCLK);
@@ -495,7 +540,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxGfxClk,
-						SMU10_UMD_PSTATE_PEAK_GFXCLK);
+						data->gfx_max_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxFclkByFreq,
 						SMU10_UMD_PSTATE_PEAK_FCLK);
@@ -509,10 +554,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinGfxClk,
-						SMU10_UMD_PSTATE_MIN_GFXCLK);
+						data->gfx_min_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxGfxClk,
-						SMU10_UMD_PSTATE_MIN_GFXCLK);
+						data->gfx_min_freq_limit/100);
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -552,7 +597,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_AUTO:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinGfxClk,
-						SMU10_UMD_PSTATE_MIN_GFXCLK);
+						data->gfx_min_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinFclkByFreq,
 						SMU10_UMD_PSTATE_MIN_FCLK);
@@ -565,7 +610,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxGfxClk,
-						SMU10_UMD_PSTATE_PEAK_GFXCLK);
+						data->gfx_max_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxFclkByFreq,
 						SMU10_UMD_PSTATE_PEAK_FCLK);
@@ -579,10 +624,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_LOW:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinGfxClk,
-						SMU10_UMD_PSTATE_MIN_GFXCLK);
+						data->gfx_min_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxGfxClk,
-						SMU10_UMD_PSTATE_MIN_GFXCLK);
+						data->gfx_min_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinFclkByFreq,
 						SMU10_UMD_PSTATE_MIN_FCLK);
@@ -699,6 +744,16 @@ static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
 static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
 			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
 {
+	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+	if (separation_time != data->separation_time ||
+			cc6_disable != data->cc6_disable ||
+			pstate_disable != data->pstate_disable) {
+		data->separation_time = separation_time;
+		data->cc6_disable = cc6_disable;
+		data->pstate_disable = pstate_disable;
+		data->cc6_setting_changed = true;
+	}
 	return 0;
 }
 
@@ -711,6 +766,51 @@ static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
 static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
 			enum pp_clock_type type, uint32_t mask)
 {
+	struct smu10_hwmgr *data = hwmgr->backend;
+	struct smu10_voltage_dependency_table *mclk_table =
+				data->clock_vol_info.vdd_dep_on_fclk;
+	uint32_t low, high;
+
+	low = mask ? (ffs(mask) - 1) : 0;
+	high = mask ? (fls(mask) - 1) : 0;
+
+	switch (type) {
+	case PP_SCLK:
+		if (low > 2 || high > 2) {
+			pr_info("Currently sclk only support 3 levels on RV\n");
+			return -EINVAL;
+		}
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinGfxClk,
+						low == 2 ? data->gfx_max_freq_limit/100 :
+						low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
+						data->gfx_min_freq_limit/100);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxGfxClk,
+						high == 0 ? data->gfx_min_freq_limit/100 :
+						high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
+						data->gfx_max_freq_limit/100);
+		break;
+
+	case PP_MCLK:
+		if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
+			return -EINVAL;
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinFclkByFreq,
+						mclk_table->entries[low].clk/100);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxFclkByFreq,
+						mclk_table->entries[high].clk/100);
+		break;
+
+	case PP_PCIE:
+	default:
+		break;
+	}
 	return 0;
 }
 
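The mask decode in smu10_force_clock_level() takes the lowest set bit as the hard-minimum level and the highest set bit as the soft-maximum, so a sysfs write selecting levels 1 and 2 produces a level span. A sketch of the ffs()/fls() arithmetic using compiler builtins (an assumption; the kernel uses its own ffs/fls helpers):

#include <stdint.h>
#include <stdio.h>

static unsigned lowest_level(uint32_t mask)
{
	return mask ? (unsigned)__builtin_ffs(mask) - 1 : 0;
}

static unsigned highest_level(uint32_t mask)
{
	return mask ? 31u - (unsigned)__builtin_clz(mask) : 0;
}

int main(void)
{
	uint32_t mask = 0x6;	/* binary 0110: DPM levels 1 and 2 selected */

	printf("hard min = level %u, soft max = level %u\n",
	       lowest_level(mask), highest_level(mask));	/* 1 and 2 */
	return 0;
}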
@@ -720,21 +820,30 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
 	struct smu10_voltage_dependency_table *mclk_table =
 			data->clock_vol_info.vdd_dep_on_fclk;
-	int i, now, size = 0;
+	uint32_t i, now, size = 0;
 
 	switch (type) {
 	case PP_SCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
 		now = smum_get_argument(hwmgr);
 
+	/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
+		if (now == data->gfx_max_freq_limit/100)
+			i = 2;
+		else if (now == data->gfx_min_freq_limit/100)
+			i = 0;
+		else
+			i = 1;
+
 		size += sprintf(buf + size, "0: %uMhz %s\n",
-					data->gfx_min_freq_limit / 100,
-					((data->gfx_min_freq_limit / 100)
-					 == now) ? "*" : "");
+					data->gfx_min_freq_limit/100,
+					i == 0 ? "*" : "");
 		size += sprintf(buf + size, "1: %uMhz %s\n",
-					data->gfx_max_freq_limit / 100,
-					((data->gfx_max_freq_limit / 100)
-					 == now) ? "*" : "");
+					i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
+					i == 1 ? "*" : "");
+		size += sprintf(buf + size, "2: %uMhz %s\n",
+					data->gfx_max_freq_limit/100,
+					i == 2 ? "*" : "");
 		break;
 	case PP_MCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
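The reworked dump always prints three SCLK levels: hardware only reports min and max, so any other current clock is shown as the intermediate level 1, and the '*' marker follows whichever level matched. A self-contained sketch of that formatting:

#include <stdint.h>
#include <stdio.h>

/* Values are MHz; pstate_mhz is the placeholder printed for level 1
 * when the current clock happens to sit exactly at min or max. */
static int print_levels(char *buf, uint32_t min, uint32_t max,
			uint32_t now, uint32_t pstate_mhz)
{
	int i, size = 0;

	if (now == max)
		i = 2;
	else if (now == min)
		i = 0;
	else
		i = 1;

	size += sprintf(buf + size, "0: %uMhz %s\n", min, i == 0 ? "*" : "");
	size += sprintf(buf + size, "1: %uMhz %s\n",
			i == 1 ? now : pstate_mhz, i == 1 ? "*" : "");
	size += sprintf(buf + size, "2: %uMhz %s\n", max, i == 2 ? "*" : "");
	return size;
}

int main(void)
{
	char buf[128];

	print_levels(buf, 200, 1100, 700, 700);
	fputs(buf, stdout);	/* the '*' lands on level 1 */
	return 0;
}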
@@ -947,9 +1056,8 @@ static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simpl
 
 static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 {
-	uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
-			mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
-	uint32_t reg_value = cgs_read_register(hwmgr->device, reg_offset);
+	struct amdgpu_device *adev = hwmgr->adev;
+	uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
 	int cur_temp =
 		(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;
 
@@ -993,6 +1101,25 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 	return ret;
 }
 
+static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+{
+	struct smu10_hwmgr *data = hwmgr->backend;
+	Watermarks_t *table = &(data->water_marks_table);
+	int result = 0;
+
+	smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
+	smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
+	data->water_marks_exist = true;
+	return result;
+}
+
+static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
+{
+
+	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
+}
+
 static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
 {
 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
@@ -1022,6 +1149,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
 	.get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
 	.get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
+	.set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
 	.get_max_high_clocks = smu10_get_max_high_clocks,
 	.read_sensor = smu10_read_sensor,
 	.set_active_display_count = smu10_set_active_display_count,
@@ -1032,6 +1160,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.power_state_set = smu10_set_power_state_tasks,
 	.dynamic_state_management_disable = smu10_disable_dpm_tasks,
 	.set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+	.smus_notify_pwe = smu10_smus_notify_pwe,
+	.gfx_off_control = smu10_gfx_off_control,
 };
 
 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
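The two new callbacks are wired into smu10_hwmgr_funcs, the usual hwmgr-backend pattern: a const struct of function pointers filled with designated initializers, where absent callbacks stay NULL and must be checked before dispatch. A reduced model of that pattern (names here are invented for illustration):

#include <stdio.h>

struct hwmgr;
struct hwmgr_funcs {
	int (*gfx_off_control)(struct hwmgr *h, int enable);
	int (*smus_notify_pwe)(struct hwmgr *h);
};

static int my_gfx_off_control(struct hwmgr *h, int enable)
{
	(void)h;
	printf("gfxoff %s\n", enable ? "enabled" : "disabled");
	return 0;
}

static const struct hwmgr_funcs funcs = {
	.gfx_off_control = my_gfx_off_control,
	/* .smus_notify_pwe deliberately left NULL */
};

int main(void)
{
	if (funcs.gfx_off_control)		/* guard before dispatch */
		funcs.gfx_off_control(NULL, 1);
	if (!funcs.smus_notify_pwe)
		printf("smus_notify_pwe not implemented\n");
	return 0;
}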
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index 175c3a592b6c..1fb296a996f3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -290,6 +290,7 @@ struct smu10_hwmgr {
 	bool vcn_dpg_mode;
 
 	bool gfx_off_controled_by_driver;
+	bool water_marks_exist;
 	Watermarks_t water_marks_table;
 	struct smu10_clock_voltage_information clock_vol_info;
 	DpmClocks_t clock_table;
@@ -310,11 +311,9 @@ int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
 #define SMU10_UMD_PSTATE_FCLK                  933
 #define SMU10_UMD_PSTATE_VCE                   0x03C00320
 
-#define SMU10_UMD_PSTATE_PEAK_GFXCLK           1100
 #define SMU10_UMD_PSTATE_PEAK_SOCCLK           757
 #define SMU10_UMD_PSTATE_PEAK_FCLK             1200
 
-#define SMU10_UMD_PSTATE_MIN_GFXCLK            200
 #define SMU10_UMD_PSTATE_MIN_FCLK              400
 #define SMU10_UMD_PSTATE_MIN_SOCCLK            200
 #define SMU10_UMD_PSTATE_MIN_VCE               0x0190012C
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index f4cbaee4e2ca..6d72a5600917 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -147,20 +147,20 @@ void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 	data->uvd_power_gated = bgate;
 
 	if (bgate) {
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 				AMD_IP_BLOCK_TYPE_UVD,
 				AMD_CG_STATE_GATE);
 		smu7_update_uvd_dpm(hwmgr, true);
 		smu7_powerdown_uvd(hwmgr);
 	} else {
 		smu7_powerup_uvd(hwmgr);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 				AMD_IP_BLOCK_TYPE_UVD,
 				AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_UNGATE);
 		smu7_update_uvd_dpm(hwmgr, false);
@@ -175,20 +175,20 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 	data->vce_power_gated = bgate;
 
 	if (bgate) {
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_VCE,
 						AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_GATE);
 		smu7_update_vce_dpm(hwmgr, true);
 		smu7_powerdown_vce(hwmgr);
 	} else {
 		smu7_powerup_vce(hwmgr);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_VCE,
 						AMD_PG_STATE_UNGATE);
 		smu7_update_vce_dpm(hwmgr, false);
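Note the ordering both helpers preserve across the API swap: gating programs powergating, then clockgating, then powers the block down, while ungating powers up first and releases the gates in reverse. A toy model of that symmetric sequence:

#include <stdio.h>

static void set_powergating(const char *s) { printf("PG -> %s\n", s); }
static void set_clockgating(const char *s) { printf("CG -> %s\n", s); }

/* Gate: PG, CG, power down. Ungate: power up, CG, PG (reverse order). */
static void powergate_block(int gate)
{
	if (gate) {
		set_powergating("GATE");
		set_clockgating("GATE");
		printf("power down block\n");
	} else {
		printf("power up block\n");
		set_clockgating("UNGATE");
		set_powergating("UNGATE");
	}
}

int main(void)
{
	powergate_block(1);
	powergate_block(0);
	return 0;
}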
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 26fbeafc3c96..8eb3f5176646 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -61,10 +61,6 @@
 #define SMC_CG_IND_START            0xc0030000
 #define SMC_CG_IND_END              0xc0040000
 
-#define VOLTAGE_SCALE               4
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-
 #define MEM_FREQ_LOW_LATENCY        25000
 #define MEM_FREQ_HIGH_LATENCY       80000
 
@@ -79,14 +75,23 @@
 #define PCIE_BUS_CLK                10000
 #define TCLK                        (PCIE_BUS_CLK / 10)
 
-static const struct profile_mode_setting smu7_profiling[5] =
+static const struct profile_mode_setting smu7_profiling[6] =
 					{{1, 0, 100, 30, 1, 0, 100, 10},
 					 {1, 10, 0, 30, 0, 0, 0, 0},
 					 {0, 0, 0, 0, 1, 10, 16, 31},
 					 {1, 0, 11, 50, 1, 0, 100, 10},
 					 {1, 0, 5, 30, 0, 0, 0, 0},
+					 {0, 0, 0, 0, 0, 0, 0, 0},
 					};
 
+#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)
+
+#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
+#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
+#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
+#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
+#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006
+
 /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
 enum DPM_EVENT_SRC {
 	DPM_EVENT_SRC_ANALOG = 0,
@@ -168,6 +173,13 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 */
 static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
 {
+	if (hwmgr->chip_id == CHIP_VEGAM) {
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
+				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
+				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
+	}
+
 	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
 
@@ -797,32 +809,6 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-static int smu7_get_voltage_dependency_table(
-		const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
-		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
-	uint8_t i = 0;
-	PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
-			"Voltage Lookup Table empty",
-			return -EINVAL);
-
-	dep_table->count = allowed_dep_table->count;
-	for (i=0; i<dep_table->count; i++) {
-		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
-		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
-		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
-		dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
-		dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
-		dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
-		dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
-		dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
-		dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
-		dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
-	}
-
-	return 0;
-}
-
 static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -850,7 +836,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 		entries[i].vddc = dep_sclk_table->entries[i].vddc;
 	}
 
-	smu7_get_voltage_dependency_table(dep_sclk_table,
+	smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
 		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
 
 	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
@@ -862,12 +848,39 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 		entries[i].vddc = dep_mclk_table->entries[i].vddc;
 	}
 
-	smu7_get_voltage_dependency_table(dep_mclk_table,
+	smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
 		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
 
 	return 0;
 }
 
+static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t min_vddc, max_vddc;
+
+	if (!table_info)
+		return;
+
+	dep_sclk_table = table_info->vdd_dep_on_sclk;
+
+	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
+
+	if (min_vddc == 0 || min_vddc > 2000
+		|| min_vddc > dep_sclk_table->entries[0].vddc)
+		min_vddc = dep_sclk_table->entries[0].vddc;
+
+	if (max_vddc == 0 || max_vddc > 2000
+		|| max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
+		max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
+
+	data->odn_dpm_table.min_vddc = min_vddc;
+	data->odn_dpm_table.max_vddc = max_vddc;
+}
+
 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -886,8 +899,10 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			sizeof(struct smu7_dpm_table));
 
 	/* initialize ODN table */
-	if (hwmgr->od_enabled)
+	if (hwmgr->od_enabled) {
+		smu7_setup_voltage_range_from_vbios(hwmgr);
 		smu7_odn_initial_default_setting(hwmgr);
+	}
 
 	return 0;
 }
@@ -965,6 +980,22 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t soft_register_value = 0;
+	uint32_t handshake_disables_offset = data->soft_regs_start
+				+ smum_get_offsetof(hwmgr,
+					SMU_SoftRegisters, HandshakeDisables);
+
+	soft_register_value = cgs_read_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC, handshake_disables_offset);
+	soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			handshake_disables_offset, soft_register_value);
+	return 0;
+}
+
 static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -988,6 +1019,9 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 
 	/* enable SCLK dpm */
 	if (!data->sclk_dpm_key_disabled)
+		if (hwmgr->chip_id == CHIP_VEGAM)
+			smu7_disable_sclk_vce_handshake(hwmgr);
+
 		PP_ASSERT_WITH_CODE(
 		(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
 		"Failed to enable SCLK DPM during DPM Start Function!",
@@ -997,13 +1031,15 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 	if (0 == data->mclk_dpm_key_disabled) {
 		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
 			smu7_disable_handshake_uvd(hwmgr);
+
 		PP_ASSERT_WITH_CODE(
 				(0 == smum_send_msg_to_smc(hwmgr,
 						PPSMC_MSG_MCLKDPM_Enable)),
 				"Failed to enable MCLK DPM during DPM Start Function!",
 				return -EINVAL);
 
-		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+		if (hwmgr->chip_family != CHIP_VEGAM)
+			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
 
 		if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
@@ -1019,8 +1055,13 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
 			udelay(10);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+			if (hwmgr->chip_id == CHIP_VEGAM) {
+				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
+				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
+			} else {
+				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+			}
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
 		}
 	}
@@ -1229,7 +1270,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 
 		tmp_result = smu7_construct_voltage_tables(hwmgr);
 		PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to contruct voltage tables!",
+				"Failed to construct voltage tables!",
 				result = tmp_result);
 	}
 	smum_initialize_mc_reg_table(hwmgr);
@@ -1261,10 +1302,12 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to process firmware header!", result = tmp_result);
 
-	tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to initialize switch from ArbF0 to F1!",
-			result = tmp_result);
+	if (hwmgr->chip_id != CHIP_VEGAM) {
+		tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+				"Failed to initialize switch from ArbF0 to F1!",
+				result = tmp_result);
+	}
 
 	result = smu7_setup_default_dpm_tables(hwmgr);
 	PP_ASSERT_WITH_CODE(0 == result,
@@ -2754,6 +2797,9 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
 	case CHIP_POLARIS12:
 		switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
 		break;
+	case CHIP_VEGAM:
+		switch_limit_us = 30;
+		break;
 	default:
 		switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
 		break;
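smu7_vblank_too_short() compares the display's vblank time against a per-ASIC memory-clock switch budget, now 30 us for VEGAM, with a conservative GDDR5-aware default. A sketch of the lookup-plus-comparison (the enum is trimmed for illustration):

#include <stdint.h>
#include <stdio.h>

enum chip { CHIP_POLARIS12, CHIP_VEGAM, CHIP_OTHER };

/* MCLK switching must complete inside the vblank window, so each ASIC
 * gets its own worst-case switch time. */
static uint32_t switch_limit_us(enum chip id, int is_gddr5)
{
	switch (id) {
	case CHIP_POLARIS12:
		return is_gddr5 ? 190 : 150;
	case CHIP_VEGAM:
		return 30;
	default:
		return is_gddr5 ? 450 : 150;
	}
}

static int vblank_too_short(enum chip id, int is_gddr5, uint32_t vblank_us)
{
	return vblank_us < switch_limit_us(id, is_gddr5);
}

int main(void)
{
	printf("VEGAM, 100us vblank: %s\n",
	       vblank_too_short(CHIP_VEGAM, 1, 100) ? "too short" : "ok");
	return 0;
}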
@@ -2777,8 +2823,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	struct PP_Clocks minimum_clocks = {0};
 	bool disable_mclk_switching;
 	bool disable_mclk_switching_for_frame_lock;
-	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info = {0};
 	const struct phm_clock_and_voltage_limits *max_limits;
 	uint32_t i;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2787,7 +2831,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	int32_t count;
 	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
 
-	info.mode_info = &mode_info;
 	data->battery_state = (PP_StateUILabel_Battery ==
 			request_ps->classification.ui_label);
 
@@ -2809,10 +2852,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 		}
 	}
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
-	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
+	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_StablePState)) {
@@ -2843,12 +2884,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 			PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-	if (info.display_count == 0)
+	if (hwmgr->display_config->num_display == 0)
 		disable_mclk_switching = false;
 	else
-		disable_mclk_switching = ((1 < info.display_count) ||
+		disable_mclk_switching = ((1 < hwmgr->display_config->num_display) ||
 					  disable_mclk_switching_for_frame_lock ||
-					  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us));
+					  smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
 
 	sclk = smu7_ps->performance_levels[0].engine_clock;
 	mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -2957,8 +2998,7 @@ static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
 	/* First retrieve the Boot clocks and VDDC from the firmware info table.
 	 * We assume here that fw_info is unchanged if this call fails.
 	 */
-	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
-			hwmgr->device, index,
+	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
 			&size, &frev, &crev);
 	if (!fw_info)
 		/* During a test, there is no firmware info table. */
@@ -3366,34 +3406,35 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
-		struct pp_gpu_power *query)
+static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
 {
-	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_PmStatusLogStart),
-			"Failed to start pm status log!",
-			return -1);
+	int i;
+	u32 tmp = 0;
 
-	/* Sampling period from 50ms to 4sec */
-	msleep_interruptible(200);
+	if (!query)
+		return -EINVAL;
 
-	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_PmStatusLogSample),
-			"Failed to sample pm status log!",
-			return -1);
+	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+	tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+	*query = tmp;
 
-	query->vddc_power = cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC,
-			ixSMU_PM_STATUS_40);
-	query->vddci_power = cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC,
-			ixSMU_PM_STATUS_49);
-	query->max_gpu_power = cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC,
-			ixSMU_PM_STATUS_94);
-	query->average_gpu_power = cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC,
-			ixSMU_PM_STATUS_95);
+	if (tmp != 0)
+		return 0;
+
+	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+							ixSMU_PM_STATUS_94, 0);
+
+	for (i = 0; i < 10; i++) {
+		mdelay(1);
+		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
+		tmp = cgs_read_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC,
+				ixSMU_PM_STATUS_94);
+		if (tmp != 0)
+			break;
+	}
+	*query = tmp;
 
 	return 0;
 }
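The rewritten smu7_get_gpu_power() first tries the direct PPSMC_MSG_GetCurrPkgPwr query and, if the firmware answers zero, falls back to starting a PM status log, clearing the sample register, and polling it up to ten times at 1 ms intervals. A userspace sketch of that fast-path-plus-bounded-retry shape (stub functions stand in for the SMC messages):

#include <stdint.h>
#include <stdio.h>

static int attempts;

static uint32_t read_curr_pkg_pwr(void) { return 0; }	/* fw too old */
static uint32_t sample_pm_status_log(void)
{
	return ++attempts >= 3 ? 42 : 0;	/* data ready on the 3rd poll */
}

static int get_gpu_power(uint32_t *query)
{
	int i;
	uint32_t tmp;

	if (!query)
		return -1;			/* -EINVAL in the driver */

	*query = tmp = read_curr_pkg_pwr();	/* fast path */
	if (tmp != 0)
		return 0;

	for (i = 0; i < 10; i++) {		/* bounded fallback poll */
		tmp = sample_pm_status_log();	/* the driver waits 1 ms/loop */
		if (tmp != 0)
			break;
	}
	*query = tmp;
	return 0;
}

int main(void)
{
	uint32_t w;

	get_gpu_power(&w);
	printf("power sample: %u (after %d polls)\n", w, attempts);
	return 0;
}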
@@ -3446,10 +3487,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3446 *size = 4; 3487 *size = 4;
3447 return 0; 3488 return 0;
3448 case AMDGPU_PP_SENSOR_GPU_POWER: 3489 case AMDGPU_PP_SENSOR_GPU_POWER:
3449 if (*size < sizeof(struct pp_gpu_power)) 3490 return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
3450 return -EINVAL;
3451 *size = sizeof(struct pp_gpu_power);
3452 return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
3453 case AMDGPU_PP_SENSOR_VDDGFX: 3491 case AMDGPU_PP_SENSOR_VDDGFX:
3454 if ((data->vr_config & 0xff) == 0x2) 3492 if ((data->vr_config & 0xff) == 0x2)
3455 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, 3493 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
@@ -3480,7 +3518,6 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
3480 [smu7_ps->performance_level_count - 1].memory_clock; 3518 [smu7_ps->performance_level_count - 1].memory_clock;
3481 struct PP_Clocks min_clocks = {0}; 3519 struct PP_Clocks min_clocks = {0};
3482 uint32_t i; 3520 uint32_t i;
3483 struct cgs_display_info info = {0};
3484 3521
3485 for (i = 0; i < sclk_table->count; i++) { 3522 for (i = 0; i < sclk_table->count; i++) {
3486 if (sclk == sclk_table->dpm_levels[i].value) 3523 if (sclk == sclk_table->dpm_levels[i].value)
@@ -3507,9 +3544,8 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
3507 if (i >= mclk_table->count) 3544 if (i >= mclk_table->count)
3508 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 3545 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3509 3546
3510 cgs_get_active_displays_info(hwmgr->device, &info);
3511 3547
3512 if (data->display_timing.num_existing_displays != info.display_count) 3548 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
3513 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; 3549 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3514 3550
3515 return 0; 3551 return 0;
@@ -3812,9 +3848,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
3812{ 3848{
3813 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3849 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3814 3850
3815 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) 3851 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
3816 smum_send_msg_to_smc_with_parameter(hwmgr, 3852 if (hwmgr->chip_id == CHIP_VEGAM)
3817 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); 3853 smum_send_msg_to_smc_with_parameter(hwmgr,
3854 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
3855 else
3856 smum_send_msg_to_smc_with_parameter(hwmgr,
3857 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
3858 }
3818 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; 3859 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
3819} 3860}
3820 3861
@@ -3908,15 +3949,8 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 static int
 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 {
-	uint32_t num_active_displays = 0;
-	struct cgs_display_info info = {0};
-
-	info.mode_info = NULL;
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	num_active_displays = info.display_count;
-
-	if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true)
+	if (hwmgr->display_config->num_display > 1 &&
+			!hwmgr->display_config->multi_monitor_in_sync)
 		smu7_notify_smc_display_change(hwmgr, false);
 
 	return 0;
@@ -3931,33 +3965,24 @@ smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-	uint32_t num_active_displays = 0;
 	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
 	uint32_t display_gap2;
 	uint32_t pre_vbi_time_in_us;
 	uint32_t frame_time_in_us;
-	uint32_t ref_clock;
-	uint32_t refresh_rate = 0;
-	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info = {0};
+	uint32_t ref_clock, refresh_rate;
 
-	info.mode_info = &mode_info;
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	num_active_displays = info.display_count;
-
-	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
 
 	ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
-
-	refresh_rate = mode_info.refresh_rate;
+	refresh_rate = hwmgr->display_config->vrefresh;
 
 	if (0 == refresh_rate)
 		refresh_rate = 60;
 
 	frame_time_in_us = 1000000 / refresh_rate;
 
-	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+	pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
 
 	data->frame_time_x2 = frame_time_in_us * 2 / 100;
 
@@ -4037,17 +4062,14 @@ smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	bool is_update_required = false;
-	struct cgs_display_info info = {0, 0, NULL};
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (data->display_timing.num_existing_displays != info.display_count)
+	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
 		is_update_required = true;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-		if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
+		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
 			(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
-			hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+			hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
 			is_update_required = true;
 	}
 	return is_update_required;
@@ -4102,7 +4124,7 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
@@ -4181,13 +4203,9 @@ static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-	uint32_t temp;
-
-	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
-			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
-			 MC_SEQ_MISC0_GDDR5_SHIFT));
+	data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
 
 	return 0;
 }
@@ -4235,7 +4253,7 @@ static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result, result = 0;
 
-	smu7_upload_mc_firmware(hwmgr);
+	smu7_check_mc_firmware(hwmgr);
 
 	tmp_result = smu7_read_clock_registers(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -4370,22 +4388,36 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_SCLK:
 		if (hwmgr->od_enabled) {
-			size = sprintf(buf, "%s: \n", "OD_SCLK");
+			size = sprintf(buf, "%s:\n", "OD_SCLK");
 			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
-				size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
-					i, odn_sclk_table->entries[i].clock / 100,
+				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+					i, odn_sclk_table->entries[i].clock/100,
 					odn_sclk_table->entries[i].vddc);
 		}
 		break;
 	case OD_MCLK:
 		if (hwmgr->od_enabled) {
-			size = sprintf(buf, "%s: \n", "OD_MCLK");
+			size = sprintf(buf, "%s:\n", "OD_MCLK");
 			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
-				size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
-					i, odn_mclk_table->entries[i].clock / 100,
+				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+					i, odn_mclk_table->entries[i].clock/100,
 					odn_mclk_table->entries[i].vddc);
 		}
 		break;
+	case OD_RANGE:
+		if (hwmgr->od_enabled) {
+			size = sprintf(buf, "%s:\n", "OD_RANGE");
+			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+				data->odn_dpm_table.min_vddc,
+				data->odn_dpm_table.max_vddc);
+		}
+		break;
 	default:
 		break;
 	}
@@ -4669,36 +4701,27 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint32_t min_vddc;
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
-
-	if (table_info == NULL)
-		return false;
-
-	dep_sclk_table = table_info->vdd_dep_on_sclk;
-	min_vddc = dep_sclk_table->entries[0].vddc;
-
-	if (voltage < min_vddc || voltage > 2000) {
-		pr_info("OD voltage is out of range [%d - 2000] mV\n", min_vddc);
+	if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
+		pr_info("OD voltage is out of range [%d - %d] mV\n",
+			data->odn_dpm_table.min_vddc,
+			data->odn_dpm_table.max_vddc);
 		return false;
 	}
 
 	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
-		if (data->vbios_boot_state.sclk_bootup_value > clk ||
+		if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
 			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
 			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
-				data->vbios_boot_state.sclk_bootup_value,
-				hwmgr->platform_descriptor.overdriveLimit.engineClock / 100);
+				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
 			return false;
 		}
 	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
-		if (data->vbios_boot_state.mclk_bootup_value > clk ||
+		if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
 			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
 			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
-				data->vbios_boot_state.mclk_bootup_value/100,
-				hwmgr->platform_descriptor.overdriveLimit.memoryClock / 100);
+				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
 			return false;
 		}
 	} else {
@@ -4747,10 +4770,6 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
 			return;
 		}
 	}
-	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
-		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-	}
 
 	dep_table = table_info->vdd_dep_on_sclk;
 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
@@ -4760,9 +4779,9 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
 			return;
 		}
 	}
-	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
 		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
 	}
 }
 
@@ -4864,6 +4883,17 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);
 
 	for (i = 0; i < len; i++) {
+		if (i == hwmgr->power_profile_mode) {
+			size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
+			i, profile_name[i], "*",
+			data->current_profile_setting.sclk_up_hyst,
+			data->current_profile_setting.sclk_down_hyst,
+			data->current_profile_setting.sclk_activity,
+			data->current_profile_setting.mclk_up_hyst,
+			data->current_profile_setting.mclk_down_hyst,
+			data->current_profile_setting.mclk_activity);
+			continue;
+		}
 		if (smu7_profiling[i].bupdate_sclk)
 			size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
 				i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
@@ -4883,24 +4913,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4883 "-", "-", "-"); 4913 "-", "-", "-");
4884 } 4914 }
4885 4915
4886 size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n",
4887 i, profile_name[i],
4888 data->custom_profile_setting.sclk_up_hyst,
4889 data->custom_profile_setting.sclk_down_hyst,
4890 data->custom_profile_setting.sclk_activity,
4891 data->custom_profile_setting.mclk_up_hyst,
4892 data->custom_profile_setting.mclk_down_hyst,
4893 data->custom_profile_setting.mclk_activity);
4894
4895 size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n",
4896 "*", "CURRENT",
4897 data->current_profile_setting.sclk_up_hyst,
4898 data->current_profile_setting.sclk_down_hyst,
4899 data->current_profile_setting.sclk_activity,
4900 data->current_profile_setting.mclk_up_hyst,
4901 data->current_profile_setting.mclk_down_hyst,
4902 data->current_profile_setting.mclk_activity);
4903
4904 return size; 4916 return size;
4905} 4917}
4906 4918
@@ -4939,16 +4951,16 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
 		if (size < 8)
 			return -EINVAL;
 
-		data->custom_profile_setting.bupdate_sclk = input[0];
-		data->custom_profile_setting.sclk_up_hyst = input[1];
-		data->custom_profile_setting.sclk_down_hyst = input[2];
-		data->custom_profile_setting.sclk_activity = input[3];
-		data->custom_profile_setting.bupdate_mclk = input[4];
-		data->custom_profile_setting.mclk_up_hyst = input[5];
-		data->custom_profile_setting.mclk_down_hyst = input[6];
-		data->custom_profile_setting.mclk_activity = input[7];
-		if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) {
-			memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting));
+		tmp.bupdate_sclk = input[0];
+		tmp.sclk_up_hyst = input[1];
+		tmp.sclk_down_hyst = input[2];
+		tmp.sclk_activity = input[3];
+		tmp.bupdate_mclk = input[4];
+		tmp.mclk_up_hyst = input[5];
+		tmp.mclk_down_hyst = input[6];
+		tmp.mclk_activity = input[7];
+		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
+			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
 			hwmgr->power_profile_mode = mode;
 		}
 		break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index f40179c9ca97..c91e75db6a8e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -184,6 +184,8 @@ struct smu7_odn_dpm_table {
 	struct smu7_odn_clock_voltage_dependency_table	vdd_dependency_on_sclk;
 	struct smu7_odn_clock_voltage_dependency_table	vdd_dependency_on_mclk;
 	uint32_t	odn_mclk_min_limit;
+	uint32_t	min_vddc;
+	uint32_t	max_vddc;
 };
 
 struct profile_mode_setting {
@@ -325,7 +327,6 @@ struct smu7_hwmgr {
 	uint16_t	mem_latency_high;
 	uint16_t	mem_latency_low;
 	uint32_t	vr_config;
-	struct profile_mode_setting	custom_profile_setting;
 	struct profile_mode_setting	current_profile_setting;
 };
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 03bc7453f3b1..99b29ff45d91 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -623,6 +623,190 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] =
 	{ 0xFFFFFFFF }  /* End of list */
 };
 
+static const struct gpu_pt_config_reg GCCACConfig_VegaM[] =
+{
+// --------------------------------------------------------------------------------------------------------------------------------
+//      Offset                    Mask                      Shift      Value        Type
+// --------------------------------------------------------------------------------------------------------------------------------
+	// DIDT_SQ
+	//
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, GPU_CONFIGREG_GC_CAC_IND },
+
+	// DIDT_TD
+	//
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, GPU_CONFIGREG_GC_CAC_IND },
+
+	// DIDT_TCP
+	//
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, GPU_CONFIGREG_GC_CAC_IND },
+	{ ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, GPU_CONFIGREG_GC_CAC_IND },
+
+	{ 0xFFFFFFFF }  // End of list
+};
+
+static const struct gpu_pt_config_reg DIDTConfig_VegaM[] =
+{
+// --------------------------------------------------------------------------------------------------------------------------------
+//      Offset                    Mask                      Shift      Value        Type
+// --------------------------------------------------------------------------------------------------------------------------------
+	// DIDT_SQ
+	//
+	{ ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	// DIDT_TD
+	//
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	// DIDT_TCP
+	//
+	{ ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ 0xFFFFFFFF }  // End of list
+};
 static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
 {
 	uint32_t en = enable ? 1 : 0;
@@ -740,8 +924,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 	    PP_CAP(PHM_PlatformCaps_TDRamping) ||
 	    PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-		cgs_enter_safe_mode(hwmgr->device, true);
-		cgs_lock_grbm_idx(hwmgr->device, true);
+		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		mutex_lock(&adev->grbm_idx_mutex);
 		value = 0;
 		value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
 		for (count = 0; count < num_se; count++) {
@@ -768,6 +952,11 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 			PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
 			result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12);
 			PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+		} else if (hwmgr->chip_id == CHIP_VEGAM) {
+			result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_VegaM);
+			PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+			result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_VegaM);
+			PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
 		}
 	}
 	cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
@@ -781,8 +970,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 			PP_ASSERT_WITH_CODE((0 == result),
 					"Failed to enable DPM DIDT.", return result);
 		}
-		cgs_lock_grbm_idx(hwmgr->device, false);
-		cgs_enter_safe_mode(hwmgr->device, false);
+		mutex_unlock(&adev->grbm_idx_mutex);
+		adev->gfx.rlc.funcs->exit_safe_mode(adev);
 	}
 
 	return 0;
@@ -791,13 +980,14 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 {
 	int result;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
 	    PP_CAP(PHM_PlatformCaps_DBRamping) ||
 	    PP_CAP(PHM_PlatformCaps_TDRamping) ||
 	    PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-		cgs_enter_safe_mode(hwmgr->device, true);
+		adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 		result = smu7_enable_didt(hwmgr, false);
 		PP_ASSERT_WITH_CODE((result == 0),
@@ -809,7 +999,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 			PP_ASSERT_WITH_CODE((0 == result),
 					"Failed to disable DPM DIDT.", return result);
 		}
-		cgs_enter_safe_mode(hwmgr->device, false);
+		adev->gfx.rlc.funcs->exit_safe_mode(adev);
 	}
 
 	return 0;
@@ -852,12 +1042,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	n = (n & 0xff) << 8;
-
 	if (data->power_containment_features &
 			POWERCONTAINMENT_FEATURE_PkgPwrLimit)
 		return smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_PkgPwrSetLimit, n);
+				PPSMC_MSG_PkgPwrSetLimit, n<<8);
 	return 0;
 }
 
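The hunk above changes how the package power limit is encoded for the SMC: the shift into the message parameter now happens at the send site, and the old 0xff mask is gone. A minimal arithmetic sketch of the difference, in C; the value 300 here is only an illustrative limit, not something taken from this patch:

	/* Illustrative only: the old masking corrupted any limit above 255. */
	uint32_t n = 300;
	uint32_t old_param = (n & 0xff) << 8;	/* 0x2C00  -> encodes 44  */
	uint32_t new_param = n << 8;		/* 0x12C00 -> encodes 300 */

Either way the low byte of the parameter stays zero; only the truncation behaviour of large limits differs.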
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 7b26607c646a..50690c72b2ea 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -314,8 +314,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
 	uint8_t frev, crev;
 	uint16_t size;
 
-	info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *) cgs_atom_get_data_table(
-			hwmgr->device,
+	info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
 			GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
 			&size, &frev, &crev);
 
@@ -694,7 +693,7 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
 	else
 		data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
 
-	clock = hwmgr->display_config.min_core_set_clock;
+	clock = hwmgr->display_config->min_core_set_clock;
 	if (clock == 0)
 		pr_debug("min_core_set_clock not set\n");
 
@@ -749,7 +748,7 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 				PHM_PlatformCaps_SclkDeepSleep)) {
-		uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
+		uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
 		if (clks == 0)
 			clks = SMU8_MIN_DEEP_SLEEP_SCLK;
 
@@ -1041,25 +1040,21 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	struct smu8_hwmgr *data = hwmgr->backend;
 	struct PP_Clocks clocks = {0, 0, 0, 0};
 	bool force_high;
-	uint32_t num_of_active_displays = 0;
-	struct cgs_display_info info = {0};
 
 	smu8_ps->need_dfs_bypass = true;
 
 	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
 
-	clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
-				hwmgr->display_config.min_mem_set_clock :
+	clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
+				hwmgr->display_config->min_mem_set_clock :
 				data->sys_info.nbp_memory_clock[1];
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	num_of_active_displays = info.display_count;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
 		clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
 
 	force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
-			|| (num_of_active_displays >= 3);
+			|| (hwmgr->display_config->num_display >= 3);
 
 	smu8_ps->action = smu8_current_ps->action;
 
@@ -1897,20 +1892,20 @@ static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 	data->uvd_power_gated = bgate;
 
 	if (bgate) {
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_CG_STATE_GATE);
 		smu8_dpm_update_uvd_dpm(hwmgr, true);
 		smu8_dpm_powerdown_uvd(hwmgr);
 	} else {
 		smu8_dpm_powerup_uvd(hwmgr);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_UNGATE);
 		smu8_dpm_update_uvd_dpm(hwmgr, false);
@@ -1923,12 +1918,10 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 	struct smu8_hwmgr *data = hwmgr->backend;
 
 	if (bgate) {
-		cgs_set_powergating_state(
-					hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_VCE,
 					AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(
-					hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_VCE,
 					AMD_CG_STATE_GATE);
 		smu8_enable_disable_vce_dpm(hwmgr, false);
@@ -1937,12 +1930,10 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 	} else {
 		smu8_dpm_powerup_vce(hwmgr);
 		data->vce_power_gated = false;
-		cgs_set_clockgating_state(
-					hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_VCE,
 					AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(
-					hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_VCE,
 					AMD_PG_STATE_UNGATE);
 		smu8_dpm_update_vce_dpm(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 598122854ab5..93a3d022ba47 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -24,6 +24,7 @@
 #include "pp_debug.h"
 #include "ppatomctrl.h"
 #include "ppsmc.h"
+#include "atom.h"
 
 uint8_t convert_to_vid(uint16_t vddc)
 {
@@ -608,3 +609,100 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
 
 	return 0;
 }
+
+void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
+						uint8_t *frev, uint8_t *crev)
+{
+	struct amdgpu_device *adev = dev;
+	uint16_t data_start;
+
+	if (amdgpu_atom_parse_data_header(
+		    adev->mode_info.atom_context, table, size,
+		    frev, crev, &data_start))
+		return (uint8_t *)adev->mode_info.atom_context->bios +
+			data_start;
+
+	return NULL;
+}
+
+int smu_get_voltage_dependency_table_ppt_v1(
+			const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
+			struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+{
+	uint8_t i = 0;
+	PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
+			"Voltage Lookup Table empty",
+			return -EINVAL);
+
+	dep_table->count = allowed_dep_table->count;
+	for (i=0; i<dep_table->count; i++) {
+		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
+		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
+		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
+		dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
+		dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
+		dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
+		dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
+		dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
+		dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
+		dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
+	}
+
+	return 0;
+}
+
+int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+{
+	uint32_t i;
+	struct watermarks *table = wt_table;
+
+	if (!table || !wm_with_clock_ranges)
+		return -EINVAL;
+
+	if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)
+		return -EINVAL;
+
+	for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+		table->WatermarkRow[1][i].MinClock =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
+			100);
+		table->WatermarkRow[1][i].MaxClock =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
+			100);
+		table->WatermarkRow[1][i].MinUclk =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
+			100);
+		table->WatermarkRow[1][i].MaxUclk =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
+			100);
+		table->WatermarkRow[1][i].WmSetting = (uint8_t)
+				wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+	}
+
+	for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+		table->WatermarkRow[0][i].MinClock =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
+			100);
+		table->WatermarkRow[0][i].MaxClock =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
+			100);
+		table->WatermarkRow[0][i].MinUclk =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
+			100);
+		table->WatermarkRow[0][i].MaxUclk =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
+			100);
+		table->WatermarkRow[0][i].WmSetting = (uint8_t)
+				wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+	}
+	return 0;
+}
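The new smu_set_watermarks_for_clocks_ranges() helper above accepts at most four DMIF and four MCIF watermark sets and scales each kHz clock bound down by a factor of 100 before storing it as a little-endian 16-bit field. A minimal caller sketch, assuming a ranges structure already populated by the display code; the function name example_fill_wm_table is hypothetical and not part of this patch:

	/* Hypothetical caller: convert DC-provided watermark ranges into the
	 * SMU-format table before it is uploaded to the firmware. */
	static int example_fill_wm_table(struct pp_hwmgr *hwmgr,
			struct pp_wm_sets_with_clock_ranges_soc15 *ranges)
	{
		struct watermarks wm = {0};	/* zero-initialized SMU-format table */

		/* Returns -EINVAL on NULL pointers or more than four sets per
		 * type; otherwise fills WatermarkRow[1] (DMIF) and
		 * WatermarkRow[0] (MCIF) with the scaled clock bounds. */
		return smu_set_watermarks_for_clocks_ranges(&wm, ranges);
	}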
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index d37d16e4b613..916cc01e7652 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -26,10 +26,27 @@
 struct pp_atomctrl_voltage_table;
 struct pp_hwmgr;
 struct phm_ppt_v1_voltage_lookup_table;
+struct Watermarks_t;
+struct pp_wm_sets_with_clock_ranges_soc15;
 
 uint8_t convert_to_vid(uint16_t vddc);
 uint16_t convert_to_vddc(uint8_t vid);
 
+struct watermark_row_generic_t {
+	uint16_t MinClock;
+	uint16_t MaxClock;
+	uint16_t MinUclk;
+	uint16_t MaxUclk;
+
+	uint8_t WmSetting;
+	uint8_t Padding[3];
+};
+
+struct watermarks {
+	struct watermark_row_generic_t WatermarkRow[2][4];
+	uint32_t padding[7];
+};
+
 extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
 				uint32_t index,
 				uint32_t value, uint32_t mask);
@@ -82,6 +99,16 @@ int phm_irq_process(struct amdgpu_device *adev,
 
 int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr);
 
+void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
+						uint8_t *frev, uint8_t *crev);
+
+int smu_get_voltage_dependency_table_ppt_v1(
+	const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+
+int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+
 #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 #define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 7cbb56ba6fab..d156b7bb92ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -36,7 +36,7 @@
 #include "smu9.h"
 #include "smu9_driver_if.h"
 #include "vega10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 #include "pppcielanes.h"
 #include "vega10_hwmgr.h"
 #include "vega10_processpptables.h"
@@ -51,10 +51,6 @@
 #include "smuio/smuio_9_0_offset.h"
 #include "smuio/smuio_9_0_sh_mask.h"
 
-#define VOLTAGE_SCALE  4
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-
 #define HBM_MEMORY_CHANNEL_WIDTH    128
 
 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
@@ -79,8 +75,6 @@ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK    0x000000F0L
 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK    0x00000700L
 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK    0xFFFFF000L
-static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, uint32_t mask);
 
 static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
 
@@ -291,6 +285,48 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+{
+	struct vega10_hwmgr *data = hwmgr->backend;
+	struct phm_ppt_v2_information *table_info =
+			(struct phm_ppt_v2_information *)(hwmgr->pptable);
+	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+	struct vega10_odn_vddc_lookup_table *od_lookup_table;
+	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
+	struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
+	uint32_t i;
+
+	od_lookup_table = &odn_table->vddc_lookup_table;
+	vddc_lookup_table = table_info->vddc_lookup_table;
+
+	for (i = 0; i < vddc_lookup_table->count; i++)
+		od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;
+
+	od_lookup_table->count = vddc_lookup_table->count;
+
+	dep_table[0] = table_info->vdd_dep_on_sclk;
+	dep_table[1] = table_info->vdd_dep_on_mclk;
+	dep_table[2] = table_info->vdd_dep_on_socclk;
+	od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
+	od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
+	od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;
+
+	for (i = 0; i < 3; i++)
+		smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);
+
+	if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
+		odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
+	if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
+		odn_table->min_vddc = dep_table[0]->entries[0].vddc;
+
+	i = od_table[2]->count - 1;
+	od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock;
+	od_table[2]->entries[i].vddc = odn_table->max_vddc;
+
+	return 0;
+}
+
 static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
@@ -427,7 +463,6 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	/* ACG firmware has major version 5 */
 	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
 		data->smu_features[GNLD_ACG].supported = true;
-
 	if (data->registry_data.didt_support)
 		data->smu_features[GNLD_DIDT].supported = true;
 
@@ -754,7 +789,6 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
754 uint32_t config_telemetry = 0; 789 uint32_t config_telemetry = 0;
755 struct pp_atomfwctrl_voltage_table vol_table; 790 struct pp_atomfwctrl_voltage_table vol_table;
756 struct amdgpu_device *adev = hwmgr->adev; 791 struct amdgpu_device *adev = hwmgr->adev;
757 uint32_t reg;
758 792
759 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL); 793 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
760 if (data == NULL) 794 if (data == NULL)
@@ -860,10 +894,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
860 advanceFanControlParameters.usFanPWMMinLimit * 894 advanceFanControlParameters.usFanPWMMinLimit *
861 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100; 895 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
862 896
863 reg = soc15_get_register_offset(DF_HWID, 0, 897 data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
864 mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
865 mmDF_CS_AON0_DramBaseAddress0);
866 data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
867 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >> 898 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
868 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; 899 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
869 PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number), 900 PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
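
Review note: this hunk is part of the series-wide move away from the cgs indirection; soc15_get_register_offset() + cgs_read_register() collapse into a single RREG32_SOC15() MMIO read. Decode sketch (illustrative, not in the patch), with channel_number[] from the top of this file translating the field into a physical channel count:

    uint32_t val = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
    uint32_t idx = (val & DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
                   DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
    uint32_t channels = channel_number[idx]; /* e.g. idx 3 -> 4 channels */
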
@@ -1370,48 +1401,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1370 memcpy(&(data->golden_dpm_table), &(data->dpm_table), 1401 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1371 sizeof(struct vega10_dpm_table)); 1402 sizeof(struct vega10_dpm_table));
1372 1403
1373 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
1374 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
1375 data->odn_dpm_table.odn_core_clock_dpm_levels.num_of_pl =
1376 data->dpm_table.gfx_table.count;
1377 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1378 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].clock =
1379 data->dpm_table.gfx_table.dpm_levels[i].value;
1380 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].enabled = true;
1381 }
1382
1383 data->odn_dpm_table.vdd_dependency_on_sclk.count =
1384 dep_gfx_table->count;
1385 for (i = 0; i < dep_gfx_table->count; i++) {
1386 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
1387 dep_gfx_table->entries[i].clk;
1388 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
1389 dep_gfx_table->entries[i].vddInd;
1390 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
1391 dep_gfx_table->entries[i].cks_enable;
1392 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
1393 dep_gfx_table->entries[i].cks_voffset;
1394 }
1395
1396 data->odn_dpm_table.odn_memory_clock_dpm_levels.num_of_pl =
1397 data->dpm_table.mem_table.count;
1398 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1399 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].clock =
1400 data->dpm_table.mem_table.dpm_levels[i].value;
1401 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].enabled = true;
1402 }
1403
1404 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
1405 for (i = 0; i < dep_mclk_table->count; i++) {
1406 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
1407 dep_mclk_table->entries[i].clk;
1408 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
1409 dep_mclk_table->entries[i].vddInd;
1410 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
1411 dep_mclk_table->entries[i].vddci;
1412 }
1413 }
1414
1415 return 0; 1404 return 0;
1416} 1405}
1417 1406
@@ -1514,18 +1503,18 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1514{ 1503{
1515 struct phm_ppt_v2_information *table_info = 1504 struct phm_ppt_v2_information *table_info =
1516 (struct phm_ppt_v2_information *)(hwmgr->pptable); 1505 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1517 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk = 1506 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
1518 table_info->vdd_dep_on_sclk;
1519 struct vega10_hwmgr *data = hwmgr->backend; 1507 struct vega10_hwmgr *data = hwmgr->backend;
1520 struct pp_atomfwctrl_clock_dividers_soc15 dividers; 1508 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1521 uint32_t gfx_max_clock = 1509 uint32_t gfx_max_clock =
1522 hwmgr->platform_descriptor.overdriveLimit.engineClock; 1510 hwmgr->platform_descriptor.overdriveLimit.engineClock;
1523 uint32_t i = 0; 1511 uint32_t i = 0;
1524 1512
1525 if (data->apply_overdrive_next_settings_mask & 1513 if (hwmgr->od_enabled)
1526 DPMTABLE_OD_UPDATE_VDDC)
1527 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *) 1514 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1528 &(data->odn_dpm_table.vdd_dependency_on_sclk); 1515 &(data->odn_dpm_table.vdd_dep_on_sclk);
1516 else
1517 dep_on_sclk = table_info->vdd_dep_on_sclk;
1529 1518
1530 PP_ASSERT_WITH_CODE(dep_on_sclk, 1519 PP_ASSERT_WITH_CODE(dep_on_sclk,
1531 "Invalid SOC_VDD-GFX_CLK Dependency Table!", 1520 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
@@ -1577,23 +1566,32 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1577 uint32_t soc_clock, uint8_t *current_soc_did, 1566 uint32_t soc_clock, uint8_t *current_soc_did,
1578 uint8_t *current_vol_index) 1567 uint8_t *current_vol_index)
1579{ 1568{
1569 struct vega10_hwmgr *data = hwmgr->backend;
1580 struct phm_ppt_v2_information *table_info = 1570 struct phm_ppt_v2_information *table_info =
1581 (struct phm_ppt_v2_information *)(hwmgr->pptable); 1571 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1582 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc = 1572 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
1583 table_info->vdd_dep_on_socclk;
1584 struct pp_atomfwctrl_clock_dividers_soc15 dividers; 1573 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1585 uint32_t i; 1574 uint32_t i;
1586 1575
1587 PP_ASSERT_WITH_CODE(dep_on_soc, 1576 if (hwmgr->od_enabled) {
1588 "Invalid SOC_VDD-SOC_CLK Dependency Table!", 1577 dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1589 return -EINVAL); 1578 &data->odn_dpm_table.vdd_dep_on_socclk;
1590 for (i = 0; i < dep_on_soc->count; i++) { 1579 for (i = 0; i < dep_on_soc->count; i++) {
1591 if (dep_on_soc->entries[i].clk == soc_clock) 1580 if (dep_on_soc->entries[i].clk >= soc_clock)
1592 break; 1581 break;
1582 }
1583 } else {
1584 dep_on_soc = table_info->vdd_dep_on_socclk;
1585 for (i = 0; i < dep_on_soc->count; i++) {
1586 if (dep_on_soc->entries[i].clk == soc_clock)
1587 break;
1588 }
1593 } 1589 }
1590
1594 PP_ASSERT_WITH_CODE(dep_on_soc->count > i, 1591 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1595 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table", 1592 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1596 return -EINVAL); 1593 return -EINVAL);
1594
1597 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, 1595 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1598 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 1596 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1599 soc_clock, &dividers), 1597 soc_clock, &dividers),
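
Review note: the overdrive path matches with >= rather than == because a user-edited socclk table need not contain the exact requested clock; the first level at or above it is accepted, while the stock pptable still requires an exact hit. The two loops condense to (sketch):

    for (i = 0; i < dep_on_soc->count; i++)
            if (hwmgr->od_enabled ? dep_on_soc->entries[i].clk >= soc_clock
                                  : dep_on_soc->entries[i].clk == soc_clock)
                    break;
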
@@ -1602,22 +1600,6 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1602 1600
1603 *current_soc_did = (uint8_t)dividers.ulDid; 1601 *current_soc_did = (uint8_t)dividers.ulDid;
1604 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd); 1602 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1605
1606 return 0;
1607}
1608
1609uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1610 uint32_t clk,
1611 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1612{
1613 uint16_t i;
1614
1615 for (i = 0; i < dep_table->count; i++) {
1616 if (dep_table->entries[i].clk == clk)
1617 return dep_table->entries[i].vddc;
1618 }
1619
1620 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1621 return 0; 1603 return 0;
1622} 1604}
1623 1605
@@ -1631,8 +1613,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1631 struct vega10_hwmgr *data = hwmgr->backend; 1613 struct vega10_hwmgr *data = hwmgr->backend;
1632 struct phm_ppt_v2_information *table_info = 1614 struct phm_ppt_v2_information *table_info =
1633 (struct phm_ppt_v2_information *)(hwmgr->pptable); 1615 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1634 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
1635 table_info->vdd_dep_on_socclk;
1636 PPTable_t *pp_table = &(data->smc_state_table.pp_table); 1616 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1637 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); 1617 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1638 int result = 0; 1618 int result = 0;
@@ -1663,11 +1643,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1663 1643
1664 dpm_table = &(data->dpm_table.soc_table); 1644 dpm_table = &(data->dpm_table.soc_table);
1665 for (i = 0; i < dpm_table->count; i++) { 1645 for (i = 0; i < dpm_table->count; i++) {
1666 pp_table->SocVid[i] =
1667 (uint8_t)convert_to_vid(
1668 vega10_locate_vddc_given_clock(hwmgr,
1669 dpm_table->dpm_levels[i].value,
1670 dep_table));
1671 result = vega10_populate_single_soc_level(hwmgr, 1646 result = vega10_populate_single_soc_level(hwmgr,
1672 dpm_table->dpm_levels[i].value, 1647 dpm_table->dpm_levels[i].value,
1673 &(pp_table->SocclkDid[i]), 1648 &(pp_table->SocclkDid[i]),
@@ -1678,7 +1653,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1678 1653
1679 j = i - 1; 1654 j = i - 1;
1680 while (i < NUM_SOCCLK_DPM_LEVELS) { 1655 while (i < NUM_SOCCLK_DPM_LEVELS) {
1681 pp_table->SocVid[i] = pp_table->SocVid[j];
1682 result = vega10_populate_single_soc_level(hwmgr, 1656 result = vega10_populate_single_soc_level(hwmgr,
1683 dpm_table->dpm_levels[j].value, 1657 dpm_table->dpm_levels[j].value,
1684 &(pp_table->SocclkDid[i]), 1658 &(pp_table->SocclkDid[i]),
@@ -1691,6 +1665,32 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1691 return result; 1665 return result;
1692} 1666}
1693 1667
1668static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
1669{
1670 struct vega10_hwmgr *data = hwmgr->backend;
1671 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1672 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
1673 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
1674
1675 uint8_t soc_vid = 0;
1676 uint32_t i, max_vddc_level;
1677
1678 if (hwmgr->od_enabled)
1679 vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
1680 else
1681 vddc_lookup_table = table_info->vddc_lookup_table;
1682
1683 max_vddc_level = vddc_lookup_table->count;
1684 for (i = 0; i < max_vddc_level; i++) {
1685 soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
1686 pp_table->SocVid[i] = soc_vid;
1687 }
1688 while (i < MAX_REGULAR_DPM_NUMBER) {
1689 pp_table->SocVid[i] = soc_vid;
1690 i++;
1691 }
1692}
1693
1694/** 1694/**
1695 * @brief Populates single SMC GFXCLK structure using the provided clock. 1695 * @brief Populates single SMC GFXCLK structure using the provided clock.
1696 * 1696 *
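
Review note: vega10_populate_vddc_soc_levels() derives SocVid from the (possibly user-edited) voltage lookup table and pads the remaining SMC slots with the last VID, replacing the per-level vega10_locate_vddc_given_clock() lookup removed above. Assuming convert_to_vid() implements the usual SVI2 encoding (1.55 V reference, 6.25 mV per step), a worked example:

    /* assumed: VID = (1550 mV - vddc) / 6.25 mV       */
    /*   900 mV -> (1550 - 900)  / 6.25 = 104 = 0x68   */
    /*  1200 mV -> (1550 - 1200) / 6.25 =  56 = 0x38   */
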
@@ -1705,25 +1705,25 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1705 struct vega10_hwmgr *data = hwmgr->backend; 1705 struct vega10_hwmgr *data = hwmgr->backend;
1706 struct phm_ppt_v2_information *table_info = 1706 struct phm_ppt_v2_information *table_info =
1707 (struct phm_ppt_v2_information *)(hwmgr->pptable); 1707 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1708 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk = 1708 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
1709 table_info->vdd_dep_on_mclk;
1710 struct pp_atomfwctrl_clock_dividers_soc15 dividers; 1709 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1711 uint32_t mem_max_clock = 1710 uint32_t mem_max_clock =
1712 hwmgr->platform_descriptor.overdriveLimit.memoryClock; 1711 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1713 uint32_t i = 0; 1712 uint32_t i = 0;
1714 1713
1715 if (data->apply_overdrive_next_settings_mask & 1714 if (hwmgr->od_enabled)
1716 DPMTABLE_OD_UPDATE_VDDC)
1717 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *) 1715 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1718 &data->odn_dpm_table.vdd_dependency_on_mclk; 1716 &data->odn_dpm_table.vdd_dep_on_mclk;
1717 else
1718 dep_on_mclk = table_info->vdd_dep_on_mclk;
1719 1719
1720 PP_ASSERT_WITH_CODE(dep_on_mclk, 1720 PP_ASSERT_WITH_CODE(dep_on_mclk,
1721 "Invalid SOC_VDD-UCLK Dependency Table!", 1721 "Invalid SOC_VDD-UCLK Dependency Table!",
1722 return -EINVAL); 1722 return -EINVAL);
1723 1723
1724 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) 1724 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
1725 mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock; 1725 mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
1726 else { 1726 } else {
1727 for (i = 0; i < dep_on_mclk->count; i++) { 1727 for (i = 0; i < dep_on_mclk->count; i++) {
1728 if (dep_on_mclk->entries[i].clk == mem_clock) 1728 if (dep_on_mclk->entries[i].clk == mem_clock)
1729 break; 1729 break;
@@ -2067,6 +2067,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2067 if (data->smu_features[GNLD_AVFS].supported) { 2067 if (data->smu_features[GNLD_AVFS].supported) {
2068 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params); 2068 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2069 if (!result) { 2069 if (!result) {
2070 data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
2071 data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
2072
2070 pp_table->MinVoltageVid = (uint8_t) 2073 pp_table->MinVoltageVid = (uint8_t)
2071 convert_to_vid((uint16_t)(avfs_params.ulMinVddc)); 2074 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2072 pp_table->MaxVoltageVid = (uint8_t) 2075 pp_table->MaxVoltageVid = (uint8_t)
@@ -2345,6 +2348,22 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2345 return 0; 2348 return 0;
2346} 2349}
2347 2350
2351static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
2352{
2353 struct vega10_hwmgr *data = hwmgr->backend;
2354
2355 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2356 vega10_avfs_enable(hwmgr, false);
2357 } else if (data->need_update_dpm_table) {
2358 vega10_avfs_enable(hwmgr, false);
2359 vega10_avfs_enable(hwmgr, true);
2360 } else {
2361 vega10_avfs_enable(hwmgr, true);
2362 }
2363
2364 return 0;
2365}
2366
2348static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) 2367static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2349{ 2368{
2350 int result = 0; 2369 int result = 0;
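
Review note: vega10_update_avfs() pins down how user voltage overrides interact with the fused AVFS curve. The three branches reduce to (restated as a sketch):

    /* OD VDDC edit pending -> AVFS off: the manual voltage must not be
     *                         overridden by the fused curve
     * other table updates  -> AVFS off, then on: re-latch against the
     *                         newly uploaded tables
     * nothing pending      -> AVFS on (default)                        */
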
@@ -2406,6 +2425,10 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2406 "Failed to setup default DPM tables!", 2425 "Failed to setup default DPM tables!",
2407 return result); 2426 return result);
2408 2427
2428 /* initialize ODN table */
2429 if (hwmgr->od_enabled)
2430 vega10_odn_initial_default_setting(hwmgr);
2431
2409 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, 2432 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2410 VOLTAGE_OBJ_SVID2, &voltage_table); 2433 VOLTAGE_OBJ_SVID2, &voltage_table);
2411 pp_table->MaxVidStep = voltage_table.max_vid_step; 2434 pp_table->MaxVidStep = voltage_table.max_vid_step;
@@ -2452,6 +2475,8 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2452 "Failed to initialize Memory Level!", 2475 "Failed to initialize Memory Level!",
2453 return result); 2476 return result);
2454 2477
2478 vega10_populate_vddc_soc_levels(hwmgr);
2479
2455 result = vega10_populate_all_display_clock_levels(hwmgr); 2480 result = vega10_populate_all_display_clock_levels(hwmgr);
2456 PP_ASSERT_WITH_CODE(!result, 2481 PP_ASSERT_WITH_CODE(!result,
2457 "Failed to initialize Display Level!", 2482 "Failed to initialize Display Level!",
@@ -2481,6 +2506,12 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2481 data->vbios_boot_state.mvddc = boot_up_values.usMvddc; 2506 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2482 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk; 2507 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2483 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk; 2508 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2509 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2510 SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
2511
2512 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2513 SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
2514
2484 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; 2515 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2485 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; 2516 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
2486 if (0 != boot_up_values.usVddc) { 2517 if (0 != boot_up_values.usVddc) {
@@ -2829,7 +2860,7 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2829 2860
2830 tmp_result = vega10_construct_voltage_tables(hwmgr); 2861 tmp_result = vega10_construct_voltage_tables(hwmgr);
2831 PP_ASSERT_WITH_CODE(!tmp_result, 2862 PP_ASSERT_WITH_CODE(!tmp_result,
2832 "Failed to contruct voltage tables!", 2863 "Failed to construct voltage tables!",
2833 result = tmp_result); 2864 result = tmp_result);
2834 2865
2835 tmp_result = vega10_init_smc_table(hwmgr); 2866 tmp_result = vega10_init_smc_table(hwmgr);
@@ -3028,7 +3059,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3028 bool disable_mclk_switching_for_frame_lock; 3059 bool disable_mclk_switching_for_frame_lock;
3029 bool disable_mclk_switching_for_vr; 3060 bool disable_mclk_switching_for_vr;
3030 bool force_mclk_high; 3061 bool force_mclk_high;
3031 struct cgs_display_info info = {0};
3032 const struct phm_clock_and_voltage_limits *max_limits; 3062 const struct phm_clock_and_voltage_limits *max_limits;
3033 uint32_t i; 3063 uint32_t i;
3034 struct vega10_hwmgr *data = hwmgr->backend; 3064 struct vega10_hwmgr *data = hwmgr->backend;
@@ -3063,11 +3093,9 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3063 } 3093 }
3064 } 3094 }
3065 3095
3066 cgs_get_active_displays_info(hwmgr->device, &info);
3067
3068 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ 3096 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3069 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; 3097 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3070 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; 3098 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3071 3099
3072 if (PP_CAP(PHM_PlatformCaps_StablePState)) { 3100 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
3073 stable_pstate_sclk_dpm_percentage = 3101 stable_pstate_sclk_dpm_percentage =
@@ -3107,10 +3135,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3107 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR); 3135 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
3108 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); 3136 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
3109 3137
3110 if (info.display_count == 0) 3138 if (hwmgr->display_config->num_display == 0)
3111 disable_mclk_switching = false; 3139 disable_mclk_switching = false;
3112 else 3140 else
3113 disable_mclk_switching = (info.display_count > 1) || 3141 disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
3114 disable_mclk_switching_for_frame_lock || 3142 disable_mclk_switching_for_frame_lock ||
3115 disable_mclk_switching_for_vr || 3143 disable_mclk_switching_for_vr ||
3116 force_mclk_high; 3144 force_mclk_high;
@@ -3171,87 +3199,11 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3171 3199
3172static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) 3200static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3173{ 3201{
3174 const struct phm_set_power_state_input *states =
3175 (const struct phm_set_power_state_input *)input;
3176 const struct vega10_power_state *vega10_ps =
3177 cast_const_phw_vega10_power_state(states->pnew_state);
3178 struct vega10_hwmgr *data = hwmgr->backend; 3202 struct vega10_hwmgr *data = hwmgr->backend;
3179 struct vega10_single_dpm_table *sclk_table =
3180 &(data->dpm_table.gfx_table);
3181 uint32_t sclk = vega10_ps->performance_levels
3182 [vega10_ps->performance_level_count - 1].gfx_clock;
3183 struct vega10_single_dpm_table *mclk_table =
3184 &(data->dpm_table.mem_table);
3185 uint32_t mclk = vega10_ps->performance_levels
3186 [vega10_ps->performance_level_count - 1].mem_clock;
3187 struct PP_Clocks min_clocks = {0};
3188 uint32_t i;
3189 struct cgs_display_info info = {0};
3190
3191 data->need_update_dpm_table = 0;
3192
3193 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
3194 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
3195 for (i = 0; i < sclk_table->count; i++) {
3196 if (sclk == sclk_table->dpm_levels[i].value)
3197 break;
3198 }
3199
3200 if (!(data->apply_overdrive_next_settings_mask &
3201 DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
3202 /* Check SCLK in DAL's minimum clocks
3203 * in case DeepSleep divider update is required.
3204 */
3205 if (data->display_timing.min_clock_in_sr !=
3206 min_clocks.engineClockInSR &&
3207 (min_clocks.engineClockInSR >=
3208 VEGA10_MINIMUM_ENGINE_CLOCK ||
3209 data->display_timing.min_clock_in_sr >=
3210 VEGA10_MINIMUM_ENGINE_CLOCK))
3211 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3212 }
3213
3214 cgs_get_active_displays_info(hwmgr->device, &info);
3215
3216 if (data->display_timing.num_existing_displays !=
3217 info.display_count)
3218 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3219 } else {
3220 for (i = 0; i < sclk_table->count; i++) {
3221 if (sclk == sclk_table->dpm_levels[i].value)
3222 break;
3223 }
3224
3225 if (i >= sclk_table->count)
3226 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3227 else {
3228 /* Check SCLK in DAL's minimum clocks
3229 * in case DeepSleep divider update is required.
3230 */
3231 if (data->display_timing.min_clock_in_sr !=
3232 min_clocks.engineClockInSR &&
3233 (min_clocks.engineClockInSR >=
3234 VEGA10_MINIMUM_ENGINE_CLOCK ||
3235 data->display_timing.min_clock_in_sr >=
3236 VEGA10_MINIMUM_ENGINE_CLOCK))
3237 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3238 }
3239 3203
3240 for (i = 0; i < mclk_table->count; i++) { 3204 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
3241 if (mclk == mclk_table->dpm_levels[i].value) 3205 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3242 break;
3243 }
3244
3245 cgs_get_active_displays_info(hwmgr->device, &info);
3246 3206
3247 if (i >= mclk_table->count)
3248 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3249
3250 if (data->display_timing.num_existing_displays !=
3251 info.display_count ||
3252 i >= mclk_table->count)
3253 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3254 }
3255 return 0; 3207 return 0;
3256} 3208}
3257 3209
@@ -3259,194 +3211,29 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3259 struct pp_hwmgr *hwmgr, const void *input) 3211 struct pp_hwmgr *hwmgr, const void *input)
3260{ 3212{
3261 int result = 0; 3213 int result = 0;
3262 const struct phm_set_power_state_input *states =
3263 (const struct phm_set_power_state_input *)input;
3264 const struct vega10_power_state *vega10_ps =
3265 cast_const_phw_vega10_power_state(states->pnew_state);
3266 struct vega10_hwmgr *data = hwmgr->backend; 3214 struct vega10_hwmgr *data = hwmgr->backend;
3267 uint32_t sclk = vega10_ps->performance_levels
3268 [vega10_ps->performance_level_count - 1].gfx_clock;
3269 uint32_t mclk = vega10_ps->performance_levels
3270 [vega10_ps->performance_level_count - 1].mem_clock;
3271 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3272 struct vega10_dpm_table *golden_dpm_table =
3273 &data->golden_dpm_table;
3274 uint32_t dpm_count, clock_percent;
3275 uint32_t i;
3276 3215
3277 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || 3216 if (!data->need_update_dpm_table)
3278 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { 3217 return 0;
3279
3280 if (!data->need_update_dpm_table &&
3281 !data->apply_optimized_settings &&
3282 !data->apply_overdrive_next_settings_mask)
3283 return 0;
3284
3285 if (data->apply_overdrive_next_settings_mask &
3286 DPMTABLE_OD_UPDATE_SCLK) {
3287 for (dpm_count = 0;
3288 dpm_count < dpm_table->gfx_table.count;
3289 dpm_count++) {
3290 dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
3291 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].enabled;
3292 dpm_table->gfx_table.dpm_levels[dpm_count].value =
3293 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].clock;
3294 }
3295 }
3296
3297 if (data->apply_overdrive_next_settings_mask &
3298 DPMTABLE_OD_UPDATE_MCLK) {
3299 for (dpm_count = 0;
3300 dpm_count < dpm_table->mem_table.count;
3301 dpm_count++) {
3302 dpm_table->mem_table.dpm_levels[dpm_count].enabled =
3303 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].enabled;
3304 dpm_table->mem_table.dpm_levels[dpm_count].value =
3305 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].clock;
3306 }
3307 }
3308
3309 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
3310 data->apply_optimized_settings ||
3311 (data->apply_overdrive_next_settings_mask &
3312 DPMTABLE_OD_UPDATE_SCLK)) {
3313 result = vega10_populate_all_graphic_levels(hwmgr);
3314 PP_ASSERT_WITH_CODE(!result,
3315 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3316 return result);
3317 }
3318
3319 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
3320 (data->apply_overdrive_next_settings_mask &
3321 DPMTABLE_OD_UPDATE_MCLK)){
3322 result = vega10_populate_all_memory_levels(hwmgr);
3323 PP_ASSERT_WITH_CODE(!result,
3324 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3325 return result);
3326 }
3327 } else {
3328 if (!data->need_update_dpm_table &&
3329 !data->apply_optimized_settings)
3330 return 0;
3331
3332 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
3333 data->smu_features[GNLD_DPM_GFXCLK].supported) {
3334 dpm_table->
3335 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
3336 value = sclk;
3337 if (hwmgr->od_enabled) {
3338 /* Need to do calculation based on the golden DPM table
3339 * as the Heatmap GPU Clock axis is also based on
3340 * the default values
3341 */
3342 PP_ASSERT_WITH_CODE(
3343 golden_dpm_table->gfx_table.dpm_levels
3344 [golden_dpm_table->gfx_table.count - 1].value,
3345 "Divide by 0!",
3346 return -1);
3347
3348 dpm_count = dpm_table->gfx_table.count < 2 ?
3349 0 : dpm_table->gfx_table.count - 2;
3350 for (i = dpm_count; i > 1; i--) {
3351 if (sclk > golden_dpm_table->gfx_table.dpm_levels
3352 [golden_dpm_table->gfx_table.count - 1].value) {
3353 clock_percent =
3354 ((sclk - golden_dpm_table->gfx_table.dpm_levels
3355 [golden_dpm_table->gfx_table.count - 1].value) *
3356 100) /
3357 golden_dpm_table->gfx_table.dpm_levels
3358 [golden_dpm_table->gfx_table.count - 1].value;
3359
3360 dpm_table->gfx_table.dpm_levels[i].value =
3361 golden_dpm_table->gfx_table.dpm_levels[i].value +
3362 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3363 clock_percent) / 100;
3364 } else if (golden_dpm_table->
3365 gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
3366 sclk) {
3367 clock_percent =
3368 ((golden_dpm_table->gfx_table.dpm_levels
3369 [golden_dpm_table->gfx_table.count - 1].value -
3370 sclk) * 100) /
3371 golden_dpm_table->gfx_table.dpm_levels
3372 [golden_dpm_table->gfx_table.count-1].value;
3373
3374 dpm_table->gfx_table.dpm_levels[i].value =
3375 golden_dpm_table->gfx_table.dpm_levels[i].value -
3376 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3377 clock_percent) / 100;
3378 } else
3379 dpm_table->gfx_table.dpm_levels[i].value =
3380 golden_dpm_table->gfx_table.dpm_levels[i].value;
3381 }
3382 }
3383 }
3384 3218
3385 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK && 3219 if (data->need_update_dpm_table &
3386 data->smu_features[GNLD_DPM_UCLK].supported) { 3220 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
3387 dpm_table-> 3221 result = vega10_populate_all_graphic_levels(hwmgr);
3388 mem_table.dpm_levels[dpm_table->mem_table.count - 1]. 3222 PP_ASSERT_WITH_CODE((0 == result),
3389 value = mclk; 3223 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3224 return result);
3225 }
3390 3226
3391 if (hwmgr->od_enabled) { 3227 if (data->need_update_dpm_table &
3392 PP_ASSERT_WITH_CODE( 3228 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3393 golden_dpm_table->mem_table.dpm_levels 3229 result = vega10_populate_all_memory_levels(hwmgr);
3394 [golden_dpm_table->mem_table.count - 1].value, 3230 PP_ASSERT_WITH_CODE((0 == result),
3395 "Divide by 0!", 3231 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3396 return -1); 3232 return result);
3233 }
3397 3234
3398 dpm_count = dpm_table->mem_table.count < 2 ? 3235 vega10_populate_vddc_soc_levels(hwmgr);
3399 0 : dpm_table->mem_table.count - 2;
3400 for (i = dpm_count; i > 1; i--) {
3401 if (mclk > golden_dpm_table->mem_table.dpm_levels
3402 [golden_dpm_table->mem_table.count-1].value) {
3403 clock_percent = ((mclk -
3404 golden_dpm_table->mem_table.dpm_levels
3405 [golden_dpm_table->mem_table.count-1].value) *
3406 100) /
3407 golden_dpm_table->mem_table.dpm_levels
3408 [golden_dpm_table->mem_table.count-1].value;
3409
3410 dpm_table->mem_table.dpm_levels[i].value =
3411 golden_dpm_table->mem_table.dpm_levels[i].value +
3412 (golden_dpm_table->mem_table.dpm_levels[i].value *
3413 clock_percent) / 100;
3414 } else if (golden_dpm_table->mem_table.dpm_levels
3415 [dpm_table->mem_table.count-1].value > mclk) {
3416 clock_percent = ((golden_dpm_table->mem_table.dpm_levels
3417 [golden_dpm_table->mem_table.count-1].value - mclk) *
3418 100) /
3419 golden_dpm_table->mem_table.dpm_levels
3420 [golden_dpm_table->mem_table.count-1].value;
3421
3422 dpm_table->mem_table.dpm_levels[i].value =
3423 golden_dpm_table->mem_table.dpm_levels[i].value -
3424 (golden_dpm_table->mem_table.dpm_levels[i].value *
3425 clock_percent) / 100;
3426 } else
3427 dpm_table->mem_table.dpm_levels[i].value =
3428 golden_dpm_table->mem_table.dpm_levels[i].value;
3429 }
3430 }
3431 }
3432 3236
3433 if ((data->need_update_dpm_table &
3434 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
3435 data->apply_optimized_settings) {
3436 result = vega10_populate_all_graphic_levels(hwmgr);
3437 PP_ASSERT_WITH_CODE(!result,
3438 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3439 return result);
3440 }
3441
3442 if (data->need_update_dpm_table &
3443 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3444 result = vega10_populate_all_memory_levels(hwmgr);
3445 PP_ASSERT_WITH_CODE(!result,
3446 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3447 return result);
3448 }
3449 }
3450 return result; 3237 return result;
3451} 3238}
3452 3239
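
Review note: the rewritten function keys purely off need_update_dpm_table, which is now set either at OD-edit time (vega10_odn_edit_dpm_table() below) or by the display-count check above; the sclk/mclk rescans and the golden-table percentage rescaling are gone. Combining the masks with '+' works only because the DPMTABLE_* flags are disjoint single bits; values as I recall them from the hwmgr headers of this era (illustrative, verify against the header):

    #define DPMTABLE_OD_UPDATE_SCLK   0x00000001
    #define DPMTABLE_OD_UPDATE_MCLK   0x00000002
    #define DPMTABLE_UPDATE_SCLK      0x00000004
    #define DPMTABLE_UPDATE_MCLK      0x00000008
    #define DPMTABLE_OD_UPDATE_VDDC   0x00000010
    #define DPMTABLE_UPDATE_SOCCLK    0x00000020
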
@@ -3742,8 +3529,9 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3742 PP_ASSERT_WITH_CODE(!result, 3529 PP_ASSERT_WITH_CODE(!result,
3743 "Failed to upload PPtable!", return result); 3530 "Failed to upload PPtable!", return result);
3744 3531
3745 data->apply_optimized_settings = false; 3532 vega10_update_avfs(hwmgr);
3746 data->apply_overdrive_next_settings_mask = 0; 3533
3534 data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
3747 3535
3748 return 0; 3536 return 0;
3749} 3537}
@@ -3793,16 +3581,18 @@ static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3793} 3581}
3794 3582
3795static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, 3583static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3796 struct pp_gpu_power *query) 3584 uint32_t *query)
3797{ 3585{
3798 uint32_t value; 3586 uint32_t value;
3799 3587
3588 if (!query)
3589 return -EINVAL;
3590
3800 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr); 3591 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
3801 value = smum_get_argument(hwmgr); 3592 value = smum_get_argument(hwmgr);
3802 3593
3803 /* power value is an integer */ 3594 /* SMC returns actual watts; keep consistent with legacy ASICs: low 8 bits are 8 fractional bits */

3804 memset(query, 0, sizeof *query); 3595 *query = value << 8;
3805 query->average_gpu_power = value << 8;
3806 3596
3807 return 0; 3597 return 0;
3808} 3598}
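
Review note: the sensor interface drops struct pp_gpu_power for a plain uint32_t in 8.8 fixed point, matching older ASICs. Since PPSMC_MSG_GetCurrPkgPwr returns whole watts, the fractional byte is always zero here; decode sketch for a consumer:

    uint32_t query;                    /* filled by vega10_get_gpu_power() */
    uint32_t watts = query >> 8;       /* integer watts                    */
    uint32_t frac_mw = (query & 0xff) * 1000 / 256; /* fraction, in mW     */
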
@@ -3810,22 +3600,18 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3810static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, 3600static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3811 void *value, int *size) 3601 void *value, int *size)
3812{ 3602{
3813 uint32_t sclk_idx, mclk_idx, activity_percent = 0; 3603 struct amdgpu_device *adev = hwmgr->adev;
3604 uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
3814 struct vega10_hwmgr *data = hwmgr->backend; 3605 struct vega10_hwmgr *data = hwmgr->backend;
3815 struct vega10_dpm_table *dpm_table = &data->dpm_table; 3606 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3816 int ret = 0; 3607 int ret = 0;
3817 uint32_t reg, val_vid; 3608 uint32_t val_vid;
3818 3609
3819 switch (idx) { 3610 switch (idx) {
3820 case AMDGPU_PP_SENSOR_GFX_SCLK: 3611 case AMDGPU_PP_SENSOR_GFX_SCLK:
3821 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); 3612 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
3822 sclk_idx = smum_get_argument(hwmgr); 3613 sclk_mhz = smum_get_argument(hwmgr);
3823 if (sclk_idx < dpm_table->gfx_table.count) { 3614 *((uint32_t *)value) = sclk_mhz * 100;
3824 *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
3825 *size = 4;
3826 } else {
3827 ret = -EINVAL;
3828 }
3829 break; 3615 break;
3830 case AMDGPU_PP_SENSOR_GFX_MCLK: 3616 case AMDGPU_PP_SENSOR_GFX_MCLK:
3831 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex); 3617 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
@@ -3856,18 +3642,10 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3856 *size = 4; 3642 *size = 4;
3857 break; 3643 break;
3858 case AMDGPU_PP_SENSOR_GPU_POWER: 3644 case AMDGPU_PP_SENSOR_GPU_POWER:
3859 if (*size < sizeof(struct pp_gpu_power)) 3645 ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
3860 ret = -EINVAL;
3861 else {
3862 *size = sizeof(struct pp_gpu_power);
3863 ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
3864 }
3865 break; 3646 break;
3866 case AMDGPU_PP_SENSOR_VDDGFX: 3647 case AMDGPU_PP_SENSOR_VDDGFX:
3867 reg = soc15_get_register_offset(SMUIO_HWID, 0, 3648 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
3868 mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX,
3869 mmSMUSVI0_PLANE0_CURRENTVID);
3870 val_vid = (cgs_read_register(hwmgr->device, reg) &
3871 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >> 3649 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
3872 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT; 3650 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
3873 *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid); 3651 *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
@@ -3956,26 +3734,18 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
3956 (struct phm_ppt_v2_information *)hwmgr->pptable; 3734 (struct phm_ppt_v2_information *)hwmgr->pptable;
3957 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk; 3735 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
3958 uint32_t idx; 3736 uint32_t idx;
3959 uint32_t num_active_disps = 0;
3960 struct cgs_display_info info = {0};
3961 struct PP_Clocks min_clocks = {0}; 3737 struct PP_Clocks min_clocks = {0};
3962 uint32_t i; 3738 uint32_t i;
3963 struct pp_display_clock_request clock_req; 3739 struct pp_display_clock_request clock_req;
3964 3740
3965 info.mode_info = NULL; 3741 if (hwmgr->display_config->num_display > 1)
3966
3967 cgs_get_active_displays_info(hwmgr->device, &info);
3968
3969 num_active_disps = info.display_count;
3970
3971 if (num_active_disps > 1)
3972 vega10_notify_smc_display_change(hwmgr, false); 3742 vega10_notify_smc_display_change(hwmgr, false);
3973 else 3743 else
3974 vega10_notify_smc_display_change(hwmgr, true); 3744 vega10_notify_smc_display_change(hwmgr, true);
3975 3745
3976 min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; 3746 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
3977 min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk; 3747 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
3978 min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; 3748 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3979 3749
3980 for (i = 0; i < dpm_table->count; i++) { 3750 for (i = 0; i < dpm_table->count; i++) {
3981 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock) 3751 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
@@ -4120,6 +3890,47 @@ static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4120 } 3890 }
4121} 3891}
4122 3892
3893static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
3894 enum pp_clock_type type, uint32_t mask)
3895{
3896 struct vega10_hwmgr *data = hwmgr->backend;
3897
3898 switch (type) {
3899 case PP_SCLK:
3900 data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
3901 data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
3902
3903 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3904 "Failed to upload boot level to lowest!",
3905 return -EINVAL);
3906
3907 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3908 "Failed to upload dpm max level to highest!",
3909 return -EINVAL);
3910 break;
3911
3912 case PP_MCLK:
3913 data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
3914 data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
3915
3916 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3917 "Failed to upload boot level to lowest!",
3918 return -EINVAL);
3919
3920 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3921 "Failed to upload dpm max level to highest!",
3922 return -EINVAL);
3923
3924 break;
3925
3926 case PP_PCIE:
3927 default:
3928 break;
3929 }
3930
3931 return 0;
3932}
3933
4123static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 3934static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4124 enum amd_dpm_forced_level level) 3935 enum amd_dpm_forced_level level)
4125{ 3936{
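
Review note: vega10_force_clock_level() is only moved up (its old definition is deleted further down) so the forward declaration removed in the first hunk can go. For reference, sysfs hands in a bitmask of allowed DPM levels and the 1-based ffs()/fls() pick the lowest and highest set bits:

    /* mask 0x0c = 0b1100 -> levels 2..3          */
    /*   boot level = ffs(0x0c) - 1 = 2           */
    /*   max  level = fls(0x0c) - 1 = 3           */
    /* mask 0 -> both clamped to level 0          */
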
@@ -4356,97 +4167,15 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4356 struct vega10_hwmgr *data = hwmgr->backend; 4167 struct vega10_hwmgr *data = hwmgr->backend;
4357 Watermarks_t *table = &(data->smc_state_table.water_marks_table); 4168 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4358 int result = 0; 4169 int result = 0;
4359 uint32_t i;
4360 4170
4361 if (!data->registry_data.disable_water_mark) { 4171 if (!data->registry_data.disable_water_mark) {
4362 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) { 4172 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
4363 table->WatermarkRow[WM_DCEFCLK][i].MinClock =
4364 cpu_to_le16((uint16_t)
4365 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
4366 100);
4367 table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
4368 cpu_to_le16((uint16_t)
4369 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
4370 100);
4371 table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
4372 cpu_to_le16((uint16_t)
4373 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
4374 100);
4375 table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
4376 cpu_to_le16((uint16_t)
4377 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
4378 100);
4379 table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
4380 wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
4381 }
4382
4383 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
4384 table->WatermarkRow[WM_SOCCLK][i].MinClock =
4385 cpu_to_le16((uint16_t)
4386 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
4387 100);
4388 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
4389 cpu_to_le16((uint16_t)
4390 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
4391 100);
4392 table->WatermarkRow[WM_SOCCLK][i].MinUclk =
4393 cpu_to_le16((uint16_t)
4394 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
4395 100);
4396 table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
4397 cpu_to_le16((uint16_t)
4398 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
4399 100);
4400 table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
4401 wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
4402 }
4403 data->water_marks_bitmap = WaterMarksExist; 4173 data->water_marks_bitmap = WaterMarksExist;
4404 } 4174 }
4405 4175
4406 return result; 4176 return result;
4407} 4177}
4408 4178
4409static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4410 enum pp_clock_type type, uint32_t mask)
4411{
4412 struct vega10_hwmgr *data = hwmgr->backend;
4413
4414 switch (type) {
4415 case PP_SCLK:
4416 data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
4417 data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
4418
4419 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4420 "Failed to upload boot level to lowest!",
4421 return -EINVAL);
4422
4423 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4424 "Failed to upload dpm max level to highest!",
4425 return -EINVAL);
4426 break;
4427
4428 case PP_MCLK:
4429 data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
4430 data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
4431
4432 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4433 "Failed to upload boot level to lowest!",
4434 return -EINVAL);
4435
4436 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4437 "Failed to upload dpm max level to highest!",
4438 return -EINVAL);
4439
4440 break;
4441
4442 case PP_PCIE:
4443 default:
4444 break;
4445 }
4446
4447 return 0;
4448}
4449
4450static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, 4179static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4451 enum pp_clock_type type, char *buf) 4180 enum pp_clock_type type, char *buf)
4452{ 4181{
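
Review note: the open-coded watermark fill moves into the shared smu_set_watermarks_for_clocks_ranges() helper. The unit handling it inherits from the code removed above, for reference: DC supplies clock ranges in kHz while the SMC Watermarks_t rows store little-endian uint16 values in 10 kHz units:

    table->WatermarkRow[WM_DCEFCLK][i].MinClock =
            cpu_to_le16((uint16_t)(wm_min_dcefclk_in_khz / 100));
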
@@ -4454,6 +4183,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4454 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); 4183 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4455 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); 4184 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4456 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); 4185 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4186 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
4187
4457 int i, now, size = 0; 4188 int i, now, size = 0;
4458 4189
4459 switch (type) { 4190 switch (type) {
@@ -4492,6 +4223,40 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4492 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "", 4223 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
4493 (i == now) ? "*" : ""); 4224 (i == now) ? "*" : "");
4494 break; 4225 break;
4226 case OD_SCLK:
4227 if (hwmgr->od_enabled) {
4228 size = sprintf(buf, "%s:\n", "OD_SCLK");
4229 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4230 for (i = 0; i < podn_vdd_dep->count; i++)
4231 size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4232 i, podn_vdd_dep->entries[i].clk / 100,
4233 podn_vdd_dep->entries[i].vddc);
4234 }
4235 break;
4236 case OD_MCLK:
4237 if (hwmgr->od_enabled) {
4238 size = sprintf(buf, "%s:\n", "OD_MCLK");
4239 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
4240 for (i = 0; i < podn_vdd_dep->count; i++)
4241 size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4242 i, podn_vdd_dep->entries[i].clk/100,
4243 podn_vdd_dep->entries[i].vddc);
4244 }
4245 break;
4246 case OD_RANGE:
4247 if (hwmgr->od_enabled) {
4248 size = sprintf(buf, "%s:\n", "OD_RANGE");
4249 size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4250 data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
4251 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4252 size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4253 data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
4254 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4255 size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4256 data->odn_dpm_table.min_vddc,
4257 data->odn_dpm_table.max_vddc);
4258 }
4259 break;
4495 default: 4260 default:
4496 break; 4261 break;
4497 } 4262 }
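
Review note: these cases back the pp_od_clk_voltage sysfs file. Clocks are stored internally in 10 kHz units, hence the /100 to print MHz. With overdrive enabled the output looks roughly like this (values hypothetical):

    OD_SCLK:
    0:        852MHz        800mV
    1:        991MHz        900mV
    OD_RANGE:
    SCLK:     852MHz       2400MHz
    MCLK:     167MHz       1500MHz
    VDDC:     800mV        1200mV
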
@@ -4501,10 +4266,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4501static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 4266static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4502{ 4267{
4503 struct vega10_hwmgr *data = hwmgr->backend; 4268 struct vega10_hwmgr *data = hwmgr->backend;
4504 int result = 0;
4505 uint32_t num_turned_on_displays = 1;
4506 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); 4269 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4507 struct cgs_display_info info = {0}; 4270 int result = 0;
4508 4271
4509 if ((data->water_marks_bitmap & WaterMarksExist) && 4272 if ((data->water_marks_bitmap & WaterMarksExist) &&
4510 !(data->water_marks_bitmap & WaterMarksLoaded)) { 4273 !(data->water_marks_bitmap & WaterMarksLoaded)) {
@@ -4514,10 +4277,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4514 } 4277 }
4515 4278
4516 if (data->water_marks_bitmap & WaterMarksLoaded) { 4279 if (data->water_marks_bitmap & WaterMarksLoaded) {
4517 cgs_get_active_displays_info(hwmgr->device, &info);
4518 num_turned_on_displays = info.display_count;
4519 smum_send_msg_to_smc_with_parameter(hwmgr, 4280 smum_send_msg_to_smc_with_parameter(hwmgr,
4520 PPSMC_MSG_NumOfDisplays, num_turned_on_displays); 4281 PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
4521 } 4282 }
4522 4283
4523 return result; 4284 return result;
@@ -4603,15 +4364,12 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
4603{ 4364{
4604 struct vega10_hwmgr *data = hwmgr->backend; 4365 struct vega10_hwmgr *data = hwmgr->backend;
4605 bool is_update_required = false; 4366 bool is_update_required = false;
4606 struct cgs_display_info info = {0, 0, NULL};
4607 4367
4608 cgs_get_active_displays_info(hwmgr->device, &info); 4368 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4609
4610 if (data->display_timing.num_existing_displays != info.display_count)
4611 is_update_required = true; 4369 is_update_required = true;
4612 4370
4613 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) { 4371 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
4614 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr) 4372 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
4615 is_update_required = true; 4373 is_update_required = true;
4616 } 4374 }
4617 4375
@@ -4886,6 +4644,200 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
4886 return 0; 4644 return 0;
4887} 4645}
4888 4646
4647
4648static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4649 enum PP_OD_DPM_TABLE_COMMAND type,
4650 uint32_t clk,
4651 uint32_t voltage)
4652{
4653 struct vega10_hwmgr *data = hwmgr->backend;
4654 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4655 struct vega10_single_dpm_table *golden_table;
4656
4657 if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
4658 pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
4659 return false;
4660 }
4661
4662 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4663 golden_table = &(data->golden_dpm_table.gfx_table);
4664 if (golden_table->dpm_levels[0].value > clk ||
4665 hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4666 pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4667 golden_table->dpm_levels[0].value/100,
4668 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4669 return false;
4670 }
4671 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4672 golden_table = &(data->golden_dpm_table.mem_table);
4673 if (golden_table->dpm_levels[0].value > clk ||
4674 hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4675 pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4676 golden_table->dpm_levels[0].value/100,
4677 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4678 return false;
4679 }
4680 } else {
4681 return false;
4682 }
4683
4684 return true;
4685}
4686
4687static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
4688{
4689 struct vega10_hwmgr *data = hwmgr->backend;
4690 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4691 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
4692 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4693 struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
4694 uint32_t i;
4695
4696 dep_table = table_info->vdd_dep_on_mclk;
4697 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
4698
4699 for (i = 0; i < dep_table->count; i++) {
4700 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
4701 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
4702 return;
4703 }
4704 }
4705
4706 dep_table = table_info->vdd_dep_on_sclk;
4707 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
4708 for (i = 0; i < dep_table->count; i++) {
4709 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
4710 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
4711 return;
4712 }
4713 }
4714
4715 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
4716 data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
4717 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
4718 }
4719}
4720
4721static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
4722 enum PP_OD_DPM_TABLE_COMMAND type)
4723{
4724 struct vega10_hwmgr *data = hwmgr->backend;
4725 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
4726 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
4727 struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table;
4728
4729 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
4730 &data->odn_dpm_table.vdd_dep_on_socclk;
4731 struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;
4732
4733 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
4734 uint8_t i, j;
4735
4736 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4737 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4738 for (i = 0; i < podn_vdd_dep->count - 1; i++)
4739 od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
4740 if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
4741 od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
4742 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4743 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
4744 for (i = 0; i < dpm_table->count; i++) {
4745 for (j = 0; j < od_vddc_lookup_table->count; j++) {
4746 if (od_vddc_lookup_table->entries[j].us_vdd >
4747 podn_vdd_dep->entries[i].vddc)
4748 break;
4749 }
4750 if (j == od_vddc_lookup_table->count) {
4751 od_vddc_lookup_table->entries[j-1].us_vdd =
4752 podn_vdd_dep->entries[i].vddc;
4753 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4754 }
4755 podn_vdd_dep->entries[i].vddInd = j;
4756 }
4757 dpm_table = &data->dpm_table.soc_table;
4758 for (i = 0; i < dep_table->count; i++) {
4759 if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd &&
4760 dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) {
4761 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4762 podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
4763 dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
4764 }
4765 }
4766 if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
4767 podn_vdd_dep->entries[dep_table->count-1].clk) {
4768 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4769 podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
4770 dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk;
4771 }
4772 if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
4773 podn_vdd_dep->entries[dep_table->count-1].vddInd) {
4774 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4775 podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd;
4776 }
4777 }
4778}
4779
4780static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
4781 enum PP_OD_DPM_TABLE_COMMAND type,
4782 long *input, uint32_t size)
4783{
4784 struct vega10_hwmgr *data = hwmgr->backend;
4785 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
4786 struct vega10_single_dpm_table *dpm_table;
4787
4788 uint32_t input_clk;
4789 uint32_t input_vol;
4790 uint32_t input_level;
4791 uint32_t i;
4792
4793 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
4794 return -EINVAL);
4795
4796 if (!hwmgr->od_enabled) {
4797 pr_info("OverDrive feature not enabled\n");
4798 return -EINVAL;
4799 }
4800
4801 if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
4802 dpm_table = &data->dpm_table.gfx_table;
4803 podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
4804 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4805 } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
4806 dpm_table = &data->dpm_table.mem_table;
4807 podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
4808 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4809 } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
4810 memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
4811 vega10_odn_initial_default_setting(hwmgr);
4812 return 0;
4813 } else if (PP_OD_COMMIT_DPM_TABLE == type) {
4814 vega10_check_dpm_table_updated(hwmgr);
4815 return 0;
4816 } else {
4817 return -EINVAL;
4818 }
4819
4820 for (i = 0; i < size; i += 3) {
4821 if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
4822 pr_info("invalid clock voltage input\n");
4823 return 0;
4824 }
4825 input_level = input[i];
4826 input_clk = input[i+1] * 100;
4827 input_vol = input[i+2];
4828
4829 if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
4830 dpm_table->dpm_levels[input_level].value = input_clk;
4831 podn_vdd_dep_table->entries[input_level].clk = input_clk;
4832 podn_vdd_dep_table->entries[input_level].vddc = input_vol;
4833 } else {
4834 return -EINVAL;
4835 }
4836 }
4837 vega10_odn_update_soc_table(hwmgr, type);
4838 return 0;
4839}
4840
4889static const struct pp_hwmgr_func vega10_hwmgr_funcs = { 4841static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4890 .backend_init = vega10_hwmgr_backend_init, 4842 .backend_init = vega10_hwmgr_backend_init,
4891 .backend_fini = vega10_hwmgr_backend_fini, 4843 .backend_fini = vega10_hwmgr_backend_fini,
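
Review note: odn_edit_dpm_table() is the write side of pp_od_clk_voltage; amdgpu_pm.c parses the leading 's'/'m' (edit the sclk/mclk table), 'r' (restore defaults) and 'c' (commit) and forwards the numbers untouched. Layout of the input array as consumed above (sketch):

    /* "s 1 1000 900" -> type = PP_OD_EDIT_SCLK_VDDC_TABLE with
     *   input[i]   = 1     DPM level index
     *   input[i+1] = 1000  clock in MHz (x100 -> internal 10 kHz units)
     *   input[i+2] = 900   voltage in mV
     * "r" -> PP_OD_RESTORE_DEFAULT_TABLE, "c" -> PP_OD_COMMIT_DPM_TABLE */
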
@@ -4944,6 +4896,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4944 .get_power_profile_mode = vega10_get_power_profile_mode, 4896 .get_power_profile_mode = vega10_get_power_profile_mode,
4945 .set_power_profile_mode = vega10_set_power_profile_mode, 4897 .set_power_profile_mode = vega10_set_power_profile_mode,
4946 .set_power_limit = vega10_set_power_limit, 4898 .set_power_limit = vega10_set_power_limit,
4899 .odn_edit_dpm_table = vega10_odn_edit_dpm_table,
4947}; 4900};
4948 4901
4949int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, 4902int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 5339ea1f3dce..aadd6cbc7e85 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -282,15 +282,21 @@ struct vega10_registry_data {
 
 struct vega10_odn_clock_voltage_dependency_table {
 	uint32_t count;
-	struct phm_ppt_v1_clock_voltage_dependency_record
-		entries[MAX_REGULAR_DPM_NUMBER];
+	struct phm_ppt_v1_clock_voltage_dependency_record entries[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct vega10_odn_vddc_lookup_table {
+	uint32_t count;
+	struct phm_ppt_v1_voltage_lookup_record entries[MAX_REGULAR_DPM_NUMBER];
 };
 
 struct vega10_odn_dpm_table {
-	struct phm_odn_clock_levels odn_core_clock_dpm_levels;
-	struct phm_odn_clock_levels odn_memory_clock_dpm_levels;
-	struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_sclk;
-	struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_mclk;
+	struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_sclk;
+	struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_mclk;
+	struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_socclk;
+	struct vega10_odn_vddc_lookup_table vddc_lookup_table;
+	uint32_t max_vddc;
+	uint32_t min_vddc;
 };
 
 struct vega10_odn_fan_table {
@@ -301,8 +307,8 @@ struct vega10_odn_fan_table {
 };
 
 struct vega10_hwmgr {
 	struct vega10_dpm_table dpm_table;
 	struct vega10_dpm_table golden_dpm_table;
 	struct vega10_registry_data registry_data;
 	struct vega10_vbios_boot_state vbios_boot_state;
 	struct vega10_mclk_latency_table mclk_latency_table;
@@ -368,12 +374,8 @@ struct vega10_hwmgr {
 	bool need_long_memory_training;
 
 	/* Internal settings to apply the application power optimization parameters */
-	bool apply_optimized_settings;
 	uint32_t disable_dpm_mask;
 
-	/* ---- Overdrive next setting ---- */
-	uint32_t apply_overdrive_next_settings_mask;
-
 	/* ---- SMU9 ---- */
 	struct smu_features smu_features[GNLD_FEATURES_MAX];
 	struct vega10_smc_state_table smc_state_table;
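The socclk dependency copy and the min/max vddc bounds added to vega10_odn_dpm_table exist so OverDrive edits can be range-checked and then propagated to the soc clock. A minimal sketch of the kind of check they enable, assuming the bounds were cached from the VBIOS limits at init time (cf. the validation helper on the .c side of this series; the helper name here is illustrative):

	static bool vega10_odn_vddc_in_range(struct vega10_odn_dpm_table *odn_table,
					     uint32_t vddc)
	{
		/* reject user voltages outside the VBIOS-derived window */
		return vddc >= odn_table->min_vddc && vddc <= odn_table->max_vddc;
	}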
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index ba63faefc61f..a9efd8554fbc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -27,7 +27,7 @@
27#include "vega10_ppsmc.h" 27#include "vega10_ppsmc.h"
28#include "vega10_inc.h" 28#include "vega10_inc.h"
29#include "pp_debug.h" 29#include "pp_debug.h"
30#include "pp_soc15.h" 30#include "soc15_common.h"
31 31
32static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] = 32static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] =
33{ 33{
@@ -888,36 +888,36 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
888 if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) { 888 if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
889 if (PP_CAP(PHM_PlatformCaps_SQRamping)) { 889 if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
890 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL); 890 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
891 data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en); 891 data = REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
892 data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en); 892 data = REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
893 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data); 893 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
894 } 894 }
895 895
896 if (PP_CAP(PHM_PlatformCaps_DBRamping)) { 896 if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
897 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL); 897 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
898 data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en); 898 data = REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
899 data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en); 899 data = REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
900 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data); 900 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
901 } 901 }
902 902
903 if (PP_CAP(PHM_PlatformCaps_TDRamping)) { 903 if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
904 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL); 904 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
905 data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en); 905 data = REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
906 data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en); 906 data = REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
907 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data); 907 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
908 } 908 }
909 909
910 if (PP_CAP(PHM_PlatformCaps_TCPRamping)) { 910 if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
911 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL); 911 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
912 data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en); 912 data = REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
913 data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en); 913 data = REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
914 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data); 914 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
915 } 915 }
916 916
917 if (PP_CAP(PHM_PlatformCaps_DBRRamping)) { 917 if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
918 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL); 918 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
919 data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en); 919 data = REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
920 data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en); 920 data = REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
921 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data); 921 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
922 } 922 }
923 } 923 }
@@ -930,20 +930,18 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
930 930
931static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) 931static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
932{ 932{
933 struct amdgpu_device *adev = hwmgr->adev;
933 int result; 934 int result;
934 uint32_t num_se = 0, count, data; 935 uint32_t num_se = 0, count, data;
935 struct amdgpu_device *adev = hwmgr->adev;
936 uint32_t reg;
937 936
938 num_se = adev->gfx.config.max_shader_engines; 937 num_se = adev->gfx.config.max_shader_engines;
939 938
940 cgs_enter_safe_mode(hwmgr->device, true); 939 adev->gfx.rlc.funcs->enter_safe_mode(adev);
941 940
942 cgs_lock_grbm_idx(hwmgr->device, true); 941 mutex_lock(&adev->grbm_idx_mutex);
943 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
944 for (count = 0; count < num_se; count++) { 942 for (count = 0; count < num_se; count++) {
945 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); 943 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
946 cgs_write_register(hwmgr->device, reg, data); 944 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
947 945
948 result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT); 946 result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
949 result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT); 947 result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
@@ -958,43 +956,43 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
958 if (0 != result) 956 if (0 != result)
959 break; 957 break;
960 } 958 }
961 cgs_write_register(hwmgr->device, reg, 0xE0000000); 959 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
962 cgs_lock_grbm_idx(hwmgr->device, false); 960 mutex_unlock(&adev->grbm_idx_mutex);
963 961
964 vega10_didt_set_mask(hwmgr, true); 962 vega10_didt_set_mask(hwmgr, true);
965 963
966 cgs_enter_safe_mode(hwmgr->device, false); 964 adev->gfx.rlc.funcs->exit_safe_mode(adev);
967 965
968 return 0; 966 return 0;
969} 967}
970 968
971static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) 969static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
972{ 970{
973 cgs_enter_safe_mode(hwmgr->device, true); 971 struct amdgpu_device *adev = hwmgr->adev;
972
973 adev->gfx.rlc.funcs->enter_safe_mode(adev);
974 974
975 vega10_didt_set_mask(hwmgr, false); 975 vega10_didt_set_mask(hwmgr, false);
976 976
977 cgs_enter_safe_mode(hwmgr->device, false); 977 adev->gfx.rlc.funcs->exit_safe_mode(adev);
978 978
979 return 0; 979 return 0;
980} 980}
981 981
982static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) 982static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
983{ 983{
984 struct amdgpu_device *adev = hwmgr->adev;
984 int result; 985 int result;
985 uint32_t num_se = 0, count, data; 986 uint32_t num_se = 0, count, data;
986 struct amdgpu_device *adev = hwmgr->adev;
987 uint32_t reg;
988 987
989 num_se = adev->gfx.config.max_shader_engines; 988 num_se = adev->gfx.config.max_shader_engines;
990 989
991 cgs_enter_safe_mode(hwmgr->device, true); 990 adev->gfx.rlc.funcs->enter_safe_mode(adev);
992 991
993 cgs_lock_grbm_idx(hwmgr->device, true); 992 mutex_lock(&adev->grbm_idx_mutex);
994 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
995 for (count = 0; count < num_se; count++) { 993 for (count = 0; count < num_se; count++) {
996 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); 994 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
997 cgs_write_register(hwmgr->device, reg, data); 995 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
998 996
999 result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT); 997 result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
1000 result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT); 998 result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
@@ -1003,12 +1001,12 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
1003 if (0 != result) 1001 if (0 != result)
1004 break; 1002 break;
1005 } 1003 }
1006 cgs_write_register(hwmgr->device, reg, 0xE0000000); 1004 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
1007 cgs_lock_grbm_idx(hwmgr->device, false); 1005 mutex_unlock(&adev->grbm_idx_mutex);
1008 1006
1009 vega10_didt_set_mask(hwmgr, true); 1007 vega10_didt_set_mask(hwmgr, true);
1010 1008
1011 cgs_enter_safe_mode(hwmgr->device, false); 1009 adev->gfx.rlc.funcs->exit_safe_mode(adev);
1012 1010
1013 vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10); 1011 vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
1014 if (PP_CAP(PHM_PlatformCaps_GCEDC)) 1012 if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1022,13 +1020,14 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
1022 1020
1023static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) 1021static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
1024{ 1022{
1023 struct amdgpu_device *adev = hwmgr->adev;
1025 uint32_t data; 1024 uint32_t data;
1026 1025
1027 cgs_enter_safe_mode(hwmgr->device, true); 1026 adev->gfx.rlc.funcs->enter_safe_mode(adev);
1028 1027
1029 vega10_didt_set_mask(hwmgr, false); 1028 vega10_didt_set_mask(hwmgr, false);
1030 1029
1031 cgs_enter_safe_mode(hwmgr->device, false); 1030 adev->gfx.rlc.funcs->exit_safe_mode(adev);
1032 1031
1033 if (PP_CAP(PHM_PlatformCaps_GCEDC)) { 1032 if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
1034 data = 0x00000000; 1033 data = 0x00000000;
@@ -1043,20 +1042,18 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
1043 1042
1044static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) 1043static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
1045{ 1044{
1045 struct amdgpu_device *adev = hwmgr->adev;
1046 int result; 1046 int result;
1047 uint32_t num_se = 0, count, data; 1047 uint32_t num_se = 0, count, data;
1048 struct amdgpu_device *adev = hwmgr->adev;
1049 uint32_t reg;
1050 1048
1051 num_se = adev->gfx.config.max_shader_engines; 1049 num_se = adev->gfx.config.max_shader_engines;
1052 1050
1053 cgs_enter_safe_mode(hwmgr->device, true); 1051 adev->gfx.rlc.funcs->enter_safe_mode(adev);
1054 1052
1055 cgs_lock_grbm_idx(hwmgr->device, true); 1053 mutex_lock(&adev->grbm_idx_mutex);
1056 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
1057 for (count = 0; count < num_se; count++) { 1054 for (count = 0; count < num_se; count++) {
1058 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); 1055 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1059 cgs_write_register(hwmgr->device, reg, data); 1056 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1060 result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT); 1057 result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT);
1061 result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); 1058 result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
1062 result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); 1059 result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1067,46 +1064,46 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
1067 if (0 != result) 1064 if (0 != result)
1068 break; 1065 break;
1069 } 1066 }
1070 cgs_write_register(hwmgr->device, reg, 0xE0000000); 1067 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
1071 cgs_lock_grbm_idx(hwmgr->device, false); 1068 mutex_unlock(&adev->grbm_idx_mutex);
1072 1069
1073 vega10_didt_set_mask(hwmgr, true); 1070 vega10_didt_set_mask(hwmgr, true);
1074 1071
1075 cgs_enter_safe_mode(hwmgr->device, false); 1072 adev->gfx.rlc.funcs->exit_safe_mode(adev);
1076 1073
1077 return 0; 1074 return 0;
1078} 1075}
1079 1076
1080static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr) 1077static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
1081{ 1078{
1082 cgs_enter_safe_mode(hwmgr->device, true); 1079 struct amdgpu_device *adev = hwmgr->adev;
1080
1081 adev->gfx.rlc.funcs->enter_safe_mode(adev);
1083 1082
1084 vega10_didt_set_mask(hwmgr, false); 1083 vega10_didt_set_mask(hwmgr, false);
1085 1084
1086 cgs_enter_safe_mode(hwmgr->device, false); 1085 adev->gfx.rlc.funcs->exit_safe_mode(adev);
1087 1086
1088 return 0; 1087 return 0;
1089} 1088}
1090 1089
1091static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) 1090static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1092{ 1091{
1092 struct amdgpu_device *adev = hwmgr->adev;
1093 int result; 1093 int result;
1094 uint32_t num_se = 0; 1094 uint32_t num_se = 0;
1095 uint32_t count, data; 1095 uint32_t count, data;
1096 struct amdgpu_device *adev = hwmgr->adev;
1097 uint32_t reg;
1098 1096
1099 num_se = adev->gfx.config.max_shader_engines; 1097 num_se = adev->gfx.config.max_shader_engines;
1100 1098
1101 cgs_enter_safe_mode(hwmgr->device, true); 1099 adev->gfx.rlc.funcs->enter_safe_mode(adev);
1102 1100
1103 vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); 1101 vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
1104 1102
1105 cgs_lock_grbm_idx(hwmgr->device, true); 1103 mutex_lock(&adev->grbm_idx_mutex);
1106 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
1107 for (count = 0; count < num_se; count++) { 1104 for (count = 0; count < num_se; count++) {
1108 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); 1105 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1109 cgs_write_register(hwmgr->device, reg, data); 1106 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1110 result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); 1107 result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
1111 result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); 1108 result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
1112 result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT); 1109 result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1115,12 +1112,12 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1115 if (0 != result) 1112 if (0 != result)
1116 break; 1113 break;
1117 } 1114 }
1118 cgs_write_register(hwmgr->device, reg, 0xE0000000); 1115 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
1119 cgs_lock_grbm_idx(hwmgr->device, false); 1116 mutex_unlock(&adev->grbm_idx_mutex);
1120 1117
1121 vega10_didt_set_mask(hwmgr, true); 1118 vega10_didt_set_mask(hwmgr, true);
1122 1119
1123 cgs_enter_safe_mode(hwmgr->device, false); 1120 adev->gfx.rlc.funcs->exit_safe_mode(adev);
1124 1121
1125 vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10); 1122 vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
1126 1123
@@ -1137,13 +1134,14 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1137 1134
1138static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) 1135static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1139{ 1136{
1137 struct amdgpu_device *adev = hwmgr->adev;
1140 uint32_t data; 1138 uint32_t data;
1141 1139
1142 cgs_enter_safe_mode(hwmgr->device, true); 1140 adev->gfx.rlc.funcs->enter_safe_mode(adev);
1143 1141
1144 vega10_didt_set_mask(hwmgr, false); 1142 vega10_didt_set_mask(hwmgr, false);
1145 1143
1146 cgs_enter_safe_mode(hwmgr->device, false); 1144 adev->gfx.rlc.funcs->exit_safe_mode(adev);
1147 1145
1148 if (PP_CAP(PHM_PlatformCaps_GCEDC)) { 1146 if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
1149 data = 0x00000000; 1147 data = 0x00000000;
@@ -1158,15 +1156,14 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1158 1156
1159static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) 1157static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
1160{ 1158{
1161 uint32_t reg; 1159 struct amdgpu_device *adev = hwmgr->adev;
1162 int result; 1160 int result;
1163 1161
1164 cgs_enter_safe_mode(hwmgr->device, true); 1162 adev->gfx.rlc.funcs->enter_safe_mode(adev);
1165 1163
1166 cgs_lock_grbm_idx(hwmgr->device, true); 1164 mutex_lock(&adev->grbm_idx_mutex);
1167 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); 1165 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
1168 cgs_write_register(hwmgr->device, reg, 0xE0000000); 1166 mutex_unlock(&adev->grbm_idx_mutex);
1169 cgs_lock_grbm_idx(hwmgr->device, false);
1170 1167
1171 result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); 1168 result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
1172 result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT); 1169 result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1175,7 +1172,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
1175 1172
1176 vega10_didt_set_mask(hwmgr, false); 1173 vega10_didt_set_mask(hwmgr, false);
1177 1174
1178 cgs_enter_safe_mode(hwmgr->device, false); 1175 adev->gfx.rlc.funcs->exit_safe_mode(adev);
1179 1176
1180 return 0; 1177 return 0;
1181} 1178}
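Two mechanical substitutions run through this whole file: the RLC safe-mode bracketing replaces cgs_enter_safe_mode(), and raw register traffic moves from CGS offset lookups to the SOC15 helpers, with the GRBM index serialized by adev's own mutex. A minimal before/after sketch of the register pattern (macro names from soc15_common.h and the generated GC register headers; illustrative only):

	uint32_t v;

	/* old: resolve the offset through CGS, then poke it
	 * reg = soc15_get_register_offset(GC_HWID, 0,
	 *		mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
	 * v = cgs_read_register(hwmgr->device, reg);
	 */

	/* new: hwip block, instance and register resolve inside the macro */
	v = RREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX);
	v = REG_SET_FIELD(v, GRBM_GFX_INDEX, SE_INDEX, 0);
	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, v);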
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index c61d0744860d..0768d259c07c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -52,7 +52,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
 
 	if (!table_address) {
 		table_address = (ATOM_Vega10_POWERPLAYTABLE *)
-				cgs_atom_get_data_table(hwmgr->device, index,
+				smu_atom_get_data_table(hwmgr->adev, index,
 						&size, &frev, &crev);
 
 		hwmgr->soft_pp_table = table_address;	/*Cache the result in RAM.*/
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index 9f18226a56ea..aa044c1955fe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -25,7 +25,7 @@
25#include "vega10_hwmgr.h" 25#include "vega10_hwmgr.h"
26#include "vega10_ppsmc.h" 26#include "vega10_ppsmc.h"
27#include "vega10_inc.h" 27#include "vega10_inc.h"
28#include "pp_soc15.h" 28#include "soc15_common.h"
29#include "pp_debug.h" 29#include "pp_debug.h"
30 30
31static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) 31static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
@@ -89,6 +89,7 @@ int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
89 89
90int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) 90int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
91{ 91{
92 struct amdgpu_device *adev = hwmgr->adev;
92 struct vega10_hwmgr *data = hwmgr->backend; 93 struct vega10_hwmgr *data = hwmgr->backend;
93 uint32_t tach_period; 94 uint32_t tach_period;
94 uint32_t crystal_clock_freq; 95 uint32_t crystal_clock_freq;
@@ -100,10 +101,8 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
100 if (data->smu_features[GNLD_FAN_CONTROL].supported) { 101 if (data->smu_features[GNLD_FAN_CONTROL].supported) {
101 result = vega10_get_current_rpm(hwmgr, speed); 102 result = vega10_get_current_rpm(hwmgr, speed);
102 } else { 103 } else {
103 uint32_t reg = soc15_get_register_offset(THM_HWID, 0,
104 mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
105 tach_period = 104 tach_period =
106 CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), 105 REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS),
107 CG_TACH_STATUS, 106 CG_TACH_STATUS,
108 TACH_PERIOD); 107 TACH_PERIOD);
109 108
@@ -127,26 +126,23 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
127*/ 126*/
128int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) 127int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
129{ 128{
130 uint32_t reg; 129 struct amdgpu_device *adev = hwmgr->adev;
131
132 reg = soc15_get_register_offset(THM_HWID, 0,
133 mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
134 130
135 if (hwmgr->fan_ctrl_is_in_default_mode) { 131 if (hwmgr->fan_ctrl_is_in_default_mode) {
136 hwmgr->fan_ctrl_default_mode = 132 hwmgr->fan_ctrl_default_mode =
137 CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), 133 REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
138 CG_FDO_CTRL2, FDO_PWM_MODE); 134 CG_FDO_CTRL2, FDO_PWM_MODE);
139 hwmgr->tmin = 135 hwmgr->tmin =
140 CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), 136 REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
141 CG_FDO_CTRL2, TMIN); 137 CG_FDO_CTRL2, TMIN);
142 hwmgr->fan_ctrl_is_in_default_mode = false; 138 hwmgr->fan_ctrl_is_in_default_mode = false;
143 } 139 }
144 140
145 cgs_write_register(hwmgr->device, reg, 141 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
146 CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), 142 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
147 CG_FDO_CTRL2, TMIN, 0)); 143 CG_FDO_CTRL2, TMIN, 0));
148 cgs_write_register(hwmgr->device, reg, 144 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
149 CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), 145 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
150 CG_FDO_CTRL2, FDO_PWM_MODE, mode)); 146 CG_FDO_CTRL2, FDO_PWM_MODE, mode));
151 147
152 return 0; 148 return 0;
@@ -159,18 +155,15 @@ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
159*/ 155*/
160int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) 156int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
161{ 157{
162 uint32_t reg; 158 struct amdgpu_device *adev = hwmgr->adev;
163
164 reg = soc15_get_register_offset(THM_HWID, 0,
165 mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
166 159
167 if (!hwmgr->fan_ctrl_is_in_default_mode) { 160 if (!hwmgr->fan_ctrl_is_in_default_mode) {
168 cgs_write_register(hwmgr->device, reg, 161 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
169 CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), 162 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
170 CG_FDO_CTRL2, FDO_PWM_MODE, 163 CG_FDO_CTRL2, FDO_PWM_MODE,
171 hwmgr->fan_ctrl_default_mode)); 164 hwmgr->fan_ctrl_default_mode));
172 cgs_write_register(hwmgr->device, reg, 165 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
173 CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), 166 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
174 CG_FDO_CTRL2, TMIN, 167 CG_FDO_CTRL2, TMIN,
175 hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT)); 168 hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT));
176 hwmgr->fan_ctrl_is_in_default_mode = true; 169 hwmgr->fan_ctrl_is_in_default_mode = true;
@@ -257,10 +250,10 @@ int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
257int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, 250int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
258 uint32_t speed) 251 uint32_t speed)
259{ 252{
253 struct amdgpu_device *adev = hwmgr->adev;
260 uint32_t duty100; 254 uint32_t duty100;
261 uint32_t duty; 255 uint32_t duty;
262 uint64_t tmp64; 256 uint64_t tmp64;
263 uint32_t reg;
264 257
265 if (hwmgr->thermal_controller.fanInfo.bNoFan) 258 if (hwmgr->thermal_controller.fanInfo.bNoFan)
266 return 0; 259 return 0;
@@ -271,10 +264,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
271 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 264 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
272 vega10_fan_ctrl_stop_smc_fan_control(hwmgr); 265 vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
273 266
274 reg = soc15_get_register_offset(THM_HWID, 0, 267 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
275 mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1);
276
277 duty100 = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
278 CG_FDO_CTRL1, FMAX_DUTY100); 268 CG_FDO_CTRL1, FMAX_DUTY100);
279 269
280 if (duty100 == 0) 270 if (duty100 == 0)
@@ -284,10 +274,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
284 do_div(tmp64, 100); 274 do_div(tmp64, 100);
285 duty = (uint32_t)tmp64; 275 duty = (uint32_t)tmp64;
286 276
287 reg = soc15_get_register_offset(THM_HWID, 0, 277 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
288 mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0); 278 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
289 cgs_write_register(hwmgr->device, reg,
290 CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
291 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); 279 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
292 280
293 return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); 281 return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
@@ -317,10 +305,10 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
317*/ 305*/
318int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) 306int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
319{ 307{
308 struct amdgpu_device *adev = hwmgr->adev;
320 uint32_t tach_period; 309 uint32_t tach_period;
321 uint32_t crystal_clock_freq; 310 uint32_t crystal_clock_freq;
322 int result = 0; 311 int result = 0;
323 uint32_t reg;
324 312
325 if (hwmgr->thermal_controller.fanInfo.bNoFan || 313 if (hwmgr->thermal_controller.fanInfo.bNoFan ||
326 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || 314 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
@@ -333,10 +321,8 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
333 if (!result) { 321 if (!result) {
334 crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); 322 crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
335 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 323 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
336 reg = soc15_get_register_offset(THM_HWID, 0, 324 WREG32_SOC15(THM, 0, mmCG_TACH_STATUS,
337 mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); 325 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS),
338 cgs_write_register(hwmgr->device, reg,
339 CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
340 CG_TACH_STATUS, TACH_PERIOD, 326 CG_TACH_STATUS, TACH_PERIOD,
341 tach_period)); 327 tach_period));
342 } 328 }
@@ -350,13 +336,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
350*/ 336*/
351int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) 337int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
352{ 338{
339 struct amdgpu_device *adev = hwmgr->adev;
353 int temp; 340 int temp;
354 uint32_t reg;
355 341
356 reg = soc15_get_register_offset(THM_HWID, 0, 342 temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
357 mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
358
359 temp = cgs_read_register(hwmgr->device, reg);
360 343
361 temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> 344 temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
362 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; 345 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -379,11 +362,12 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
379static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, 362static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
380 struct PP_TemperatureRange *range) 363 struct PP_TemperatureRange *range)
381{ 364{
365 struct amdgpu_device *adev = hwmgr->adev;
382 int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * 366 int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
383 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 367 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
384 int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * 368 int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
385 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 369 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
386 uint32_t val, reg; 370 uint32_t val;
387 371
388 if (low < range->min) 372 if (low < range->min)
389 low = range->min; 373 low = range->min;
@@ -393,20 +377,17 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
393 if (low > high) 377 if (low > high)
394 return -EINVAL; 378 return -EINVAL;
395 379
396 reg = soc15_get_register_offset(THM_HWID, 0, 380 val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
397 mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL);
398
399 val = cgs_read_register(hwmgr->device, reg);
400 381
401 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); 382 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
402 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); 383 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
403 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); 384 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
404 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); 385 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
405 val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) & 386 val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) &
406 (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) & 387 (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) &
407 (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK); 388 (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
408 389
409 cgs_write_register(hwmgr->device, reg, val); 390 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
410 391
411 return 0; 392 return 0;
412} 393}
@@ -418,21 +399,17 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
418*/ 399*/
419static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr) 400static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
420{ 401{
421 uint32_t reg; 402 struct amdgpu_device *adev = hwmgr->adev;
422 403
423 if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { 404 if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
424 reg = soc15_get_register_offset(THM_HWID, 0, 405 WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
425 mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL); 406 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
426 cgs_write_register(hwmgr->device, reg,
427 CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
428 CG_TACH_CTRL, EDGE_PER_REV, 407 CG_TACH_CTRL, EDGE_PER_REV,
429 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1)); 408 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1));
430 } 409 }
431 410
432 reg = soc15_get_register_offset(THM_HWID, 0, 411 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
433 mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); 412 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
434 cgs_write_register(hwmgr->device, reg,
435 CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
436 CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28)); 413 CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28));
437 414
438 return 0; 415 return 0;
@@ -445,9 +422,9 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
445*/ 422*/
446static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) 423static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
447{ 424{
425 struct amdgpu_device *adev = hwmgr->adev;
448 struct vega10_hwmgr *data = hwmgr->backend; 426 struct vega10_hwmgr *data = hwmgr->backend;
449 uint32_t val = 0; 427 uint32_t val = 0;
450 uint32_t reg;
451 428
452 if (data->smu_features[GNLD_FW_CTF].supported) { 429 if (data->smu_features[GNLD_FW_CTF].supported) {
453 if (data->smu_features[GNLD_FW_CTF].enabled) 430 if (data->smu_features[GNLD_FW_CTF].enabled)
@@ -465,8 +442,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
465 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); 442 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
466 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); 443 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
467 444
468 reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA); 445 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
469 cgs_write_register(hwmgr->device, reg, val);
470 446
471 return 0; 447 return 0;
472} 448}
@@ -477,8 +453,8 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
477*/ 453*/
478int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr) 454int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
479{ 455{
456 struct amdgpu_device *adev = hwmgr->adev;
480 struct vega10_hwmgr *data = hwmgr->backend; 457 struct vega10_hwmgr *data = hwmgr->backend;
481 uint32_t reg;
482 458
483 if (data->smu_features[GNLD_FW_CTF].supported) { 459 if (data->smu_features[GNLD_FW_CTF].supported) {
484 if (!data->smu_features[GNLD_FW_CTF].enabled) 460 if (!data->smu_features[GNLD_FW_CTF].enabled)
@@ -493,8 +469,7 @@ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
493 data->smu_features[GNLD_FW_CTF].enabled = false; 469 data->smu_features[GNLD_FW_CTF].enabled = false;
494 } 470 }
495 471
496 reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA); 472 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
497 cgs_write_register(hwmgr->device, reg, 0);
498 473
499 return 0; 474 return 0;
500} 475}
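A unit convention worth keeping in mind when reading the fan paths above: amdgpu_asic_get_xclk() reports the reference clock in 10 kHz units, and the set path scales the target RPM by 8 tach edges per revolution. A minimal sketch of the relation, assuming the same scaling holds in both directions:

	/* program a period for a target RPM, then recover RPM from a period */
	uint32_t tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	uint32_t rpm = 60 * crystal_clock_freq * 10000 / (8 * tach_period);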
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 200de46bd06b..782e2098824d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -34,7 +34,6 @@
34#include "atomfirmware.h" 34#include "atomfirmware.h"
35#include "cgs_common.h" 35#include "cgs_common.h"
36#include "vega12_inc.h" 36#include "vega12_inc.h"
37#include "pp_soc15.h"
38#include "pppcielanes.h" 37#include "pppcielanes.h"
39#include "vega12_hwmgr.h" 38#include "vega12_hwmgr.h"
40#include "vega12_processpptables.h" 39#include "vega12_processpptables.h"
@@ -546,6 +545,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -565,6 +565,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -585,6 +586,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -605,6 +607,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -625,6 +628,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -645,6 +649,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -666,6 +671,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -686,6 +692,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -706,6 +713,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -726,6 +734,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -992,15 +1001,55 @@ static uint32_t vega12_find_highest_dpm_level(
 
 static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 {
+	struct vega12_hwmgr *data = hwmgr->backend;
+	if (data->smc_state_table.gfx_boot_level !=
+			data->dpm_table.gfx_table.dpm_state.soft_min_level) {
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetSoftMinByFreq,
+			PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
+		data->dpm_table.gfx_table.dpm_state.soft_min_level =
+				data->smc_state_table.gfx_boot_level;
+	}
+
+	if (data->smc_state_table.mem_boot_level !=
+			data->dpm_table.mem_table.dpm_state.soft_min_level) {
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetSoftMinByFreq,
+			PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
+		data->dpm_table.mem_table.dpm_state.soft_min_level =
+				data->smc_state_table.mem_boot_level;
+	}
+
 	return 0;
+
 }
 
 static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 {
+	struct vega12_hwmgr *data = hwmgr->backend;
+	if (data->smc_state_table.gfx_max_level !=
+			data->dpm_table.gfx_table.dpm_state.soft_max_level) {
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetSoftMaxByFreq,
+			/* plus the vale by 1 to align the resolution */
+			PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
+		data->dpm_table.gfx_table.dpm_state.soft_max_level =
+				data->smc_state_table.gfx_max_level;
+	}
+
+	if (data->smc_state_table.mem_max_level !=
+			data->dpm_table.mem_table.dpm_state.soft_max_level) {
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetSoftMaxByFreq,
+			/* plus the vale by 1 to align the resolution */
+			PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
+		data->dpm_table.mem_table.dpm_state.soft_max_level =
+				data->smc_state_table.mem_max_level;
+	}
+
 	return 0;
 }
 
-
 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
 	struct vega12_hwmgr *data =
@@ -1064,8 +1113,7 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
 	return (mem_clk * 100);
 }
 
-static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr,
-		struct pp_gpu_power *query)
+static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
 {
 #if 0
 	uint32_t value;
@@ -1077,7 +1125,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr,
 
 	vega12_read_arg_from_smc(hwmgr, &value);
 	/* power value is an integer */
-	query->average_gpu_power = value << 8;
+	*query = value << 8;
 #endif
 	return 0;
 }
@@ -1186,12 +1234,8 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_GPU_POWER:
-		if (*size < sizeof(struct pp_gpu_power))
-			ret = -EINVAL;
-		else {
-			*size = sizeof(struct pp_gpu_power);
-			ret = vega12_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
-		}
+		ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
+
 		break;
 	default:
 		ret = -EINVAL;
@@ -1260,23 +1304,18 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
 {
 	struct vega12_hwmgr *data =
 			(struct vega12_hwmgr *)(hwmgr->backend);
-	uint32_t num_active_disps = 0;
-	struct cgs_display_info info = {0};
 	struct PP_Clocks min_clocks = {0};
 	struct pp_display_clock_request clock_req;
 	uint32_t clk_request;
 
-	info.mode_info = NULL;
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	num_active_disps = info.display_count;
-	if (num_active_disps > 1)
+	if (hwmgr->display_config->num_display > 1)
 		vega12_notify_smc_display_change(hwmgr, false);
 	else
 		vega12_notify_smc_display_change(hwmgr, true);
 
-	min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
-	min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
-	min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
+	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
+	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
 
 	if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
 		clock_req.clock_type = amd_pp_dcef_clock;
@@ -1832,9 +1871,7 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 	int result = 0;
-	uint32_t num_turned_on_displays = 1;
 	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
-	struct cgs_display_info info = {0};
 
 	if ((data->water_marks_bitmap & WaterMarksExist) &&
 			!(data->water_marks_bitmap & WaterMarksLoaded)) {
@@ -1846,12 +1883,9 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 
 	if ((data->water_marks_bitmap & WaterMarksExist) &&
 			data->smu_features[GNLD_DPM_DCEFCLK].supported &&
-			data->smu_features[GNLD_DPM_SOCCLK].supported) {
-		cgs_get_active_displays_info(hwmgr->device, &info);
-		num_turned_on_displays = info.display_count;
+			data->smu_features[GNLD_DPM_SOCCLK].supported)
 		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
-	}
+			PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
 
 	return result;
 }
@@ -1894,15 +1928,12 @@ vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 	bool is_update_required = false;
-	struct cgs_display_info info = {0, 0, NULL};
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
 
-	if (data->display_timing.num_existing_displays != info.display_count)
+	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
 		is_update_required = true;
 
 	if (data->registry_data.gfx_clk_deep_sleep_support) {
-		if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
+		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
 			is_update_required = true;
 	}
 
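The SetSoftMin/MaxByFreq messages introduced in this file pack both the clock domain and the target into one 32-bit argument: the PPCLK_* domain ID in the upper half-word and the frequency in MHz in the lower (the +1 on the max path compensates for the SMU's frequency resolution, per the in-line comment). A minimal sketch of the packing, assuming the frequency fits in 16 bits:

	uint32_t freq_mhz = 1350;	/* illustrative target */
	uint32_t arg = (PPCLK_GFXCLK << 16) | (freq_mhz & 0xFFFF);

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetSoftMinByFreq, arg);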
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index bc98b1df3b65..e81ded1ec198 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -33,7 +33,7 @@
 #define WaterMarksExist 1
 #define WaterMarksLoaded 2
 
-#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 8
+#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 16
 #define VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS 8
 #define VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8
 #define VG12_PSUEDO_NUM_UCLK_DPM_LEVELS 4
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index b34113f45904..888ddca902d8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -51,7 +51,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
 
 	if (!table_address) {
 		table_address = (ATOM_Vega12_POWERPLAYTABLE *)
-			cgs_atom_get_data_table(hwmgr->device, index,
+			smu_atom_get_data_table(hwmgr->adev, index,
 					&size, &frev, &crev);
 
 		hwmgr->soft_pp_table = table_address;	/*Cache the result in RAM.*/
@@ -224,6 +224,11 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
 	ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
 	ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
 
+	/* 0xFFFF will disable the ACG feature */
+	if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
+		ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
+		ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
+	}
 
 	return 0;
 }
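The 0xFFFF sentinel keeps the SMU from ever crossing into ACG operation when the driver's feature mask has the ACG bit cleared, so the thresholds act as a second disable on top of the feature gate itself; on shipping kernels that mask is normally steered from userspace via the amdgpu.ppfeaturemask module parameter.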
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index df0fa815cd6e..cfd9e6ccb790 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -26,7 +26,7 @@
26#include "vega12_smumgr.h" 26#include "vega12_smumgr.h"
27#include "vega12_ppsmc.h" 27#include "vega12_ppsmc.h"
28#include "vega12_inc.h" 28#include "vega12_inc.h"
29#include "pp_soc15.h" 29#include "soc15_common.h"
30#include "pp_debug.h" 30#include "pp_debug.h"
31 31
32static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) 32static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
@@ -147,13 +147,10 @@ int vega12_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
147*/ 147*/
148int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) 148int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
149{ 149{
150 struct amdgpu_device *adev = hwmgr->adev;
150 int temp = 0; 151 int temp = 0;
151 uint32_t reg;
152 152
153 reg = soc15_get_register_offset(THM_HWID, 0, 153 temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
154 mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
155
156 temp = cgs_read_register(hwmgr->device, reg);
157 154
158 temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> 155 temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
159 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; 156 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -175,11 +172,12 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
175static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, 172static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
176 struct PP_TemperatureRange *range) 173 struct PP_TemperatureRange *range)
177{ 174{
175 struct amdgpu_device *adev = hwmgr->adev;
178 int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP * 176 int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
179 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 177 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
180 int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP * 178 int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP *
181 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 179 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
182 uint32_t val, reg; 180 uint32_t val;
183 181
184 if (low < range->min) 182 if (low < range->min)
185 low = range->min; 183 low = range->min;
@@ -189,18 +187,15 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 	if (low > high)
 		return -EINVAL;

-	reg = soc15_get_register_offset(THM_HWID, 0,
-			mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL);
-
-	val = cgs_read_register(hwmgr->device, reg);
+	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);

-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
 	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

-	cgs_write_register(hwmgr->device, reg, val);
+	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

 	return 0;
 }
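The CGS_REG_SET_FIELD to REG_SET_FIELD change swaps one read-modify-write helper for another; both clear a field's bits and OR in the new value. A simplified stand-in macro showing the mechanics (the real macro derives the mask and shift from generated register headers):

#include <stdint.h>

/* Simplified stand-in for REG_SET_FIELD: clear the field, insert the value. */
#define FIELD_SET(val, mask, shift, fieldval) \
	(((val) & ~(mask)) | (((uint32_t)(fieldval) << (shift)) & (mask)))

static uint32_t set_hypothetical_field(uint32_t reg)
{
	/* hypothetical 4-bit field at bits [7:4], set to 5 */
	return FIELD_SET(reg, 0x000000f0u, 4, 5);
}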
@@ -212,15 +207,14 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 */
 static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t val = 0;
-	uint32_t reg;

 	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
 	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
 	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);

-	reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
-	cgs_write_register(hwmgr->device, reg, val);
+	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

 	return 0;
 }
@@ -231,10 +225,9 @@ static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr)
 */
 int vega12_thermal_disable_alert(struct pp_hwmgr *hwmgr)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;

-	reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 8b78bbecd1bc..9bb87857a20f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -377,11 +377,7 @@ struct phm_clocks {
 #define DPMTABLE_UPDATE_SCLK        0x00000004
 #define DPMTABLE_UPDATE_MCLK        0x00000008
 #define DPMTABLE_OD_UPDATE_VDDC     0x00000010
-
-/* To determine if sclk and mclk are in overdrive state */
-#define SCLK_OVERDRIVE_ENABLED      0x00000001
-#define MCLK_OVERDRIVE_ENABLED      0x00000002
-#define VDDC_OVERDRIVE_ENABLED      0x00000010
+#define DPMTABLE_UPDATE_SOCCLK      0x00000020

 struct phm_odn_performance_level {
 	uint32_t clock;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 17f811d181c8..3c321c7d9626 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -38,6 +38,8 @@ struct phm_fan_speed_info;
 struct pp_atomctrl_voltage_table;

 #define VOLTAGE_SCALE  4
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100

 enum DISPLAY_GAP {
 	DISPLAY_GAP_VBLANK_OR_WM = 0,   /* Wait for vblank or MCHG watermark. */
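These two constants express the 6.25 mV step of an SVI2 VID code as the integer ratio 625/100; hoisting them into hwmgr.h lets the per-ASIC smumgr copies below be deleted. A sketch of the conversion they are typically used for (the exact formula varies by voltage controller):

#include <stdint.h>

#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100

/* Multiply before dividing so only the final sub-millivolt
 * fraction of the 6.25 mV step is truncated. */
static uint32_t vid_to_millivolts(uint8_t vid)
{
	return ((uint32_t)vid * VOLTAGE_VID_OFFSET_SCALE1) /
			VOLTAGE_VID_OFFSET_SCALE2;
}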
@@ -64,24 +66,6 @@ struct vi_dpm_table {
 #define PCIE_PERF_REQ_GEN2         3
 #define PCIE_PERF_REQ_GEN3         4

-enum PP_FEATURE_MASK {
-	PP_SCLK_DPM_MASK = 0x1,
-	PP_MCLK_DPM_MASK = 0x2,
-	PP_PCIE_DPM_MASK = 0x4,
-	PP_SCLK_DEEP_SLEEP_MASK = 0x8,
-	PP_POWER_CONTAINMENT_MASK = 0x10,
-	PP_UVD_HANDSHAKE_MASK = 0x20,
-	PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
-	PP_VBI_TIME_SUPPORT_MASK = 0x80,
-	PP_ULV_MASK = 0x100,
-	PP_ENABLE_GFX_CG_THRU_SMU = 0x200,
-	PP_CLOCK_STRETCH_MASK = 0x400,
-	PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800,
-	PP_SOCCLK_DPM_MASK = 0x1000,
-	PP_DCEFCLK_DPM_MASK = 0x2000,
-	PP_OVERDRIVE_MASK = 0x4000,
-};
-
 enum PHM_BackEnd_Magic {
 	PHM_Dummy_Magic       = 0xAA5555AA,
 	PHM_RV770_Magic       = 0xDCBAABCD,
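The removed enum defines the single-bit feature flags tested against hwmgr->feature_mask (as the new ACG guard above does); the deletion here presumably pairs with a relocation to a shared header elsewhere in the series. A minimal sketch of how such a mask is consumed, with bit values copied from the removed enum:

#include <stdbool.h>
#include <stdint.h>

#define PP_SCLK_DPM_MASK	0x1
#define PP_OVERDRIVE_MASK	0x4000

/* Each feature is a single bit, so enables are plain bitwise tests. */
static bool pp_feature_enabled(uint32_t feature_mask, uint32_t feature_bit)
{
	return (feature_mask & feature_bit) != 0;
}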
@@ -312,6 +296,7 @@ struct pp_hwmgr_func {
 	int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr,
 			struct pp_display_clock_request *clock);
 	int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
+	int (*gfx_off_control)(struct pp_hwmgr *hwmgr, bool enable);
 	int (*power_off_asic)(struct pp_hwmgr *hwmgr);
 	int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
 	int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
@@ -341,6 +326,7 @@ struct pp_hwmgr_func {
 			long *input, uint32_t size);
 	int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
 	int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
+	int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
 };

 struct pp_table_func {
@@ -718,6 +704,7 @@ struct pp_hwmgr {
 	uint32_t chip_family;
 	uint32_t chip_id;
 	uint32_t smu_version;
+	bool not_vf;
 	bool pm_en;
 	struct mutex smu_lock;

@@ -764,7 +751,7 @@ struct pp_hwmgr {
 	struct pp_power_state *request_ps;
 	struct pp_power_state *boot_ps;
 	struct pp_power_state *uvd_ps;
-	struct amd_pp_display_configuration display_config;
+	const struct amd_pp_display_configuration *display_config;
 	uint32_t feature_mask;
 	bool avfs_supported;
 	/* UMD Pstate */
@@ -782,10 +769,13 @@ struct pp_hwmgr {
 };

 int hwmgr_early_init(struct pp_hwmgr *hwmgr);
+int hwmgr_sw_init(struct pp_hwmgr *hwmgr);
+int hwmgr_sw_fini(struct pp_hwmgr *hwmgr);
 int hwmgr_hw_init(struct pp_hwmgr *hwmgr);
 int hwmgr_hw_fini(struct pp_hwmgr *hwmgr);
-int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr);
-int hwmgr_hw_resume(struct pp_hwmgr *hwmgr);
+int hwmgr_suspend(struct pp_hwmgr *hwmgr);
+int hwmgr_resume(struct pp_hwmgr *hwmgr);
+
 int hwmgr_handle_task(struct pp_hwmgr *hwmgr,
 				enum amd_pp_task task_id,
 				enum amd_pm_state_type *user_state);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 426bff2aad2b..a2991fa2e6f8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -75,13 +75,15 @@
 #define PPSMC_MSG_GetMinGfxclkFrequency         0x2C
 #define PPSMC_MSG_GetMaxGfxclkFrequency         0x2D
 #define PPSMC_MSG_SoftReset                     0x2E
+#define PPSMC_MSG_SetGfxCGPG                    0x2F
 #define PPSMC_MSG_SetSoftMaxGfxClk              0x30
 #define PPSMC_MSG_SetHardMinGfxClk              0x31
 #define PPSMC_MSG_SetSoftMaxSocclkByFreq        0x32
 #define PPSMC_MSG_SetSoftMaxFclkByFreq          0x33
 #define PPSMC_MSG_SetSoftMaxVcn                 0x34
 #define PPSMC_MSG_PowerGateMmHub                0x35
-#define PPSMC_Message_Count                     0x36
+#define PPSMC_MSG_SetRccPfcPmeRestoreRegister   0x36
+#define PPSMC_Message_Count                     0x37


 typedef uint16_t PPSMC_Result;
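Adding message 0x36 pushes PPSMC_Message_Count to 0x37; the count doubles as an upper bound when validating message IDs before dispatch. A hypothetical guard illustrating that check (the real path writes the ID into the SMU mailbox):

#include <stdint.h>

#define PPSMC_Message_Count 0x37

/* Hypothetical: reject IDs this firmware interface does not define. */
static int rv_validate_msg(uint16_t msg)
{
	return (msg < PPSMC_Message_Count) ? 0 : -1;
}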
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75.h b/drivers/gpu/drm/amd/powerplay/inc/smu75.h
new file mode 100644
index 000000000000..771523001533
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu75.h
@@ -0,0 +1,760 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef SMU75_H
24#define SMU75_H
25
26#pragma pack(push, 1)
27
28typedef struct {
29 uint32_t high;
30 uint32_t low;
31} data_64_t;
32
33typedef struct {
34 data_64_t high;
35 data_64_t low;
36} data_128_t;
37
38#define SMU__DGPU_ONLY
39
40#define SMU__NUM_SCLK_DPM_STATE 8
41#define SMU__NUM_MCLK_DPM_LEVELS 4
42#define SMU__NUM_LCLK_DPM_LEVELS 8
43#define SMU__NUM_PCIE_DPM_LEVELS 8
44
45#define SMU7_CONTEXT_ID_SMC 1
46#define SMU7_CONTEXT_ID_VBIOS 2
47
48#define SMU75_MAX_LEVELS_VDDC 16
49#define SMU75_MAX_LEVELS_VDDGFX 16
50#define SMU75_MAX_LEVELS_VDDCI 8
51#define SMU75_MAX_LEVELS_MVDD 4
52
53#define SMU_MAX_SMIO_LEVELS 4
54
55#define SMU75_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE
56#define SMU75_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS
57#define SMU75_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS
58#define SMU75_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS
59#define SMU75_MAX_LEVELS_UVD 8
60#define SMU75_MAX_LEVELS_VCE 8
61#define SMU75_MAX_LEVELS_ACP 8
62#define SMU75_MAX_LEVELS_SAMU 8
63#define SMU75_MAX_ENTRIES_SMIO 32
64
65#define DPM_NO_LIMIT 0
66#define DPM_NO_UP 1
67#define DPM_GO_DOWN 2
68#define DPM_GO_UP 3
69
70#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
71#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
72
73#define GPIO_CLAMP_MODE_VRHOT 1
74#define GPIO_CLAMP_MODE_THERM 2
75#define GPIO_CLAMP_MODE_DC 4
76
77#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
78#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
79#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
80#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
81#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
82#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
83#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
84#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
85#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
86#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
87#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
88#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
89#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
90#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
91#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
92#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
93#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
94#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
95#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
96#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
97
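/*
 * Illustration only (not part of this header): each SCRATCH_B field above
 * is a 3-bit DPM index packed into one scratch register, so decoding is a
 * mask-and-shift, e.g. for the current PCIe level:
 */
static inline uint32_t scratch_b_curr_pcie_index(uint32_t scratch_b)
{
	return (scratch_b & SCRATCH_B_CURR_PCIE_INDEX_MASK) >>
			SCRATCH_B_CURR_PCIE_INDEX_SHIFT;
}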
98/* Virtualization Defines */
99#define CG_XDMA_MASK 0x1
100#define CG_XDMA_SHIFT 0
101#define CG_UVD_MASK 0x2
102#define CG_UVD_SHIFT 1
103#define CG_VCE_MASK 0x4
104#define CG_VCE_SHIFT 2
105#define CG_SAMU_MASK 0x8
106#define CG_SAMU_SHIFT 3
107#define CG_GFX_MASK 0x10
108#define CG_GFX_SHIFT 4
109#define CG_SDMA_MASK 0x20
110#define CG_SDMA_SHIFT 5
111#define CG_HDP_MASK 0x40
112#define CG_HDP_SHIFT 6
113#define CG_MC_MASK 0x80
114#define CG_MC_SHIFT 7
115#define CG_DRM_MASK 0x100
116#define CG_DRM_SHIFT 8
117#define CG_ROM_MASK 0x200
118#define CG_ROM_SHIFT 9
119#define CG_BIF_MASK 0x400
120#define CG_BIF_SHIFT 10
121
122#if defined SMU__DGPU_ONLY
123#define SMU75_DTE_ITERATIONS 5
124#define SMU75_DTE_SOURCES 3
125#define SMU75_DTE_SINKS 1
126#define SMU75_NUM_CPU_TES 0
127#define SMU75_NUM_GPU_TES 1
128#define SMU75_NUM_NON_TES 2
129#define SMU75_DTE_FAN_SCALAR_MIN 0x100
130#define SMU75_DTE_FAN_SCALAR_MAX 0x166
131#define SMU75_DTE_FAN_TEMP_MAX 93
132#define SMU75_DTE_FAN_TEMP_MIN 83
133#endif
134#define SMU75_THERMAL_INPUT_LOOP_COUNT 2
135#define SMU75_THERMAL_CLAMP_MODE_COUNT 2
136
137#define EXP_M1_1 93
138#define EXP_M2_1 195759
139#define EXP_B_1 111176531
140
141#define EXP_M1_2 67
142#define EXP_M2_2 153720
143#define EXP_B_2 94415767
144
145#define EXP_M1_3 48
146#define EXP_M2_3 119796
147#define EXP_B_3 79195279
148
149#define EXP_M1_4 550
150#define EXP_M2_4 1484190
151#define EXP_B_4 1051432828
152
153#define EXP_M1_5 394
154#define EXP_M2_5 1143049
155#define EXP_B_5 864288432
156
157struct SMU7_HystController_Data {
158 uint16_t waterfall_up;
159 uint16_t waterfall_down;
160 uint16_t waterfall_limit;
161 uint16_t release_cnt;
162 uint16_t release_limit;
163 uint16_t spare;
164};
165
166typedef struct SMU7_HystController_Data SMU7_HystController_Data;
167
168struct SMU75_PIDController {
169 uint32_t Ki;
170 int32_t LFWindupUpperLim;
171 int32_t LFWindupLowerLim;
172 uint32_t StatePrecision;
173 uint32_t LfPrecision;
174 uint32_t LfOffset;
175 uint32_t MaxState;
176 uint32_t MaxLfFraction;
177 uint32_t StateShift;
178};
179
180typedef struct SMU75_PIDController SMU75_PIDController;
181
182struct SMU7_LocalDpmScoreboard {
183 uint32_t PercentageBusy;
184
185 int32_t PIDError;
186 int32_t PIDIntegral;
187 int32_t PIDOutput;
188
189 uint32_t SigmaDeltaAccum;
190 uint32_t SigmaDeltaOutput;
191 uint32_t SigmaDeltaLevel;
192
193 uint32_t UtilizationSetpoint;
194
195 uint8_t TdpClampMode;
196 uint8_t TdcClampMode;
197 uint8_t ThermClampMode;
198 uint8_t VoltageBusy;
199
200 int8_t CurrLevel;
201 int8_t TargLevel;
202 uint8_t LevelChangeInProgress;
203 uint8_t UpHyst;
204
205 uint8_t DownHyst;
206 uint8_t VoltageDownHyst;
207 uint8_t DpmEnable;
208 uint8_t DpmRunning;
209
210 uint8_t DpmForce;
211 uint8_t DpmForceLevel;
212 uint8_t DisplayWatermark;
213 uint8_t McArbIndex;
214
215 uint32_t MinimumPerfSclk;
216
217 uint8_t AcpiReq;
218 uint8_t AcpiAck;
219 uint8_t GfxClkSlow;
220 uint8_t GpioClampMode;
221
222 uint8_t EnableModeSwitchRLCNotification;
223 uint8_t EnabledLevelsChange;
224 uint8_t DteClampMode;
225 uint8_t FpsClampMode;
226
227 uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_GRAPHICS];
228 uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_GRAPHICS];
229
230 void (*TargetStateCalculator)(uint8_t);
231 void (*SavedTargetStateCalculator)(uint8_t);
232
233 uint16_t AutoDpmInterval;
234 uint16_t AutoDpmRange;
235
236 uint8_t FpsEnabled;
237 uint8_t MaxPerfLevel;
238 uint8_t AllowLowClkInterruptToHost;
239 uint8_t FpsRunning;
240
241 uint32_t MaxAllowedFrequency;
242
243 uint32_t FilteredSclkFrequency;
244 uint32_t LastSclkFrequency;
245 uint32_t FilteredSclkFrequencyCnt;
246
247 uint8_t MinPerfLevel;
248#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
249 uint8_t ScksClampMode;
250 uint8_t padding[2];
251#else
252 uint8_t padding[3];
253#endif
254
255 uint16_t FpsAlpha;
256 uint16_t DeltaTime;
257 uint32_t CurrentFps;
258 uint32_t FilteredFps;
259 uint32_t FrameCount;
260 uint32_t FrameCountLast;
261 uint16_t FpsTargetScalar;
262 uint16_t FpsWaterfallLimitScalar;
263 uint16_t FpsAlphaScalar;
264 uint16_t spare8;
265 SMU7_HystController_Data HystControllerData;
266};
267
268typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
269
270#define SMU7_MAX_VOLTAGE_CLIENTS 12
271
272typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t);
273
274#define VDDC_MASK 0x00007FFF
275#define VDDC_SHIFT 0
276#define VDDCI_MASK 0x3FFF8000
277#define VDDCI_SHIFT 15
278#define PHASES_MASK 0xC0000000
279#define PHASES_SHIFT 30
280
281typedef uint32_t SMU_VoltageLevel;
282
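/*
 * Illustration only (not part of this header): an SMU_VoltageLevel packs
 * VDDC (15 bits), VDDCI (15 bits) and a 2-bit phase count into a single
 * uint32_t, using the masks and shifts defined above.
 */
static inline SMU_VoltageLevel smu_voltage_pack(uint16_t vddc, uint16_t vddci,
		uint8_t phases)
{
	return (((uint32_t)vddc << VDDC_SHIFT) & VDDC_MASK) |
	       (((uint32_t)vddci << VDDCI_SHIFT) & VDDCI_MASK) |
	       (((uint32_t)phases << PHASES_SHIFT) & PHASES_MASK);
}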
283struct SMU7_VoltageScoreboard {
284 SMU_VoltageLevel TargetVoltage;
285 uint16_t MaxVid;
286 uint8_t HighestVidOffset;
287 uint8_t CurrentVidOffset;
288
289 uint16_t CurrentVddc;
290 uint16_t CurrentVddci;
291
292 uint8_t ControllerBusy;
293 uint8_t CurrentVid;
294 uint8_t CurrentVddciVid;
295 uint8_t padding;
296
297 SMU_VoltageLevel RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
298 SMU_VoltageLevel TargetVoltageState;
299 uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
300
301 uint8_t padding2;
302 uint8_t padding3;
303 uint8_t ControllerEnable;
304 uint8_t ControllerRunning;
305 uint16_t CurrentStdVoltageHiSidd;
306 uint16_t CurrentStdVoltageLoSidd;
307 uint8_t OverrideVoltage;
308 uint8_t padding4;
309 uint8_t padding5;
310 uint8_t CurrentPhases;
311
312 VoltageChangeHandler_t ChangeVddc;
313 VoltageChangeHandler_t ChangeVddci;
314 VoltageChangeHandler_t ChangePhase;
315 VoltageChangeHandler_t ChangeMvdd;
316
317 VoltageChangeHandler_t functionLinks[6];
318
319 uint16_t * VddcFollower1;
320 int16_t Driver_OD_RequestedVidOffset1;
321 int16_t Driver_OD_RequestedVidOffset2;
322};
323
324typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
325
326#define SMU7_MAX_PCIE_LINK_SPEEDS 3
327
328struct SMU7_PCIeLinkSpeedScoreboard {
329 uint8_t DpmEnable;
330 uint8_t DpmRunning;
331 uint8_t DpmForce;
332 uint8_t DpmForceLevel;
333
334 uint8_t CurrentLinkSpeed;
335 uint8_t EnabledLevelsChange;
336 uint16_t AutoDpmInterval;
337
338 uint16_t AutoDpmRange;
339 uint16_t AutoDpmCount;
340
341 uint8_t DpmMode;
342 uint8_t AcpiReq;
343 uint8_t AcpiAck;
344 uint8_t CurrentLinkLevel;
345};
346
347typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
348
349#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
350#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
351
352#define SMU7_SCALE_I 7
353#define SMU7_SCALE_R 12
354
355struct SMU7_PowerScoreboard {
356 uint32_t GpuPower;
357
358 uint32_t VddcPower;
359 uint32_t VddcVoltage;
360 uint32_t VddcCurrent;
361
362 uint32_t VddciPower;
363 uint32_t VddciVoltage;
364 uint32_t VddciCurrent;
365
366 uint32_t RocPower;
367
368 uint16_t Telemetry_1_slope;
369 uint16_t Telemetry_2_slope;
370 int32_t Telemetry_1_offset;
371 int32_t Telemetry_2_offset;
372
373 uint8_t MCLK_patch_flag;
374 uint8_t reserved[3];
375};
376
377typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
378
379#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
380#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
381#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
382#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
383#define SMU7_UVD_DPM_CONFIG_MASK 0x10
384#define SMU7_VCE_DPM_CONFIG_MASK 0x20
385#define SMU7_ACP_DPM_CONFIG_MASK 0x40
386#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
387#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
388
389#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
390#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
391#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
392#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
393#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
394#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
395
396struct SMU75_SoftRegisters {
397 uint32_t RefClockFrequency;
398 uint32_t PmTimerPeriod;
399 uint32_t FeatureEnables;
400#if defined (SMU__DGPU_ONLY)
401 uint32_t PreVBlankGap;
402 uint32_t VBlankTimeout;
403 uint32_t TrainTimeGap;
404 uint32_t MvddSwitchTime;
405 uint32_t LongestAcpiTrainTime;
406 uint32_t AcpiDelay;
407 uint32_t G5TrainTime;
408 uint32_t DelayMpllPwron;
409 uint32_t VoltageChangeTimeout;
410#endif
411 uint32_t HandshakeDisables;
412
413 uint8_t DisplayPhy1Config;
414 uint8_t DisplayPhy2Config;
415 uint8_t DisplayPhy3Config;
416 uint8_t DisplayPhy4Config;
417
418 uint8_t DisplayPhy5Config;
419 uint8_t DisplayPhy6Config;
420 uint8_t DisplayPhy7Config;
421 uint8_t DisplayPhy8Config;
422
423 uint32_t AverageGraphicsActivity;
424 uint32_t AverageMemoryActivity;
425 uint32_t AverageGioActivity;
426
427 uint8_t SClkDpmEnabledLevels;
428 uint8_t MClkDpmEnabledLevels;
429 uint8_t LClkDpmEnabledLevels;
430 uint8_t PCIeDpmEnabledLevels;
431
432 uint8_t UVDDpmEnabledLevels;
433 uint8_t SAMUDpmEnabledLevels;
434 uint8_t ACPDpmEnabledLevels;
435 uint8_t VCEDpmEnabledLevels;
436
437 uint32_t DRAM_LOG_ADDR_H;
438 uint32_t DRAM_LOG_ADDR_L;
439 uint32_t DRAM_LOG_PHY_ADDR_H;
440 uint32_t DRAM_LOG_PHY_ADDR_L;
441 uint32_t DRAM_LOG_BUFF_SIZE;
442 uint32_t UlvEnterCount;
443 uint32_t UlvTime;
444 uint32_t UcodeLoadStatus;
445 uint32_t AllowMvddSwitch;
446 uint8_t Activity_Weight;
447 uint8_t Reserved8[3];
448};
449
450typedef struct SMU75_SoftRegisters SMU75_SoftRegisters;
451
452struct SMU75_Firmware_Header {
453 uint32_t Digest[5];
454 uint32_t Version;
455 uint32_t HeaderSize;
456 uint32_t Flags;
457 uint32_t EntryPoint;
458 uint32_t CodeSize;
459 uint32_t ImageSize;
460
461 uint32_t Rtos;
462 uint32_t SoftRegisters;
463 uint32_t DpmTable;
464 uint32_t FanTable;
465 uint32_t CacConfigTable;
466 uint32_t CacStatusTable;
467 uint32_t mcRegisterTable;
468 uint32_t mcArbDramTimingTable;
469 uint32_t PmFuseTable;
470 uint32_t Globals;
471 uint32_t ClockStretcherTable;
472 uint32_t VftTable;
473 uint32_t Reserved1;
474 uint32_t AvfsCksOff_AvfsGbvTable;
475 uint32_t AvfsCksOff_BtcGbvTable;
476 uint32_t MM_AvfsTable;
477 uint32_t PowerSharingTable;
478 uint32_t AvfsTable;
479 uint32_t AvfsCksOffGbvTable;
480 uint32_t AvfsMeanNSigma;
481 uint32_t AvfsSclkOffsetTable;
482 uint32_t Reserved[12];
483 uint32_t Signature;
484};
485
486typedef struct SMU75_Firmware_Header SMU75_Firmware_Header;
487
488#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
489
490enum DisplayConfig {
491 PowerDown = 1,
492 DP54x4,
493 DP54x2,
494 DP54x1,
495 DP27x4,
496 DP27x2,
497 DP27x1,
498 HDMI297,
499 HDMI162,
500 LVDS,
501 DP324x4,
502 DP324x2,
503 DP324x1
504};
505
506#define MC_BLOCK_COUNT 1
507#define CPL_BLOCK_COUNT 5
508#define SE_BLOCK_COUNT 15
509#define GC_BLOCK_COUNT 24
510
511struct SMU7_Local_Cac {
512 uint8_t BlockId;
513 uint8_t SignalId;
514 uint8_t Threshold;
515 uint8_t Padding;
516};
517
518typedef struct SMU7_Local_Cac SMU7_Local_Cac;
519
520struct SMU7_Local_Cac_Table {
521 SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
522 SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
523 SMU7_Local_Cac SeLocalCac[SE_BLOCK_COUNT];
524 SMU7_Local_Cac GcLocalCac[GC_BLOCK_COUNT];
525};
526
527typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
528
529#pragma pack(pop)
530
531#define CG_SYS_BITMASK_FIRST_BIT 0
532#define CG_SYS_BITMASK_LAST_BIT 10
533#define CG_SYS_BIF_MGLS_SHIFT 0
534#define CG_SYS_ROM_SHIFT 1
535#define CG_SYS_MC_MGCG_SHIFT 2
536#define CG_SYS_MC_MGLS_SHIFT 3
537#define CG_SYS_SDMA_MGCG_SHIFT 4
538#define CG_SYS_SDMA_MGLS_SHIFT 5
539#define CG_SYS_DRM_MGCG_SHIFT 6
540#define CG_SYS_HDP_MGCG_SHIFT 7
541#define CG_SYS_HDP_MGLS_SHIFT 8
542#define CG_SYS_DRM_MGLS_SHIFT 9
543#define CG_SYS_BIF_MGCG_SHIFT 10
544
545#define CG_SYS_BIF_MGLS_MASK 0x1
546#define CG_SYS_ROM_MASK 0x2
547#define CG_SYS_MC_MGCG_MASK 0x4
548#define CG_SYS_MC_MGLS_MASK 0x8
549#define CG_SYS_SDMA_MGCG_MASK 0x10
550#define CG_SYS_SDMA_MGLS_MASK 0x20
551#define CG_SYS_DRM_MGCG_MASK 0x40
552#define CG_SYS_HDP_MGCG_MASK 0x80
553#define CG_SYS_HDP_MGLS_MASK 0x100
554#define CG_SYS_DRM_MGLS_MASK 0x200
555#define CG_SYS_BIF_MGCG_MASK 0x400
556
557#define CG_GFX_BITMASK_FIRST_BIT 16
558#define CG_GFX_BITMASK_LAST_BIT 24
559
560#define CG_GFX_CGCG_SHIFT 16
561#define CG_GFX_CGLS_SHIFT 17
562#define CG_CPF_MGCG_SHIFT 18
563#define CG_RLC_MGCG_SHIFT 19
564#define CG_GFX_OTHERS_MGCG_SHIFT 20
565#define CG_GFX_3DCG_SHIFT 21
566#define CG_GFX_3DLS_SHIFT 22
567#define CG_GFX_RLC_LS_SHIFT 23
568#define CG_GFX_CP_LS_SHIFT 24
569
570#define CG_GFX_CGCG_MASK 0x00010000
571#define CG_GFX_CGLS_MASK 0x00020000
572#define CG_CPF_MGCG_MASK 0x00040000
573#define CG_RLC_MGCG_MASK 0x00080000
574#define CG_GFX_OTHERS_MGCG_MASK 0x00100000
575#define CG_GFX_3DCG_MASK 0x00200000
576#define CG_GFX_3DLS_MASK 0x00400000
577#define CG_GFX_RLC_LS_MASK 0x00800000
578#define CG_GFX_CP_LS_MASK 0x01000000
579
580
581#define VRCONF_VDDC_MASK 0x000000FF
582#define VRCONF_VDDC_SHIFT 0
583#define VRCONF_VDDGFX_MASK 0x0000FF00
584#define VRCONF_VDDGFX_SHIFT 8
585#define VRCONF_VDDCI_MASK 0x00FF0000
586#define VRCONF_VDDCI_SHIFT 16
587#define VRCONF_MVDD_MASK 0xFF000000
588#define VRCONF_MVDD_SHIFT 24
589
590#define VR_MERGED_WITH_VDDC 0
591#define VR_SVI2_PLANE_1 1
592#define VR_SVI2_PLANE_2 2
593#define VR_SMIO_PATTERN_1 3
594#define VR_SMIO_PATTERN_2 4
595#define VR_STATIC_VOLTAGE 5
596
597#define CLOCK_STRETCHER_MAX_ENTRIES 0x4
598#define CKS_LOOKUPTable_MAX_ENTRIES 0x4
599
600#define CLOCK_STRETCHER_SETTING_DDT_MASK 0x01
601#define CLOCK_STRETCHER_SETTING_DDT_SHIFT 0x0
602#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_MASK 0x1E
603#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_SHIFT 0x1
604#define CLOCK_STRETCHER_SETTING_ENABLE_MASK 0x80
605#define CLOCK_STRETCHER_SETTING_ENABLE_SHIFT 0x7
606
607struct SMU_ClockStretcherDataTableEntry {
608 uint8_t minVID;
609 uint8_t maxVID;
610
611 uint16_t setting;
612};
613typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
614
615struct SMU_ClockStretcherDataTable {
616 SMU_ClockStretcherDataTableEntry ClockStretcherDataTableEntry[CLOCK_STRETCHER_MAX_ENTRIES];
617};
618typedef struct SMU_ClockStretcherDataTable SMU_ClockStretcherDataTable;
619
620struct SMU_CKS_LOOKUPTableEntry {
621 uint16_t minFreq;
622 uint16_t maxFreq;
623
624 uint8_t setting;
625 uint8_t padding[3];
626};
627typedef struct SMU_CKS_LOOKUPTableEntry SMU_CKS_LOOKUPTableEntry;
628
629struct SMU_CKS_LOOKUPTable {
630 SMU_CKS_LOOKUPTableEntry CKS_LOOKUPTableEntry[CKS_LOOKUPTable_MAX_ENTRIES];
631};
632typedef struct SMU_CKS_LOOKUPTable SMU_CKS_LOOKUPTable;
633
634struct AgmAvfsData_t {
635 uint16_t avgPsmCount[28];
636 uint16_t minPsmCount[28];
637};
638typedef struct AgmAvfsData_t AgmAvfsData_t;
639
640enum VFT_COLUMNS {
641 SCLK0,
642 SCLK1,
643 SCLK2,
644 SCLK3,
645 SCLK4,
646 SCLK5,
647 SCLK6,
648 SCLK7,
649
650 NUM_VFT_COLUMNS
651};
652enum {
653 SCS_FUSE_T0,
654 SCS_FUSE_T1,
655 NUM_SCS_FUSE_TEMPERATURE
656};
657enum {
658 SCKS_ON,
659 SCKS_OFF,
660 NUM_SCKS_STATE_TYPES
661};
662
663#define VFT_TABLE_DEFINED
664
665#define TEMP_RANGE_MAXSTEPS 12
666struct VFT_CELL_t {
667 uint16_t Voltage;
668};
669
670typedef struct VFT_CELL_t VFT_CELL_t;
671#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
672struct SCS_CELL_t {
673 uint16_t PsmCnt[NUM_SCKS_STATE_TYPES];
674};
675typedef struct SCS_CELL_t SCS_CELL_t;
676#endif
677
678struct VFT_TABLE_t {
679 VFT_CELL_t Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
680 uint16_t AvfsGbv [NUM_VFT_COLUMNS];
681 uint16_t BtcGbv [NUM_VFT_COLUMNS];
682 int16_t Temperature [TEMP_RANGE_MAXSTEPS];
683
684#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
685 SCS_CELL_t ScksCell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
686#endif
687
688 uint8_t NumTemperatureSteps;
689 uint8_t padding[3];
690};
691typedef struct VFT_TABLE_t VFT_TABLE_t;
692
693#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
694#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
695
696struct GB_VDROOP_TABLE_t {
697 int32_t a0;
698 int32_t a1;
699 int32_t a2;
700 uint32_t spare;
701};
702typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
703
704struct SMU_QuadraticCoeffs {
705 int32_t m1;
706 int32_t b;
707
708 int16_t m2;
709 uint8_t m1_shift;
710 uint8_t m2_shift;
711};
712typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
713
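/*
 * Illustration only (not part of this header): the fused coefficients
 * describe a fixed-point quadratic, roughly
 * f(x) = (m1*x >> m1_shift) + (m2*x*x >> m2_shift) + b;
 * the exact scaling is firmware-defined. A plain-integer sketch:
 */
static inline int64_t smu_quadratic_eval(const SMU_QuadraticCoeffs *c, int64_t x)
{
	return ((int64_t)c->m1 * x >> c->m1_shift) +
	       ((int64_t)c->m2 * x * x >> c->m2_shift) + c->b;
}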
714struct AVFS_Margin_t {
715 VFT_CELL_t Cell[NUM_VFT_COLUMNS];
716};
717typedef struct AVFS_Margin_t AVFS_Margin_t;
718
719struct AVFS_CksOff_Gbv_t {
720 VFT_CELL_t Cell[NUM_VFT_COLUMNS];
721};
722typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
723
724struct AVFS_CksOff_AvfsGbv_t {
725 VFT_CELL_t Cell[NUM_VFT_COLUMNS];
726};
727typedef struct AVFS_CksOff_AvfsGbv_t AVFS_CksOff_AvfsGbv_t;
728
729struct AVFS_CksOff_BtcGbv_t {
730 VFT_CELL_t Cell[NUM_VFT_COLUMNS];
731};
732typedef struct AVFS_CksOff_BtcGbv_t AVFS_CksOff_BtcGbv_t;
733
734struct AVFS_meanNsigma_t {
735 uint32_t Aconstant[3];
736 uint16_t DC_tol_sigma;
737 uint16_t Platform_mean;
738 uint16_t Platform_sigma;
739 uint16_t PSM_Age_CompFactor;
740 uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS];
741};
742typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
743
744struct AVFS_Sclk_Offset_t {
745 uint16_t Sclk_Offset[8];
746};
747typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
748
749struct Power_Sharing_t {
750 uint32_t EnergyCounter;
751 uint32_t EngeryThreshold;
752 uint64_t AM_SCLK_CNT;
753 uint64_t AM_0_BUSY_CNT;
754};
755typedef struct Power_Sharing_t Power_Sharing_t;
756
757
758#endif
759
760
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h
new file mode 100644
index 000000000000..b64e58a22ddf
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h
@@ -0,0 +1,886 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef SMU75_DISCRETE_H
25#define SMU75_DISCRETE_H
26
27#include "smu75.h"
28
29#pragma pack(push, 1)
30
31#define NUM_SCLK_RANGE 8
32
33#define VCO_3_6 1
34#define VCO_2_4 3
35
36#define POSTDIV_DIV_BY_1 0
37#define POSTDIV_DIV_BY_2 1
38#define POSTDIV_DIV_BY_4 2
39#define POSTDIV_DIV_BY_8 3
40#define POSTDIV_DIV_BY_16 4
41
42struct sclkFcwRange_t {
43 uint8_t vco_setting; /* 1: 3-6GHz, 3: 2-4GHz */
44 uint8_t postdiv; /* divide by 2^n */
45 uint16_t fcw_pcc;
46 uint16_t fcw_trans_upper;
47 uint16_t fcw_trans_lower;
48};
49typedef struct sclkFcwRange_t sclkFcwRange_t;
50
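/*
 * Illustration only (not part of this header): each sclkFcwRange_t row
 * describes one VCO/post-divider setting; a plausible lookup walks the
 * table until the target FCW falls inside the row's transition window,
 * clamping to the last entry.
 */
static inline int sclk_pick_fcw_range(const sclkFcwRange_t *tbl, int n,
		uint16_t fcw)
{
	int i;

	for (i = 0; i < n; i++)
		if (fcw >= tbl[i].fcw_trans_lower && fcw <= tbl[i].fcw_trans_upper)
			return i;
	return n - 1;
}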
51struct SMIO_Pattern {
52 uint16_t Voltage;
53 uint8_t Smio;
54 uint8_t padding;
55};
56
57typedef struct SMIO_Pattern SMIO_Pattern;
58
59struct SMIO_Table {
60 SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS];
61};
62
63typedef struct SMIO_Table SMIO_Table;
64
65struct SMU_SclkSetting {
66 uint32_t SclkFrequency;
67 uint16_t Fcw_int;
68 uint16_t Fcw_frac;
69 uint16_t Pcc_fcw_int;
70 uint8_t PllRange;
71 uint8_t SSc_En;
72 uint16_t Sclk_slew_rate;
73 uint16_t Pcc_up_slew_rate;
74 uint16_t Pcc_down_slew_rate;
75 uint16_t Fcw1_int;
76 uint16_t Fcw1_frac;
77 uint16_t Sclk_ss_slew_rate;
78};
79typedef struct SMU_SclkSetting SMU_SclkSetting;
80
81struct SMU75_Discrete_GraphicsLevel {
82 SMU_VoltageLevel MinVoltage;
83
84 uint8_t pcieDpmLevel;
85 uint8_t DeepSleepDivId;
86 uint16_t ActivityLevel;
87
88 uint32_t CgSpllFuncCntl3;
89 uint32_t CgSpllFuncCntl4;
90 uint32_t CcPwrDynRm;
91 uint32_t CcPwrDynRm1;
92
93 uint8_t SclkDid;
94 uint8_t padding;
95 uint8_t EnabledForActivity;
96 uint8_t EnabledForThrottle;
97 uint8_t UpHyst;
98 uint8_t DownHyst;
99 uint8_t VoltageDownHyst;
100 uint8_t PowerThrottle;
101
102 SMU_SclkSetting SclkSetting;
103
104 uint8_t ScksStretchThreshVid[NUM_SCKS_STATE_TYPES];
105 uint16_t Padding;
106};
107
108typedef struct SMU75_Discrete_GraphicsLevel SMU75_Discrete_GraphicsLevel;
109
110struct SMU75_Discrete_ACPILevel {
111 uint32_t Flags;
112 SMU_VoltageLevel MinVoltage;
113 uint32_t SclkFrequency;
114 uint8_t SclkDid;
115 uint8_t DisplayWatermark;
116 uint8_t DeepSleepDivId;
117 uint8_t padding;
118 uint32_t CcPwrDynRm;
119 uint32_t CcPwrDynRm1;
120
121 SMU_SclkSetting SclkSetting;
122};
123
124typedef struct SMU75_Discrete_ACPILevel SMU75_Discrete_ACPILevel;
125
126struct SMU75_Discrete_Ulv {
127 uint32_t CcPwrDynRm;
128 uint32_t CcPwrDynRm1;
129 uint16_t VddcOffset;
130 uint8_t VddcOffsetVid;
131 uint8_t VddcPhase;
132 uint16_t BifSclkDfs;
133 uint16_t Reserved;
134};
135
136typedef struct SMU75_Discrete_Ulv SMU75_Discrete_Ulv;
137
138struct SMU75_Discrete_MemoryLevel {
139 SMU_VoltageLevel MinVoltage;
140 uint32_t MinMvdd;
141
142 uint32_t MclkFrequency;
143
144 uint8_t StutterEnable;
145 uint8_t EnabledForThrottle;
146 uint8_t EnabledForActivity;
147 uint8_t padding_0;
148
149 uint8_t UpHyst;
150 uint8_t DownHyst;
151 uint8_t VoltageDownHyst;
152 uint8_t padding_1;
153
154 uint16_t ActivityLevel;
155 uint8_t DisplayWatermark;
156 uint8_t padding_2;
157
158 uint16_t Fcw_int;
159 uint16_t Fcw_frac;
160 uint8_t Postdiv;
161 uint8_t padding_3[3];
162};
163
164typedef struct SMU75_Discrete_MemoryLevel SMU75_Discrete_MemoryLevel;
165
166struct SMU75_Discrete_LinkLevel {
167 uint8_t PcieGenSpeed;
168 uint8_t PcieLaneCount;
169 uint8_t EnabledForActivity;
170 uint8_t SPC;
171 uint32_t DownThreshold;
172 uint32_t UpThreshold;
173 uint16_t BifSclkDfs;
174 uint16_t Reserved;
175};
176
177typedef struct SMU75_Discrete_LinkLevel SMU75_Discrete_LinkLevel;
178
179
180/* MC ARB DRAM Timing registers. */
181struct SMU75_Discrete_MCArbDramTimingTableEntry {
182 uint32_t McArbDramTiming;
183 uint32_t McArbDramTiming2;
184 uint32_t McArbBurstTime;
185 uint32_t McArbRfshRate;
186 uint32_t McArbMisc3;
187};
188
189typedef struct SMU75_Discrete_MCArbDramTimingTableEntry SMU75_Discrete_MCArbDramTimingTableEntry;
190
191struct SMU75_Discrete_MCArbDramTimingTable {
192 SMU75_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
193};
194
195typedef struct SMU75_Discrete_MCArbDramTimingTable SMU75_Discrete_MCArbDramTimingTable;
196
197/* UVD VCLK/DCLK state (level) definition. */
198struct SMU75_Discrete_UvdLevel {
199 uint32_t VclkFrequency;
200 uint32_t DclkFrequency;
201 SMU_VoltageLevel MinVoltage;
202 uint8_t VclkDivider;
203 uint8_t DclkDivider;
204 uint8_t padding[2];
205};
206
207typedef struct SMU75_Discrete_UvdLevel SMU75_Discrete_UvdLevel;
208
209/* Clocks for other external blocks (VCE, ACP, SAMU). */
210struct SMU75_Discrete_ExtClkLevel {
211 uint32_t Frequency;
212 SMU_VoltageLevel MinVoltage;
213 uint8_t Divider;
214 uint8_t padding[3];
215};
216
217typedef struct SMU75_Discrete_ExtClkLevel SMU75_Discrete_ExtClkLevel;
218
219struct SMU75_Discrete_StateInfo {
220 uint32_t SclkFrequency;
221 uint32_t MclkFrequency;
222 uint32_t VclkFrequency;
223 uint32_t DclkFrequency;
224 uint32_t SamclkFrequency;
225 uint32_t AclkFrequency;
226 uint32_t EclkFrequency;
227 uint16_t MvddVoltage;
228 uint16_t padding16;
229 uint8_t DisplayWatermark;
230 uint8_t McArbIndex;
231 uint8_t McRegIndex;
232 uint8_t SeqIndex;
233 uint8_t SclkDid;
234 int8_t SclkIndex;
235 int8_t MclkIndex;
236 uint8_t PCIeGen;
237};
238
239typedef struct SMU75_Discrete_StateInfo SMU75_Discrete_StateInfo;
240
241struct SMU75_Discrete_DpmTable {
242 SMU75_PIDController GraphicsPIDController;
243 SMU75_PIDController MemoryPIDController;
244 SMU75_PIDController LinkPIDController;
245
246 uint32_t SystemFlags;
247
248 uint32_t VRConfig;
249 uint32_t SmioMask1;
250 uint32_t SmioMask2;
251 SMIO_Table SmioTable1;
252 SMIO_Table SmioTable2;
253
254 uint32_t MvddLevelCount;
255
256 uint8_t BapmVddcVidHiSidd [SMU75_MAX_LEVELS_VDDC];
257 uint8_t BapmVddcVidLoSidd [SMU75_MAX_LEVELS_VDDC];
258 uint8_t BapmVddcVidHiSidd2 [SMU75_MAX_LEVELS_VDDC];
259
260 uint8_t GraphicsDpmLevelCount;
261 uint8_t MemoryDpmLevelCount;
262 uint8_t LinkLevelCount;
263 uint8_t MasterDeepSleepControl;
264
265 uint8_t UvdLevelCount;
266 uint8_t VceLevelCount;
267 uint8_t AcpLevelCount;
268 uint8_t SamuLevelCount;
269
270 uint8_t ThermOutGpio;
271 uint8_t ThermOutPolarity;
272 uint8_t ThermOutMode;
273 uint8_t BootPhases;
274
275 uint8_t VRHotLevel;
276 uint8_t LdoRefSel;
277
278 uint8_t Reserved1[2];
279
280 uint16_t FanStartTemperature;
281 uint16_t FanStopTemperature;
282
283 uint16_t MaxVoltage;
284 uint16_t Reserved2;
285 uint32_t Reserved;
286
287 SMU75_Discrete_GraphicsLevel GraphicsLevel [SMU75_MAX_LEVELS_GRAPHICS];
288 SMU75_Discrete_MemoryLevel MemoryACPILevel;
289 SMU75_Discrete_MemoryLevel MemoryLevel [SMU75_MAX_LEVELS_MEMORY];
290 SMU75_Discrete_LinkLevel LinkLevel [SMU75_MAX_LEVELS_LINK];
291 SMU75_Discrete_ACPILevel ACPILevel;
292 SMU75_Discrete_UvdLevel UvdLevel [SMU75_MAX_LEVELS_UVD];
293 SMU75_Discrete_ExtClkLevel VceLevel [SMU75_MAX_LEVELS_VCE];
294 SMU75_Discrete_ExtClkLevel AcpLevel [SMU75_MAX_LEVELS_ACP];
295 SMU75_Discrete_ExtClkLevel SamuLevel [SMU75_MAX_LEVELS_SAMU];
296 SMU75_Discrete_Ulv Ulv;
297
298 uint8_t DisplayWatermark [SMU75_MAX_LEVELS_MEMORY][SMU75_MAX_LEVELS_GRAPHICS];
299
300 uint32_t SclkStepSize;
301 uint32_t Smio [SMU75_MAX_ENTRIES_SMIO];
302
303 uint8_t UvdBootLevel;
304 uint8_t VceBootLevel;
305 uint8_t AcpBootLevel;
306 uint8_t SamuBootLevel;
307
308 uint8_t GraphicsBootLevel;
309 uint8_t GraphicsVoltageChangeEnable;
310 uint8_t GraphicsThermThrottleEnable;
311 uint8_t GraphicsInterval;
312
313 uint8_t VoltageInterval;
314 uint8_t ThermalInterval;
315 uint16_t TemperatureLimitHigh;
316
317 uint16_t TemperatureLimitLow;
318 uint8_t MemoryBootLevel;
319 uint8_t MemoryVoltageChangeEnable;
320
321 uint16_t BootMVdd;
322 uint8_t MemoryInterval;
323 uint8_t MemoryThermThrottleEnable;
324
325 uint16_t VoltageResponseTime;
326 uint16_t PhaseResponseTime;
327
328 uint8_t PCIeBootLinkLevel;
329 uint8_t PCIeGenInterval;
330 uint8_t DTEInterval;
331 uint8_t DTEMode;
332
333 uint8_t SVI2Enable;
334 uint8_t VRHotGpio;
335 uint8_t AcDcGpio;
336 uint8_t ThermGpio;
337
338 uint16_t PPM_PkgPwrLimit;
339 uint16_t PPM_TemperatureLimit;
340
341 uint16_t DefaultTdp;
342 uint16_t TargetTdp;
343
344 uint16_t FpsHighThreshold;
345 uint16_t FpsLowThreshold;
346
347 uint16_t BAPMTI_R [SMU75_DTE_ITERATIONS][SMU75_DTE_SOURCES][SMU75_DTE_SINKS];
348 uint16_t BAPMTI_RC [SMU75_DTE_ITERATIONS][SMU75_DTE_SOURCES][SMU75_DTE_SINKS];
349
350 uint16_t TemperatureLimitEdge;
351 uint16_t TemperatureLimitHotspot;
352
353 uint16_t BootVddc;
354 uint16_t BootVddci;
355
356 uint16_t FanGainEdge;
357 uint16_t FanGainHotspot;
358
359 uint32_t LowSclkInterruptThreshold;
360 uint32_t VddGfxReChkWait;
361
362 uint8_t ClockStretcherAmount;
363 uint8_t Sclk_CKS_masterEn0_7;
364 uint8_t Sclk_CKS_masterEn8_15;
365 uint8_t DPMFreezeAndForced;
366
367 uint8_t Sclk_voltageOffset[8];
368
369 SMU_ClockStretcherDataTable ClockStretcherDataTable;
370 SMU_CKS_LOOKUPTable CKS_LOOKUPTable;
371
372 uint32_t CurrSclkPllRange;
373 sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE];
374
375 GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
376 SMU_QuadraticCoeffs AVFSGB_FUSE_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
377};
378
379typedef struct SMU75_Discrete_DpmTable SMU75_Discrete_DpmTable;
380
381struct SMU75_Discrete_FanTable {
382 uint16_t FdoMode;
383 int16_t TempMin;
384 int16_t TempMed;
385 int16_t TempMax;
386 int16_t Slope1;
387 int16_t Slope2;
388 int16_t FdoMin;
389 int16_t HystUp;
390 int16_t HystDown;
391 int16_t HystSlope;
392 int16_t TempRespLim;
393 int16_t TempCurr;
394 int16_t SlopeCurr;
395 int16_t PwmCurr;
396 uint32_t RefreshPeriod;
397 int16_t FdoMax;
398 uint8_t TempSrc;
399 int8_t Padding;
400};
401
402typedef struct SMU75_Discrete_FanTable SMU75_Discrete_FanTable;
403
404#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4
405#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
406
407
408
409struct SMU7_MclkDpmScoreboard {
410 uint32_t PercentageBusy;
411
412 int32_t PIDError;
413 int32_t PIDIntegral;
414 int32_t PIDOutput;
415
416 uint32_t SigmaDeltaAccum;
417 uint32_t SigmaDeltaOutput;
418 uint32_t SigmaDeltaLevel;
419
420 uint32_t UtilizationSetpoint;
421
422 uint8_t TdpClampMode;
423 uint8_t TdcClampMode;
424 uint8_t ThermClampMode;
425 uint8_t VoltageBusy;
426
427 int8_t CurrLevel;
428 int8_t TargLevel;
429 uint8_t LevelChangeInProgress;
430 uint8_t UpHyst;
431
432 uint8_t DownHyst;
433 uint8_t VoltageDownHyst;
434 uint8_t DpmEnable;
435 uint8_t DpmRunning;
436
437 uint8_t DpmForce;
438 uint8_t DpmForceLevel;
439 uint8_t padding2;
440 uint8_t McArbIndex;
441
442 uint32_t MinimumPerfMclk;
443
444 uint8_t AcpiReq;
445 uint8_t AcpiAck;
446 uint8_t MclkSwitchInProgress;
447 uint8_t MclkSwitchCritical;
448
449 uint8_t IgnoreVBlank;
450 uint8_t TargetMclkIndex;
451 uint8_t TargetMvddIndex;
452 uint8_t MclkSwitchResult;
453
454 uint16_t VbiFailureCount;
455 uint8_t VbiWaitCounter;
456 uint8_t EnabledLevelsChange;
457
458 uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_MEMORY];
459 uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_MEMORY];
460
461 void (*TargetStateCalculator)(uint8_t);
462 void (*SavedTargetStateCalculator)(uint8_t);
463
464 uint16_t AutoDpmInterval;
465 uint16_t AutoDpmRange;
466
467 uint16_t VbiTimeoutCount;
468 uint16_t MclkSwitchingTime;
469
470 uint8_t fastSwitch;
471 uint8_t Save_PIC_VDDGFX_EXIT;
472 uint8_t Save_PIC_VDDGFX_ENTER;
473 uint8_t VbiTimeout;
474
475 uint32_t HbmTempRegBackup;
476};
477
478typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard;
479
480struct SMU7_UlvScoreboard {
481 uint8_t EnterUlv;
482 uint8_t ExitUlv;
483 uint8_t UlvActive;
484 uint8_t WaitingForUlv;
485 uint8_t UlvEnable;
486 uint8_t UlvRunning;
487 uint8_t UlvMasterEnable;
488 uint8_t padding;
489 uint32_t UlvAbortedCount;
490 uint32_t UlvTimeStamp;
491};
492
493typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard;
494
495struct VddgfxSavedRegisters {
496 uint32_t GPU_DBG[3];
497 uint32_t MEC_BaseAddress_Hi;
498 uint32_t MEC_BaseAddress_Lo;
499 uint32_t THM_TMON0_CTRL2__RDIR_PRESENT;
500 uint32_t THM_TMON1_CTRL2__RDIR_PRESENT;
501 uint32_t CP_INT_CNTL;
502};
503
504typedef struct VddgfxSavedRegisters VddgfxSavedRegisters;
505
506struct SMU7_VddGfxScoreboard {
507 uint8_t VddGfxEnable;
508 uint8_t VddGfxActive;
509 uint8_t VPUResetOccured;
510 uint8_t padding;
511
512 uint32_t VddGfxEnteredCount;
513 uint32_t VddGfxAbortedCount;
514
515 uint32_t VddGfxVid;
516
517 VddgfxSavedRegisters SavedRegisters;
518};
519
520typedef struct SMU7_VddGfxScoreboard SMU7_VddGfxScoreboard;
521
522struct SMU7_TdcLimitScoreboard {
523 uint8_t Enable;
524 uint8_t Running;
525 uint16_t Alpha;
526 uint32_t FilteredIddc;
527 uint32_t IddcLimit;
528 uint32_t IddcHyst;
529 SMU7_HystController_Data HystControllerData;
530};
531
532typedef struct SMU7_TdcLimitScoreboard SMU7_TdcLimitScoreboard;
533
534struct SMU7_PkgPwrLimitScoreboard {
535 uint8_t Enable;
536 uint8_t Running;
537 uint16_t Alpha;
538 uint32_t FilteredPkgPwr;
539 uint32_t Limit;
540 uint32_t Hyst;
541 uint32_t LimitFromDriver;
542 uint8_t PowerSharingEnabled;
543 uint8_t PowerSharingCounter;
544 uint8_t PowerSharingINTEnabled;
545 uint8_t GFXActivityCounterEnabled;
546 uint32_t EnergyCount;
547 uint32_t PSACTCount;
548 uint8_t RollOverRequired;
549 uint8_t RollOverCount;
550 uint8_t padding[2];
551 SMU7_HystController_Data HystControllerData;
552};
553
554typedef struct SMU7_PkgPwrLimitScoreboard SMU7_PkgPwrLimitScoreboard;
555
556struct SMU7_BapmScoreboard {
557 uint32_t source_powers[SMU75_DTE_SOURCES];
558 uint32_t source_powers_last[SMU75_DTE_SOURCES];
559 int32_t entity_temperatures[SMU75_NUM_GPU_TES];
560 int32_t initial_entity_temperatures[SMU75_NUM_GPU_TES];
561 int32_t Limit;
562 int32_t Hyst;
563 int32_t therm_influence_coeff_table[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS * 2];
564 int32_t therm_node_table[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
565 uint16_t ConfigTDPPowerScalar;
566 uint16_t FanSpeedPowerScalar;
567 uint16_t OverDrivePowerScalar;
568 uint16_t OverDriveLimitScalar;
569 uint16_t FinalPowerScalar;
570 uint8_t VariantID;
571 uint8_t spare997;
572
573 SMU7_HystController_Data HystControllerData;
574
575 int32_t temperature_gradient_slope;
576 int32_t temperature_gradient;
577 uint32_t measured_temperature;
578};
579
580
581typedef struct SMU7_BapmScoreboard SMU7_BapmScoreboard;
582
583struct SMU7_AcpiScoreboard {
584 uint32_t SavedInterruptMask[2];
585 uint8_t LastACPIRequest;
586 uint8_t CgBifResp;
587 uint8_t RequestType;
588 uint8_t Padding;
589 SMU75_Discrete_ACPILevel D0Level;
590};
591
592typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
593
594struct SMU75_Discrete_PmFuses {
595 uint8_t BapmVddCVidHiSidd[8];
596
597 uint8_t BapmVddCVidLoSidd[8];
598
599 uint8_t VddCVid[8];
600
601 uint8_t SviLoadLineEn;
602 uint8_t SviLoadLineVddC;
603 uint8_t SviLoadLineTrimVddC;
604 uint8_t SviLoadLineOffsetVddC;
605
606 uint16_t TDC_VDDC_PkgLimit;
607 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
608 uint8_t TDC_MAWt;
609
610 uint8_t TdcWaterfallCtl;
611 uint8_t LPMLTemperatureMin;
612 uint8_t LPMLTemperatureMax;
613 uint8_t Reserved;
614
615 uint8_t LPMLTemperatureScaler[16];
616
617 int16_t FuzzyFan_ErrorSetDelta;
618 int16_t FuzzyFan_ErrorRateSetDelta;
619 int16_t FuzzyFan_PwmSetDelta;
620 uint16_t Reserved6;
621
622 uint8_t GnbLPML[16];
623
624 uint8_t GnbLPMLMaxVid;
625 uint8_t GnbLPMLMinVid;
626 uint8_t Reserved1[2];
627
628 uint16_t BapmVddCBaseLeakageHiSidd;
629 uint16_t BapmVddCBaseLeakageLoSidd;
630
631 uint16_t VFT_Temp[3];
632 uint8_t Version;
633 uint8_t padding;
634
635 SMU_QuadraticCoeffs VFT_ATE[3];
636
637 SMU_QuadraticCoeffs AVFS_GB;
638 SMU_QuadraticCoeffs ATE_ACBTC_GB;
639
640 SMU_QuadraticCoeffs P2V;
641
642 uint32_t PsmCharzFreq;
643
644 uint16_t InversionVoltage;
645 uint16_t PsmCharzTemp;
646
647 uint32_t EnabledAvfsModules;
648
649 SMU_QuadraticCoeffs BtcGbv_CksOff;
650};
651
652typedef struct SMU75_Discrete_PmFuses SMU75_Discrete_PmFuses;
653
654struct SMU7_Discrete_Log_Header_Table {
655 uint32_t version;
656 uint32_t asic_id;
657 uint16_t flags;
658 uint16_t entry_size;
659 uint32_t total_size;
660 uint32_t num_of_entries;
661 uint8_t type;
662 uint8_t mode;
663 uint8_t filler_0[2];
664 uint32_t filler_1[2];
665};
666
667typedef struct SMU7_Discrete_Log_Header_Table SMU7_Discrete_Log_Header_Table;
668
669struct SMU7_Discrete_Log_Cntl {
670 uint8_t Enabled;
671 uint8_t Type;
672 uint8_t padding[2];
673 uint32_t BufferSize;
674 uint32_t SamplesLogged;
675 uint32_t SampleSize;
676 uint32_t AddrL;
677 uint32_t AddrH;
678};
679
680typedef struct SMU7_Discrete_Log_Cntl SMU7_Discrete_Log_Cntl;
681
682#if defined SMU__DGPU_ONLY
683#define CAC_ACC_NW_NUM_OF_SIGNALS 87
684#endif
685
686
687struct SMU7_Discrete_Cac_Collection_Table {
688 uint32_t temperature;
689 uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
690};
691
692typedef struct SMU7_Discrete_Cac_Collection_Table SMU7_Discrete_Cac_Collection_Table;
693
694struct SMU7_Discrete_Cac_Verification_Table {
695 uint32_t VddcTotalPower;
696 uint32_t VddcLeakagePower;
697 uint32_t VddcConstantPower;
698 uint32_t VddcGfxDynamicPower;
699 uint32_t VddcUvdDynamicPower;
700 uint32_t VddcVceDynamicPower;
701 uint32_t VddcAcpDynamicPower;
702 uint32_t VddcPcieDynamicPower;
703 uint32_t VddcDceDynamicPower;
704 uint32_t VddcCurrent;
705 uint32_t VddcVoltage;
706 uint32_t VddciTotalPower;
707 uint32_t VddciLeakagePower;
708 uint32_t VddciConstantPower;
709 uint32_t VddciDynamicPower;
710 uint32_t Vddr1TotalPower;
711 uint32_t Vddr1LeakagePower;
712 uint32_t Vddr1ConstantPower;
713 uint32_t Vddr1DynamicPower;
714 uint32_t spare[4];
715 uint32_t temperature;
716};
717
718typedef struct SMU7_Discrete_Cac_Verification_Table SMU7_Discrete_Cac_Verification_Table;
719
720struct SMU7_Discrete_Pm_Status_Table {
721 int32_t T_meas_max[SMU75_THERMAL_INPUT_LOOP_COUNT];
722 int32_t T_meas_acc[SMU75_THERMAL_INPUT_LOOP_COUNT];
723
724 uint32_t I_calc_max;
725 uint32_t I_calc_acc;
726 uint32_t P_meas_acc;
727 uint32_t V_meas_load_acc;
728 uint32_t I_meas_acc;
729 uint32_t P_meas_acc_vddci;
730 uint32_t V_meas_load_acc_vddci;
731 uint32_t I_meas_acc_vddci;
732
733 uint16_t Sclk_dpm_residency[8];
734 uint16_t Uvd_dpm_residency[8];
735 uint16_t Vce_dpm_residency[8];
736 uint16_t Mclk_dpm_residency[4];
737
738 uint32_t P_roc_acc;
739 uint32_t PkgPwr_max;
740 uint32_t PkgPwr_acc;
741 uint32_t MclkSwitchingTime_max;
742 uint32_t MclkSwitchingTime_acc;
743 uint32_t FanPwm_acc;
744 uint32_t FanRpm_acc;
745 uint32_t Gfx_busy_acc;
746 uint32_t Mc_busy_acc;
747 uint32_t Fps_acc;
748
749 uint32_t AccCnt;
750};
751
752typedef struct SMU7_Discrete_Pm_Status_Table SMU7_Discrete_Pm_Status_Table;
753
754struct SMU7_Discrete_AutoWattMan_Status_Table {
755 int32_t T_meas_acc[SMU75_THERMAL_INPUT_LOOP_COUNT];
756 uint16_t Sclk_dpm_residency[8];
757 uint16_t Mclk_dpm_residency[4];
758 uint32_t TgpPwr_acc;
759 uint32_t Gfx_busy_acc;
760 uint32_t Mc_busy_acc;
761 uint32_t AccCnt;
762};
763
764typedef struct SMU7_Discrete_AutoWattMan_Status_Table SMU7_Discrete_AutoWattMan_Status_Table;
765
766#define SMU7_MAX_GFX_CU_COUNT 24
767#define SMU7_MIN_GFX_CU_COUNT 8
768#define SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT 0
769#define SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_MASK (0xFFFF << SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT)
770#define SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT 16
771#define SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_MASK (0xFFFF << SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT)
772
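/*
 * Illustration only (not part of this header): the DC compute-unit
 * ceiling sits in the low 16 bits and the AC ceiling in the high 16 bits
 * of one enable argument, per the masks above.
 */
static inline uint32_t gfx_cu_pg_pack(uint16_t dc_max_cu, uint16_t ac_max_cu)
{
	return ((uint32_t)dc_max_cu << SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT) |
	       ((uint32_t)ac_max_cu << SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT);
}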
773struct SMU7_GfxCuPgScoreboard {
774 uint8_t Enabled;
775 uint8_t WaterfallUp;
776 uint8_t WaterfallDown;
777 uint8_t WaterfallLimit;
778 uint8_t CurrMaxCu;
779 uint8_t TargMaxCu;
780 uint8_t ClampMode;
781 uint8_t Active;
782 uint8_t MaxSupportedCu;
783 uint8_t MinSupportedCu;
784 uint8_t PendingGfxCuHostInterrupt;
785 uint8_t LastFilteredMaxCuInteger;
786 uint16_t FilteredMaxCu;
787 uint16_t FilteredMaxCuAlpha;
788 uint16_t FilterResetCount;
789 uint16_t FilterResetCountLimit;
790 uint8_t ForceCu;
791 uint8_t ForceCuCount;
792 uint8_t AcModeMaxCu;
793 uint8_t DcModeMaxCu;
794};
795
796typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
797
798#define SMU7_SCLK_CAC 0x561
799#define SMU7_MCLK_CAC 0xF9
800#define SMU7_VCLK_CAC 0x2DE
801#define SMU7_DCLK_CAC 0x2DE
802#define SMU7_ECLK_CAC 0x25E
803#define SMU7_ACLK_CAC 0x25E
804#define SMU7_SAMCLK_CAC 0x25E
805#define SMU7_DISPCLK_CAC 0x100
806#define SMU7_CAC_CONSTANT 0x2EE3430
807#define SMU7_CAC_CONSTANT_SHIFT 18
808
809#define SMU7_VDDCI_MCLK_CONST 1765
810#define SMU7_VDDCI_MCLK_CONST_SHIFT 16
811#define SMU7_VDDCI_VDDCI_CONST 50958
812#define SMU7_VDDCI_VDDCI_CONST_SHIFT 14
813#define SMU7_VDDCI_CONST 11781
814#define SMU7_VDDCI_STROBE_PWR 1331
815
816#define SMU7_VDDR1_CONST 693
817#define SMU7_VDDR1_CAC_WEIGHT 20
818#define SMU7_VDDR1_CAC_WEIGHT_SHIFT 19
819#define SMU7_VDDR1_STROBE_PWR 512
820
821#define SMU7_AREA_COEFF_UVD 0xA78
822#define SMU7_AREA_COEFF_VCE 0x190A
823#define SMU7_AREA_COEFF_ACP 0x22D1
824#define SMU7_AREA_COEFF_SAMU 0x534
825
826#define SMU7_THERM_OUT_MODE_DISABLE 0x0
827#define SMU7_THERM_OUT_MODE_THERM_ONLY 0x1
828#define SMU7_THERM_OUT_MODE_THERM_VRHOT 0x2
829
830#define SQ_Enable_MASK 0x1
831#define SQ_IR_MASK 0x2
832#define SQ_PCC_MASK 0x4
833#define SQ_EDC_MASK 0x8
834
835#define TCP_Enable_MASK 0x100
836#define TCP_IR_MASK 0x200
837#define TCP_PCC_MASK 0x400
838#define TCP_EDC_MASK 0x800
839
840#define TD_Enable_MASK 0x10000
841#define TD_IR_MASK 0x20000
842#define TD_PCC_MASK 0x40000
843#define TD_EDC_MASK 0x80000
844
845#define DB_Enable_MASK 0x1000000
846#define DB_IR_MASK 0x2000000
847#define DB_PCC_MASK 0x4000000
848#define DB_EDC_MASK 0x8000000
849
850#define SQ_Enable_SHIFT 0
851#define SQ_IR_SHIFT 1
852#define SQ_PCC_SHIFT 2
853#define SQ_EDC_SHIFT 3
854
855#define TCP_Enable_SHIFT 8
856#define TCP_IR_SHIFT 9
857#define TCP_PCC_SHIFT 10
858#define TCP_EDC_SHIFT 11
859
860#define TD_Enable_SHIFT 16
861#define TD_IR_SHIFT 17
862#define TD_PCC_SHIFT 18
863#define TD_EDC_SHIFT 19
864
865#define DB_Enable_SHIFT 24
866#define DB_IR_SHIFT 25
867#define DB_PCC_SHIFT 26
868#define DB_EDC_SHIFT 27
869
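/*
 * Illustration only (not part of this header): the SQ/TCP/TD/DB throttle
 * controls occupy one byte each of a single DIDT configuration word, so
 * enabling a block together with its PCC bit is two ORs on the masks above.
 */
static inline uint32_t didt_enable_sq_pcc(uint32_t cfg)
{
	return cfg | SQ_Enable_MASK | SQ_PCC_MASK;
}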
870#define PMFUSES_AVFSSIZE 104
871
872#define BTCGB0_Vdroop_Enable_MASK 0x1
873#define BTCGB1_Vdroop_Enable_MASK 0x2
874#define AVFSGB0_Vdroop_Enable_MASK 0x4
875#define AVFSGB1_Vdroop_Enable_MASK 0x8
876
877#define BTCGB0_Vdroop_Enable_SHIFT 0
878#define BTCGB1_Vdroop_Enable_SHIFT 1
879#define AVFSGB0_Vdroop_Enable_SHIFT 2
880#define AVFSGB1_Vdroop_Enable_SHIFT 3
881
882#pragma pack(pop)
883
884
885#endif
886
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index c3ed737ab951..715b5a168831 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -131,6 +131,7 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_RunAcgInOpenLoop               0x5E
 #define PPSMC_MSG_InitializeAcg                  0x5F
 #define PPSMC_MSG_GetCurrPkgPwr                  0x61
+#define PPSMC_MSG_GetAverageGfxclkActualFrequency 0x63
 #define PPSMC_MSG_SetPccThrottleLevel            0x67
 #define PPSMC_MSG_UpdatePkgPwrPidAlpha           0x68
 #define PPSMC_Message_Count                      0x69
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 958755075421..0a200406a1ec 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -26,7 +26,7 @@
 SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
 	  polaris10_smumgr.o iceland_smumgr.o \
 	  smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
-	  vega12_smumgr.o
+	  vega12_smumgr.o vegam_smumgr.o

 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 08d000140eca..2d4ec8ac3a08 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -61,9 +61,6 @@

 #define SMC_RAM_END 0x40000

-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
 #define CISLAND_MINIMUM_ENGINE_CLOCK 800
 #define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5

@@ -211,9 +208,7 @@ static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
 	int ret;
 
-	if (!ci_is_smc_ram_running(hwmgr))
-		return -EINVAL;
-
+	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
 	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
 
 	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
@@ -1182,7 +1177,6 @@ static int ci_populate_single_memory_level(
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	int result = 0;
 	bool dll_state_on;
-	struct cgs_display_info info = {0};
 	uint32_t mclk_edc_wr_enable_threshold = 40000;
 	uint32_t mclk_edc_enable_threshold = 40000;
 	uint32_t mclk_strobe_mode_threshold = 40000;
@@ -1236,8 +1230,7 @@ static int ci_populate_single_memory_level(
 	/* default set to low watermark. Highest level will be set to high later.*/
 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	data->display_timing.num_existing_displays = info.display_count;
+	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
 
 	/* stutter mode not support on ci */
 
@@ -2784,7 +2777,6 @@ static int ci_smu_fini(struct pp_hwmgr *hwmgr)
 {
 	kfree(hwmgr->smu_backend);
 	hwmgr->smu_backend = NULL;
-	cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
 	return 0;
 }
 
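With the ram-running guard dropped, ci_send_msg_to_smc() always performs the plain mailbox handshake. The sequence it now implements (sketch of the three steps, grounded in the hunk above):

	/* 1. clear the response register so a stale ack cannot be
	 *    mistaken for the reply to this message */
	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
	/* 2. post the message id */
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
	/* 3. spin until the SMC writes a nonzero response code */
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);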
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index faef78321446..53df9405f43a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -53,10 +53,7 @@
 
 #define FIJI_SMC_SIZE 0x20000
 
-#define VOLTAGE_SCALE 4
 #define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
 #define VDDC_VDDCI_DELTA 300
 #define MC_CG_ARB_FREQ_F1 0x0b
 
@@ -288,8 +285,7 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend);
 
 	/* Only start SMC if SMC RAM is not running */
-	if (!(smu7_is_smc_ram_running(hwmgr)
-		|| cgs_is_virtualization_enabled(hwmgr->device))) {
+	if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
 		/* Check if SMU is running in protected mode */
 		if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
 				CGS_IND_REG__SMC,
@@ -307,13 +303,13 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
 	}
 
 	/* To initialize all clock gating before RLC loaded and running.*/
-	cgs_set_clockgating_state(hwmgr->device,
+	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 			AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
-	cgs_set_clockgating_state(hwmgr->device,
+	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 			AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
-	cgs_set_clockgating_state(hwmgr->device,
+	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 			AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
-	cgs_set_clockgating_state(hwmgr->device,
+	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 			AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);
 
 	/* Setup SoftRegsStart here for register lookup in case
@@ -335,10 +331,10 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
 	uint32_t efuse = 0;
 	uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
 
-	if (cgs_is_virtualization_enabled(hwmgr->device))
-		return 0;
+	if (!hwmgr->not_vf)
+		return false;
 
-	if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
+	if (!atomctrl_read_efuse(hwmgr, AVFS_EN_LSB, AVFS_EN_MSB,
 			mask, &efuse)) {
 		if (efuse)
 			return true;
@@ -989,11 +985,11 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 
 	threshold = clock * data->fast_watermark_threshold / 100;
 
-	data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+	data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
 		level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
-				hwmgr->display_config.min_core_set_clock_in_sr);
+				hwmgr->display_config->min_core_set_clock_in_sr);
 
 
 	/* Default to slow, highest DPM level will be
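fiji_is_hw_avfs_present() derives its efuse mask from the fuse field's bit positions. Worked out for the values in this file, where AVFS_EN_MSB == AVFS_EN_LSB:

	/* width = (MSB - LSB) + 1 = 1 bit, so mask = (1 << 1) - 1 = 0x1;
	 * a 4-bit field (MSB = LSB + 3) would give (1 << 4) - 1 = 0xF */
	uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;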
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index d4bb934e7334..415f691c3fa9 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -60,10 +60,7 @@
 
 #define ICELAND_SMC_SIZE 0x20000
 
-#define VOLTAGE_SCALE 4
 #define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
 #define MC_CG_ARB_FREQ_F1 0x0b
 #define VDDC_VDDCI_DELTA 200
 
@@ -932,7 +929,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 	graphic_level->PowerThrottle = 0;
 
 	data->display_timing.min_clock_in_sr =
-		hwmgr->display_config.min_core_set_clock_in_sr;
+		hwmgr->display_config->min_core_set_clock_in_sr;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SclkDeepSleep))
@@ -1236,7 +1233,6 @@ static int iceland_populate_single_memory_level(
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	int result = 0;
 	bool dll_state_on;
-	struct cgs_display_info info = {0};
 	uint32_t mclk_edc_wr_enable_threshold = 40000;
 	uint32_t mclk_edc_enable_threshold = 40000;
 	uint32_t mclk_strobe_mode_threshold = 40000;
@@ -1283,8 +1279,7 @@ static int iceland_populate_single_memory_level(
 	/* default set to low watermark. Highest level will be set to high later.*/
 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	data->display_timing.num_existing_displays = info.display_count;
+	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
 
 	/* stutter mode not support on iceland */
 
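As in the ci hunks above, iceland stops querying the display count through CGS on every call and instead reads the value the display core now caches in hwmgr. The shape of the conversion, side by side (sketch):

	/* old: per-call query through the CGS wrapper */
	struct cgs_display_info info = {0};
	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/* new: cached config filled in by the display side */
	data->display_timing.num_existing_displays =
			hwmgr->display_config->num_display;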
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 997a777dd35b..a8c6524f07e4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -52,8 +52,6 @@
 #include "dce/dce_10_0_sh_mask.h"
 
 #define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
 #define POWERTUNE_DEFAULT_SET_MAX 1
 #define VDDC_VDDCI_DELTA 200
 #define MC_CG_ARB_FREQ_F1 0x0b
@@ -295,25 +293,16 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
 
 	/* Only start SMC if SMC RAM is not running */
-	if (!(smu7_is_smc_ram_running(hwmgr)
-		|| cgs_is_virtualization_enabled(hwmgr->device))) {
+	if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
 		smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
 		smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
 
 		/* Check if SMU is running in protected mode */
-		if (smu_data->protected_mode == 0) {
+		if (smu_data->protected_mode == 0)
 			result = polaris10_start_smu_in_non_protection_mode(hwmgr);
-		} else {
+		else
 			result = polaris10_start_smu_in_protection_mode(hwmgr);
 
-			/* If failed, try with different security Key. */
-			if (result != 0) {
-				smu_data->smu7_data.security_hard_key ^= 1;
-				cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
-				result = polaris10_start_smu_in_protection_mode(hwmgr);
-			}
-		}
-
 		if (result != 0)
 			PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
 
@@ -951,11 +940,11 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 	level->DownHyst = data->current_profile_setting.sclk_down_hyst;
 	level->VoltageDownHyst = 0;
 	level->PowerThrottle = 0;
-	data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+	data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
 		level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
-				hwmgr->display_config.min_core_set_clock_in_sr);
+				hwmgr->display_config->min_core_set_clock_in_sr);
 
 	/* Default to slow, highest DPM level will be
 	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
@@ -1085,11 +1074,9 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	int result = 0;
-	struct cgs_display_info info = {0, 0, NULL};
 	uint32_t mclk_stutter_mode_threshold = 40000;
 	phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
 
 	if (hwmgr->od_enabled)
 		vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
@@ -1115,7 +1102,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
 	mem_level->StutterEnable = false;
 	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
-	data->display_timing.num_existing_displays = info.display_count;
+	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
 
 	if (mclk_stutter_mode_threshold &&
 		(clock <= mclk_stutter_mode_threshold) &&
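hwmgr->not_vf is true on bare metal and false for an SR-IOV virtual function, where the host already owns and boots the SMC. The recurring conversion in this series is therefore an inverted-sense replacement (sketch):

	/* old */ if (cgs_is_virtualization_enabled(hwmgr->device)) { ... }
	/* new */ if (!hwmgr->not_vf) { ... }

so polaris10_start_smu() only attempts to load and start SMU firmware when running natively and the SMC RAM is not already executing. Note also that the old "toggle security_hard_key and retry" fallback is gone: a protection-mode start failure is now reported immediately.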
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index bc53f2beda30..0a563f6fe9ea 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -23,7 +23,7 @@
 
 #include "smumgr.h"
 #include "smu10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 #include "smu10_smumgr.h"
 #include "ppatomctrl.h"
 #include "rv_ppsmc.h"
@@ -33,8 +33,6 @@
 #include "pp_debug.h"
 
 
-#define VOLTAGE_SCALE 4
-
 #define BUFFER_SIZE 80000
 #define MAX_STRING_SIZE 15
 #define BUFFER_SIZETWO 131072
@@ -49,48 +47,41 @@
 
 static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
 
 	phm_wait_for_register_unequal(hwmgr, reg,
 			0, MP1_C2PMSG_90__CONTENT_MASK);
 
-	return cgs_read_register(hwmgr->device, reg);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 }
 
 static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
 		uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
-	cgs_write_register(hwmgr->device, reg, msg);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
 
 	return 0;
 }
 
 static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
 {
-	uint32_t reg;
-
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	return cgs_read_register(hwmgr->device, reg);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 }
 
 static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	smu10_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
 	smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -104,17 +95,13 @@ static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 		uint16_t msg, uint32_t parameter)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	smu10_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-	cgs_write_register(hwmgr->device, reg, parameter);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
 
 	smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
 
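The smu10 conversion swaps the soc15_get_register_offset() plus cgs_{read,write}_register() pair for the standard SOC15 accessors, which is also why every converted function now declares a local adev: the macros expand against adev's per-IP base-address tables. Roughly (a sketch; the real macros live in soc15_common.h):

	struct amdgpu_device *adev = hwmgr->adev;
	/* offset lookup: hwip block name, instance, register name */
	uint32_t reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
	/* direct read/write without going through the CGS wrapper */
	uint32_t val = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);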
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 0399c10d2be0..64d33b775906 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -167,24 +167,25 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
 	int ret;
 
-	if (!smu7_is_smc_ram_running(hwmgr))
-		return -EINVAL;
-
-
 	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
 
 	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
 
-	if (ret != 1)
-		pr_info("\n failed to send pre message %x ret is %d \n", msg, ret);
+	if (ret == 0xFE)
+		pr_debug("last message was not supported\n");
+	else if (ret != 1)
+		pr_info("\n last message was failed ret is %d\n", ret);
 
+	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
 	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
 
 	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
 
 	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
 
-	if (ret != 1)
+	if (ret == 0xFE)
+		pr_debug("message %x was not supported\n", msg);
+	else if (ret != 1)
 		pr_info("\n failed to send message %x ret is %d \n", msg, ret);
 
 	return 0;
@@ -199,10 +200,6 @@ int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
 
 int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
 {
-	if (!smu7_is_smc_ram_running(hwmgr)) {
-		return -EINVAL;
-	}
-
 	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
 
 	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
@@ -231,16 +228,6 @@ int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr)
-{
-	if (!smu7_is_smc_ram_running(hwmgr))
-		return -EINVAL;
-
-	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
-	return 0;
-}
-
-
 enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
 {
 	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
@@ -375,7 +362,7 @@ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
 	entry->meta_data_addr_low = 0;
 
 	/* digest need be excluded out */
-	if (cgs_is_virtualization_enabled(hwmgr->device))
+	if (!hwmgr->not_vf)
 		info.image_size -= 20;
 	entry->data_size_byte = info.image_size;
 	entry->num_register_entries = 0;
@@ -409,7 +396,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 			0x0);
 
 	if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
-		if (!cgs_is_virtualization_enabled(hwmgr->device)) {
+		if (hwmgr->not_vf) {
 			smu7_send_msg_to_smc_with_parameter(hwmgr,
 					PPSMC_MSG_SMU_DRAM_ADDR_HI,
 					upper_32_bits(smu_data->smu_buffer.mc_addr));
@@ -467,7 +454,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
 			UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
 			"Failed to Get Firmware Entry.", return -EINVAL);
-	if (cgs_is_virtualization_enabled(hwmgr->device))
+	if (!hwmgr->not_vf)
 		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
 				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
 				"Failed to Get Firmware Entry.", return -EINVAL);
@@ -608,7 +595,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
 	smu_data->header = smu_data->header_buffer.kaddr;
 	smu_data->header_buffer.mc_addr = mc_addr;
 
-	if (cgs_is_virtualization_enabled(hwmgr->device))
+	if (!hwmgr->not_vf)
 		return 0;
 
 	smu_data->smu_buffer.data_size = 200*4096;
@@ -643,13 +630,12 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
 			&smu_data->header_buffer.mc_addr,
 			&smu_data->header_buffer.kaddr);
 
-	if (!cgs_is_virtualization_enabled(hwmgr->device))
+	if (hwmgr->not_vf)
 		amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
 				&smu_data->smu_buffer.mc_addr,
 				&smu_data->smu_buffer.kaddr);
 
 	kfree(hwmgr->smu_backend);
 	hwmgr->smu_backend = NULL;
-	cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
 	return 0;
 }
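smu7_send_msg_to_smc() now decodes the previous message's response before posting a new one. The convention it assumes (sketch; treating 0xFE as "unsupported" follows the pr_debug text above, the rest is the long-standing SMC response contract):

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
	if (ret == 0xFE)	/* firmware does not implement this message id */
		pr_debug("last message was not supported\n");
	else if (ret != 1)	/* 1 is the only success code */
		pr_info("\n last message was failed ret is %d\n", ret);

Demoting the unsupported case to pr_debug keeps optional messages from flooding the kernel log.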
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 126d300259ba..39c9bfda0ab4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -67,7 +67,6 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
 int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
 						uint16_t msg, uint32_t parameter);
 int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
-int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr);
 
 enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
 int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c28b60aae5f8..ee236dfbf1d6 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -41,6 +41,7 @@ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega12_smc.bin");
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index b51d7468c3e7..782b19fc2e70 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -55,11 +55,7 @@
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
 
-
-#define VOLTAGE_SCALE 4
 #define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
 #define MC_CG_ARB_FREQ_F1 0x0b
 #define VDDC_VDDCI_DELTA 200
 
@@ -199,8 +195,7 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
 	int result;
 
 	/* Only start SMC if SMC RAM is not running */
-	if (!(smu7_is_smc_ram_running(hwmgr) ||
-		cgs_is_virtualization_enabled(hwmgr->device))) {
+	if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
 		/*Check if SMU is running in protected mode*/
 		if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 					SMU_FIRMWARE, SMU_MODE)) {
@@ -651,7 +646,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 	graphic_level->PowerThrottle = 0;
 
 	data->display_timing.min_clock_in_sr =
-		hwmgr->display_config.min_core_set_clock_in_sr;
+		hwmgr->display_config->min_core_set_clock_in_sr;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SclkDeepSleep))
@@ -957,18 +952,17 @@ static int tonga_populate_single_memory_level(
 		SMU72_Discrete_MemoryLevel *memory_level
 		)
 {
-	uint32_t mvdd = 0;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	struct phm_ppt_v1_information *pptable_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	int result = 0;
-	bool dll_state_on;
-	struct cgs_display_info info = {0};
 	uint32_t mclk_edc_wr_enable_threshold = 40000;
 	uint32_t mclk_stutter_mode_threshold = 30000;
 	uint32_t mclk_edc_enable_threshold = 40000;
 	uint32_t mclk_strobe_mode_threshold = 40000;
 	phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
+	int result = 0;
+	bool dll_state_on;
+	uint32_t mvdd = 0;
 
 	if (hwmgr->od_enabled)
 		vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
@@ -1009,8 +1003,7 @@ static int tonga_populate_single_memory_level(
 	/* default set to low watermark. Highest level will be set to high later.*/
 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	data->display_timing.num_existing_displays = info.display_count;
+	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
 
 	if ((mclk_stutter_mode_threshold != 0) &&
 		(memory_clock <= mclk_stutter_mode_threshold) &&
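Besides switching to the cached display count, the tonga hunk above only reorders local declarations so the tunables sit together; behavior is unchanged. The display count feeds the stutter-mode decision that the trailing context lines begin: self-refresh stutter is only enabled when the memory clock sits at or below mclk_stutter_mode_threshold and few displays are active. Roughly (a sketch; the full condition in the surrounding function also checks UVD state and a DCE stutter-control bit):

	if (mclk_stutter_mode_threshold != 0 &&
	    memory_clock <= mclk_stutter_mode_threshold &&
	    data->display_timing.num_existing_displays <= 2 &&
	    data->display_timing.num_existing_displays != 0)
		memory_level->StutterEnable = true;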
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 4aafb043bcb0..e84669c448a3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -23,7 +23,7 @@
 
 #include "smumgr.h"
 #include "vega10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 #include "vega10_smumgr.h"
 #include "vega10_hwmgr.h"
 #include "vega10_ppsmc.h"
@@ -35,8 +35,6 @@
 #define AVFS_EN_MSB 1568
 #define AVFS_EN_LSB 1568
 
-#define VOLTAGE_SCALE 4
-
 /* Microcode file is stored in this buffer */
 #define BUFFER_SIZE 80000
 #define MAX_STRING_SIZE 15
@@ -54,18 +52,13 @@
 
 static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
 {
-	uint32_t mp1_fw_flags, reg;
-
-	reg = soc15_get_register_offset(NBIF_HWID, 0,
-			mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
+	struct amdgpu_device *adev = hwmgr->adev;
+	uint32_t mp1_fw_flags;
 
-	cgs_write_register(hwmgr->device, reg,
+	WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
 			(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
 
-	reg = soc15_get_register_offset(NBIF_HWID, 0,
-			mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
-
-	mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
+	mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
 
 	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
 		return true;
@@ -81,11 +74,11 @@ static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
  */
 static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 	uint32_t ret;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
 
 	ret = phm_wait_for_register_unequal(hwmgr, reg,
 			0, MP1_C2PMSG_90__CONTENT_MASK);
@@ -93,7 +86,7 @@ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
 	if (ret)
 		pr_err("No response from smu\n");
 
-	return cgs_read_register(hwmgr->device, reg);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 }
 
 /*
@@ -105,11 +98,9 @@ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
 static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
 		uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
-	cgs_write_register(hwmgr->device, reg, msg);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
 
 	return 0;
 }
@@ -122,14 +113,12 @@ static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
  */
 static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t ret;
 
 	vega10_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
 	vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -150,18 +139,14 @@ static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 		uint16_t msg, uint32_t parameter)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t ret;
 
 	vega10_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-	cgs_write_register(hwmgr->device, reg, parameter);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
 
 	vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -174,12 +159,9 @@ static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 
 static int vega10_get_argument(struct pp_hwmgr *hwmgr)
 {
-	uint32_t reg;
-
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	return cgs_read_register(hwmgr->device, reg);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 }
 
 static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
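vega10_is_smc_ram_running() (first hunk above) uses the NBIF PCIE_INDEX2/PCIE_DATA2 pair as an indirect window into SMN space: the address of the MP1 firmware-flags word goes into the index register, and the value comes back through the data register. Sketch of the probe:

	WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
		     MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
	mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
	/* once the SMU has booted, it sets INTERRUPTS_ENABLED in its flags */
	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
		return true;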
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 651a3f28734b..7d9b40e8b1bf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -23,7 +23,7 @@
 
 #include "smumgr.h"
 #include "vega12_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 #include "vega12_smumgr.h"
 #include "vega12_ppsmc.h"
 #include "vega12/smu9_driver_if.h"
@@ -44,18 +44,13 @@
 
 static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
 {
-	uint32_t mp1_fw_flags, reg;
+	struct amdgpu_device *adev = hwmgr->adev;
+	uint32_t mp1_fw_flags;
 
-	reg = soc15_get_register_offset(NBIF_HWID, 0,
-			mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
-
-	cgs_write_register(hwmgr->device, reg,
+	WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
 			(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
 
-	reg = soc15_get_register_offset(NBIF_HWID, 0,
-			mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
-
-	mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
+	mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
 
 	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
 			MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
@@ -72,15 +67,15 @@ static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
  */
 static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
 
 	phm_wait_for_register_unequal(hwmgr, reg,
 			0, MP1_C2PMSG_90__CONTENT_MASK);
 
-	return cgs_read_register(hwmgr->device, reg);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 }
 
 /*
@@ -92,11 +87,9 @@ static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
 int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
 		uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
-	cgs_write_register(hwmgr->device, reg, msg);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
 
 	return 0;
 }
@@ -109,13 +102,11 @@ int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
  */
 int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	vega12_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
 	vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -135,17 +126,13 @@ int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 		uint16_t msg, uint32_t parameter)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	vega12_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-	cgs_write_register(hwmgr->device, reg, parameter);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
 
 	vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -166,11 +153,9 @@ int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 int vega12_send_msg_to_smc_with_parameter_without_waiting(
 		struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
-	cgs_write_register(hwmgr->device, reg, parameter);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter);
 
 	return vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
 }
@@ -183,12 +168,9 @@ int vega12_send_msg_to_smc_with_parameter_without_waiting(
  */
 int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
 {
-	uint32_t reg;
-
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	*arg = cgs_read_register(hwmgr->device, reg);
+	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 
 	return 0;
 }
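A typical round trip with these helpers, as hwmgr code elsewhere in powerplay uses them (illustrative only; msg_id and param are placeholders):

	uint32_t value;

	vega12_send_msg_to_smc_with_parameter(hwmgr, msg_id, param);
	vega12_read_arg_from_smc(hwmgr, &value);	/* reply lands in C2PMSG_82 */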
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
new file mode 100644
index 000000000000..c9a563399330
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -0,0 +1,2382 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "pp_debug.h"
+#include "smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "vegam_smumgr.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "ppatomctrl.h"
+#include "cgs_common.h"
+#include "smu7_ppsmc.h"
+
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+#define PPVEGAM_TARGETACTIVITY_DFLT 50
+
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VDDC_VDDCI_DELTA 200
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+#define STRAP_ASIC_RO_LSB 2168
+#define STRAP_ASIC_RO_MSB 2175
+
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
+#define PPSMC_MSG_EnableModeSwitchRLCNotification ((uint16_t) 0x305)
+
+static const struct vegam_pt_defaults
+vegam_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+	/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+	{ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+	{VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
+	{VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+	{VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
+	{VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
+	{VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
+	{VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
+	{VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
+	{VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
+
+static int vegam_smu_init(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data;
+
+	smu_data = kzalloc(sizeof(struct vegam_smumgr), GFP_KERNEL);
+	if (smu_data == NULL)
+		return -ENOMEM;
+
+	hwmgr->smu_backend = smu_data;
+
+	if (smu7_init(hwmgr)) {
+		kfree(smu_data);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vegam_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+
+	/* Wait for smc boot up */
+	/* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
+
+	/* Assert reset */
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+
+	result = smu7_upload_smu_firmware_image(hwmgr);
+	if (result != 0)
+		return result;
+
+	/* Clear status */
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+	/* De-assert reset */
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+
+	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
+
+
+	/* Call Test SMU message with 0x20000 offset to trigger SMU start */
+	smu7_send_msg_to_smc_offset(hwmgr);
+
+	/* Wait done bit to be set */
+	/* Check pass/failed indicator */
+
+	PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
+
+	if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+						SMU_STATUS, SMU_PASS))
+		PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+	/* Wait for firmware to initialize */
+	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+
+	return result;
+}
+
+static int vegam_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+
+	/* wait for smc boot up */
+	PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
+
+	/* Clear firmware interrupt enable flag */
+	/* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+				ixFIRMWARE_FLAGS, 0);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL,
+					rst_reg, 1);
+
+	result = smu7_upload_smu_firmware_image(hwmgr);
+	if (result != 0)
+		return result;
+
+	/* Set smc instruct start point at 0x0 */
+	smu7_program_jump_on_start(hwmgr);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+	/* Wait for firmware to initialize */
+
+	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
+					FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+
+	return result;
+}
+
+static int vegam_start_smu(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+	/* Only start SMC if SMC RAM is not running */
+	if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
+		smu_data->protected_mode = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+				CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
+		smu_data->smu7_data.security_hard_key = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(
+				hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+
+		/* Check if SMU is running in protected mode */
+		if (smu_data->protected_mode == 0)
+			result = vegam_start_smu_in_non_protection_mode(hwmgr);
+		else
+			result = vegam_start_smu_in_protection_mode(hwmgr);
+
+		if (result != 0)
+			PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
+	}
+
+	/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
+	smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU75_Firmware_Header, SoftRegisters),
+			&(smu_data->smu7_data.soft_regs_start),
+			0x40000);
+
+	result = smu7_request_smu_load_fw(hwmgr);
+
+	return result;
+}
+
+static int vegam_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t tmp;
+	int result;
+	bool error = false;
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, DpmTable),
+			&tmp, SMC_RAM_END);
+
+	if (0 == result)
+		smu_data->smu7_data.dpm_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, SoftRegisters),
+			&tmp, SMC_RAM_END);
+
+	if (!result) {
+		data->soft_regs_start = tmp;
+		smu_data->smu7_data.soft_regs_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, mcRegisterTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.mc_reg_table_start = tmp;
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, FanTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.fan_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, mcArbDramTimingTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.arb_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, Version),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		hwmgr->microcode_version_info.SMC = tmp;
+
+	error |= (0 != result);
+
+	return error ? -1 : 0;
+}
+
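vegam_process_firmware_header() locates each driver-visible table by reading a dword out of the SMU75_Firmware_Header that the firmware image places at a fixed SRAM address. The addressing pattern (sketch):

	/* pointer to the DPM table, stored as a dword inside the header */
	smu7_read_smc_sram_dword(hwmgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU75_Firmware_Header, DpmTable),
			&tmp, SMC_RAM_END);	/* SMC_RAM_END bounds the access */

Failed reads are OR-ed into the accumulated error flag, so one bad pointer rejects the whole header.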
+static bool vegam_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+			? true : false;
+}
+
+static uint32_t vegam_get_mac_definition(uint32_t value)
+{
+	switch (value) {
+	case SMU_MAX_LEVELS_GRAPHICS:
+		return SMU75_MAX_LEVELS_GRAPHICS;
+	case SMU_MAX_LEVELS_MEMORY:
+		return SMU75_MAX_LEVELS_MEMORY;
+	case SMU_MAX_LEVELS_LINK:
+		return SMU75_MAX_LEVELS_LINK;
+	case SMU_MAX_ENTRIES_SMIO:
+		return SMU75_MAX_ENTRIES_SMIO;
+	case SMU_MAX_LEVELS_VDDC:
+		return SMU75_MAX_LEVELS_VDDC;
+	case SMU_MAX_LEVELS_VDDGFX:
+		return SMU75_MAX_LEVELS_VDDGFX;
+	case SMU_MAX_LEVELS_VDDCI:
+		return SMU75_MAX_LEVELS_VDDCI;
+	case SMU_MAX_LEVELS_MVDD:
+		return SMU75_MAX_LEVELS_MVDD;
+	case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+		return SMU7_UVD_MCLK_HANDSHAKE_DISABLE |
+				SMU7_VCE_MCLK_HANDSHAKE_DISABLE;
+	}
+
+	pr_warn("can't get the mac of %x\n", value);
+	return 0;
+}
+
+static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	smu_data->smc_state_table.UvdBootLevel = 0;
+	if (table_info->mm_dep_table->count > 0)
+		smu_data->smc_state_table.UvdBootLevel =
+				(uint8_t) (table_info->mm_dep_table->count - 1);
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU75_Discrete_DpmTable,
+						UvdBootLevel);
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0x00FFFFFF;
+	mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_UVDDPM) ||
+		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_UVDDPM_SetEnabledMask,
+				(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+	return 0;
+}
+
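The boot-level updates in this file are byte-wide fields inside dwords of SMC SRAM, so each one is a dword-aligned read-modify-write: the "/= 4; *= 4" pair rounds the byte offset down to its containing dword, and the mask/shift pick out the byte (UvdBootLevel is byte 3 of its dword, hence 0x00FFFFFF and << 24). An equivalent sketch:

	offset &= ~3;	/* what "offset /= 4; offset *= 4;" achieves */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
	value = (value & 0x00FFFFFF) | (boot_level << 24);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, value);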
364static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
365{
366 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
367 uint32_t mm_boot_level_offset, mm_boot_level_value;
368 struct phm_ppt_v1_information *table_info =
369 (struct phm_ppt_v1_information *)(hwmgr->pptable);
370
371 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
372 PHM_PlatformCaps_StablePState))
373 smu_data->smc_state_table.VceBootLevel =
374 (uint8_t) (table_info->mm_dep_table->count - 1);
375 else
376 smu_data->smc_state_table.VceBootLevel = 0;
377
378 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
379 offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
380 mm_boot_level_offset /= 4;
381 mm_boot_level_offset *= 4;
382 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
383 CGS_IND_REG__SMC, mm_boot_level_offset);
384 mm_boot_level_value &= 0xFF00FFFF;
385 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
386 cgs_write_ind_register(hwmgr->device,
387 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
388
389 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
390 smum_send_msg_to_smc_with_parameter(hwmgr,
391 PPSMC_MSG_VCEDPM_SetEnabledMask,
392 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
393 return 0;
394}
395
396static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
397{
398 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
399 uint32_t mm_boot_level_offset, mm_boot_level_value;
400
401
402 smu_data->smc_state_table.SamuBootLevel = 0;
403 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
404 offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
405
406 mm_boot_level_offset /= 4;
407 mm_boot_level_offset *= 4;
408 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
409 CGS_IND_REG__SMC, mm_boot_level_offset);
410 mm_boot_level_value &= 0xFFFFFF00;
411 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
412 cgs_write_ind_register(hwmgr->device,
413 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
414
415 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
416 PHM_PlatformCaps_StablePState))
417 smum_send_msg_to_smc_with_parameter(hwmgr,
418 PPSMC_MSG_SAMUDPM_SetEnabledMask,
419 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
420 return 0;
421}
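/*
 * The three boot-level updates above (UVD, VCE, SAMU) all patch a
 * single byte inside a 32-bit word of the SMC DPM table: the byte
 * offset is rounded down to dword alignment (/4, *4) and the byte is
 * swapped in with a read-modify-write. A minimal, hypothetical sketch
 * of that byte-lane update (set_byte_in_dword() is illustrative only,
 * not a driver helper):
 */
static uint32_t set_byte_in_dword(uint32_t dword, uint8_t byte,
		unsigned int lane)
{
	uint32_t shift = lane * 8;	/* lane 0..3 -> bit 0, 8, 16, 24 */
	uint32_t mask = 0xFFu << shift;

	/* UvdBootLevel uses lane 3 (clear mask 0x00FFFFFF), VceBootLevel
	 * lane 2 (0xFF00FFFF) and SamuBootLevel lane 0 (0xFFFFFF00). */
	return (dword & ~mask) | ((uint32_t)byte << shift);
}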
422
423
424static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
425{
426 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
427 struct phm_ppt_v1_information *table_info =
428 (struct phm_ppt_v1_information *)(hwmgr->pptable);
429 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
430 int max_entry, i;
431
432 max_entry = (SMU75_MAX_LEVELS_LINK < pcie_table->count) ?
433 SMU75_MAX_LEVELS_LINK :
434 pcie_table->count;
435 /* Setup BIF_SCLK levels */
436 for (i = 0; i < max_entry; i++)
437 smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
438 return 0;
439}
440
441static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
442{
443 switch (type) {
444 case SMU_UVD_TABLE:
445 vegam_update_uvd_smc_table(hwmgr);
446 break;
447 case SMU_VCE_TABLE:
448 vegam_update_vce_smc_table(hwmgr);
449 break;
450 case SMU_SAMU_TABLE:
451 vegam_update_samu_smc_table(hwmgr);
452 break;
453 case SMU_BIF_TABLE:
454 vegam_update_bif_smc_table(hwmgr);
455 break;
456 default:
457 break;
458 }
459 return 0;
460}
461
462static void vegam_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
463{
464 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
465 struct phm_ppt_v1_information *table_info =
466 (struct phm_ppt_v1_information *)(hwmgr->pptable);
467
468 if (table_info &&
469 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
470 table_info->cac_dtp_table->usPowerTuneDataSetID)
471 smu_data->power_tune_defaults =
472 &vegam_power_tune_data_set_array
473 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
474 else
475 smu_data->power_tune_defaults = &vegam_power_tune_data_set_array[0];
476
477}
478
479static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
480 SMU75_Discrete_DpmTable *table)
481{
482 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
483 uint32_t count, level;
484
485 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
486 count = data->mvdd_voltage_table.count;
487 if (count > SMU_MAX_SMIO_LEVELS)
488 count = SMU_MAX_SMIO_LEVELS;
489 for (level = 0; level < count; level++) {
490 table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
491				data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
492			/* Index into DpmTable.Smio. Drive bits from the Smio entry to get this voltage level. */
493 table->SmioTable2.Pattern[level].Smio =
494 (uint8_t) level;
495 table->Smio[level] |=
496 data->mvdd_voltage_table.entries[level].smio_low;
497 }
498 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
499
500 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
501 }
502
503 return 0;
504}
505
506static int vegam_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
507 struct SMU75_Discrete_DpmTable *table)
508{
509 uint32_t count, level;
510 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
511
512 count = data->vddci_voltage_table.count;
513
514 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
515 if (count > SMU_MAX_SMIO_LEVELS)
516 count = SMU_MAX_SMIO_LEVELS;
517 for (level = 0; level < count; ++level) {
518 table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
519 data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
520 table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
521
522 table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
523 }
524 }
525
526 table->SmioMask1 = data->vddci_voltage_table.mask_low;
527
528 return 0;
529}
530
531static int vegam_populate_cac_table(struct pp_hwmgr *hwmgr,
532 struct SMU75_Discrete_DpmTable *table)
533{
534 uint32_t count;
535 uint8_t index;
536 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
537 struct phm_ppt_v1_information *table_info =
538 (struct phm_ppt_v1_information *)(hwmgr->pptable);
539 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
540 table_info->vddc_lookup_table;
541	/* The lookup table is already byte-swapped, so to use a value
542	 * from it we need to swap it back.
543	 * We populate the vddc CAC data into the BapmVddc table
544	 * for both split and merged mode.
545	 */
546 for (count = 0; count < lookup_table->count; count++) {
547 index = phm_get_voltage_index(lookup_table,
548 data->vddc_voltage_table.entries[count].value);
549 table->BapmVddcVidLoSidd[count] =
550 convert_to_vid(lookup_table->entries[index].us_cac_low);
551 table->BapmVddcVidHiSidd[count] =
552 convert_to_vid(lookup_table->entries[index].us_cac_mid);
553 table->BapmVddcVidHiSidd2[count] =
554 convert_to_vid(lookup_table->entries[index].us_cac_high);
555 }
556
557 return 0;
558}
559
560static int vegam_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
561 struct SMU75_Discrete_DpmTable *table)
562{
563 vegam_populate_smc_vddci_table(hwmgr, table);
564 vegam_populate_smc_mvdd_table(hwmgr, table);
565 vegam_populate_cac_table(hwmgr, table);
566
567 return 0;
568}
569
570static int vegam_populate_ulv_level(struct pp_hwmgr *hwmgr,
571 struct SMU75_Discrete_Ulv *state)
572{
573 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
574 struct phm_ppt_v1_information *table_info =
575 (struct phm_ppt_v1_information *)(hwmgr->pptable);
576
577 state->CcPwrDynRm = 0;
578 state->CcPwrDynRm1 = 0;
579
580 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
581 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
582 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
583
584 state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
585
586 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
587 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
588 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
589
590 return 0;
591}
592
593static int vegam_populate_ulv_state(struct pp_hwmgr *hwmgr,
594 struct SMU75_Discrete_DpmTable *table)
595{
596 return vegam_populate_ulv_level(hwmgr, &table->Ulv);
597}
598
599static int vegam_populate_smc_link_level(struct pp_hwmgr *hwmgr,
600 struct SMU75_Discrete_DpmTable *table)
601{
602 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
603 struct vegam_smumgr *smu_data =
604 (struct vegam_smumgr *)(hwmgr->smu_backend);
605 struct smu7_dpm_table *dpm_table = &data->dpm_table;
606 int i;
607
608 /* Index (dpm_table->pcie_speed_table.count)
609 * is reserved for PCIE boot level. */
610 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
611 table->LinkLevel[i].PcieGenSpeed =
612 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
613 table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
614 dpm_table->pcie_speed_table.dpm_levels[i].param1);
615 table->LinkLevel[i].EnabledForActivity = 1;
616 table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
617 table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
618 table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
619 }
620
621 smu_data->smc_state_table.LinkLevelCount =
622 (uint8_t)dpm_table->pcie_speed_table.count;
623
624/* TODO: move to hwmgr */
625 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
626 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
627
628 return 0;
629}
630
631static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
632 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
633 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
634{
635 uint32_t i;
636 uint16_t vddci;
637 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
638
639 *voltage = *mvdd = 0;
640
641	/* the clock-voltage dependency table is empty */
642 if (dep_table->count == 0)
643 return -EINVAL;
644
645 for (i = 0; i < dep_table->count; i++) {
646		/* find the first sclk at least as big as the request */
647 if (dep_table->entries[i].clk >= clock) {
648 *voltage |= (dep_table->entries[i].vddc *
649 VOLTAGE_SCALE) << VDDC_SHIFT;
650 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
651 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
652 VOLTAGE_SCALE) << VDDCI_SHIFT;
653 else if (dep_table->entries[i].vddci)
654 *voltage |= (dep_table->entries[i].vddci *
655 VOLTAGE_SCALE) << VDDCI_SHIFT;
656 else {
657 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
658 (dep_table->entries[i].vddc -
659 (uint16_t)VDDC_VDDCI_DELTA));
660 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
661 }
662
663 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
664 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
665 VOLTAGE_SCALE;
666 else if (dep_table->entries[i].mvdd)
667 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
668 VOLTAGE_SCALE;
669
670 *voltage |= 1 << PHASES_SHIFT;
671 return 0;
672 }
673 }
674
675	/* sclk is bigger than the max sclk in the dependency table */
676 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
677 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
678 (dep_table->entries[i - 1].vddc -
679 (uint16_t)VDDC_VDDCI_DELTA));
680
681 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
682 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
683 VOLTAGE_SCALE) << VDDCI_SHIFT;
684 else if (dep_table->entries[i - 1].vddci)
685 *voltage |= (dep_table->entries[i - 1].vddci *
686				VOLTAGE_SCALE) << VDDCI_SHIFT;
687 else
688 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
689
690 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
691 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
692	else if (dep_table->entries[i - 1].mvdd)
693 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
694
695 return 0;
696}
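/*
 * vegam_get_dependency_volt_by_clk() above packs VDDC, VDDCI and a
 * phase count into one 32-bit SMU_VoltageLevel word, with the voltages
 * premultiplied by VOLTAGE_SCALE. A minimal sketch of that packing;
 * the shift parameters stand in for the driver's VDDC_SHIFT,
 * VDDCI_SHIFT and PHASES_SHIFT constants:
 */
static uint32_t pack_voltage_level(uint16_t vddc, uint16_t vddci,
		uint8_t phases, unsigned int vddc_shift,
		unsigned int vddci_shift, unsigned int phases_shift)
{
	uint32_t level = 0;

	level |= ((uint32_t)vddc * VOLTAGE_SCALE) << vddc_shift;
	level |= ((uint32_t)vddci * VOLTAGE_SCALE) << vddci_shift;
	level |= (uint32_t)phases << phases_shift;

	return level;
}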
697
698static void vegam_get_sclk_range_table(struct pp_hwmgr *hwmgr,
699 SMU75_Discrete_DpmTable *table)
700{
701 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
702 uint32_t i, ref_clk;
703
704 struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
705
706 ref_clk = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
707
708 if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
709 for (i = 0; i < NUM_SCLK_RANGE; i++) {
710 table->SclkFcwRangeTable[i].vco_setting =
711 range_table_from_vbios.entry[i].ucVco_setting;
712 table->SclkFcwRangeTable[i].postdiv =
713 range_table_from_vbios.entry[i].ucPostdiv;
714 table->SclkFcwRangeTable[i].fcw_pcc =
715 range_table_from_vbios.entry[i].usFcw_pcc;
716
717 table->SclkFcwRangeTable[i].fcw_trans_upper =
718 range_table_from_vbios.entry[i].usFcw_trans_upper;
719 table->SclkFcwRangeTable[i].fcw_trans_lower =
720 range_table_from_vbios.entry[i].usRcw_trans_lower;
721
722 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
723 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
724 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
725 }
726 return;
727 }
728
729 for (i = 0; i < NUM_SCLK_RANGE; i++) {
730 smu_data->range_table[i].trans_lower_frequency =
731 (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
732 smu_data->range_table[i].trans_upper_frequency =
733 (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
734
735 table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
736 table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
737 table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
738
739 table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
740 table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
741
742 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
743 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
744 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
745 }
746}
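/*
 * In the fallback path above, each range boundary comes from the
 * hard-coded Range_Table: the transition frequency equals the
 * reference clock times the frequency control word, divided by
 * 2^postdiv. A minimal sketch of that relation (fcw_to_frequency() is
 * illustrative only):
 */
static uint32_t fcw_to_frequency(uint32_t ref_clk, uint16_t fcw,
		uint8_t postdiv)
{
	/* matches (ref_clk * Range_Table[i].fcw_trans_*) >> postdiv */
	return (uint32_t)(((uint64_t)ref_clk * fcw) >> postdiv);
}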
747
748static int vegam_calculate_sclk_params(struct pp_hwmgr *hwmgr,
749 uint32_t clock, SMU_SclkSetting *sclk_setting)
750{
751 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
752 const SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
753 struct pp_atomctrl_clock_dividers_ai dividers;
754 uint32_t ref_clock;
755 uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
756 uint8_t i;
757 int result;
758 uint64_t temp;
759
760 sclk_setting->SclkFrequency = clock;
761 /* get the engine clock dividers for this clock value */
762 result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
763 if (result == 0) {
764 sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
765 sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
766 sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
767 sclk_setting->PllRange = dividers.ucSclkPllRange;
768 sclk_setting->Sclk_slew_rate = 0x400;
769 sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
770 sclk_setting->Pcc_down_slew_rate = 0xffff;
771 sclk_setting->SSc_En = dividers.ucSscEnable;
772 sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
773 sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
774 sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
775 return result;
776 }
777
778 ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
779
780 for (i = 0; i < NUM_SCLK_RANGE; i++) {
781 if (clock > smu_data->range_table[i].trans_lower_frequency
782 && clock <= smu_data->range_table[i].trans_upper_frequency) {
783 sclk_setting->PllRange = i;
784 break;
785 }
786 }
787
788 sclk_setting->Fcw_int = (uint16_t)
789 ((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
790 ref_clock);
791 temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
792 temp <<= 0x10;
793 do_div(temp, ref_clock);
794 sclk_setting->Fcw_frac = temp & 0xffff;
795
796 pcc_target_percent = 10; /* Hardcode 10% for now. */
797 pcc_target_freq = clock - (clock * pcc_target_percent / 100);
798 sclk_setting->Pcc_fcw_int = (uint16_t)
799 ((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
800 ref_clock);
801
802 ss_target_percent = 2; /* Hardcode 2% for now. */
803 sclk_setting->SSc_En = 0;
804 if (ss_target_percent) {
805 sclk_setting->SSc_En = 1;
806 ss_target_freq = clock - (clock * ss_target_percent / 100);
807 sclk_setting->Fcw1_int = (uint16_t)
808 ((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
809 ref_clock);
810 temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
811 temp <<= 0x10;
812 do_div(temp, ref_clock);
813 sclk_setting->Fcw1_frac = temp & 0xffff;
814 }
815
816 return 0;
817}
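/*
 * The manual path above is a 16.16 fixed-point division:
 * fcw = (clock << postdiv) / ref_clock, with the integer part stored
 * in Fcw_int and the low 16 fractional bits in Fcw_frac (do_div() is
 * the kernel's 64-by-32 division helper). A minimal sketch of the same
 * split, with illustrative names:
 */
static void split_fcw(uint32_t clock, uint8_t postdiv, uint32_t ref_clock,
		uint16_t *fcw_int, uint16_t *fcw_frac)
{
	uint64_t scaled = (uint64_t)clock << postdiv;

	*fcw_int = (uint16_t)(scaled / ref_clock);
	/* scale by 2^16 before dividing to keep 16 fractional bits */
	*fcw_frac = (uint16_t)(((scaled << 16) / ref_clock) & 0xffff);
}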
818
819static uint8_t vegam_get_sleep_divider_id_from_clock(uint32_t clock,
820 uint32_t clock_insr)
821{
822 uint8_t i;
823 uint32_t temp;
824 uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
825
826 PP_ASSERT_WITH_CODE((clock >= min),
827 "Engine clock can't satisfy stutter requirement!",
828 return 0);
829 for (i = 31; ; i--) {
830 temp = clock / (i + 1);
831
832 if (temp >= min || i == 0)
833 break;
834 }
835 return i;
836}
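/*
 * The loop above counts down from a divider of 32 and returns the
 * largest index whose divided clock still meets the minimum. A worked
 * example with hypothetical numbers: clock = 60000 and min = 5000 give
 * 60000 / 13 < 5000 but 60000 / 12 == 5000, so the loop breaks at
 * i = 11 and the deep-sleep divider is 12. A standalone sketch:
 */
static uint8_t largest_divider_id(uint32_t clock, uint32_t min)
{
	uint8_t i;

	for (i = 31; ; i--) {
		if (clock / (i + 1) >= min || i == 0)
			break;
	}
	return i;	/* divider used is i + 1 */
}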
837
838static int vegam_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
839 uint32_t clock, struct SMU75_Discrete_GraphicsLevel *level)
840{
841 int result;
842 /* PP_Clocks minClocks; */
843 uint32_t mvdd;
844 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
845 struct phm_ppt_v1_information *table_info =
846 (struct phm_ppt_v1_information *)(hwmgr->pptable);
847 SMU_SclkSetting curr_sclk_setting = { 0 };
848
849 result = vegam_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
850
851 /* populate graphics levels */
852 result = vegam_get_dependency_volt_by_clk(hwmgr,
853 table_info->vdd_dep_on_sclk, clock,
854 &level->MinVoltage, &mvdd);
855
856 PP_ASSERT_WITH_CODE((0 == result),
857			"cannot find VDDC voltage value for "
858 "VDDC engine clock dependency table",
859 return result);
860 level->ActivityLevel = (uint16_t)(SclkDPMTuning_VEGAM >> DPMTuning_Activity_Shift);
861
862 level->CcPwrDynRm = 0;
863 level->CcPwrDynRm1 = 0;
864 level->EnabledForActivity = 0;
865 level->EnabledForThrottle = 1;
866 level->VoltageDownHyst = 0;
867 level->PowerThrottle = 0;
868 data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
869
870 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
871 level->DeepSleepDivId = vegam_get_sleep_divider_id_from_clock(clock,
872 hwmgr->display_config->min_core_set_clock_in_sr);
873
874 level->SclkSetting = curr_sclk_setting;
875
876 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
877 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
878 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
879 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
880 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
881 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
882 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
883 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
884 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
885 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
886 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
887 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
888 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
889 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
890 return 0;
891}
892
893static int vegam_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
894{
895 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
896 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
897 struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
898 struct phm_ppt_v1_information *table_info =
899 (struct phm_ppt_v1_information *)(hwmgr->pptable);
900 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
901 uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
902 int result = 0;
903 uint32_t array = smu_data->smu7_data.dpm_table_start +
904 offsetof(SMU75_Discrete_DpmTable, GraphicsLevel);
905 uint32_t array_size = sizeof(struct SMU75_Discrete_GraphicsLevel) *
906 SMU75_MAX_LEVELS_GRAPHICS;
907 struct SMU75_Discrete_GraphicsLevel *levels =
908 smu_data->smc_state_table.GraphicsLevel;
909 uint32_t i, max_entry;
910	uint8_t highest_pcie_level_enabled = 0,
911 lowest_pcie_level_enabled = 0,
912 mid_pcie_level_enabled = 0,
913 count = 0;
914
915 vegam_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
916
917 for (i = 0; i < dpm_table->sclk_table.count; i++) {
918
919 result = vegam_populate_single_graphic_level(hwmgr,
920 dpm_table->sclk_table.dpm_levels[i].value,
921 &(smu_data->smc_state_table.GraphicsLevel[i]));
922 if (result)
923 return result;
924
925 levels[i].UpHyst = (uint8_t)
926 (SclkDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift);
927 levels[i].DownHyst = (uint8_t)
928 (SclkDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift);
929 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
930 if (i > 1)
931 levels[i].DeepSleepDivId = 0;
932 }
933 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
934 PHM_PlatformCaps_SPLLShutdownSupport))
935 smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
936
937 smu_data->smc_state_table.GraphicsDpmLevelCount =
938 (uint8_t)dpm_table->sclk_table.count;
939 hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
940 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
941
942 for (i = 0; i < dpm_table->sclk_table.count; i++)
943 levels[i].EnabledForActivity =
944 (hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask >> i) & 0x1;
945
946 if (pcie_table != NULL) {
947 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
948 "There must be 1 or more PCIE levels defined in PPTable.",
949 return -EINVAL);
950 max_entry = pcie_entry_cnt - 1;
951 for (i = 0; i < dpm_table->sclk_table.count; i++)
952 levels[i].pcieDpmLevel =
953 (uint8_t) ((i < max_entry) ? i : max_entry);
954 } else {
955 while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
956 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
957				(1 << (highest_pcie_level_enabled + 1))) != 0))
958			highest_pcie_level_enabled++;
959
960 while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
961 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
962 (1 << lowest_pcie_level_enabled)) == 0))
963 lowest_pcie_level_enabled++;
964
965		while ((count < highest_pcie_level_enabled) &&
966 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
967 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
968 count++;
969
970 mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
971				highest_pcie_level_enabled ?
972				(lowest_pcie_level_enabled + 1 + count) :
973				highest_pcie_level_enabled;
974
975		/* set pcieDpmLevel to highest_pcie_level_enabled */
976 for (i = 2; i < dpm_table->sclk_table.count; i++)
977			levels[i].pcieDpmLevel = highest_pcie_level_enabled;
978
979 /* set pcieDpmLevel to lowest_pcie_level_enabled */
980 levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
981
982 /* set pcieDpmLevel to mid_pcie_level_enabled */
983 levels[1].pcieDpmLevel = mid_pcie_level_enabled;
984 }
985	/* the level count is sent to the SMC once at table init and never changes */
986 result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
987 (uint32_t)array_size, SMC_RAM_END);
988
989 return result;
990}
991
992static int vegam_calculate_mclk_params(struct pp_hwmgr *hwmgr,
993 uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level)
994{
995 struct pp_atomctrl_memory_clock_param_ai mpll_param;
996
997 PP_ASSERT_WITH_CODE(!atomctrl_get_memory_pll_dividers_ai(hwmgr,
998 clock, &mpll_param),
999 "Failed to retrieve memory pll parameter.",
1000 return -EINVAL);
1001
1002 mem_level->MclkFrequency = (uint32_t)mpll_param.ulClock;
1003 mem_level->Fcw_int = (uint16_t)mpll_param.ulMclk_fcw_int;
1004 mem_level->Fcw_frac = (uint16_t)mpll_param.ulMclk_fcw_frac;
1005 mem_level->Postdiv = (uint8_t)mpll_param.ulPostDiv;
1006
1007 return 0;
1008}
1009
1010static int vegam_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1011 uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level)
1012{
1013 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1014 struct phm_ppt_v1_information *table_info =
1015 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1016 int result = 0;
1017 uint32_t mclk_stutter_mode_threshold = 60000;
1018
1019
1020 if (table_info->vdd_dep_on_mclk) {
1021 result = vegam_get_dependency_volt_by_clk(hwmgr,
1022 table_info->vdd_dep_on_mclk, clock,
1023 &mem_level->MinVoltage, &mem_level->MinMvdd);
1024 PP_ASSERT_WITH_CODE(!result,
1025				"cannot find MinVddc voltage value in the memory "
1026 "VDDC voltage dependency table", return result);
1027 }
1028
1029 result = vegam_calculate_mclk_params(hwmgr, clock, mem_level);
1030 PP_ASSERT_WITH_CODE(!result,
1031 "Failed to calculate mclk params.",
1032 return -EINVAL);
1033
1034 mem_level->EnabledForThrottle = 1;
1035 mem_level->EnabledForActivity = 0;
1036 mem_level->VoltageDownHyst = 0;
1037 mem_level->ActivityLevel = (uint16_t)
1038 (MemoryDPMTuning_VEGAM >> DPMTuning_Activity_Shift);
1039 mem_level->StutterEnable = false;
1040 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1041
1042 data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
1043
1044 if (mclk_stutter_mode_threshold &&
1045 (clock <= mclk_stutter_mode_threshold) &&
1046 (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
1047 STUTTER_ENABLE) & 0x1))
1048 mem_level->StutterEnable = true;
1049
1050 if (!result) {
1051 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
1052 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
1053 CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_int);
1054 CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_frac);
1055 CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
1056 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
1057 }
1058
1059 return result;
1060}
1061
1062static int vegam_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1063{
1064 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1065 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1066 struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
1067 int result;
1068 /* populate MCLK dpm table to SMU7 */
1069 uint32_t array = smu_data->smu7_data.dpm_table_start +
1070 offsetof(SMU75_Discrete_DpmTable, MemoryLevel);
1071 uint32_t array_size = sizeof(SMU75_Discrete_MemoryLevel) *
1072 SMU75_MAX_LEVELS_MEMORY;
1073 struct SMU75_Discrete_MemoryLevel *levels =
1074 smu_data->smc_state_table.MemoryLevel;
1075 uint32_t i;
1076
1077 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1078 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1079			"cannot populate memory level as memory clock is zero",
1080 return -EINVAL);
1081 result = vegam_populate_single_memory_level(hwmgr,
1082 dpm_table->mclk_table.dpm_levels[i].value,
1083 &levels[i]);
1084
1085 if (result)
1086 return result;
1087
1088 levels[i].UpHyst = (uint8_t)
1089 (MemoryDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift);
1090 levels[i].DownHyst = (uint8_t)
1091 (MemoryDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift);
1092 }
1093
1094 smu_data->smc_state_table.MemoryDpmLevelCount =
1095 (uint8_t)dpm_table->mclk_table.count;
1096 hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
1097 phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1098
1099 for (i = 0; i < dpm_table->mclk_table.count; i++)
1100 levels[i].EnabledForActivity =
1101 (hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask >> i) & 0x1;
1102
1103 levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
1104 PPSMC_DISPLAY_WATERMARK_HIGH;
1105
1106	/* the level count is sent to the SMC once at table init and never changes */
1107 result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
1108 (uint32_t)array_size, SMC_RAM_END);
1109
1110 return result;
1111}
1112
1113static int vegam_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1114 uint32_t mclk, SMIO_Pattern *smio_pat)
1115{
1116 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1117 struct phm_ppt_v1_information *table_info =
1118 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1119 uint32_t i = 0;
1120
1121 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1122		/* find the first mvdd entry whose clock is at least the requested mclk */
1123 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1124 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1125 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
1126 break;
1127 }
1128 }
1129 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1130 "MVDD Voltage is outside the supported range.",
1131 return -EINVAL);
1132 } else
1133 return -EINVAL;
1134
1135 return 0;
1136}
1137
1138static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1139 SMU75_Discrete_DpmTable *table)
1140{
1141 int result = 0;
1142 uint32_t sclk_frequency;
1143 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1144 struct phm_ppt_v1_information *table_info =
1145 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1146 SMIO_Pattern vol_level;
1147 uint32_t mvdd;
1148 uint16_t us_mvdd;
1149
1150 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1151
1152 /* Get MinVoltage and Frequency from DPM0,
1153 * already converted to SMC_UL */
1154 sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
1155 result = vegam_get_dependency_volt_by_clk(hwmgr,
1156 table_info->vdd_dep_on_sclk,
1157 sclk_frequency,
1158 &table->ACPILevel.MinVoltage, &mvdd);
1159 PP_ASSERT_WITH_CODE(!result,
1160 "Cannot find ACPI VDDC voltage value "
1161 "in Clock Dependency Table",
1162 );
1163
1164 result = vegam_calculate_sclk_params(hwmgr, sclk_frequency,
1165 &(table->ACPILevel.SclkSetting));
1166 PP_ASSERT_WITH_CODE(!result,
1167 "Error retrieving Engine Clock dividers from VBIOS.",
1168 return result);
1169
1170 table->ACPILevel.DeepSleepDivId = 0;
1171 table->ACPILevel.CcPwrDynRm = 0;
1172 table->ACPILevel.CcPwrDynRm1 = 0;
1173
1174 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1175 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
1176 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1177 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1178
1179 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
1180 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
1181 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
1182 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
1183 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
1184 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
1185 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
1186 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
1187 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
1188 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
1189
1190
1191 /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
1192 table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
1193 result = vegam_get_dependency_volt_by_clk(hwmgr,
1194 table_info->vdd_dep_on_mclk,
1195 table->MemoryACPILevel.MclkFrequency,
1196 &table->MemoryACPILevel.MinVoltage, &mvdd);
1197 PP_ASSERT_WITH_CODE((0 == result),
1198 "Cannot find ACPI VDDCI voltage value "
1199 "in Clock Dependency Table",
1200 );
1201
1202 us_mvdd = 0;
1203	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
1204			(data->mclk_dpm_key_disabled)) {
1205		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
1206	} else {
1207 if (!vegam_populate_mvdd_value(hwmgr,
1208 data->dpm_table.mclk_table.dpm_levels[0].value,
1209 &vol_level))
1210 us_mvdd = vol_level.Voltage;
1211 }
1212
1213 if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level))
1214 table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
1215 else
1216 table->MemoryACPILevel.MinMvdd = 0;
1217
1218 table->MemoryACPILevel.StutterEnable = false;
1219
1220 table->MemoryACPILevel.EnabledForThrottle = 0;
1221 table->MemoryACPILevel.EnabledForActivity = 0;
1222 table->MemoryACPILevel.UpHyst = 0;
1223 table->MemoryACPILevel.DownHyst = 100;
1224 table->MemoryACPILevel.VoltageDownHyst = 0;
1225 table->MemoryACPILevel.ActivityLevel =
1226 PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1227
1228 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
1229 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
1230
1231 return result;
1232}
1233
1234static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1235 SMU75_Discrete_DpmTable *table)
1236{
1237 int result = -EINVAL;
1238 uint8_t count;
1239 struct pp_atomctrl_clock_dividers_vi dividers;
1240 struct phm_ppt_v1_information *table_info =
1241 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1242 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1243 table_info->mm_dep_table;
1244 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1245 uint32_t vddci;
1246
1247 table->VceLevelCount = (uint8_t)(mm_table->count);
1248 table->VceBootLevel = 0;
1249
1250 for (count = 0; count < table->VceLevelCount; count++) {
1251 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
1252 table->VceLevel[count].MinVoltage = 0;
1253 table->VceLevel[count].MinVoltage |=
1254 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1255
1256 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1257 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1258 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1259 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1260 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1261 else
1262 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1263
1264
1265 table->VceLevel[count].MinVoltage |=
1266 (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1267 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1268
1269		/* retrieve divider value from VBIOS */
1270 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1271 table->VceLevel[count].Frequency, &dividers);
1272 PP_ASSERT_WITH_CODE((0 == result),
1273				"cannot find divider value for VCE engine clock",
1274 return result);
1275
1276 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1277
1278 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1279 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1280 }
1281 return result;
1282}
1283
1284static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1285 SMU75_Discrete_DpmTable *table)
1286{
1287 int result = -EINVAL;
1288 uint8_t count;
1289 struct pp_atomctrl_clock_dividers_vi dividers;
1290 struct phm_ppt_v1_information *table_info =
1291 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1292 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1293 table_info->mm_dep_table;
1294 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1295 uint32_t vddci;
1296
1297 table->SamuBootLevel = 0;
1298 table->SamuLevelCount = (uint8_t)(mm_table->count);
1299
1300 for (count = 0; count < table->SamuLevelCount; count++) {
1301 /* not sure whether we need evclk or not */
1302 table->SamuLevel[count].MinVoltage = 0;
1303 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1304 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1305 VOLTAGE_SCALE) << VDDC_SHIFT;
1306
1307 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1308 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1309 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1310 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1311 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1312 else
1313 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1314
1315 table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1316 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1317
1318		/* retrieve divider value from VBIOS */
1319 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1320 table->SamuLevel[count].Frequency, &dividers);
1321 PP_ASSERT_WITH_CODE((0 == result),
1322				"cannot find divider value for samu clock", return result);
1323
1324 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1325
1326 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1327 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
1328 }
1329 return result;
1330}
1331
1332static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1333 int32_t eng_clock, int32_t mem_clock,
1334 SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
1335{
1336 uint32_t dram_timing;
1337 uint32_t dram_timing2;
1338 uint32_t burst_time;
1339 uint32_t rfsh_rate;
1340 uint32_t misc3;
1341
1342 int result;
1343
1344 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1345 eng_clock, mem_clock);
1346 PP_ASSERT_WITH_CODE(result == 0,
1347 "Error calling VBIOS to set DRAM_TIMING.",
1348 return result);
1349
1350 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1351 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1352 burst_time = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
1353 rfsh_rate = cgs_read_register(hwmgr->device, mmMC_ARB_RFSH_RATE);
1354 misc3 = cgs_read_register(hwmgr->device, mmMC_ARB_MISC3);
1355
1356 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1357 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1358 arb_regs->McArbBurstTime = PP_HOST_TO_SMC_UL(burst_time);
1359 arb_regs->McArbRfshRate = PP_HOST_TO_SMC_UL(rfsh_rate);
1360 arb_regs->McArbMisc3 = PP_HOST_TO_SMC_UL(misc3);
1361
1362 return 0;
1363}
1364
1365static int vegam_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1366{
1367 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1368 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1369 struct SMU75_Discrete_MCArbDramTimingTable arb_regs = {0};
1370 uint32_t i, j;
1371 int result = 0;
1372
1373 for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
1374 for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
1375 result = vegam_populate_memory_timing_parameters(hwmgr,
1376 hw_data->dpm_table.sclk_table.dpm_levels[i].value,
1377 hw_data->dpm_table.mclk_table.dpm_levels[j].value,
1378 &arb_regs.entries[i][j]);
1379 if (result)
1380 return result;
1381 }
1382 }
1383
1384 result = smu7_copy_bytes_to_smc(
1385 hwmgr,
1386 smu_data->smu7_data.arb_table_start,
1387 (uint8_t *)&arb_regs,
1388 sizeof(SMU75_Discrete_MCArbDramTimingTable),
1389 SMC_RAM_END);
1390 return result;
1391}
1392
1393static int vegam_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1394 struct SMU75_Discrete_DpmTable *table)
1395{
1396 int result = -EINVAL;
1397 uint8_t count;
1398 struct pp_atomctrl_clock_dividers_vi dividers;
1399 struct phm_ppt_v1_information *table_info =
1400 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1401 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1402 table_info->mm_dep_table;
1403 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1404 uint32_t vddci;
1405
1406 table->UvdLevelCount = (uint8_t)(mm_table->count);
1407 table->UvdBootLevel = 0;
1408
1409 for (count = 0; count < table->UvdLevelCount; count++) {
1410 table->UvdLevel[count].MinVoltage = 0;
1411 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1412 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1413 table->UvdLevel[count].MinVoltage |=
1414 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1415
1416 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1417 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1418 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1419 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1420 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1421 else
1422 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1423
1424 table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1425 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1426
1427		/* retrieve divider value from VBIOS */
1428 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1429 table->UvdLevel[count].VclkFrequency, &dividers);
1430 PP_ASSERT_WITH_CODE((0 == result),
1431				"cannot find divider value for Vclk clock", return result);
1432
1433 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1434
1435 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1436 table->UvdLevel[count].DclkFrequency, &dividers);
1437 PP_ASSERT_WITH_CODE((0 == result),
1438				"cannot find divider value for Dclk clock", return result);
1439
1440 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1441
1442 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1443 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1444 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
1445 }
1446
1447 return result;
1448}
1449
1450static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1451 struct SMU75_Discrete_DpmTable *table)
1452{
1453 int result = 0;
1454 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1455
1456 table->GraphicsBootLevel = 0;
1457 table->MemoryBootLevel = 0;
1458
1459 /* find boot level from dpm table */
1460 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1461 data->vbios_boot_state.sclk_bootup_value,
1462 (uint32_t *)&(table->GraphicsBootLevel));
1463
1464 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1465 data->vbios_boot_state.mclk_bootup_value,
1466 (uint32_t *)&(table->MemoryBootLevel));
1467
1468 table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
1469 VOLTAGE_SCALE;
1470 table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
1471 VOLTAGE_SCALE;
1472 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
1473 VOLTAGE_SCALE;
1474
1475 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
1476 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
1477 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1478
1479 return 0;
1480}
1481
1482static int vegam_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1483{
1484 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1485 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1486 struct phm_ppt_v1_information *table_info =
1487 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1488 uint8_t count, level;
1489
1490 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1491
1492 for (level = 0; level < count; level++) {
1493 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1494 hw_data->vbios_boot_state.sclk_bootup_value) {
1495 smu_data->smc_state_table.GraphicsBootLevel = level;
1496 break;
1497 }
1498 }
1499
1500 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1501 for (level = 0; level < count; level++) {
1502 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1503 hw_data->vbios_boot_state.mclk_bootup_value) {
1504 smu_data->smc_state_table.MemoryBootLevel = level;
1505 break;
1506 }
1507 }
1508
1509 return 0;
1510}
1511
1512static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
1513{
1514 uint32_t tmp;
1515 tmp = raw_setting * 4096 / 100;
1516 return (uint16_t)tmp;
1517}
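/*
 * scale_fan_gain_settings() converts a percentage to a 4096-based
 * fixed-point fraction: raw * 4096 / 100, so a gain of 25 (%) maps to
 * 1024 and 100 maps to full scale 4096. A hedged sketch of the inverse
 * conversion (unscale_fan_gain_settings() is illustrative only, not a
 * driver helper):
 */
static uint16_t unscale_fan_gain_settings(uint16_t scaled)
{
	/* round to the nearest percent on the way back */
	return (uint16_t)(((uint32_t)scaled * 100 + 2048) / 4096);
}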
1518
1519static int vegam_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
1520{
1521 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1522
1523 const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
1524 SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1525 struct phm_ppt_v1_information *table_info =
1526 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1527 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
1528 struct pp_advance_fan_control_parameters *fan_table =
1529 &hwmgr->thermal_controller.advanceFanControlParameters;
1530 int i, j, k;
1531 const uint16_t *pdef1;
1532 const uint16_t *pdef2;
1533
1534 table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
1535 table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
1536
1537 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
1538 "Target Operating Temp is out of Range!",
1539 );
1540
1541 table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
1542 cac_dtp_table->usTargetOperatingTemp * 256);
1543 table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
1544 cac_dtp_table->usTemperatureLimitHotspot * 256);
1545 table->FanGainEdge = PP_HOST_TO_SMC_US(
1546 scale_fan_gain_settings(fan_table->usFanGainEdge));
1547 table->FanGainHotspot = PP_HOST_TO_SMC_US(
1548 scale_fan_gain_settings(fan_table->usFanGainHotspot));
1549
1550 pdef1 = defaults->BAPMTI_R;
1551 pdef2 = defaults->BAPMTI_RC;
1552
1553 for (i = 0; i < SMU75_DTE_ITERATIONS; i++) {
1554 for (j = 0; j < SMU75_DTE_SOURCES; j++) {
1555 for (k = 0; k < SMU75_DTE_SINKS; k++) {
1556 table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
1557 table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
1558 pdef1++;
1559 pdef2++;
1560 }
1561 }
1562 }
1563
1564 return 0;
1565}
1566
1567static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1568{
1569 uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
1570 struct vegam_smumgr *smu_data =
1571 (struct vegam_smumgr *)(hwmgr->smu_backend);
1572
1573 uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
1574 struct phm_ppt_v1_information *table_info =
1575 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1576 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1577 table_info->vdd_dep_on_sclk;
1578 uint32_t mask = (1 << ((STRAP_ASIC_RO_MSB - STRAP_ASIC_RO_LSB) + 1)) - 1;
1579
1580 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1581
1582 atomctrl_read_efuse(hwmgr, STRAP_ASIC_RO_LSB, STRAP_ASIC_RO_MSB,
1583 mask, &efuse);
1584
1585 min = 1200;
1586 max = 2500;
1587
1588 ro = efuse * (max - min) / 255 + min;
1589
1590 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1591 for (i = 0; i < sclk_table->count; i++) {
1592 smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1593 sclk_table->entries[i].cks_enable << i;
1594 volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) *
1595 136418 - (ro - 70) * 1000000) /
1596 (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
1597 volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 *
1598 3232 - (ro - 65) * 1000000) /
1599 (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
1600
1601 if (volt_without_cks >= volt_with_cks)
1602 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1603 sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
1604
1605 smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1606 }
1607
1608 smu_data->smc_state_table.LdoRefSel =
1609 (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ?
1610 table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5;
1611 /* Populate CKS Lookup Table */
1612 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1613 stretch_amount2 = 0;
1614 else if (stretch_amount == 3 || stretch_amount == 4)
1615 stretch_amount2 = 1;
1616 else {
1617 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1618 PHM_PlatformCaps_ClockStretcher);
1619 PP_ASSERT_WITH_CODE(false,
1620				"Stretch Amount in PPTable not supported",
1621 return -EINVAL);
1622 }
1623
1624 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
1625 value &= 0xFFFFFFFE;
1626 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
1627
1628 return 0;
1629}
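/*
 * The ro value above is a linear remap of the 8-bit efuse reading onto
 * [min, max] = [1200, 2500]: ro = efuse * (max - min) / 255 + min, so
 * a fuse of 0 yields 1200, 255 yields 2500 and 128 yields 1852. A
 * minimal sketch (remap_efuse() is illustrative only):
 */
static uint32_t remap_efuse(uint32_t efuse, uint32_t lo, uint32_t hi)
{
	/* map the raw range 0..255 linearly onto lo..hi */
	return efuse * (hi - lo) / 255 + lo;
}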
1630
1631static bool vegam_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
1632{
1633 uint32_t efuse;
1634
1635 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1636 ixSMU_EFUSE_0 + (49 * 4));
1637 efuse &= 0x00000001;
1638
1639 if (efuse)
1640 return true;
1641
1642 return false;
1643}
1644
1645static int vegam_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
1646{
1647 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1648 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1649
1650 SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1651 int result = 0;
1652 struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
1653 AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
1654 AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
1655 uint32_t tmp, i;
1656
1657 struct phm_ppt_v1_information *table_info =
1658 (struct phm_ppt_v1_information *)hwmgr->pptable;
1659 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1660 table_info->vdd_dep_on_sclk;
1661
1662 if (!hwmgr->avfs_supported)
1663 return 0;
1664
1665 result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
1666
1667 if (0 == result) {
1668 table->BTCGB_VDROOP_TABLE[0].a0 =
1669 PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
1670 table->BTCGB_VDROOP_TABLE[0].a1 =
1671 PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
1672 table->BTCGB_VDROOP_TABLE[0].a2 =
1673 PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
1674 table->BTCGB_VDROOP_TABLE[1].a0 =
1675 PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
1676 table->BTCGB_VDROOP_TABLE[1].a1 =
1677 PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
1678 table->BTCGB_VDROOP_TABLE[1].a2 =
1679 PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
1680 table->AVFSGB_FUSE_TABLE[0].m1 =
1681 PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
1682 table->AVFSGB_FUSE_TABLE[0].m2 =
1683 PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
1684 table->AVFSGB_FUSE_TABLE[0].b =
1685 PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
1686 table->AVFSGB_FUSE_TABLE[0].m1_shift = 24;
1687 table->AVFSGB_FUSE_TABLE[0].m2_shift = 12;
1688 table->AVFSGB_FUSE_TABLE[1].m1 =
1689 PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1690 table->AVFSGB_FUSE_TABLE[1].m2 =
1691 PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1692 table->AVFSGB_FUSE_TABLE[1].b =
1693 PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1694 table->AVFSGB_FUSE_TABLE[1].m1_shift = 24;
1695 table->AVFSGB_FUSE_TABLE[1].m2_shift = 12;
1696 table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
1697 AVFS_meanNsigma.Aconstant[0] =
1698 PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
1699 AVFS_meanNsigma.Aconstant[1] =
1700 PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
1701 AVFS_meanNsigma.Aconstant[2] =
1702 PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
1703 AVFS_meanNsigma.DC_tol_sigma =
1704 PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
1705 AVFS_meanNsigma.Platform_mean =
1706 PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
1707 AVFS_meanNsigma.PSM_Age_CompFactor =
1708 PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
1709 AVFS_meanNsigma.Platform_sigma =
1710 PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
1711
1712 for (i = 0; i < sclk_table->count; i++) {
1713 AVFS_meanNsigma.Static_Voltage_Offset[i] =
1714 (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
1715 AVFS_SclkOffset.Sclk_Offset[i] =
1716 PP_HOST_TO_SMC_US((uint16_t)
1717 (sclk_table->entries[i].sclk_offset) / 100);
1718 }
1719
1720 result = smu7_read_smc_sram_dword(hwmgr,
1721 SMU7_FIRMWARE_HEADER_LOCATION +
1722 offsetof(SMU75_Firmware_Header, AvfsMeanNSigma),
1723 &tmp, SMC_RAM_END);
1724 smu7_copy_bytes_to_smc(hwmgr,
1725 tmp,
1726 (uint8_t *)&AVFS_meanNsigma,
1727 sizeof(AVFS_meanNsigma_t),
1728 SMC_RAM_END);
1729
1730 result = smu7_read_smc_sram_dword(hwmgr,
1731 SMU7_FIRMWARE_HEADER_LOCATION +
1732 offsetof(SMU75_Firmware_Header, AvfsSclkOffsetTable),
1733 &tmp, SMC_RAM_END);
1734 smu7_copy_bytes_to_smc(hwmgr,
1735 tmp,
1736 (uint8_t *)&AVFS_SclkOffset,
1737 sizeof(AVFS_Sclk_Offset_t),
1738 SMC_RAM_END);
1739
1740 data->avfs_vdroop_override_setting =
1741 (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
1742 (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
1743 (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
1744 (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
1745 data->apply_avfs_cks_off_voltage =
1746			avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1;
1747 }
1748 return result;
1749}
1750
1751static int vegam_populate_vr_config(struct pp_hwmgr *hwmgr,
1752 struct SMU75_Discrete_DpmTable *table)
1753{
1754 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1755 struct vegam_smumgr *smu_data =
1756 (struct vegam_smumgr *)(hwmgr->smu_backend);
1757 uint16_t config;
1758
1759 config = VR_MERGED_WITH_VDDC;
1760 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1761
1762 /* Set Vddc Voltage Controller */
1763 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1764 config = VR_SVI2_PLANE_1;
1765 table->VRConfig |= config;
1766 } else {
1767 PP_ASSERT_WITH_CODE(false,
1768 "VDDC should be on SVI2 control in merged mode!",
1769 );
1770 }
1771 /* Set Vddci Voltage Controller */
1772 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1773 config = VR_SVI2_PLANE_2; /* only in merged mode */
1774 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1775 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1776 config = VR_SMIO_PATTERN_1;
1777 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1778 } else {
1779 config = VR_STATIC_VOLTAGE;
1780 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1781 }
1782 /* Set Mvdd Voltage Controller */
1783 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1784 if (config != VR_SVI2_PLANE_2) {
1785 config = VR_SVI2_PLANE_2;
1786 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1787 cgs_write_ind_register(hwmgr->device,
1788 CGS_IND_REG__SMC,
1789 smu_data->smu7_data.soft_regs_start +
1790 offsetof(SMU75_SoftRegisters, AllowMvddSwitch),
1791 0x1);
1792 } else {
1793 PP_ASSERT_WITH_CODE(false,
1794 "SVI2 Plane 2 is already taken, set MVDD as Static",);
1795 config = VR_STATIC_VOLTAGE;
1796			table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1797 }
1798 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1799 config = VR_SMIO_PATTERN_2;
1800		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1801 cgs_write_ind_register(hwmgr->device,
1802 CGS_IND_REG__SMC,
1803 smu_data->smu7_data.soft_regs_start +
1804 offsetof(SMU75_SoftRegisters, AllowMvddSwitch),
1805 0x1);
1806 } else {
1807 config = VR_STATIC_VOLTAGE;
1808 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1809 }
1810
1811 return 0;
1812}
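/*
 * VRConfig packs one regulator selection per rail into a single word:
 * VDDC sits in the low field and VDDGFX/VDDCI/MVDD sit at their
 * VRCONF_*_SHIFT positions, which is why the MVDD branches above must
 * OR their field in rather than assign the whole word. A minimal
 * sketch of the packing, with the shifts passed in as parameters since
 * the exact field layout lives in the SMU75 headers:
 */
static uint32_t pack_vr_config(uint16_t vddc, uint16_t vddgfx,
		uint16_t vddci, uint16_t mvdd, unsigned int gfx_shift,
		unsigned int vddci_shift, unsigned int mvdd_shift)
{
	uint32_t cfg = vddc;	/* VDDC occupies the low field */

	cfg |= (uint32_t)vddgfx << gfx_shift;
	cfg |= (uint32_t)vddci << vddci_shift;
	cfg |= (uint32_t)mvdd << mvdd_shift;

	return cfg;
}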
1813
1814static int vegam_populate_svi_load_line(struct pp_hwmgr *hwmgr)
1815{
1816 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1817 const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
1818
1819 smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
1820 smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
1821 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
1822 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
1823
1824 return 0;
1825}
1826
1827static int vegam_populate_tdc_limit(struct pp_hwmgr *hwmgr)
1828{
1829 uint16_t tdc_limit;
1830 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1831 struct phm_ppt_v1_information *table_info =
1832 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1833 const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
1834
1835 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
1836 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
1837 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
1838 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
1839 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
1840 smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
1841
1842 return 0;
1843}
1844
1845static int vegam_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
1846{
1847 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1848 const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
1849 uint32_t temp;
1850
1851 if (smu7_read_smc_sram_dword(hwmgr,
1852 fuse_table_offset +
1853 offsetof(SMU75_Discrete_PmFuses, TdcWaterfallCtl),
1854 (uint32_t *)&temp, SMC_RAM_END))
1855 PP_ASSERT_WITH_CODE(false,
1856				"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
1857 return -EINVAL);
1858 else {
1859 smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
1860 smu_data->power_tune_table.LPMLTemperatureMin =
1861 (uint8_t)((temp >> 16) & 0xff);
1862 smu_data->power_tune_table.LPMLTemperatureMax =
1863 (uint8_t)((temp >> 8) & 0xff);
1864 smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
1865 }
1866 return 0;
1867}
1868
1869static int vegam_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
1870{
1871 int i;
1872 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1873
1874 /* Currently not used. Set all to zero. */
1875 for (i = 0; i < 16; i++)
1876 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
1877
1878 return 0;
1879}
1880
1881static int vegam_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
1882{
1883 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1884
 1885/* TODO: move to hwmgr */
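/* Assumption: bit 15 reads as the sign bit of a signed VBIOS value, so a
 * set bit 15 (like a zero value) is treated as invalid and the default
 * sensitivity is used instead.
 */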
1886 if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
1887 || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
1888 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
1889 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
1890
1891 smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
1892 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
1893 return 0;
1894}
1895
1896static int vegam_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
1897{
1898 int i;
1899 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1900
1901 /* Currently not used. Set all to zero. */
1902 for (i = 0; i < 16; i++)
1903 smu_data->power_tune_table.GnbLPML[i] = 0;
1904
1905 return 0;
1906}
1907
1908static int vegam_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
1909{
1910 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1911 struct phm_ppt_v1_information *table_info =
1912 (struct phm_ppt_v1_information *)(hwmgr->pptable);
 1913 uint16_t hi_sidd;
 1914 uint16_t lo_sidd;
1915 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
1916
 1917 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage * 256 / 100);
 1918 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage * 256 / 100);
1919
1920 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
1921 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
1922 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
1923 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
1924
1925 return 0;
1926}
1927
1928static int vegam_populate_pm_fuses(struct pp_hwmgr *hwmgr)
1929{
1930 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
1931 uint32_t pm_fuse_table_offset;
1932
1933 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1934 PHM_PlatformCaps_PowerContainment)) {
1935 if (smu7_read_smc_sram_dword(hwmgr,
1936 SMU7_FIRMWARE_HEADER_LOCATION +
1937 offsetof(SMU75_Firmware_Header, PmFuseTable),
1938 &pm_fuse_table_offset, SMC_RAM_END))
1939 PP_ASSERT_WITH_CODE(false,
1940 "Attempt to get pm_fuse_table_offset Failed!",
1941 return -EINVAL);
1942
1943 if (vegam_populate_svi_load_line(hwmgr))
1944 PP_ASSERT_WITH_CODE(false,
1945 "Attempt to populate SviLoadLine Failed!",
1946 return -EINVAL);
1947
1948 if (vegam_populate_tdc_limit(hwmgr))
1949 PP_ASSERT_WITH_CODE(false,
1950 "Attempt to populate TDCLimit Failed!", return -EINVAL);
1951
1952 if (vegam_populate_dw8(hwmgr, pm_fuse_table_offset))
1953 PP_ASSERT_WITH_CODE(false,
1954 "Attempt to populate TdcWaterfallCtl, "
1955 "LPMLTemperature Min and Max Failed!",
1956 return -EINVAL);
1957
 1958 if (vegam_populate_temperature_scaler(hwmgr))
1959 PP_ASSERT_WITH_CODE(false,
1960 "Attempt to populate LPMLTemperatureScaler Failed!",
1961 return -EINVAL);
1962
1963 if (vegam_populate_fuzzy_fan(hwmgr))
1964 PP_ASSERT_WITH_CODE(false,
1965 "Attempt to populate Fuzzy Fan Control parameters Failed!",
1966 return -EINVAL);
1967
1968 if (vegam_populate_gnb_lpml(hwmgr))
1969 PP_ASSERT_WITH_CODE(false,
1970 "Attempt to populate GnbLPML Failed!",
1971 return -EINVAL);
1972
1973 if (vegam_populate_bapm_vddc_base_leakage_sidd(hwmgr))
1974 PP_ASSERT_WITH_CODE(false,
1975 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
1976 "Sidd Failed!", return -EINVAL);
1977
1978 if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
1979 (uint8_t *)&smu_data->power_tune_table,
1980 (sizeof(struct SMU75_Discrete_PmFuses) - PMFUSES_AVFSSIZE),
1981 SMC_RAM_END))
1982 PP_ASSERT_WITH_CODE(false,
1983 "Attempt to download PmFuseTable Failed!",
1984 return -EINVAL);
1985 }
1986 return 0;
1987}
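/* The PP_ASSERT_WITH_CODE() calls above all follow one idiom: when the
 * condition is false, the message is logged and the recovery statement runs.
 * A sketch of the macro's shape (assumption: the real definition lives in
 * hwmgr.h and may differ in logging details):
 *
 *	#define PP_ASSERT_WITH_CODE(cond, msg, code)	\
 *		do {					\
 *			if (!(cond)) {			\
 *				pr_warn("%s\n", msg);	\
 *				code;			\
 *			}				\
 *		} while (0)
 *
 * Hence PP_ASSERT_WITH_CODE(false, "...", return -EINVAL) is an
 * unconditional log-and-return used on the error paths above.
 */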
1988
1989static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr)
1990{
1991 struct amdgpu_device *adev = hwmgr->adev;
1992
1993 smum_send_msg_to_smc_with_parameter(hwmgr,
1994 PPSMC_MSG_EnableModeSwitchRLCNotification,
1995 adev->gfx.cu_info.number);
1996
1997 return 0;
1998}
1999
2000static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
2001{
2002 int result;
2003 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
2004 struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
2005
2006 struct phm_ppt_v1_information *table_info =
2007 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2008 struct SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
2009 uint8_t i;
2010 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
2011 struct phm_ppt_v1_gpio_table *gpio_table =
2012 (struct phm_ppt_v1_gpio_table *)table_info->gpio_table;
2013 pp_atomctrl_clock_dividers_vi dividers;
2014
2015 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2016 PHM_PlatformCaps_AutomaticDCTransition);
2017
2018 vegam_initialize_power_tune_defaults(hwmgr);
2019
2020 if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
2021 vegam_populate_smc_voltage_tables(hwmgr, table);
2022
2023 table->SystemFlags = 0;
2024 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2025 PHM_PlatformCaps_AutomaticDCTransition))
2026 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2027
2028 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2029 PHM_PlatformCaps_StepVddc))
2030 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2031
2032 if (hw_data->is_memory_gddr5)
2033 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2034
2035 if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
2036 result = vegam_populate_ulv_state(hwmgr, table);
2037 PP_ASSERT_WITH_CODE(!result,
2038 "Failed to initialize ULV state!", return result);
2039 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2040 ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
2041 }
2042
2043 result = vegam_populate_smc_link_level(hwmgr, table);
2044 PP_ASSERT_WITH_CODE(!result,
2045 "Failed to initialize Link Level!", return result);
2046
2047 result = vegam_populate_all_graphic_levels(hwmgr);
2048 PP_ASSERT_WITH_CODE(!result,
2049 "Failed to initialize Graphics Level!", return result);
2050
2051 result = vegam_populate_all_memory_levels(hwmgr);
2052 PP_ASSERT_WITH_CODE(!result,
2053 "Failed to initialize Memory Level!", return result);
2054
2055 result = vegam_populate_smc_acpi_level(hwmgr, table);
2056 PP_ASSERT_WITH_CODE(!result,
2057 "Failed to initialize ACPI Level!", return result);
2058
2059 result = vegam_populate_smc_vce_level(hwmgr, table);
2060 PP_ASSERT_WITH_CODE(!result,
2061 "Failed to initialize VCE Level!", return result);
2062
2063 result = vegam_populate_smc_samu_level(hwmgr, table);
2064 PP_ASSERT_WITH_CODE(!result,
2065 "Failed to initialize SAMU Level!", return result);
2066
2067 /* Since only the initial state is completely set up at this point
2068 * (the other states are just copies of the boot state) we only
2069 * need to populate the ARB settings for the initial state.
2070 */
2071 result = vegam_program_memory_timing_parameters(hwmgr);
2072 PP_ASSERT_WITH_CODE(!result,
2073 "Failed to Write ARB settings for the initial state.", return result);
2074
2075 result = vegam_populate_smc_uvd_level(hwmgr, table);
2076 PP_ASSERT_WITH_CODE(!result,
2077 "Failed to initialize UVD Level!", return result);
2078
2079 result = vegam_populate_smc_boot_level(hwmgr, table);
2080 PP_ASSERT_WITH_CODE(!result,
2081 "Failed to initialize Boot Level!", return result);
2082
2083 result = vegam_populate_smc_initial_state(hwmgr);
2084 PP_ASSERT_WITH_CODE(!result,
2085 "Failed to initialize Boot State!", return result);
2086
2087 result = vegam_populate_bapm_parameters_in_dpm_table(hwmgr);
2088 PP_ASSERT_WITH_CODE(!result,
2089 "Failed to populate BAPM Parameters!", return result);
2090
2091 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2092 PHM_PlatformCaps_ClockStretcher)) {
2093 result = vegam_populate_clock_stretcher_data_table(hwmgr);
2094 PP_ASSERT_WITH_CODE(!result,
2095 "Failed to populate Clock Stretcher Data Table!",
2096 return result);
2097 }
2098
2099 result = vegam_populate_avfs_parameters(hwmgr);
2100 PP_ASSERT_WITH_CODE(!result,
2101 "Failed to populate AVFS Parameters!", return result;);
2102
2103 table->CurrSclkPllRange = 0xff;
2104 table->GraphicsVoltageChangeEnable = 1;
2105 table->GraphicsThermThrottleEnable = 1;
2106 table->GraphicsInterval = 1;
2107 table->VoltageInterval = 1;
2108 table->ThermalInterval = 1;
2109 table->TemperatureLimitHigh =
2110 table_info->cac_dtp_table->usTargetOperatingTemp *
2111 SMU7_Q88_FORMAT_CONVERSION_UNIT;
2112 table->TemperatureLimitLow =
2113 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
2114 SMU7_Q88_FORMAT_CONVERSION_UNIT;
2115 table->MemoryVoltageChangeEnable = 1;
2116 table->MemoryInterval = 1;
2117 table->VoltageResponseTime = 0;
2118 table->PhaseResponseTime = 0;
2119 table->MemoryThermThrottleEnable = 1;
2120
2121 PP_ASSERT_WITH_CODE(hw_data->dpm_table.pcie_speed_table.count >= 1,
2122 "There must be 1 or more PCIE levels defined in PPTable.",
2123 return -EINVAL);
2124 table->PCIeBootLinkLevel =
2125 hw_data->dpm_table.pcie_speed_table.count;
2126 table->PCIeGenInterval = 1;
2127 table->VRConfig = 0;
2128
2129 result = vegam_populate_vr_config(hwmgr, table);
2130 PP_ASSERT_WITH_CODE(!result,
2131 "Failed to populate VRConfig setting!", return result);
2132
2133 table->ThermGpio = 17;
2134 table->SclkStepSize = 0x4000;
2135
2136 if (atomctrl_get_pp_assign_pin(hwmgr,
2137 VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
2138 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2139 if (gpio_table)
2140 table->VRHotLevel =
2141 table_info->gpio_table->vrhot_triggered_sclk_dpm_index;
2142 } else {
2143 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2144 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2145 PHM_PlatformCaps_RegulatorHot);
2146 }
2147
2148 if (atomctrl_get_pp_assign_pin(hwmgr,
2149 PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin)) {
2150 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
2151 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2152 PHM_PlatformCaps_AutomaticDCTransition) &&
2153 !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme))
2154 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2155 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
2156 } else {
2157 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2158 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2159 PHM_PlatformCaps_AutomaticDCTransition);
2160 }
2161
2162 /* Thermal Output GPIO */
2163 if (atomctrl_get_pp_assign_pin(hwmgr,
2164 THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin)) {
2165 table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
2166
 2167 /* For polarity, read GPIOPAD_A at the assigned GPIO pin:
 2168 * the VBIOS programs this register to the 'inactive state',
 2169 * so the driver can derive the 'active state' from it and
 2170 * program the SMU with the correct polarity.
2171 */
2172 table->ThermOutPolarity =
2173 (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
 2174 (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1 : 0;
2175 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
2176
2177 /* if required, combine VRHot/PCC with thermal out GPIO */
2178 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2179 PHM_PlatformCaps_RegulatorHot) &&
2180 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2181 PHM_PlatformCaps_CombinePCCWithThermalSignal))
2182 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
2183 } else {
2184 table->ThermOutGpio = 17;
2185 table->ThermOutPolarity = 1;
2186 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
2187 }
2188
2189 /* Populate BIF_SCLK levels into SMC DPM table */
2190 for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
2191 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2192 smu_data->bif_sclk_table[i], &dividers);
2193 PP_ASSERT_WITH_CODE(!result,
2194 "Can not find DFS divide id for Sclk",
2195 return result);
2196
2197 if (i == 0)
2198 table->Ulv.BifSclkDfs =
2199 PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider));
2200 else
2201 table->LinkLevel[i - 1].BifSclkDfs =
2202 PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider));
2203 }
2204
2205 for (i = 0; i < SMU75_MAX_ENTRIES_SMIO; i++)
2206 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
2207
2208 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2209 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2210 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
2211 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
2212 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2213 CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
2214 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2215 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2216 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2217 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2218
 2219 /* Upload all DPM data (DPM levels, level count, etc.) to SMC memory. */
2220 result = smu7_copy_bytes_to_smc(hwmgr,
2221 smu_data->smu7_data.dpm_table_start +
2222 offsetof(SMU75_Discrete_DpmTable, SystemFlags),
2223 (uint8_t *)&(table->SystemFlags),
2224 sizeof(SMU75_Discrete_DpmTable) - 3 * sizeof(SMU75_PIDController),
2225 SMC_RAM_END);
2226 PP_ASSERT_WITH_CODE(!result,
2227 "Failed to upload dpm data to SMC memory!", return result);
2228
2229 result = vegam_populate_pm_fuses(hwmgr);
2230 PP_ASSERT_WITH_CODE(!result,
2231 "Failed to populate PM fuses to SMC memory!", return result);
2232
2233 result = vegam_enable_reconfig_cus(hwmgr);
2234 PP_ASSERT_WITH_CODE(!result,
2235 "Failed to enable reconfigurable CUs!", return result);
2236
2237 return 0;
2238}
2239
2240static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
2241{
2242 switch (type) {
2243 case SMU_SoftRegisters:
2244 switch (member) {
2245 case HandshakeDisables:
2246 return offsetof(SMU75_SoftRegisters, HandshakeDisables);
2247 case VoltageChangeTimeout:
2248 return offsetof(SMU75_SoftRegisters, VoltageChangeTimeout);
2249 case AverageGraphicsActivity:
2250 return offsetof(SMU75_SoftRegisters, AverageGraphicsActivity);
2251 case PreVBlankGap:
2252 return offsetof(SMU75_SoftRegisters, PreVBlankGap);
2253 case VBlankTimeout:
2254 return offsetof(SMU75_SoftRegisters, VBlankTimeout);
2255 case UcodeLoadStatus:
2256 return offsetof(SMU75_SoftRegisters, UcodeLoadStatus);
2257 case DRAM_LOG_ADDR_H:
2258 return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_H);
2259 case DRAM_LOG_ADDR_L:
2260 return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_L);
2261 case DRAM_LOG_PHY_ADDR_H:
2262 return offsetof(SMU75_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2263 case DRAM_LOG_PHY_ADDR_L:
2264 return offsetof(SMU75_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2265 case DRAM_LOG_BUFF_SIZE:
2266 return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE);
 2267 }
 break;
 2268 case SMU_Discrete_DpmTable:
2269 switch (member) {
2270 case UvdBootLevel:
2271 return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
2272 case VceBootLevel:
2273 return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
2274 case SamuBootLevel:
2275 return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
2276 case LowSclkInterruptThreshold:
2277 return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
2278 }
2279 }
2280 pr_warn("can't get the offset of type %x member %x\n", type, member);
2281 return 0;
2282}
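/* A minimal sketch of how a caller can combine vegam_get_offsetof() with the
 * DPM table base to patch a single field in SMC RAM (hypothetical helper,
 * not part of this patch):
 */
static int example_write_uvd_boot_level(struct pp_hwmgr *hwmgr, uint8_t level)
{
	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
	uint32_t offset = smu_data->smu7_data.dpm_table_start +
			vegam_get_offsetof(SMU_Discrete_DpmTable, UvdBootLevel);

	/* copy one byte into SMC RAM, bounded by SMC_RAM_END */
	return smu7_copy_bytes_to_smc(hwmgr, offset, &level,
			sizeof(level), SMC_RAM_END);
}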
2283
2284static int vegam_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2285{
2286 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2287
2288 if (data->need_update_smu7_dpm_table &
 2289 (DPMTABLE_OD_UPDATE_SCLK |
 2290 DPMTABLE_UPDATE_SCLK |
 2291 DPMTABLE_UPDATE_MCLK))
2292 return vegam_program_memory_timing_parameters(hwmgr);
2293
2294 return 0;
2295}
2296
2297static int vegam_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2298{
2299 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2300 struct vegam_smumgr *smu_data =
2301 (struct vegam_smumgr *)(hwmgr->smu_backend);
2302 int result = 0;
2303 uint32_t low_sclk_interrupt_threshold = 0;
2304
2305 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2306 PHM_PlatformCaps_SclkThrottleLowNotification)
2307 && (data->low_sclk_interrupt_threshold != 0)) {
2308 low_sclk_interrupt_threshold =
2309 data->low_sclk_interrupt_threshold;
2310
2311 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2312
2313 result = smu7_copy_bytes_to_smc(
2314 hwmgr,
2315 smu_data->smu7_data.dpm_table_start +
2316 offsetof(SMU75_Discrete_DpmTable,
2317 LowSclkInterruptThreshold),
2318 (uint8_t *)&low_sclk_interrupt_threshold,
2319 sizeof(uint32_t),
2320 SMC_RAM_END);
2321 }
2322 PP_ASSERT_WITH_CODE((result == 0),
2323 "Failed to update SCLK threshold!", return result);
2324
2325 result = vegam_program_mem_timing_parameters(hwmgr);
2326 PP_ASSERT_WITH_CODE((result == 0),
2327 "Failed to program memory timing parameters!",
2328 );
2329
2330 return result;
2331}
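/* The CONVERT_FROM_HOST_TO_SMC_* and PP_HOST_TO_SMC_* helpers used throughout
 * this file byte-swap values into the SMC's expected layout.  A sketch of
 * what they amount to (assumption: the SMC is big-endian and the macros in
 * pp_endian.h wrap the kernel endian helpers):
 *
 *	#define PP_HOST_TO_SMC_US(X)		cpu_to_be16(X)
 *	#define PP_HOST_TO_SMC_UL(X)		cpu_to_be32(X)
 *	#define CONVERT_FROM_HOST_TO_SMC_US(X)	((X) = PP_HOST_TO_SMC_US(X))
 *	#define CONVERT_FROM_HOST_TO_SMC_UL(X)	((X) = PP_HOST_TO_SMC_UL(X))
 */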
2332
2333int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
2334{
2335 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2336 int ret;
2337
2338 if (!hwmgr->avfs_supported)
2339 return 0;
2340
2341 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
2342 if (!ret) {
2343 if (data->apply_avfs_cks_off_voltage)
2344 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
2345 }
2346
2347 return ret;
2348}
2349
2350static int vegam_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2351{
2352 PP_ASSERT_WITH_CODE(hwmgr->thermal_controller.fanInfo.bNoFan,
2353 "VBIOS fan info is not correct!",
2354 );
2355 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2356 PHM_PlatformCaps_MicrocodeFanControl);
2357 return 0;
2358}
2359
2360const struct pp_smumgr_func vegam_smu_funcs = {
2361 .smu_init = vegam_smu_init,
2362 .smu_fini = smu7_smu_fini,
2363 .start_smu = vegam_start_smu,
2364 .check_fw_load_finish = smu7_check_fw_load_finish,
2365 .request_smu_load_fw = smu7_reload_firmware,
2366 .request_smu_load_specific_fw = NULL,
2367 .send_msg_to_smc = smu7_send_msg_to_smc,
2368 .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
2369 .process_firmware_header = vegam_process_firmware_header,
2370 .is_dpm_running = vegam_is_dpm_running,
2371 .get_mac_definition = vegam_get_mac_definition,
2372 .update_smc_table = vegam_update_smc_table,
2373 .init_smc_table = vegam_init_smc_table,
2374 .get_offsetof = vegam_get_offsetof,
2375 .populate_all_graphic_levels = vegam_populate_all_graphic_levels,
2376 .populate_all_memory_levels = vegam_populate_all_memory_levels,
2377 .update_sclk_threshold = vegam_update_sclk_threshold,
2378 .is_hw_avfs_present = vegam_is_hw_avfs_present,
2379 .thermal_avfs_enable = vegam_thermal_avfs_enable,
2381 .thermal_setup_fan_table = vegam_thermal_setup_fan_table,
2382};
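/* For context: the powerplay core reaches these callbacks through
 * hwmgr->smumgr_funcs.  A minimal sketch of the dispatch wrapper (assumed
 * shape of the smumgr.c helper, not part of this patch):
 */
int smum_init_smc_table(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->smumgr_funcs->init_smc_table)
		return hwmgr->smumgr_funcs->init_smc_table(hwmgr);

	return 0;
}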
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h
new file mode 100644
index 000000000000..2b6558238500
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h
@@ -0,0 +1,75 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _VEGAM_SMUMANAGER_H
25#define _VEGAM_SMUMANAGER_H
26
27
28#include <pp_endian.h>
29#include "smu75_discrete.h"
30#include "smu7_smumgr.h"
31
32#define SMC_RAM_END 0x40000
33
34#define DPMTuning_Uphyst_Shift 0
35#define DPMTuning_Downhyst_Shift 8
36#define DPMTuning_Activity_Shift 16
37
38#define GraphicsDPMTuning_VEGAM 0x001e6400
39#define MemoryDPMTuning_VEGAM 0x000f3c0a
40#define SclkDPMTuning_VEGAM 0x002d000a
41#define MclkDPMTuning_VEGAM 0x001f100a
42
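/* A worked decode of the tuning words above (assumption: each field is an
 * 8-bit value packed at the shift positions defined here).  For example,
 * GraphicsDPMTuning_VEGAM 0x001e6400 unpacks to up-hysteresis 0x00,
 * down-hysteresis 0x64 and an activity target of 0x1e.  A sketch:
 */
static inline uint8_t dpmtuning_get_activity(uint32_t tuning)
{
	return (tuning >> DPMTuning_Activity_Shift) & 0xff;
}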
43
44struct vegam_pt_defaults {
45 uint8_t SviLoadLineEn;
46 uint8_t SviLoadLineVddC;
47 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
48 uint8_t TDC_MAWt;
49 uint8_t TdcWaterfallCtl;
50 uint8_t DTEAmbientTempBase;
51
52 uint32_t DisplayCac;
53 uint32_t BAPM_TEMP_GRADIENT;
54 uint16_t BAPMTI_R[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
55 uint16_t BAPMTI_RC[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
56};
57
58struct vegam_range_table {
 59 uint32_t trans_lower_frequency; /* in 10 kHz units */
60 uint32_t trans_upper_frequency;
61};
62
63struct vegam_smumgr {
64 struct smu7_smumgr smu7_data;
65 uint8_t protected_mode;
66 SMU75_Discrete_DpmTable smc_state_table;
67 struct SMU75_Discrete_Ulv ulv_setting;
68 struct SMU75_Discrete_PmFuses power_tune_table;
69 struct vegam_range_table range_table[NUM_SCLK_RANGE];
70 const struct vegam_pt_defaults *power_tune_defaults;
71 uint32_t bif_sclk_table[SMU75_MAX_LEVELS_LINK];
72};
73
74
75#endif
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index ab50090d066c..23e73c2a19f4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -116,7 +116,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
116 drm_sched_entity_init(&gpu->sched, 116 drm_sched_entity_init(&gpu->sched,
117 &ctx->sched_entity[i], 117 &ctx->sched_entity[i],
118 &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL], 118 &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
119 32, NULL); 119 NULL);
120 } 120 }
121 } 121 }
122 122
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 02baaaf20e9d..efbd5816082d 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1145,7 +1145,6 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1145 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1145 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1146 struct drm_device *dev = crtc->dev; 1146 struct drm_device *dev = crtc->dev;
1147 struct radeon_device *rdev = dev->dev_private; 1147 struct radeon_device *rdev = dev->dev_private;
1148 struct radeon_framebuffer *radeon_fb;
1149 struct drm_framebuffer *target_fb; 1148 struct drm_framebuffer *target_fb;
1150 struct drm_gem_object *obj; 1149 struct drm_gem_object *obj;
1151 struct radeon_bo *rbo; 1150 struct radeon_bo *rbo;
@@ -1164,19 +1163,15 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1164 return 0; 1163 return 0;
1165 } 1164 }
1166 1165
1167 if (atomic) { 1166 if (atomic)
1168 radeon_fb = to_radeon_framebuffer(fb);
1169 target_fb = fb; 1167 target_fb = fb;
1170 } 1168 else
1171 else {
1172 radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
1173 target_fb = crtc->primary->fb; 1169 target_fb = crtc->primary->fb;
1174 }
1175 1170
1176 /* If atomic, assume fb object is pinned & idle & fenced and 1171 /* If atomic, assume fb object is pinned & idle & fenced and
1177 * just update base pointers 1172 * just update base pointers
1178 */ 1173 */
1179 obj = radeon_fb->obj; 1174 obj = target_fb->obj[0];
1180 rbo = gem_to_radeon_bo(obj); 1175 rbo = gem_to_radeon_bo(obj);
1181 r = radeon_bo_reserve(rbo, false); 1176 r = radeon_bo_reserve(rbo, false);
1182 if (unlikely(r != 0)) 1177 if (unlikely(r != 0))
@@ -1441,8 +1436,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1441 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1436 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
1442 1437
1443 if (!atomic && fb && fb != crtc->primary->fb) { 1438 if (!atomic && fb && fb != crtc->primary->fb) {
1444 radeon_fb = to_radeon_framebuffer(fb); 1439 rbo = gem_to_radeon_bo(fb->obj[0]);
1445 rbo = gem_to_radeon_bo(radeon_fb->obj);
1446 r = radeon_bo_reserve(rbo, false); 1440 r = radeon_bo_reserve(rbo, false);
1447 if (unlikely(r != 0)) 1441 if (unlikely(r != 0))
1448 return r; 1442 return r;
@@ -1463,7 +1457,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1463 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1457 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1464 struct drm_device *dev = crtc->dev; 1458 struct drm_device *dev = crtc->dev;
1465 struct radeon_device *rdev = dev->dev_private; 1459 struct radeon_device *rdev = dev->dev_private;
1466 struct radeon_framebuffer *radeon_fb;
1467 struct drm_gem_object *obj; 1460 struct drm_gem_object *obj;
1468 struct radeon_bo *rbo; 1461 struct radeon_bo *rbo;
1469 struct drm_framebuffer *target_fb; 1462 struct drm_framebuffer *target_fb;
@@ -1481,16 +1474,12 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1481 return 0; 1474 return 0;
1482 } 1475 }
1483 1476
1484 if (atomic) { 1477 if (atomic)
1485 radeon_fb = to_radeon_framebuffer(fb);
1486 target_fb = fb; 1478 target_fb = fb;
1487 } 1479 else
1488 else {
1489 radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
1490 target_fb = crtc->primary->fb; 1480 target_fb = crtc->primary->fb;
1491 }
1492 1481
1493 obj = radeon_fb->obj; 1482 obj = target_fb->obj[0];
1494 rbo = gem_to_radeon_bo(obj); 1483 rbo = gem_to_radeon_bo(obj);
1495 r = radeon_bo_reserve(rbo, false); 1484 r = radeon_bo_reserve(rbo, false);
1496 if (unlikely(r != 0)) 1485 if (unlikely(r != 0))
@@ -1641,8 +1630,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1641 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); 1630 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
1642 1631
1643 if (!atomic && fb && fb != crtc->primary->fb) { 1632 if (!atomic && fb && fb != crtc->primary->fb) {
1644 radeon_fb = to_radeon_framebuffer(fb); 1633 rbo = gem_to_radeon_bo(fb->obj[0]);
1645 rbo = gem_to_radeon_bo(radeon_fb->obj);
1646 r = radeon_bo_reserve(rbo, false); 1634 r = radeon_bo_reserve(rbo, false);
1647 if (unlikely(r != 0)) 1635 if (unlikely(r != 0))
1648 return r; 1636 return r;
@@ -2149,11 +2137,9 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
2149 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2137 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2150 if (crtc->primary->fb) { 2138 if (crtc->primary->fb) {
2151 int r; 2139 int r;
2152 struct radeon_framebuffer *radeon_fb;
2153 struct radeon_bo *rbo; 2140 struct radeon_bo *rbo;
2154 2141
2155 radeon_fb = to_radeon_framebuffer(crtc->primary->fb); 2142 rbo = gem_to_radeon_bo(crtc->primary->fb->obj[0]);
2156 rbo = gem_to_radeon_bo(radeon_fb->obj);
2157 r = radeon_bo_reserve(rbo, false); 2143 r = radeon_bo_reserve(rbo, false);
2158 if (unlikely(r)) 2144 if (unlikely(r))
2159 DRM_ERROR("failed to reserve rbo before unpin\n"); 2145 DRM_ERROR("failed to reserve rbo before unpin\n");
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 40be4068ca69..fa5fadaa9bbb 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -526,7 +526,7 @@ static int radeon_atpx_init(void)
526 * look up whether we are the integrated or discrete GPU (all asics). 526 * look up whether we are the integrated or discrete GPU (all asics).
527 * Returns the client id. 527 * Returns the client id.
528 */ 528 */
529static int radeon_atpx_get_client_id(struct pci_dev *pdev) 529static enum vga_switcheroo_client_id radeon_atpx_get_client_id(struct pci_dev *pdev)
530{ 530{
531 if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) 531 if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
532 return VGA_SWITCHEROO_IGD; 532 return VGA_SWITCHEROO_IGD;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index df9469a8fdb1..2aea2bdff99b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -852,7 +852,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
852 return ret; 852 return ret;
853} 853}
854 854
855static int radeon_lvds_mode_valid(struct drm_connector *connector, 855static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
856 struct drm_display_mode *mode) 856 struct drm_display_mode *mode)
857{ 857{
858 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 858 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -1012,7 +1012,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
1012 return ret; 1012 return ret;
1013} 1013}
1014 1014
1015static int radeon_vga_mode_valid(struct drm_connector *connector, 1015static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
1016 struct drm_display_mode *mode) 1016 struct drm_display_mode *mode)
1017{ 1017{
1018 struct drm_device *dev = connector->dev; 1018 struct drm_device *dev = connector->dev;
@@ -1156,7 +1156,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
1156 return 1; 1156 return 1;
1157} 1157}
1158 1158
1159static int radeon_tv_mode_valid(struct drm_connector *connector, 1159static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
1160 struct drm_display_mode *mode) 1160 struct drm_display_mode *mode)
1161{ 1161{
1162 if ((mode->hdisplay > 1024) || (mode->vdisplay > 768)) 1162 if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
@@ -1498,7 +1498,7 @@ static void radeon_dvi_force(struct drm_connector *connector)
1498 radeon_connector->use_digital = true; 1498 radeon_connector->use_digital = true;
1499} 1499}
1500 1500
1501static int radeon_dvi_mode_valid(struct drm_connector *connector, 1501static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
1502 struct drm_display_mode *mode) 1502 struct drm_display_mode *mode)
1503{ 1503{
1504 struct drm_device *dev = connector->dev; 1504 struct drm_device *dev = connector->dev;
@@ -1800,7 +1800,7 @@ out:
1800 return ret; 1800 return ret;
1801} 1801}
1802 1802
1803static int radeon_dp_mode_valid(struct drm_connector *connector, 1803static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
1804 struct drm_display_mode *mode) 1804 struct drm_display_mode *mode)
1805{ 1805{
1806 struct drm_device *dev = connector->dev; 1806 struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 48d0e6bd0508..59c8a6647ff2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1591,7 +1591,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1591 /* unpin the front buffers and cursors */ 1591 /* unpin the front buffers and cursors */
1592 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1592 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1593 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1593 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1594 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb); 1594 struct drm_framebuffer *fb = crtc->primary->fb;
1595 struct radeon_bo *robj; 1595 struct radeon_bo *robj;
1596 1596
1597 if (radeon_crtc->cursor_bo) { 1597 if (radeon_crtc->cursor_bo) {
@@ -1603,10 +1603,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1603 } 1603 }
1604 } 1604 }
1605 1605
1606 if (rfb == NULL || rfb->obj == NULL) { 1606 if (fb == NULL || fb->obj[0] == NULL) {
1607 continue; 1607 continue;
1608 } 1608 }
1609 robj = gem_to_radeon_bo(rfb->obj); 1609 robj = gem_to_radeon_bo(fb->obj[0]);
1610 /* don't unpin kernel fb objects */ 1610 /* don't unpin kernel fb objects */
1611 if (!radeon_fbdev_robj_is_fb(rdev, robj)) { 1611 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
1612 r = radeon_bo_reserve(robj, false); 1612 r = radeon_bo_reserve(robj, false);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 26129b2b082d..9d3ac8b981da 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -32,6 +32,7 @@
32 32
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <drm/drm_crtc_helper.h> 34#include <drm/drm_crtc_helper.h>
35#include <drm/drm_gem_framebuffer_helper.h>
35#include <drm/drm_fb_helper.h> 36#include <drm/drm_fb_helper.h>
36#include <drm/drm_plane_helper.h> 37#include <drm/drm_plane_helper.h>
37#include <drm/drm_edid.h> 38#include <drm/drm_edid.h>
@@ -478,8 +479,6 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
478 struct drm_device *dev = crtc->dev; 479 struct drm_device *dev = crtc->dev;
479 struct radeon_device *rdev = dev->dev_private; 480 struct radeon_device *rdev = dev->dev_private;
480 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 481 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
481 struct radeon_framebuffer *old_radeon_fb;
482 struct radeon_framebuffer *new_radeon_fb;
483 struct drm_gem_object *obj; 482 struct drm_gem_object *obj;
484 struct radeon_flip_work *work; 483 struct radeon_flip_work *work;
485 struct radeon_bo *new_rbo; 484 struct radeon_bo *new_rbo;
@@ -501,15 +500,13 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
501 work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; 500 work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
502 501
503 /* schedule unpin of the old buffer */ 502 /* schedule unpin of the old buffer */
504 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb); 503 obj = crtc->primary->fb->obj[0];
505 obj = old_radeon_fb->obj;
506 504
507 /* take a reference to the old object */ 505 /* take a reference to the old object */
508 drm_gem_object_get(obj); 506 drm_gem_object_get(obj);
509 work->old_rbo = gem_to_radeon_bo(obj); 507 work->old_rbo = gem_to_radeon_bo(obj);
510 508
511 new_radeon_fb = to_radeon_framebuffer(fb); 509 obj = fb->obj[0];
512 obj = new_radeon_fb->obj;
513 new_rbo = gem_to_radeon_bo(obj); 510 new_rbo = gem_to_radeon_bo(obj);
514 511
515 /* pin the new buffer */ 512 /* pin the new buffer */
@@ -1285,41 +1282,23 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
1285 1282
1286} 1283}
1287 1284
1288static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
1289{
1290 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
1291
1292 drm_gem_object_put_unlocked(radeon_fb->obj);
1293 drm_framebuffer_cleanup(fb);
1294 kfree(radeon_fb);
1295}
1296
1297static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
1298 struct drm_file *file_priv,
1299 unsigned int *handle)
1300{
1301 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
1302
1303 return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
1304}
1305
1306static const struct drm_framebuffer_funcs radeon_fb_funcs = { 1285static const struct drm_framebuffer_funcs radeon_fb_funcs = {
1307 .destroy = radeon_user_framebuffer_destroy, 1286 .destroy = drm_gem_fb_destroy,
1308 .create_handle = radeon_user_framebuffer_create_handle, 1287 .create_handle = drm_gem_fb_create_handle,
1309}; 1288};
1310 1289
1311int 1290int
1312radeon_framebuffer_init(struct drm_device *dev, 1291radeon_framebuffer_init(struct drm_device *dev,
1313 struct radeon_framebuffer *rfb, 1292 struct drm_framebuffer *fb,
1314 const struct drm_mode_fb_cmd2 *mode_cmd, 1293 const struct drm_mode_fb_cmd2 *mode_cmd,
1315 struct drm_gem_object *obj) 1294 struct drm_gem_object *obj)
1316{ 1295{
1317 int ret; 1296 int ret;
1318 rfb->obj = obj; 1297 fb->obj[0] = obj;
1319 drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); 1298 drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
1320 ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs); 1299 ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
1321 if (ret) { 1300 if (ret) {
1322 rfb->obj = NULL; 1301 fb->obj[0] = NULL;
1323 return ret; 1302 return ret;
1324 } 1303 }
1325 return 0; 1304 return 0;
@@ -1331,7 +1310,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
1331 const struct drm_mode_fb_cmd2 *mode_cmd) 1310 const struct drm_mode_fb_cmd2 *mode_cmd)
1332{ 1311{
1333 struct drm_gem_object *obj; 1312 struct drm_gem_object *obj;
1334 struct radeon_framebuffer *radeon_fb; 1313 struct drm_framebuffer *fb;
1335 int ret; 1314 int ret;
1336 1315
1337 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); 1316 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
@@ -1347,20 +1326,20 @@ radeon_user_framebuffer_create(struct drm_device *dev,
1347 return ERR_PTR(-EINVAL); 1326 return ERR_PTR(-EINVAL);
1348 } 1327 }
1349 1328
1350 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); 1329 fb = kzalloc(sizeof(*fb), GFP_KERNEL);
1351 if (radeon_fb == NULL) { 1330 if (fb == NULL) {
1352 drm_gem_object_put_unlocked(obj); 1331 drm_gem_object_put_unlocked(obj);
1353 return ERR_PTR(-ENOMEM); 1332 return ERR_PTR(-ENOMEM);
1354 } 1333 }
1355 1334
1356 ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); 1335 ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
1357 if (ret) { 1336 if (ret) {
1358 kfree(radeon_fb); 1337 kfree(fb);
1359 drm_gem_object_put_unlocked(obj); 1338 drm_gem_object_put_unlocked(obj);
1360 return ERR_PTR(ret); 1339 return ERR_PTR(ret);
1361 } 1340 }
1362 1341
1363 return &radeon_fb->base; 1342 return fb;
1364} 1343}
1365 1344
1366static const struct drm_mode_config_funcs radeon_mode_funcs = { 1345static const struct drm_mode_config_funcs radeon_mode_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b28288a781ef..2a7977a23b31 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -168,7 +168,12 @@ int radeon_no_wb;
168int radeon_modeset = -1; 168int radeon_modeset = -1;
169int radeon_dynclks = -1; 169int radeon_dynclks = -1;
170int radeon_r4xx_atom = 0; 170int radeon_r4xx_atom = 0;
171#ifdef __powerpc__
172/* Default to PCI on PowerPC (fdo #95017) */
173int radeon_agpmode = -1;
174#else
171int radeon_agpmode = 0; 175int radeon_agpmode = 0;
176#endif
172int radeon_vram_limit = 0; 177int radeon_vram_limit = 0;
173int radeon_gart_size = -1; /* auto */ 178int radeon_gart_size = -1; /* auto */
174int radeon_benchmarking = 0; 179int radeon_benchmarking = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 57c5404a1654..1179034024ae 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -43,7 +43,7 @@
43 */ 43 */
44struct radeon_fbdev { 44struct radeon_fbdev {
45 struct drm_fb_helper helper; 45 struct drm_fb_helper helper;
46 struct radeon_framebuffer rfb; 46 struct drm_framebuffer fb;
47 struct radeon_device *rdev; 47 struct radeon_device *rdev;
48}; 48};
49 49
@@ -246,13 +246,13 @@ static int radeonfb_create(struct drm_fb_helper *helper,
246 246
247 info->par = rfbdev; 247 info->par = rfbdev;
248 248
249 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); 249 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->fb, &mode_cmd, gobj);
250 if (ret) { 250 if (ret) {
251 DRM_ERROR("failed to initialize framebuffer %d\n", ret); 251 DRM_ERROR("failed to initialize framebuffer %d\n", ret);
252 goto out; 252 goto out;
253 } 253 }
254 254
255 fb = &rfbdev->rfb.base; 255 fb = &rfbdev->fb;
256 256
257 /* setup helper */ 257 /* setup helper */
258 rfbdev->helper.fb = fb; 258 rfbdev->helper.fb = fb;
@@ -308,15 +308,15 @@ out:
308 308
309static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) 309static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
310{ 310{
311 struct radeon_framebuffer *rfb = &rfbdev->rfb; 311 struct drm_framebuffer *fb = &rfbdev->fb;
312 312
313 drm_fb_helper_unregister_fbi(&rfbdev->helper); 313 drm_fb_helper_unregister_fbi(&rfbdev->helper);
314 314
315 if (rfb->obj) { 315 if (fb->obj[0]) {
316 radeonfb_destroy_pinned_object(rfb->obj); 316 radeonfb_destroy_pinned_object(fb->obj[0]);
317 rfb->obj = NULL; 317 fb->obj[0] = NULL;
318 drm_framebuffer_unregister_private(&rfb->base); 318 drm_framebuffer_unregister_private(fb);
319 drm_framebuffer_cleanup(&rfb->base); 319 drm_framebuffer_cleanup(fb);
320 } 320 }
321 drm_fb_helper_fini(&rfbdev->helper); 321 drm_fb_helper_fini(&rfbdev->helper);
322 322
@@ -400,7 +400,7 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
400 if (!rdev->mode_info.rfbdev) 400 if (!rdev->mode_info.rfbdev)
401 return false; 401 return false;
402 402
403 if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) 403 if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->fb.obj[0]))
404 return true; 404 return true;
405 return false; 405 return false;
406} 406}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 1f1856e0b1e0..35a205ae4318 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -374,7 +374,6 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
374 struct drm_device *dev = crtc->dev; 374 struct drm_device *dev = crtc->dev;
375 struct radeon_device *rdev = dev->dev_private; 375 struct radeon_device *rdev = dev->dev_private;
376 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 376 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
377 struct radeon_framebuffer *radeon_fb;
378 struct drm_framebuffer *target_fb; 377 struct drm_framebuffer *target_fb;
379 struct drm_gem_object *obj; 378 struct drm_gem_object *obj;
380 struct radeon_bo *rbo; 379 struct radeon_bo *rbo;
@@ -393,14 +392,10 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
393 return 0; 392 return 0;
394 } 393 }
395 394
396 if (atomic) { 395 if (atomic)
397 radeon_fb = to_radeon_framebuffer(fb);
398 target_fb = fb; 396 target_fb = fb;
399 } 397 else
400 else {
401 radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
402 target_fb = crtc->primary->fb; 398 target_fb = crtc->primary->fb;
403 }
404 399
405 switch (target_fb->format->cpp[0] * 8) { 400 switch (target_fb->format->cpp[0] * 8) {
406 case 8: 401 case 8:
@@ -423,7 +418,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
423 } 418 }
424 419
 425 /* Pin framebuffer & get tiling information */ 420
426 obj = radeon_fb->obj; 421 obj = target_fb->obj[0];
427 rbo = gem_to_radeon_bo(obj); 422 rbo = gem_to_radeon_bo(obj);
428retry: 423retry:
429 r = radeon_bo_reserve(rbo, false); 424 r = radeon_bo_reserve(rbo, false);
@@ -451,7 +446,7 @@ retry:
451 struct radeon_bo *old_rbo; 446 struct radeon_bo *old_rbo;
452 unsigned long nsize, osize; 447 unsigned long nsize, osize;
453 448
454 old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj); 449 old_rbo = gem_to_radeon_bo(fb->obj[0]);
455 osize = radeon_bo_size(old_rbo); 450 osize = radeon_bo_size(old_rbo);
456 nsize = radeon_bo_size(rbo); 451 nsize = radeon_bo_size(rbo);
457 if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) { 452 if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
@@ -558,8 +553,7 @@ retry:
558 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); 553 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
559 554
560 if (!atomic && fb && fb != crtc->primary->fb) { 555 if (!atomic && fb && fb != crtc->primary->fb) {
561 radeon_fb = to_radeon_framebuffer(fb); 556 rbo = gem_to_radeon_bo(fb->obj[0]);
562 rbo = gem_to_radeon_bo(radeon_fb->obj);
563 r = radeon_bo_reserve(rbo, false); 557 r = radeon_bo_reserve(rbo, false);
564 if (unlikely(r != 0)) 558 if (unlikely(r != 0))
565 return r; 559 return r;
@@ -1093,11 +1087,9 @@ static void radeon_crtc_disable(struct drm_crtc *crtc)
1093 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1087 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1094 if (crtc->primary->fb) { 1088 if (crtc->primary->fb) {
1095 int r; 1089 int r;
1096 struct radeon_framebuffer *radeon_fb;
1097 struct radeon_bo *rbo; 1090 struct radeon_bo *rbo;
1098 1091
1099 radeon_fb = to_radeon_framebuffer(crtc->primary->fb); 1092 rbo = gem_to_radeon_bo(crtc->primary->fb->obj[0]);
1100 rbo = gem_to_radeon_bo(radeon_fb->obj);
1101 r = radeon_bo_reserve(rbo, false); 1093 r = radeon_bo_reserve(rbo, false);
1102 if (unlikely(r)) 1094 if (unlikely(r))
1103 DRM_ERROR("failed to reserve rbo before unpin\n"); 1095 DRM_ERROR("failed to reserve rbo before unpin\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3243e5e01432..fd470d6bf3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,7 +46,6 @@ struct radeon_device;
46#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) 46#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
47#define to_radeon_connector(x) container_of(x, struct radeon_connector, base) 47#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
48#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) 48#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
49#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
50 49
51#define RADEON_MAX_HPD_PINS 7 50#define RADEON_MAX_HPD_PINS 7
52#define RADEON_MAX_CRTCS 6 51#define RADEON_MAX_CRTCS 6
@@ -574,11 +573,6 @@ struct radeon_connector {
574 int enabled_attribs; 573 int enabled_attribs;
575}; 574};
576 575
577struct radeon_framebuffer {
578 struct drm_framebuffer base;
579 struct drm_gem_object *obj;
580};
581
582#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ 576#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
583 ((em) == ATOM_ENCODER_MODE_DP_MST)) 577 ((em) == ATOM_ENCODER_MODE_DP_MST))
584 578
@@ -932,7 +926,7 @@ radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
932extern void 926extern void
933radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); 927radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
934int radeon_framebuffer_init(struct drm_device *dev, 928int radeon_framebuffer_init(struct drm_device *dev,
935 struct radeon_framebuffer *rfb, 929 struct drm_framebuffer *rfb,
936 const struct drm_mode_fb_cmd2 *mode_cmd, 930 const struct drm_mode_fb_cmd2 *mode_cmd,
937 struct drm_gem_object *obj); 931 struct drm_gem_object *obj);
938 932
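/* The net effect of the radeon framebuffer refactor above, sketched: the
 * driver-private wrapper and its obj field go away in favor of the GEM
 * object array that drm_framebuffer already provides.
 *
 *	before:	obj = to_radeon_framebuffer(fb)->obj;
 *	after:	obj = fb->obj[0];
 */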
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 0d95888ccc3e..a364fc0b38c3 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -30,7 +30,7 @@
30#include <drm/spsc_queue.h> 30#include <drm/spsc_queue.h>
31 31
32#define CREATE_TRACE_POINTS 32#define CREATE_TRACE_POINTS
33#include <drm/gpu_scheduler_trace.h> 33#include "gpu_scheduler_trace.h"
34 34
35#define to_drm_sched_job(sched_job) \ 35#define to_drm_sched_job(sched_job) \
36 container_of((sched_job), struct drm_sched_job, queue_node) 36 container_of((sched_job), struct drm_sched_job, queue_node)
@@ -117,15 +117,15 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
117 * @sched The pointer to the scheduler 117 * @sched The pointer to the scheduler
118 * @entity The pointer to a valid drm_sched_entity 118 * @entity The pointer to a valid drm_sched_entity
 119 * @rq The run queue this entity belongs to 119 * @rq The run queue this entity belongs to
120 * @kernel If this is an entity for the kernel 120 * @guilty atomic_t set to 1 when a job on this queue
121 * @jobs The max number of jobs in the job queue 121 * is found to be guilty causing a timeout
122 * 122 *
 123 * return 0 on success, negative error code on failure 123 * return 0 on success, negative error code on failure
124*/ 124*/
125int drm_sched_entity_init(struct drm_gpu_scheduler *sched, 125int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
126 struct drm_sched_entity *entity, 126 struct drm_sched_entity *entity,
127 struct drm_sched_rq *rq, 127 struct drm_sched_rq *rq,
128 uint32_t jobs, atomic_t *guilty) 128 atomic_t *guilty)
129{ 129{
130 if (!(sched && entity && rq)) 130 if (!(sched && entity && rq))
131 return -EINVAL; 131 return -EINVAL;
@@ -135,6 +135,8 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
135 entity->rq = rq; 135 entity->rq = rq;
136 entity->sched = sched; 136 entity->sched = sched;
137 entity->guilty = guilty; 137 entity->guilty = guilty;
138 entity->fini_status = 0;
139 entity->last_scheduled = NULL;
138 140
139 spin_lock_init(&entity->rq_lock); 141 spin_lock_init(&entity->rq_lock);
140 spin_lock_init(&entity->queue_lock); 142 spin_lock_init(&entity->queue_lock);
@@ -196,19 +198,30 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
196 return true; 198 return true;
197} 199}
198 200
201static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
202 struct dma_fence_cb *cb)
203{
204 struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
205 finish_cb);
206 drm_sched_fence_finished(job->s_fence);
207 WARN_ON(job->s_fence->parent);
208 dma_fence_put(&job->s_fence->finished);
209 job->sched->ops->free_job(job);
210}
211
212
199/** 213/**
200 * Destroy a context entity 214 * Destroy a context entity
201 * 215 *
202 * @sched Pointer to scheduler instance 216 * @sched Pointer to scheduler instance
203 * @entity The pointer to a valid scheduler entity 217 * @entity The pointer to a valid scheduler entity
204 * 218 *
 205 * Cleanup and free the allocated resources. 219 * drm_sched_entity_fini() is split into two functions; the first one does the waiting,
220 * removes the entity from the runqueue and returns an error when the process was killed.
206 */ 221 */
207void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, 222void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
208 struct drm_sched_entity *entity) 223 struct drm_sched_entity *entity)
209{ 224{
210 int r;
211
212 if (!drm_sched_entity_is_initialized(sched, entity)) 225 if (!drm_sched_entity_is_initialized(sched, entity))
213 return; 226 return;
214 /** 227 /**
@@ -216,13 +229,28 @@ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
216 * queued IBs or discard them on SIGKILL 229 * queued IBs or discard them on SIGKILL
217 */ 230 */
218 if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) 231 if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
219 r = -ERESTARTSYS; 232 entity->fini_status = -ERESTARTSYS;
220 else 233 else
221 r = wait_event_killable(sched->job_scheduled, 234 entity->fini_status = wait_event_killable(sched->job_scheduled,
222 drm_sched_entity_is_idle(entity)); 235 drm_sched_entity_is_idle(entity));
223 drm_sched_entity_set_rq(entity, NULL); 236 drm_sched_entity_set_rq(entity, NULL);
224 if (r) { 237}
238EXPORT_SYMBOL(drm_sched_entity_do_release);
239
240/**
241 * Destroy a context entity
242 *
243 * @sched Pointer to scheduler instance
244 * @entity The pointer to a valid scheduler entity
245 *
246 * The second one then goes over the entity and signals all jobs with an error code.
247 */
248void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
249 struct drm_sched_entity *entity)
250{
251 if (entity->fini_status) {
225 struct drm_sched_job *job; 252 struct drm_sched_job *job;
253 int r;
226 254
227 /* Park the kernel for a moment to make sure it isn't processing 255 /* Park the kernel for a moment to make sure it isn't processing
 228 * our entity. 256
@@ -240,12 +268,25 @@ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
240 struct drm_sched_fence *s_fence = job->s_fence; 268 struct drm_sched_fence *s_fence = job->s_fence;
241 drm_sched_fence_scheduled(s_fence); 269 drm_sched_fence_scheduled(s_fence);
242 dma_fence_set_error(&s_fence->finished, -ESRCH); 270 dma_fence_set_error(&s_fence->finished, -ESRCH);
243 drm_sched_fence_finished(s_fence); 271 r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
244 WARN_ON(s_fence->parent); 272 drm_sched_entity_kill_jobs_cb);
245 dma_fence_put(&s_fence->finished); 273 if (r == -ENOENT)
246 sched->ops->free_job(job); 274 drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
275 else if (r)
276 DRM_ERROR("fence add callback failed (%d)\n", r);
247 } 277 }
248 } 278 }
279
280 dma_fence_put(entity->last_scheduled);
281 entity->last_scheduled = NULL;
282}
283EXPORT_SYMBOL(drm_sched_entity_cleanup);
284
285void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
286 struct drm_sched_entity *entity)
287{
288 drm_sched_entity_do_release(sched, entity);
289 drm_sched_entity_cleanup(sched, entity);
249} 290}
250EXPORT_SYMBOL(drm_sched_entity_fini); 291EXPORT_SYMBOL(drm_sched_entity_fini);
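/* A minimal usage sketch of the split teardown introduced here (hypothetical
 * driver code); drm_sched_entity_fini() stays equivalent to calling both
 * halves back to back.
 */
void example_ctx_fini(struct drm_gpu_scheduler *sched,
		      struct drm_sched_entity *entity)
{
	/* wait for queued jobs to drain and drop the entity from its runqueue */
	drm_sched_entity_do_release(sched, entity);
	/* signal anything left over with an error and free it */
	drm_sched_entity_cleanup(sched, entity);
}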
251 292
@@ -360,6 +401,9 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
360 if (entity->guilty && atomic_read(entity->guilty)) 401 if (entity->guilty && atomic_read(entity->guilty))
361 dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); 402 dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
362 403
404 dma_fence_put(entity->last_scheduled);
405 entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
406
363 spsc_queue_pop(&entity->job_queue); 407 spsc_queue_pop(&entity->job_queue);
364 return sched_job; 408 return sched_job;
365} 409}
@@ -529,6 +573,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
529 spin_unlock(&sched->job_list_lock); 573 spin_unlock(&sched->job_list_lock);
530 fence = sched->ops->run_job(s_job); 574 fence = sched->ops->run_job(s_job);
531 atomic_inc(&sched->hw_rq_count); 575 atomic_inc(&sched->hw_rq_count);
576
532 if (fence) { 577 if (fence) {
533 s_fence->parent = dma_fence_get(fence); 578 s_fence->parent = dma_fence_get(fence);
534 r = dma_fence_add_callback(fence, &s_fence->cb, 579 r = dma_fence_add_callback(fence, &s_fence->cb,
@@ -555,6 +600,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
555 void *owner) 600 void *owner)
556{ 601{
557 job->sched = sched; 602 job->sched = sched;
603 job->entity = entity;
558 job->s_priority = entity->rq - sched->sched_rq; 604 job->s_priority = entity->rq - sched->sched_rq;
559 job->s_fence = drm_sched_fence_create(entity, owner); 605 job->s_fence = drm_sched_fence_create(entity, owner);
560 if (!job->s_fence) 606 if (!job->s_fence)
diff --git a/include/drm/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index 0789e8d0a0e1..4998ad950a48 100644
--- a/include/drm/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -78,5 +78,5 @@ TRACE_EVENT(drm_sched_process_job,
78 78
79/* This part must be outside protection */ 79/* This part must be outside protection */
80#undef TRACE_INCLUDE_PATH 80#undef TRACE_INCLUDE_PATH
81#define TRACE_INCLUDE_PATH . 81#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/scheduler
82#include <trace/define_trace.h> 82#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 7c2485fe88d8..ea4d59eb8966 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
1/************************************************************************** 2/**************************************************************************
2 * 3 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA 4 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 98e06f8bf23b..5d8688e522d1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -1175,7 +1176,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	reservation_object_init(&bo->ttm_resv);
 	atomic_inc(&bo->bdev->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
-	bo->priority = 0;
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index a7c232dc39cb..18d3debcc949 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ebbae6067ab..f2c167702eef 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -39,6 +40,11 @@
 #include <linux/module.h>
 #include <linux/reservation.h>
 
+struct ttm_transfer_obj {
+	struct ttm_buffer_object base;
+	struct ttm_buffer_object *bo;
+};
+
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
 	ttm_bo_mem_put(bo, &bo->mem);
@@ -454,7 +460,11 @@ EXPORT_SYMBOL(ttm_bo_move_memcpy);
 
 static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
 {
-	kfree(bo);
+	struct ttm_transfer_obj *fbo;
+
+	fbo = container_of(bo, struct ttm_transfer_obj, base);
+	ttm_bo_unref(&fbo->bo);
+	kfree(fbo);
 }
 
 /**
@@ -475,14 +485,15 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
 static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 				      struct ttm_buffer_object **new_obj)
 {
-	struct ttm_buffer_object *fbo;
+	struct ttm_transfer_obj *fbo;
 	int ret;
 
 	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
 	if (!fbo)
 		return -ENOMEM;
 
-	*fbo = *bo;
+	fbo->base = *bo;
+	fbo->bo = ttm_bo_reference(bo);
 
 	/**
 	 * Fix up members that we shouldn't copy directly:
@@ -490,25 +501,25 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 */
 
 	atomic_inc(&bo->bdev->glob->bo_count);
-	INIT_LIST_HEAD(&fbo->ddestroy);
-	INIT_LIST_HEAD(&fbo->lru);
-	INIT_LIST_HEAD(&fbo->swap);
-	INIT_LIST_HEAD(&fbo->io_reserve_lru);
-	mutex_init(&fbo->wu_mutex);
-	fbo->moving = NULL;
-	drm_vma_node_reset(&fbo->vma_node);
-	atomic_set(&fbo->cpu_writers, 0);
+	INIT_LIST_HEAD(&fbo->base.ddestroy);
+	INIT_LIST_HEAD(&fbo->base.lru);
+	INIT_LIST_HEAD(&fbo->base.swap);
+	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
+	mutex_init(&fbo->base.wu_mutex);
+	fbo->base.moving = NULL;
+	drm_vma_node_reset(&fbo->base.vma_node);
+	atomic_set(&fbo->base.cpu_writers, 0);
 
-	kref_init(&fbo->list_kref);
-	kref_init(&fbo->kref);
-	fbo->destroy = &ttm_transfered_destroy;
-	fbo->acc_size = 0;
-	fbo->resv = &fbo->ttm_resv;
-	reservation_object_init(fbo->resv);
-	ret = reservation_object_trylock(fbo->resv);
+	kref_init(&fbo->base.list_kref);
+	kref_init(&fbo->base.kref);
+	fbo->base.destroy = &ttm_transfered_destroy;
+	fbo->base.acc_size = 0;
+	fbo->base.resv = &fbo->base.ttm_resv;
+	reservation_object_init(fbo->base.resv);
+	ret = reservation_object_trylock(fbo->base.resv);
 	WARN_ON(!ret);
 
-	*new_obj = fbo;
+	*new_obj = &fbo->base;
 	return 0;
 }
 
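The transfer object is a textbook container_of embedding: the wrapper embeds
the base object as its first member, copies the original into it, takes a
reference on the source BO, and recovers the wrapper again in the destroy
callback. A stripped-down sketch of the idiom; the my_* names are illustrative.

	/* Sketch only: the embed-and-recover pattern used above. */
	struct my_wrapper {
		struct ttm_buffer_object base;	/* embedded base object */
		struct ttm_buffer_object *bo;	/* reference to the source */
	};

	static void my_destroy(struct ttm_buffer_object *obj)
	{
		struct my_wrapper *w = container_of(obj, struct my_wrapper, base);

		ttm_bo_unref(&w->bo);	/* drop the reference taken at transfer */
		kfree(w);
	}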
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8eba95b3c737..c7ece7613a6a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 3dca206e85f7..e73ae0d22897 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 913f4318cdc0..20694b8a01ca 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 27856c55dc84..450387c92b63 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 66fc6395eb54..6ff40c041d79 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1aa2baa83959..74f1b1eb1f8e 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index f0481b7b60c5..06c94e3a5f15 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -910,7 +910,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 	while (npages >= HPAGE_PMD_NR) {
 		gfp_t huge_flags = gfp_flags;
 
-		huge_flags |= GFP_TRANSHUGE;
+		huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+			__GFP_KSWAPD_RECLAIM;
 		huge_flags &= ~__GFP_MOVABLE;
 		huge_flags &= ~__GFP_COMP;
 		p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
@@ -1027,11 +1028,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 				  GFP_USER | GFP_DMA32, "uc dma", 0);
 
 	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
-				  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+				   __GFP_KSWAPD_RECLAIM) &
+				  ~(__GFP_MOVABLE | __GFP_COMP),
 				  "wc huge", order);
 
 	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
-				  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+				   __GFP_KSWAPD_RECLAIM) &
+				  ~(__GFP_MOVABLE | __GFP_COMP)
 				  , "uc huge", order);
 
 	_manager->options.max_size = max_pages;
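Replacing GFP_TRANSHUGE with GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
__GFP_KSWAPD_RECLAIM makes the huge-page allocations opportunistic: kswapd may
be woken, but the allocator will not stall in compaction or retry loops, so
TTM falls back to normal pages quickly under memory pressure. A hedged helper
that centralizes the composition; ttm_huge_gfp() is an illustrative name, not
a function added by this series.

	/* Sketch only: opportunistic THP flags as composed in the hunks above. */
	static inline gfp_t ttm_huge_gfp(gfp_t base)
	{
		gfp_t flags = base | GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			      __GFP_KSWAPD_RECLAIM;

		/* TTM pages must be neither movable nor compound. */
		return flags & ~(__GFP_MOVABLE | __GFP_COMP);
	}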
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 8a25d1974385..f63d99c302e4 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -910,7 +910,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
 		gfp_flags |= __GFP_ZERO;
 
 	if (huge) {
-		gfp_flags |= GFP_TRANSHUGE;
+		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+			__GFP_KSWAPD_RECLAIM;
 		gfp_flags &= ~__GFP_MOVABLE;
 		gfp_flags &= ~__GFP_COMP;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7e672be987b5..a1e543972ca7 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 38e8041b5f0c..cdb582043b4f 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -135,7 +135,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 		drm_sched_entity_init(&v3d->queue[i].sched,
 				      &v3d_priv->sched_entity[i],
 				      &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-				      32, NULL);
+				      NULL);
 	}
 
 	file->driver_priv = v3d_priv;
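drm_sched_entity_init() has dropped its queue-depth argument (the literal 32
above), a value the scheduler had stopped using. A minimal call under the new
four-argument signature; my_open() and its parameters are illustrative, and
the scheduler is assumed to be initialized already.

	/* Sketch only: entity init without the removed jobs argument. */
	static int my_open(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity)
	{
		return drm_sched_entity_init(sched, entity,
					     &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL],
					     NULL /* guilty */);
	}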
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 6c731c52c071..695bde7eb055 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -44,6 +44,7 @@ enum amd_asic_type {
 	CHIP_POLARIS10,
 	CHIP_POLARIS11,
 	CHIP_POLARIS12,
+	CHIP_VEGAM,
 	CHIP_VEGA10,
 	CHIP_VEGA12,
 	CHIP_RAVEN,
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index dfd54fb94e10..52380067a43f 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -43,10 +43,12 @@ enum drm_sched_priority {
 };
 
 /**
- * A scheduler entity is a wrapper around a job queue or a group
- * of other entities. Entities take turns emitting jobs from their
- * job queues to corresponding hardware ring based on scheduling
- * policy.
+ * drm_sched_entity - A wrapper around a job queue (typically attached
+ * to the DRM file_priv).
+ *
+ * Entities will emit jobs in order to their corresponding hardware
+ * ring, and the scheduler will alternate between entities based on
+ * scheduling policy.
 */
 struct drm_sched_entity {
 	struct list_head		list;
@@ -63,6 +65,8 @@ struct drm_sched_entity {
 	struct dma_fence		*dependency;
 	struct dma_fence_cb		cb;
 	atomic_t			*guilty; /* points to ctx's guilty */
+	int				fini_status;
+	struct dma_fence		*last_scheduled;
 };
 
 /**
@@ -78,7 +82,18 @@ struct drm_sched_rq {
 
 struct drm_sched_fence {
 	struct dma_fence		scheduled;
+
+	/* This fence is what will be signaled by the scheduler when
+	 * the job is completed.
+	 *
+	 * When setting up an out fence for the job, you should use
+	 * this, since it's available immediately upon
+	 * drm_sched_job_init(), and the fence returned by the driver
+	 * from run_job() won't be created until the dependencies have
+	 * resolved.
+	 */
 	struct dma_fence		finished;
+
 	struct dma_fence_cb		cb;
 	struct dma_fence		*parent;
 	struct drm_gpu_scheduler	*sched;
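As the new comment notes, the finished fence exists as soon as
drm_sched_job_init() returns, which makes it the natural out-fence for a
submission. A hedged sketch of exporting it as a sync_file fd; the sync_file
calls are the generic kernel API, not something introduced here, and
my_export_out_fence() is illustrative.

	#include <linux/file.h>
	#include <linux/sync_file.h>

	/* Sketch only: hand the finished fence to userspace as a sync_file. */
	static int my_export_out_fence(struct drm_sched_job *job, int *out_fd)
	{
		struct sync_file *sync_file;
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0)
			return fd;

		/* Valid right after drm_sched_job_init(), before run_job(). */
		sync_file = sync_file_create(&job->s_fence->finished);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		*out_fd = fd;
		return 0;
	}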
@@ -88,6 +103,13 @@ struct drm_sched_fence {
 
 struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
 
+/**
+ * drm_sched_job - A job to be run by an entity.
+ *
+ * A job is created by the driver using drm_sched_job_init(), and
+ * should call drm_sched_entity_push_job() once it wants the scheduler
+ * to schedule the job.
+ */
 struct drm_sched_job {
 	struct spsc_node		queue_node;
 	struct drm_gpu_scheduler	*sched;
@@ -99,6 +121,7 @@ struct drm_sched_job {
 	uint64_t			id;
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
+	struct drm_sched_entity		*entity;
 };
 
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
@@ -112,10 +135,28 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
  * these functions should be implemented in driver side
 */
 struct drm_sched_backend_ops {
+	/* Called when the scheduler is considering scheduling this
+	 * job next, to get another struct dma_fence for this job to
+	 * block on. Once it returns NULL, run_job() may be called.
+	 */
 	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
 					struct drm_sched_entity *s_entity);
+
+	/* Called to execute the job once all of the dependencies have
+	 * been resolved. This may be called multiple times, if
+	 * timedout_job() has happened and drm_sched_job_recovery()
+	 * decides to try it again.
+	 */
 	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
+
+	/* Called when a job has taken too long to execute, to trigger
+	 * GPU recovery.
+	 */
 	void (*timedout_job)(struct drm_sched_job *sched_job);
+
+	/* Called once the job's finished fence has been signaled and
+	 * it's time to clean it up.
+	 */
 	void (*free_job)(struct drm_sched_job *sched_job);
 };
 
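Taken together, the four hooks give a driver skeleton along these lines; every
my_* name is an illustrative assumption, not a driver from this series.

	/* Sketch only: a minimal drm_sched_backend_ops implementation. */
	static struct dma_fence *my_hw_submit(struct drm_sched_job *job); /* illustrative */

	static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
	{
		return NULL;	/* nothing extra to wait on: run_job() may run */
	}

	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
	{
		/* Kick the hardware; its fence backs the finished fence. */
		return my_hw_submit(sched_job);
	}

	static void my_timedout_job(struct drm_sched_job *sched_job)
	{
		/* Hung job: reset the GPU; drm_sched_job_recovery() may rerun it. */
	}

	static void my_free_job(struct drm_sched_job *sched_job)
	{
		/* The finished fence has signaled; release driver state here. */
	}

	static const struct drm_sched_backend_ops my_sched_ops = {
		.dependency	= my_dependency,
		.run_job	= my_run_job,
		.timedout_job	= my_timedout_job,
		.free_job	= my_free_job,
	};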
@@ -147,7 +188,11 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 			  struct drm_sched_entity *entity,
 			  struct drm_sched_rq *rq,
-			  uint32_t jobs, atomic_t *guilty);
+			  atomic_t *guilty);
+void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+				 struct drm_sched_entity *entity);
+void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+			      struct drm_sched_entity *entity);
 void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index c363b67f2d0a..78b4dd89fcb4 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -78,6 +78,12 @@ extern "C" {
 #define AMDGPU_GEM_DOMAIN_GDS		0x8
 #define AMDGPU_GEM_DOMAIN_GWS		0x10
 #define AMDGPU_GEM_DOMAIN_OA		0x20
+#define AMDGPU_GEM_DOMAIN_MASK		(AMDGPU_GEM_DOMAIN_CPU | \
+					 AMDGPU_GEM_DOMAIN_GTT | \
+					 AMDGPU_GEM_DOMAIN_VRAM | \
+					 AMDGPU_GEM_DOMAIN_GDS | \
+					 AMDGPU_GEM_DOMAIN_GWS | \
+					 AMDGPU_GEM_DOMAIN_OA)
 
 /* Flag that CPU access will be required for the case of VRAM domain */
 #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED	(1 << 0)
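AMDGPU_GEM_DOMAIN_MASK lets a single test reject unknown domain bits at the
ioctl boundary. A hedged sketch of such a check; my_validate_domains() is
illustrative, not the actual amdgpu handler.

	/* Sketch only: validate userspace-requested placement domains. */
	static int my_validate_domains(__u64 domains)
	{
		/* Bits outside the known domains mean a malformed request. */
		if (domains & ~(__u64)AMDGPU_GEM_DOMAIN_MASK)
			return -EINVAL;

		/* At least one placement must be requested. */
		if (!domains)
			return -EINVAL;

		return 0;
	}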
@@ -95,6 +101,10 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID	(1 << 6)
 /* Flag that BO sharing will be explicitly synchronized */
 #define AMDGPU_GEM_CREATE_EXPLICIT_SYNC		(1 << 7)
+/* Flag that indicates allocating MQD gart on GFX9, where the mtype
+ * for the second page onward should be set to NC.
+ */
+#define AMDGPU_GEM_CREATE_MQD_GFX9		(1 << 8)
 
 struct drm_amdgpu_gem_create_in {
 	/** the requested memory size */
@@ -520,6 +530,10 @@ union drm_amdgpu_cs {
 /* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
 #define AMDGPU_IB_FLAG_PREEMPT (1<<2)
 
+/* The IB fence should do the L2 writeback but not invalidate any shader
+ * caches (L2/vL1/sL1/I$). */
+#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
+
 struct drm_amdgpu_cs_chunk_ib {
 	__u32 _pad;
 	/** AMDGPU_IB_FLAG_* */
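From userspace the new IB flag is simply OR'd into the chunk's flags field. A
hedged sketch of filling the chunk; my_fill_ib_chunk() and its parameters are
illustrative, and only the flag itself comes from this patch.

	#include <string.h>
	#include <amdgpu_drm.h>	/* header path may vary by distribution */

	/* Sketch only: request L2 writeback without shader-cache invalidate. */
	static void my_fill_ib_chunk(struct drm_amdgpu_cs_chunk_ib *ib,
				     __u64 ib_gpu_va, __u32 ib_bytes, __u32 ring)
	{
		memset(ib, 0, sizeof(*ib));
		ib->flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;
		ib->va_start = ib_gpu_va;
		ib->ib_bytes = ib_bytes;
		ib->ip_type = AMDGPU_HW_IP_GFX;
		ib->ring = ring;
	}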
@@ -620,6 +634,12 @@ struct drm_amdgpu_cs_chunk_data {
 	#define AMDGPU_INFO_FW_ASD		0x0d
 	/* Subquery id: Query VCN firmware version */
 	#define AMDGPU_INFO_FW_VCN		0x0e
+	/* Subquery id: Query GFX RLC SRLC firmware version */
+	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
+	/* Subquery id: Query GFX RLC SRLG firmware version */
+	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
+	/* Subquery id: Query GFX RLC SRLS firmware version */
+	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
 /* number of bytes moved for TTM migration */
 #define AMDGPU_INFO_NUM_BYTES_MOVED		0x0f
 /* the used VRAM size */
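The three new subqueries ride on the existing AMDGPU_INFO_FW_VERSION query. A
hedged userspace sketch of fetching one of them through the INFO ioctl;
my_query_rlc_srlc() is illustrative, the struct field names are taken from the
uapi header as the editor understands it, and error handling is trimmed.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <amdgpu_drm.h>	/* header path may vary by distribution */

	/* Sketch only: query the GFX RLC SRLC firmware version. */
	static int my_query_rlc_srlc(int drm_fd,
				     struct drm_amdgpu_info_firmware *fw)
	{
		struct drm_amdgpu_info request;

		memset(&request, 0, sizeof(request));
		request.return_pointer = (__u64)(uintptr_t)fw;
		request.return_size = sizeof(*fw);
		request.query = AMDGPU_INFO_FW_VERSION;
		request.query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;

		return ioctl(drm_fd, DRM_IOCTL_AMDGPU_INFO, &request);
	}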