author    Dave Airlie <airlied@redhat.com>    2019-01-30 23:19:45 -0500
committer Dave Airlie <airlied@redhat.com>    2019-01-31 18:34:20 -0500
commit    e09191d360ab54c7ac7a9b09287dcab81c30d8e3 (patch)
tree      be0c0a0ed2f0603ecaa430bd09811484adc04ee6
parent    f91168f48556486743392b8838e20afbd84b7b7a (diff)
parent    10117450735c7a7c0858095fb46a860e7037cb9a (diff)
Merge branch 'drm-next-5.1' of git://people.freedesktop.org/~agd5f/linux into drm-next
New stuff for 5.1.

amdgpu:
- DC bandwidth formula updates
- Support for DCC on scanout surfaces
- Support for multiple IH rings on soc15 asics
- Fix xgmi locking
- Add sysfs interface to get pcie usage stats
- Simplify DC i2c/aux code
- Initial support for BACO on vega10/20
- New runtime SMU feature debug interface
- Expand existing sysfs power interfaces to new clock domains
- Handle kexec properly
- Simplify IH programming
- Rework doorbell handling across asics
- Drop old CI DPM implementation
- DC page flipping fixes
- Misc SR-IOV fixes

amdkfd:
- Simplify the interfaces between amdkfd and amdgpu

ttm:
- Add a callback to notify the driver when the lru changes

sched:
- Refactor mirror list handling
- Rework hw fence processing

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190125231517.26268-1-alexander.deucher@amd.com
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 84
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 77
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 40
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 379
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c 141
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 65
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c 47
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c 6844
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.h 349
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_smc.c 279
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c 75
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_dpm.h 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_ih.c 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cz_ih.c 40
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_virtual.c 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 52
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/iceland_ih.c 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 90
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 75
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 72
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si.c 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dma.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_ih.c 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c 167
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/tonga_ih.c 60
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_ih.c 324
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c 69
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/Kconfig 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c 8
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_module.c 31
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h 22
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c 21
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 1262
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c 49
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c 8
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 84
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c 57
-rw-r--r-- drivers/gpu/drm/amd/display/dc/Makefile 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c 96
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table.c 135
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table.h 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table2.c 82
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table2.h 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/Makefile 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c 43
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c 65
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c 88
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 162
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 123
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c 69
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_surface.c 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c 127
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_bios_types.h 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dp_types.h 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_helper.c 36
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h 35
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_aux.c 647
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_aux.h 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c 123
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h 35
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c 196
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c 129
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c 79
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 155
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 84
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c 107
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c 42
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c 188
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h 35
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 43
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c 48
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 74
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c 79
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_pp_smu.h 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_services_types.h 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/Makefile 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h 107
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/Makefile 99
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c 606
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h 86
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c 106
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h 32
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c 505
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h 78
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c 574
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h 218
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c 160
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c 329
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h 54
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c 129
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h 32
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c 120
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c 875
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h 54
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c 173
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c 284
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c 120
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h 32
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c 97
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h 32
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/engine.h 111
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c 118
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h 115
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c 284
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h 77
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c 251
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h 80
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c 601
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h 81
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c 491
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h 122
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/clock_source.h 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_status.h 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/vmid.h (renamed from drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c) 45
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/vm_helper.h (renamed from drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h) 38
-rw-r--r-- drivers/gpu/drm/amd/display/include/bios_parser_types.h 4
-rw-r--r-- drivers/gpu/drm/amd/display/include/i2caux_interface.h 33
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.c 78
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c 106
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_shared.h 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.c 597
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h 4
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h 58
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h 54
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h 53
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h 3
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h 3
-rw-r--r-- drivers/gpu/drm/amd/include/atombios.h 2
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_kfd_interface.h 53
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h 21
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c 96
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c 101
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h (renamed from drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h) 39
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c 9
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c 34
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c 14
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 8
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c 136
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h (renamed from drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h) 22
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 201
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h 24
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c 72
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c 37
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c 219
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c 81
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h (renamed from drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h) 18
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c 398
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h 14
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_sched.c 11
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.c 5
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c 5
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c 219
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c 11
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c 19
-rw-r--r-- drivers/gpu/drm/v3d/v3d_sched.c 13
-rw-r--r-- include/drm/gpu_scheduler.h 13
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h 9
253 files changed, 6565 insertions, 18723 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index f76bcb9c45e4..466da5954a68 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -57,7 +57,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
-	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
+	dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
 
 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index bcef6ea4bcf9..9efa681d0878 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -542,6 +542,11 @@ struct amdgpu_asic_funcs {
 	bool (*need_full_reset)(struct amdgpu_device *adev);
 	/* initialize doorbell layout for specific asic*/
 	void (*init_doorbell_index)(struct amdgpu_device *adev);
+	/* PCIe bandwidth usage */
+	void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
+			       uint64_t *count1);
+	/* do we need to reset the asic at init time (e.g., kexec) */
+	bool (*need_reset_on_init)(struct amdgpu_device *adev);
 };
 
 /*
@@ -634,7 +639,7 @@ struct amdgpu_nbio_funcs {
 	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 	u32 (*get_memsize)(struct amdgpu_device *adev);
 	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
-			bool use_doorbell, int doorbell_index);
+			bool use_doorbell, int doorbell_index, int doorbell_size);
 	void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
 			bool enable);
 	void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
@@ -1042,6 +1047,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
 #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
+#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
+#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
 
 /* Common functions */
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
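
The two new entries above follow the driver's established pattern: an optional per-ASIC callback in amdgpu_asic_funcs plus a one-line macro that dispatches through it. A minimal, self-contained sketch of that pattern (the my_* names are hypothetical, not amdgpu code):

/* Hypothetical sketch of the callback-plus-macro dispatch pattern. */
struct my_device;

struct my_asic_funcs {
	/* do we need to reset the asic at init time (e.g., kexec)? */
	bool (*need_reset_on_init)(struct my_device *dev);
};

struct my_device {
	const struct my_asic_funcs *asic_funcs;
};

#define my_asic_need_reset_on_init(dev) \
	((dev)->asic_funcs->need_reset_on_init((dev)))
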
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 2dfaf158ef07..e957e42c539a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -28,8 +28,6 @@
 #include <linux/module.h>
 #include <linux/dma-buf.h>
 
-const struct kgd2kfd_calls *kgd2kfd;
-
 static const unsigned int compute_vmid_bitmap = 0xFF00;
 
 /* Total memory size in system memory and all GPU VRAM. Used to
@@ -47,12 +45,9 @@ int amdgpu_amdkfd_init(void)
 	amdgpu_amdkfd_total_mem_size *= si.mem_unit;
 
 #ifdef CONFIG_HSA_AMD
-	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
-	if (ret)
-		kgd2kfd = NULL;
+	ret = kgd2kfd_init();
 	amdgpu_amdkfd_gpuvm_init_mem_limits();
 #else
-	kgd2kfd = NULL;
 	ret = -ENOENT;
 #endif
 
@@ -61,17 +56,13 @@ int amdgpu_amdkfd_init(void)
 
 void amdgpu_amdkfd_fini(void)
 {
-	if (kgd2kfd)
-		kgd2kfd->exit();
+	kgd2kfd_exit();
 }
 
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 {
 	const struct kfd2kgd_calls *kfd2kgd;
 
-	if (!kgd2kfd)
-		return;
-
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_KAVERI:
@@ -98,8 +89,8 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 		return;
 	}
 
-	adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
+	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
 				      adev->pdev, kfd2kgd);
 
 	if (adev->kfd.dev)
 		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -182,7 +173,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				&gpu_resources.doorbell_start_offset);
 
 	if (adev->asic_type < CHIP_VEGA10) {
-		kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
+		kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
 		return;
 	}
 
@@ -197,13 +188,13 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		 * can use each doorbell assignment twice.
 		 */
 		gpu_resources.sdma_doorbell[0][i] =
-			adev->doorbell_index.sdma_engine0 + (i >> 1);
+			adev->doorbell_index.sdma_engine[0] + (i >> 1);
 		gpu_resources.sdma_doorbell[0][i+1] =
-			adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1);
+			adev->doorbell_index.sdma_engine[0] + 0x200 + (i >> 1);
 		gpu_resources.sdma_doorbell[1][i] =
-			adev->doorbell_index.sdma_engine1 + (i >> 1);
+			adev->doorbell_index.sdma_engine[1] + (i >> 1);
 		gpu_resources.sdma_doorbell[1][i+1] =
-			adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1);
+			adev->doorbell_index.sdma_engine[1] + 0x200 + (i >> 1);
 	}
 	/* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
 	 * SDMA, IH and VCN. So don't use them for the CP.
@@ -211,14 +202,14 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 	gpu_resources.reserved_doorbell_mask = 0x1e0;
 	gpu_resources.reserved_doorbell_val = 0x0e0;
 
-	kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
+	kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
 	}
 }
 
 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
 {
 	if (adev->kfd.dev) {
-		kgd2kfd->device_exit(adev->kfd.dev);
+		kgd2kfd_device_exit(adev->kfd.dev);
 		adev->kfd.dev = NULL;
 	}
 }
@@ -227,13 +218,13 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 		const void *ih_ring_entry)
 {
 	if (adev->kfd.dev)
-		kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
+		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
 }
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
 {
 	if (adev->kfd.dev)
-		kgd2kfd->suspend(adev->kfd.dev);
+		kgd2kfd_suspend(adev->kfd.dev);
 }
 
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
@@ -241,7 +232,7 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
 	int r = 0;
 
 	if (adev->kfd.dev)
-		r = kgd2kfd->resume(adev->kfd.dev);
+		r = kgd2kfd_resume(adev->kfd.dev);
 
 	return r;
 }
@@ -251,7 +242,7 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
 	int r = 0;
 
 	if (adev->kfd.dev)
-		r = kgd2kfd->pre_reset(adev->kfd.dev);
+		r = kgd2kfd_pre_reset(adev->kfd.dev);
 
 	return r;
 }
@@ -261,7 +252,7 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
 	int r = 0;
 
 	if (adev->kfd.dev)
-		r = kgd2kfd->post_reset(adev->kfd.dev);
+		r = kgd2kfd_post_reset(adev->kfd.dev);
 
 	return r;
 }
@@ -619,4 +610,47 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
 {
 	return NULL;
 }
+
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+			      const struct kfd2kgd_calls *f2g)
+{
+	return NULL;
+}
+
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+			 const struct kgd2kfd_shared_resources *gpu_resources)
+{
+	return false;
+}
+
+void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+}
+
+void kgd2kfd_exit(void)
+{
+}
+
+void kgd2kfd_suspend(struct kfd_dev *kfd)
+{
+}
+
+int kgd2kfd_resume(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+}
 #endif
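
The stubs added above sit in the !CONFIG_HSA_AMD branch (note the closing #endif), which is what lets every kgd2kfd->foo() indirection and its NULL check collapse into a plain call: when KFD is compiled out, the direct symbols still exist and simply do nothing. A reduced sketch of the pattern, with hypothetical names:

/* Hypothetical sketch: direct calls with compile-time stubs instead of
 * a nullable function-pointer table. */
#ifdef CONFIG_MY_SUBSYS
int my_subsys_init(void);          /* real definition lives in the subsystem */
#else
static inline int my_subsys_init(void) { return -ENOENT; }  /* stub */
#endif

static int driver_init(void)
{
	return my_subsys_init();   /* no NULL check needed either way */
}
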
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 70429f7aa9a8..0b31a1859023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -33,7 +33,6 @@
 #include "amdgpu_sync.h"
 #include "amdgpu_vm.h"
 
-extern const struct kgd2kfd_calls *kgd2kfd;
 extern uint64_t amdgpu_amdkfd_total_mem_size;
 
 struct amdgpu_device;
@@ -214,4 +213,22 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
 
+/* KGD2KFD callbacks */
+int kgd2kfd_init(void);
+void kgd2kfd_exit(void);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+			      const struct kfd2kgd_calls *f2g);
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+			 const struct kgd2kfd_shared_resources *gpu_resources);
+void kgd2kfd_device_exit(struct kfd_dev *kfd);
+void kgd2kfd_suspend(struct kfd_dev *kfd);
+int kgd2kfd_resume(struct kfd_dev *kfd);
+int kgd2kfd_pre_reset(struct kfd_dev *kfd);
+int kgd2kfd_post_reset(struct kfd_dev *kfd);
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
+int kgd2kfd_quiesce_mm(struct mm_struct *mm);
+int kgd2kfd_resume_mm(struct mm_struct *mm);
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+					       struct dma_fence *fence);
+
 #endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 574c1181ae9a..3107b9575929 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -122,7 +122,7 @@ static bool amdkfd_fence_enable_signaling(struct dma_fence *f)
 	if (dma_fence_is_signaled(f))
 		return true;
 
-	if (!kgd2kfd->schedule_evict_and_restore_process(fence->mm, f))
+	if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f))
 		return true;
 
 	return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index be1ab43473c6..d7b10d79f1de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1790,7 +1790,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
 	if (evicted_bos == 1) {
 		/* First eviction, stop the queues */
-		r = kgd2kfd->quiesce_mm(mm);
+		r = kgd2kfd_quiesce_mm(mm);
 		if (r)
 			pr_err("Failed to quiesce KFD\n");
 		schedule_delayed_work(&process_info->restore_userptr_work,
@@ -2082,7 +2082,7 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
 	    evicted_bos)
 		goto unlock_out;
 	evicted_bos = 0;
-	if (kgd2kfd->resume_mm(mm)) {
+	if (kgd2kfd_resume_mm(mm)) {
 		pr_err("%s: Failed to resume KFD\n", __func__);
 		/* No recovery from this failure. Probably the CP is
 		 * hanging. No point trying again.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a028661d9e20..92b11de19581 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
 
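
For context, entries in a quirk table like this are matched against the PCI vendor/device and subsystem IDs, with the all-zero row acting as a sentinel. A hypothetical sketch of the lookup (not the actual amdgpu helper, whose details differ):

/* Hypothetical quirk-table walk over a sentinel-terminated list. */
struct px_quirk {
	u32 chip_vendor, chip_device, subsys_vendor, subsys_device, flags;
};

static u32 px_lookup_quirk(const struct px_quirk *q, u32 cv, u32 cd,
			   u32 sv, u32 sd)
{
	for (; q->chip_vendor; q++)    /* zero vendor terminates the table */
		if (q->chip_vendor == cv && q->chip_device == cd &&
		    q->subsys_vendor == sv && q->subsys_device == sd)
			return q->flags;
	return 0;
}
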
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 28bccceaa363..c898b19f335a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1645,7 +1645,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 		if (r) {
 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
 				  adev->ip_blocks[i].version->funcs->name, r);
-			return r;
+			goto init_failed;
 		}
 		adev->ip_blocks[i].status.sw = true;
 
@@ -1654,17 +1654,17 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 			r = amdgpu_device_vram_scratch_init(adev);
 			if (r) {
 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
-				return r;
+				goto init_failed;
 			}
 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
 			if (r) {
 				DRM_ERROR("hw_init %d failed %d\n", i, r);
-				return r;
+				goto init_failed;
 			}
 			r = amdgpu_device_wb_init(adev);
 			if (r) {
 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
-				return r;
+				goto init_failed;
 			}
 			adev->ip_blocks[i].status.hw = true;
 
@@ -1675,7 +1675,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 							AMDGPU_CSA_SIZE);
 				if (r) {
 					DRM_ERROR("allocate CSA failed %d\n", r);
-					return r;
+					goto init_failed;
 				}
 			}
 		}
@@ -1683,28 +1683,32 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
 	if (r)
-		return r;
+		goto init_failed;
 
 	r = amdgpu_device_ip_hw_init_phase1(adev);
 	if (r)
-		return r;
+		goto init_failed;
 
 	r = amdgpu_device_fw_loading(adev);
 	if (r)
-		return r;
+		goto init_failed;
 
 	r = amdgpu_device_ip_hw_init_phase2(adev);
 	if (r)
-		return r;
+		goto init_failed;
 
 	if (adev->gmc.xgmi.num_physical_nodes > 1)
 		amdgpu_xgmi_add_device(adev);
 	amdgpu_amdkfd_device_init(adev);
 
-	if (amdgpu_sriov_vf(adev))
+init_failed:
+	if (amdgpu_sriov_vf(adev)) {
+		if (!r)
+			amdgpu_virt_init_data_exchange(adev);
 		amdgpu_virt_release_full_gpu(adev, true);
+	}
 
-	return 0;
+	return r;
 }
 
 /**
@@ -2131,7 +2135,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 			continue;
 
 		r = block->version->funcs->hw_init(adev);
-		DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
+		DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
 		if (r)
 			return r;
 	}
@@ -2165,7 +2169,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 			continue;
 
 		r = block->version->funcs->hw_init(adev);
-		DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
+		DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
 		if (r)
 			return r;
 	}
@@ -2546,6 +2550,17 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	/* detect if we are with an SRIOV vbios */
 	amdgpu_device_detect_sriov_bios(adev);
 
+	/* check if we need to reset the asic
+	 * E.g., driver was not cleanly unloaded previously, etc.
+	 */
+	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
+		r = amdgpu_asic_reset(adev);
+		if (r) {
+			dev_err(adev->dev, "asic reset on init failed\n");
+			goto failed;
+		}
+	}
+
 	/* Post card if necessary */
 	if (amdgpu_device_need_post(adev)) {
 		if (!adev->bios) {
@@ -2610,6 +2625,8 @@ fence_driver_init:
 		}
 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
+		if (amdgpu_virt_request_full_gpu(adev, false))
+			amdgpu_virt_release_full_gpu(adev, false);
 		goto failed;
 	}
 
@@ -2632,9 +2649,6 @@ fence_driver_init:
 		goto failed;
 	}
 
-	if (amdgpu_sriov_vf(adev))
-		amdgpu_virt_init_data_exchange(adev);
-
 	amdgpu_fbdev_init(adev);
 
 	r = amdgpu_pm_sysfs_init(adev);
@@ -2798,7 +2812,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 		struct drm_framebuffer *fb = crtc->primary->fb;
 		struct amdgpu_bo *robj;
 
-		if (amdgpu_crtc->cursor_bo) {
+		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 			r = amdgpu_bo_reserve(aobj, true);
 			if (r == 0) {
@@ -2906,7 +2920,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
-		if (amdgpu_crtc->cursor_bo) {
+		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 			r = amdgpu_bo_reserve(aobj, true);
 			if (r == 0) {
@@ -3226,6 +3240,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	r = amdgpu_ib_ring_tests(adev);
 
 error:
+	amdgpu_virt_init_data_exchange(adev);
 	amdgpu_virt_release_full_gpu(adev, true);
 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
 		atomic_inc(&adev->vram_lost_counter);
@@ -3298,17 +3313,15 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 		if (!ring || !ring->sched.thread)
 			continue;
 
-		kthread_park(ring->sched.thread);
-
-		if (job && job->base.sched != &ring->sched)
-			continue;
-
-		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
+		drm_sched_stop(&ring->sched);
 
 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
 		amdgpu_fence_driver_force_completion(ring);
 	}
 
+	if(job)
+		drm_sched_increase_karma(&job->base);
+
 
 
 	if (!amdgpu_sriov_vf(adev)) {
@@ -3454,14 +3467,10 @@ static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
 		if (!ring || !ring->sched.thread)
 			continue;
 
-		/* only need recovery sched of the given job's ring
-		 * or all rings (in the case @job is NULL)
-		 * after above amdgpu_reset accomplished
-		 */
-		if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res)
-			drm_sched_job_recovery(&ring->sched);
+		if (!adev->asic_reset_res)
+			drm_sched_resubmit_jobs(&ring->sched);
 
-		kthread_unpark(ring->sched.thread);
+		drm_sched_start(&ring->sched, !adev->asic_reset_res);
 	}
 
 	if (!amdgpu_device_has_dc_support(adev)) {
@@ -3521,9 +3530,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	 * by different nodes. No point also since the one node already executing
 	 * reset will also reset all the other nodes in the hive.
 	 */
-	hive = amdgpu_get_xgmi_hive(adev);
+	hive = amdgpu_get_xgmi_hive(adev, 0);
 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
-	    !mutex_trylock(&hive->hive_lock))
+	    !mutex_trylock(&hive->reset_lock))
 		return 0;
 
 	/* Start with adev pre asic reset first for soft reset check.*/
@@ -3602,7 +3611,7 @@ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
 	}
 
 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
-		mutex_unlock(&hive->hive_lock);
+		mutex_unlock(&hive->reset_lock);
 
 	if (r)
 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
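
Condensed, the scheduler hunks above replace the old kthread park/unpark dance with the new drm_sched_* entry points. A rough sketch of the resulting recovery sequence (for_each_ring() is shorthand here, not a real macro):

/* Rough sketch of the reworked reset flow, per the hunks above. */
for_each_ring(ring) {
	drm_sched_stop(&ring->sched);               /* quiesce the scheduler */
	amdgpu_fence_driver_force_completion(ring); /* hw fences are now meaningless */
}
if (job)
	drm_sched_increase_karma(&job->base);       /* blame the offending job */

/* ... the actual ASIC reset happens here, setting adev->asic_reset_res ... */

for_each_ring(ring) {
	if (!adev->asic_reset_res)
		drm_sched_resubmit_jobs(&ring->sched); /* replay unfinished jobs */
	drm_sched_start(&ring->sched, !adev->asic_reset_res);
}
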
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 15ce7e681d67..b083b219b1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -188,10 +188,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
-	if (unlikely(r != 0)) {
-		DRM_ERROR("failed to pin new abo buffer before flip\n");
-		goto unreserve;
+	if (!adev->enable_virtual_display) {
+		r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
+		if (unlikely(r != 0)) {
+			DRM_ERROR("failed to pin new abo buffer before flip\n");
+			goto unreserve;
+		}
 	}
 
 	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
@@ -211,7 +213,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
 	amdgpu_bo_unreserve(new_abo);
 
-	work->base = amdgpu_bo_gpu_offset(new_abo);
+	if (!adev->enable_virtual_display)
+		work->base = amdgpu_bo_gpu_offset(new_abo);
 	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
 		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
@@ -242,9 +245,10 @@ pflip_cleanup:
 		goto cleanup;
 	}
 unpin:
-	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
-		DRM_ERROR("failed to unpin new abo in error path\n");
-	}
+	if (!adev->enable_virtual_display)
+		if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+			DRM_ERROR("failed to unpin new abo in error path\n");
+
 unreserve:
 	amdgpu_bo_unreserve(new_abo);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
index be620b29f4aa..1cfec06f81d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -51,14 +51,7 @@ struct amdgpu_doorbell_index {
 	uint32_t userqueue_start;
 	uint32_t userqueue_end;
 	uint32_t gfx_ring0;
-	uint32_t sdma_engine0;
-	uint32_t sdma_engine1;
-	uint32_t sdma_engine2;
-	uint32_t sdma_engine3;
-	uint32_t sdma_engine4;
-	uint32_t sdma_engine5;
-	uint32_t sdma_engine6;
-	uint32_t sdma_engine7;
+	uint32_t sdma_engine[8];
 	uint32_t ih;
 	union {
 		struct {
@@ -79,6 +72,8 @@ struct amdgpu_doorbell_index {
 		} uvd_vce;
 	};
 	uint32_t max_assignment;
+	/* Per engine SDMA doorbell size in dword */
+	uint32_t sdma_doorbell_range;
 };
 
 typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
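
Collapsing the eight named fields into sdma_engine[8] lets per-ASIC code assign and consume the indices in a loop instead of a copy-pasted block. A hedged sketch of a consumer, using the extended sdma_doorbell_range() signature from the amdgpu.h hunk above (num_sdma_instances and nbio_funcs are shorthand):

/* Hypothetical sketch: per-instance doorbell setup as a loop. */
int i;

for (i = 0; i < num_sdma_instances; i++)
	nbio_funcs->sdma_doorbell_range(adev, i, true,
			adev->doorbell_index.sdma_engine[i],
			adev->doorbell_index.sdma_doorbell_range);
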
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index f972cd156795..2f61e9edb1c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -364,6 +364,14 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\
 			(adev)->powerplay.pp_handle))
 
+#define amdgpu_dpm_get_ppfeature_status(adev, buf) \
+		((adev)->powerplay.pp_funcs->get_ppfeature_status(\
+			(adev)->powerplay.pp_handle, (buf)))
+
+#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \
+		((adev)->powerplay.pp_funcs->set_ppfeature_status(\
+			(adev)->powerplay.pp_handle, (ppfeatures)))
+
 struct amdgpu_dpm {
 	struct amdgpu_ps *ps;
 	/* number of valid power states */
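
As with the other pp_funcs wrappers, callers are expected to probe the function pointer first, since older powerplay backends won't implement the new hooks. A hedged usage sketch (not a quote from the driver):

/* Hypothetical caller-side guard before using the new wrappers. */
if (adev->powerplay.pp_funcs->get_ppfeature_status)
	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
else
	size = snprintf(buf, PAGE_SIZE, "\n");  /* hook not implemented */
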
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index c48207b377bc..0b8ef2d27d6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -202,12 +202,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		amdgpu_asic_flush_hdp(adev, ring);
 	}
 
+	if (need_ctx_switch)
+		status |= AMDGPU_HAVE_CTX_SWITCH;
+
 	skip_preamble = ring->current_ctx == fence_ctx;
 	if (job && ring->funcs->emit_cntxcntl) {
-		if (need_ctx_switch)
-			status |= AMDGPU_HAVE_CTX_SWITCH;
 		status |= job->preamble_status;
-
 		amdgpu_ring_emit_cntxcntl(ring, status);
 	}
 
@@ -221,8 +221,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
 			continue;
 
-		amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch);
-		need_ctx_switch = false;
+		amdgpu_ring_emit_ib(ring, job, ib, status);
+		status &= ~AMDGPU_HAVE_CTX_SWITCH;
 	}
 
 	if (ring->funcs->emit_tmz)
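
The net effect of these two hunks: the context-switch request now rides in the shared status word rather than a separate bool, is visible both to emit_cntxcntl() and to every ring backend via amdgpu_ring_emit_ib(), and is cleared after the first IB so later IBs in the job don't re-trigger a switch. Schematically (condensed from the code above, loop variables illustrative):

/* Schematic of the status-word flow. */
if (need_ctx_switch)
	status |= AMDGPU_HAVE_CTX_SWITCH;        /* set once, up front */

for (i = 0; i < num_ibs; ++i) {
	ib = &job->ibs[i];
	amdgpu_ring_emit_ib(ring, job, ib, status);
	status &= ~AMDGPU_HAVE_CTX_SWITCH;       /* only the first IB sees it */
}
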
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 8af67f649660..d0a5db777b6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -52,6 +52,8 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
 	ih->use_bus_addr = use_bus_addr;
 
 	if (use_bus_addr) {
+		dma_addr_t dma_addr;
+
 		if (ih->ring)
 			return 0;
 
@@ -59,21 +61,26 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
 		 * add them to the end of the ring allocation.
 		 */
 		ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
-					      &ih->rb_dma_addr, GFP_KERNEL);
+					      &dma_addr, GFP_KERNEL);
 		if (ih->ring == NULL)
 			return -ENOMEM;
 
 		memset((void *)ih->ring, 0, ih->ring_size + 8);
-		ih->wptr_offs = (ih->ring_size / 4) + 0;
-		ih->rptr_offs = (ih->ring_size / 4) + 1;
+		ih->gpu_addr = dma_addr;
+		ih->wptr_addr = dma_addr + ih->ring_size;
+		ih->wptr_cpu = &ih->ring[ih->ring_size / 4];
+		ih->rptr_addr = dma_addr + ih->ring_size + 4;
+		ih->rptr_cpu = &ih->ring[(ih->ring_size / 4) + 1];
 	} else {
-		r = amdgpu_device_wb_get(adev, &ih->wptr_offs);
+		unsigned wptr_offs, rptr_offs;
+
+		r = amdgpu_device_wb_get(adev, &wptr_offs);
 		if (r)
 			return r;
 
-		r = amdgpu_device_wb_get(adev, &ih->rptr_offs);
+		r = amdgpu_device_wb_get(adev, &rptr_offs);
 		if (r) {
-			amdgpu_device_wb_free(adev, ih->wptr_offs);
+			amdgpu_device_wb_free(adev, wptr_offs);
 			return r;
 		}
 
@@ -82,10 +89,15 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
 					    &ih->ring_obj, &ih->gpu_addr,
 					    (void **)&ih->ring);
 		if (r) {
-			amdgpu_device_wb_free(adev, ih->rptr_offs);
-			amdgpu_device_wb_free(adev, ih->wptr_offs);
+			amdgpu_device_wb_free(adev, rptr_offs);
+			amdgpu_device_wb_free(adev, wptr_offs);
 			return r;
 		}
+
+		ih->wptr_addr = adev->wb.gpu_addr + wptr_offs * 4;
+		ih->wptr_cpu = &adev->wb.wb[wptr_offs];
+		ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4;
+		ih->rptr_cpu = &adev->wb.wb[rptr_offs];
 	}
 	return 0;
 }
@@ -109,13 +121,13 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 	 * add them to the end of the ring allocation.
 	 */
 		dma_free_coherent(adev->dev, ih->ring_size + 8,
-				  (void *)ih->ring, ih->rb_dma_addr);
+				  (void *)ih->ring, ih->gpu_addr);
 		ih->ring = NULL;
 	} else {
 		amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
 				      (void **)&ih->ring);
-		amdgpu_device_wb_free(adev, ih->wptr_offs);
-		amdgpu_device_wb_free(adev, ih->rptr_offs);
+		amdgpu_device_wb_free(adev, (ih->wptr_addr - ih->gpu_addr) / 4);
+		amdgpu_device_wb_free(adev, (ih->rptr_addr - ih->gpu_addr) / 4);
 	}
 }
 
@@ -137,7 +149,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
 	if (!ih->enabled || adev->shutdown)
 		return IRQ_NONE;
 
-	wptr = amdgpu_ih_get_wptr(adev);
+	wptr = amdgpu_ih_get_wptr(adev, ih);
 
 restart_ih:
 	/* is somebody else already processing irqs? */
@@ -154,11 +166,11 @@ restart_ih:
 		ih->rptr &= ih->ptr_mask;
 	}
 
-	amdgpu_ih_set_rptr(adev);
+	amdgpu_ih_set_rptr(adev, ih);
 	atomic_set(&ih->lock, 0);
 
 	/* make sure wptr hasn't changed while processing */
-	wptr = amdgpu_ih_get_wptr(adev);
+	wptr = amdgpu_ih_get_wptr(adev, ih);
 	if (wptr != ih->rptr)
 		goto restart_ih;
 
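
For the use_bus_addr case, the arithmetic above implies a single coherent allocation with the two pointer shadows appended to the ring payload:

/*
 * Layout implied by the use_bus_addr setup above (ring_size + 8 bytes):
 *
 *   gpu_addr                      wptr_addr     rptr_addr
 *   |                             |             |
 *   [ ring payload, ring_size B ] [ wptr, 4 B ] [ rptr, 4 B ]
 *
 * hence wptr_cpu == &ring[ring_size / 4] and
 *       rptr_cpu == &ring[ring_size / 4 + 1].
 */
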
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index f877bb78d10a..1ccb1831382a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -31,34 +31,40 @@ struct amdgpu_iv_entry;
  * R6xx+ IH ring
  */
 struct amdgpu_ih_ring {
-	struct amdgpu_bo	*ring_obj;
-	volatile uint32_t	*ring;
-	unsigned		rptr;
 	unsigned		ring_size;
-	uint64_t		gpu_addr;
 	uint32_t		ptr_mask;
-	atomic_t		lock;
-	bool			enabled;
-	unsigned		wptr_offs;
-	unsigned		rptr_offs;
 	u32			doorbell_index;
 	bool			use_doorbell;
 	bool			use_bus_addr;
-	dma_addr_t		rb_dma_addr; /* only used when use_bus_addr = true */
+
+	struct amdgpu_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	uint64_t		gpu_addr;
+
+	uint64_t		wptr_addr;
+	volatile uint32_t	*wptr_cpu;
+
+	uint64_t		rptr_addr;
+	volatile uint32_t	*rptr_cpu;
+
+	bool			enabled;
+	unsigned		rptr;
+	atomic_t		lock;
 };
 
 /* provided by the ih block */
 struct amdgpu_ih_funcs {
 	/* ring read/write ptr handling, called from interrupt context */
-	u32 (*get_wptr)(struct amdgpu_device *adev);
-	void (*decode_iv)(struct amdgpu_device *adev,
+	u32 (*get_wptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+	void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
 			  struct amdgpu_iv_entry *entry);
-	void (*set_rptr)(struct amdgpu_device *adev);
+	void (*set_rptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
 };
 
-#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
-#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
-#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
+#define amdgpu_ih_get_wptr(adev, ih) (adev)->irq.ih_funcs->get_wptr((adev), (ih))
+#define amdgpu_ih_decode_iv(adev, iv) \
+	(adev)->irq.ih_funcs->decode_iv((adev), (ih), (iv))
+#define amdgpu_ih_set_rptr(adev, ih) (adev)->irq.ih_funcs->set_rptr((adev), (ih))
 
 int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
 			unsigned ring_size, bool use_bus_addr);
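
With the ring passed explicitly, one set of amdgpu_ih_funcs can serve ih, ih1 and ih2. A hypothetical skeleton of such a backend (my_ih_* names are illustrative, not an existing IP block):

/* Hypothetical per-ring backend using the new two-argument ops. */
static u32 my_ih_get_wptr(struct amdgpu_device *adev,
			  struct amdgpu_ih_ring *ih)
{
	/* each ring now carries its own write-pointer shadow */
	return *ih->wptr_cpu & ih->ptr_mask;
}

static void my_ih_set_rptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	*ih->rptr_cpu = ih->rptr;   /* or ring a per-ring doorbell/register */
}
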
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index b7968f426862..8bfb3dab46f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -148,6 +148,8 @@ static void amdgpu_irq_callback(struct amdgpu_device *adev,
148 entry.iv_entry = (const uint32_t *)&ih->ring[ring_index]; 148 entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
149 amdgpu_ih_decode_iv(adev, &entry); 149 amdgpu_ih_decode_iv(adev, &entry);
150 150
151 trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
152
151 amdgpu_irq_dispatch(adev, &entry); 153 amdgpu_irq_dispatch(adev, &entry);
152} 154}
153 155
@@ -175,6 +177,36 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
175} 177}
176 178
177/** 179/**
 180 * amdgpu_irq_handle_ih1 - kick off processing for IH1
181 *
182 * @work: work structure in struct amdgpu_irq
183 *
 184 * Kick off processing of IH ring 1.
185 */
186static void amdgpu_irq_handle_ih1(struct work_struct *work)
187{
188 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
189 irq.ih1_work);
190
191 amdgpu_ih_process(adev, &adev->irq.ih1, amdgpu_irq_callback);
192}
193
194/**
 195 * amdgpu_irq_handle_ih2 - kick off processing for IH2
196 *
197 * @work: work structure in struct amdgpu_irq
198 *
 199 * Kick off processing of IH ring 2.
200 */
201static void amdgpu_irq_handle_ih2(struct work_struct *work)
202{
203 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
204 irq.ih2_work);
205
206 amdgpu_ih_process(adev, &adev->irq.ih2, amdgpu_irq_callback);
207}
208
209/**
178 * amdgpu_msi_ok - check whether MSI functionality is enabled 210 * amdgpu_msi_ok - check whether MSI functionality is enabled
179 * 211 *
180 * @adev: amdgpu device pointer (unused) 212 * @adev: amdgpu device pointer (unused)
@@ -238,6 +270,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
238 amdgpu_hotplug_work_func); 270 amdgpu_hotplug_work_func);
239 } 271 }
240 272
273 INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
274 INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
275
241 adev->irq.installed = true; 276 adev->irq.installed = true;
242 r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); 277 r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
243 if (r) { 278 if (r) {
@@ -367,8 +402,6 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
367 bool handled = false; 402 bool handled = false;
368 int r; 403 int r;
369 404
370 trace_amdgpu_iv(entry);
371
372 if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) { 405 if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
373 DRM_DEBUG("Invalid client_id in IV: %d\n", client_id); 406 DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
374 407
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index f6ce171cb8aa..c27decfda494 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -87,9 +87,11 @@ struct amdgpu_irq {
87 /* status, etc. */ 87 /* status, etc. */
88 bool msi_enabled; /* msi enabled */ 88 bool msi_enabled; /* msi enabled */
89 89
90 /* interrupt ring */ 90 /* interrupt rings */
91 struct amdgpu_ih_ring ih; 91 struct amdgpu_ih_ring ih, ih1, ih2;
92 const struct amdgpu_ih_funcs *ih_funcs; 92 const struct amdgpu_ih_funcs *ih_funcs;
93 struct work_struct ih1_work, ih2_work;
94 struct amdgpu_irq_src self_irq;
93 95
94 /* gen irq stuff */ 96 /* gen irq stuff */
95 struct irq_domain *domain; /* GPU irq controller domain */ 97 struct irq_domain *domain; /* GPU irq controller domain */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 1f61ed95727c..a7adb7b6bd98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -626,11 +626,71 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
626} 626}
627 627
628/** 628/**
629 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie 629 * DOC: ppfeatures
630 *
 631 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 632 * features are enabled. The file ppfeatures is used for this. It is
 633 * only available on Vega10 and later dGPUs.
 634 *
 635 * Reading back the file will show you the following:
 636 * - The current ppfeature mask
 637 * - A list of all supported powerplay features with their names,
 638 * bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 639 *
 640 * To manually enable or disable a specific feature, set or clear the
 641 * corresponding bit in the current ppfeature mask and write the new
 642 * mask back to the file.
643 */
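
A minimal userspace sketch of the read-modify-write flow this DOC comment
describes (not part of the patch; the sysfs path and the example mask value
are illustrative assumptions):

#include <stdio.h>

/* Sketch: write a new powerplay feature mask through ppfeatures. In
 * practice the value would be derived from the mask read back from this
 * same file, with the desired bit set or cleared. */
int main(void)
{
	FILE *f = fopen("/sys/class/drm/card0/device/ppfeatures", "w");

	if (!f)
		return 1;
	fprintf(f, "0x%llx\n", 0x3fffULL);	/* hypothetical new mask */
	fclose(f);
	return 0;
}
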
644static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
645 struct device_attribute *attr,
646 const char *buf,
647 size_t count)
648{
649 struct drm_device *ddev = dev_get_drvdata(dev);
650 struct amdgpu_device *adev = ddev->dev_private;
651 uint64_t featuremask;
652 int ret;
653
654 ret = kstrtou64(buf, 0, &featuremask);
655 if (ret)
656 return -EINVAL;
657
658 pr_debug("featuremask = 0x%llx\n", featuremask);
659
660 if (adev->powerplay.pp_funcs->set_ppfeature_status) {
661 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
662 if (ret)
663 return -EINVAL;
664 }
665
666 return count;
667}
668
669static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
670 struct device_attribute *attr,
671 char *buf)
672{
673 struct drm_device *ddev = dev_get_drvdata(dev);
674 struct amdgpu_device *adev = ddev->dev_private;
675
676 if (adev->powerplay.pp_funcs->get_ppfeature_status)
677 return amdgpu_dpm_get_ppfeature_status(adev, buf);
678
679 return snprintf(buf, PAGE_SIZE, "\n");
680}
681
682/**
683 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
684 * pp_dpm_pcie
630 * 685 *
631 * The amdgpu driver provides a sysfs API for adjusting what power levels 686 * The amdgpu driver provides a sysfs API for adjusting what power levels
632 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, 687 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
633 * and pp_dpm_pcie are used for this. 688 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
689 * this.
690 *
 691 * The pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available
 692 * on Vega10 and later ASICs.
 693 * The pp_dpm_fclk interface is only available on Vega20 and later ASICs.
634 * 694 *
635 * Reading back the files will show you the available power levels within 695 * Reading back the files will show you the available power levels within
636 * the power state and the clock information for those levels. 696 * the power state and the clock information for those levels.
@@ -640,6 +700,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 640 * Secondly, enter a new value for each level by inputting a string that 700 * Secondly, enter a new value for each level by inputting a string that
 641 * contains "echo xx xx xx > pp_dpm_sclk/mclk/pcie" 701 * contains "echo xx xx xx > pp_dpm_sclk/mclk/pcie"
 642 * E.g., echo "4 5 6" > pp_dpm_sclk will enable sclk levels 4, 5, and 6. 702 * E.g., echo "4 5 6" > pp_dpm_sclk will enable sclk levels 4, 5, and 6.
703 *
 704 * NOTE: changing the dcefclk max DPM level is currently not supported.
643 */ 705 */
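
For illustration, a simplified analogue of how a string such as "4 5 6"
becomes a level bitmask, in the spirit of the amdgpu_read_mask() helper the
store callbacks below rely on; a sketch (using glibc's strsep for brevity),
not the driver's exact implementation:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int parse_level_mask(const char *buf, uint32_t *mask)
{
	char *dup = strdup(buf);
	char *cur = dup, *tok;

	if (!dup)
		return -1;
	*mask = 0;
	while ((tok = strsep(&cur, " \n")) != NULL) {
		char *end;
		long level;

		if (!*tok)
			continue;	/* skip empty tokens */
		level = strtol(tok, &end, 10);
		if (*end != '\0' || level < 0 || level > 31) {
			free(dup);
			return -1;	/* non-numeric or out-of-range */
		}
		*mask |= 1u << level;	/* "4 5 6" -> 0x70 */
	}
	free(dup);
	return 0;
}
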
644 706
645static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 707static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
@@ -750,6 +812,114 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
750 return count; 812 return count;
751} 813}
752 814
815static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
816 struct device_attribute *attr,
817 char *buf)
818{
819 struct drm_device *ddev = dev_get_drvdata(dev);
820 struct amdgpu_device *adev = ddev->dev_private;
821
822 if (adev->powerplay.pp_funcs->print_clock_levels)
823 return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
824 else
825 return snprintf(buf, PAGE_SIZE, "\n");
826}
827
828static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
829 struct device_attribute *attr,
830 const char *buf,
831 size_t count)
832{
833 struct drm_device *ddev = dev_get_drvdata(dev);
834 struct amdgpu_device *adev = ddev->dev_private;
835 int ret;
836 uint32_t mask = 0;
837
838 ret = amdgpu_read_mask(buf, count, &mask);
839 if (ret)
840 return ret;
841
842 if (adev->powerplay.pp_funcs->force_clock_level)
843 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
844
845 if (ret)
846 return -EINVAL;
847
848 return count;
849}
850
851static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
852 struct device_attribute *attr,
853 char *buf)
854{
855 struct drm_device *ddev = dev_get_drvdata(dev);
856 struct amdgpu_device *adev = ddev->dev_private;
857
858 if (adev->powerplay.pp_funcs->print_clock_levels)
859 return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
860 else
861 return snprintf(buf, PAGE_SIZE, "\n");
862}
863
864static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
865 struct device_attribute *attr,
866 const char *buf,
867 size_t count)
868{
869 struct drm_device *ddev = dev_get_drvdata(dev);
870 struct amdgpu_device *adev = ddev->dev_private;
871 int ret;
872 uint32_t mask = 0;
873
874 ret = amdgpu_read_mask(buf, count, &mask);
875 if (ret)
876 return ret;
877
878 if (adev->powerplay.pp_funcs->force_clock_level)
879 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
880
881 if (ret)
882 return -EINVAL;
883
884 return count;
885}
886
887static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
888 struct device_attribute *attr,
889 char *buf)
890{
891 struct drm_device *ddev = dev_get_drvdata(dev);
892 struct amdgpu_device *adev = ddev->dev_private;
893
894 if (adev->powerplay.pp_funcs->print_clock_levels)
895 return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
896 else
897 return snprintf(buf, PAGE_SIZE, "\n");
898}
899
900static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
901 struct device_attribute *attr,
902 const char *buf,
903 size_t count)
904{
905 struct drm_device *ddev = dev_get_drvdata(dev);
906 struct amdgpu_device *adev = ddev->dev_private;
907 int ret;
908 uint32_t mask = 0;
909
910 ret = amdgpu_read_mask(buf, count, &mask);
911 if (ret)
912 return ret;
913
914 if (adev->powerplay.pp_funcs->force_clock_level)
915 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
916
917 if (ret)
918 return -EINVAL;
919
920 return count;
921}
922
753static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 923static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
754 struct device_attribute *attr, 924 struct device_attribute *attr,
755 char *buf) 925 char *buf)
@@ -990,6 +1160,31 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
990 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1160 return snprintf(buf, PAGE_SIZE, "%d\n", value);
991} 1161}
992 1162
1163/**
1164 * DOC: pcie_bw
1165 *
1166 * The amdgpu driver provides a sysfs API for estimating how much data
1167 * has been received and sent by the GPU in the last second through PCIe.
1168 * The file pcie_bw is used for this.
 1169 * The perf counters count the number of received and sent messages and return
 1170 * those values, as well as the maximum payload size of a PCIe packet (mps).
 1171 * Note that it is not possible to easily and quickly obtain the size of each
 1172 * packet transmitted, so we output the max payload size (mps) to allow
 1173 * for a quick estimation of the PCIe bandwidth usage.
1174 */
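
Since the file returns "count0 count1 mps", a rough upper bound on the
usage is (count0 + count1) * mps bytes per second (an upper bound because
not every TLP carries a full payload). A minimal userspace sketch, assuming
the usual sysfs path:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t count0, count1;
	int mps;
	FILE *f = fopen("/sys/class/drm/card0/device/pcie_bw", "r");

	if (!f || fscanf(f, "%" SCNu64 " %" SCNu64 " %d",
			 &count0, &count1, &mps) != 3)
		return 1;
	fclose(f);
	/* estimated upper bound on last-second PCIe traffic */
	printf("~%" PRIu64 " bytes/s\n", (count0 + count1) * (uint64_t)mps);
	return 0;
}
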
1175static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1176 struct device_attribute *attr,
1177 char *buf)
1178{
1179 struct drm_device *ddev = dev_get_drvdata(dev);
1180 struct amdgpu_device *adev = ddev->dev_private;
1181 uint64_t count0, count1;
1182
1183 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1184 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
1185 count0, count1, pcie_get_mps(adev->pdev));
1186}
1187
993static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); 1188static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
994static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 1189static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
995 amdgpu_get_dpm_forced_performance_level, 1190 amdgpu_get_dpm_forced_performance_level,
@@ -1008,6 +1203,15 @@ static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
1008static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, 1203static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
1009 amdgpu_get_pp_dpm_mclk, 1204 amdgpu_get_pp_dpm_mclk,
1010 amdgpu_set_pp_dpm_mclk); 1205 amdgpu_set_pp_dpm_mclk);
1206static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
1207 amdgpu_get_pp_dpm_socclk,
1208 amdgpu_set_pp_dpm_socclk);
1209static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
1210 amdgpu_get_pp_dpm_fclk,
1211 amdgpu_set_pp_dpm_fclk);
1212static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
1213 amdgpu_get_pp_dpm_dcefclk,
1214 amdgpu_set_pp_dpm_dcefclk);
1011static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, 1215static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
1012 amdgpu_get_pp_dpm_pcie, 1216 amdgpu_get_pp_dpm_pcie,
1013 amdgpu_set_pp_dpm_pcie); 1217 amdgpu_set_pp_dpm_pcie);
@@ -1025,6 +1229,10 @@ static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
1025 amdgpu_set_pp_od_clk_voltage); 1229 amdgpu_set_pp_od_clk_voltage);
1026static DEVICE_ATTR(gpu_busy_percent, S_IRUGO, 1230static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
1027 amdgpu_get_busy_percent, NULL); 1231 amdgpu_get_busy_percent, NULL);
1232static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
1233static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
1234 amdgpu_get_ppfeature_status,
1235 amdgpu_set_ppfeature_status);
1028 1236
1029static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 1237static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
1030 struct device_attribute *attr, 1238 struct device_attribute *attr,
@@ -1516,6 +1724,75 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
1516 return count; 1724 return count;
1517} 1725}
1518 1726
1727static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
1728 struct device_attribute *attr,
1729 char *buf)
1730{
1731 struct amdgpu_device *adev = dev_get_drvdata(dev);
1732 struct drm_device *ddev = adev->ddev;
1733 uint32_t sclk;
1734 int r, size = sizeof(sclk);
1735
 1736 /* Can't get the clock frequency when the card is off */
1737 if ((adev->flags & AMD_IS_PX) &&
1738 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1739 return -EINVAL;
1740
1741 /* sanity check PP is enabled */
1742 if (!(adev->powerplay.pp_funcs &&
1743 adev->powerplay.pp_funcs->read_sensor))
1744 return -EINVAL;
1745
1746 /* get the sclk */
1747 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
1748 (void *)&sclk, &size);
1749 if (r)
1750 return r;
1751
1752 return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
1753}
1754
1755static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
1756 struct device_attribute *attr,
1757 char *buf)
1758{
1759 return snprintf(buf, PAGE_SIZE, "sclk\n");
1760}
1761
1762static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
1763 struct device_attribute *attr,
1764 char *buf)
1765{
1766 struct amdgpu_device *adev = dev_get_drvdata(dev);
1767 struct drm_device *ddev = adev->ddev;
1768 uint32_t mclk;
1769 int r, size = sizeof(mclk);
1770
 1771 /* Can't get the clock frequency when the card is off */
1772 if ((adev->flags & AMD_IS_PX) &&
1773 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1774 return -EINVAL;
1775
1776 /* sanity check PP is enabled */
1777 if (!(adev->powerplay.pp_funcs &&
1778 adev->powerplay.pp_funcs->read_sensor))
1779 return -EINVAL;
1780
 1781 /* get the mclk */
1782 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
1783 (void *)&mclk, &size);
1784 if (r)
1785 return r;
1786
1787 return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
1788}
1789
1790static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
1791 struct device_attribute *attr,
1792 char *buf)
1793{
1794 return snprintf(buf, PAGE_SIZE, "mclk\n");
1795}
1519 1796
1520/** 1797/**
1521 * DOC: hwmon 1798 * DOC: hwmon
@@ -1532,6 +1809,10 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
1532 * 1809 *
1533 * - GPU fan 1810 * - GPU fan
1534 * 1811 *
1812 * - GPU gfx/compute engine clock
1813 *
1814 * - GPU memory clock (dGPU only)
1815 *
1535 * hwmon interfaces for GPU temperature: 1816 * hwmon interfaces for GPU temperature:
1536 * 1817 *
1537 * - temp1_input: the on die GPU temperature in millidegrees Celsius 1818 * - temp1_input: the on die GPU temperature in millidegrees Celsius
@@ -1576,6 +1857,12 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
1576 * 1857 *
1577 * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable 1858 * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable
1578 * 1859 *
1860 * hwmon interfaces for GPU clocks:
1861 *
1862 * - freq1_input: the gfx/compute clock in hertz
1863 *
1864 * - freq2_input: the memory clock in hertz
1865 *
1579 * You can use hwmon tools like sensors to view this information on your system. 1866 * You can use hwmon tools like sensors to view this information on your system.
1580 * 1867 *
1581 */ 1868 */
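
The show callbacks above scale the sensor reading (reported in 10 kHz
units) to hertz, hence the "* 10 * 1000" factor. A minimal userspace
sketch for reading the new attribute (the hwmon index is an assumption;
match the right device via freq1_label):

#include <stdio.h>

int main(void)
{
	unsigned long hz;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/freq1_input", "r");

	if (!f || fscanf(f, "%lu", &hz) != 1)
		return 1;
	fclose(f);
	printf("gfx clock: %lu MHz\n", hz / 1000000);	/* Hz -> MHz */
	return 0;
}
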
@@ -1600,6 +1887,10 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg,
1600static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); 1887static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
1601static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); 1888static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
1602static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); 1889static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
1890static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
1891static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
1892static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
1893static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
1603 1894
1604static struct attribute *hwmon_attributes[] = { 1895static struct attribute *hwmon_attributes[] = {
1605 &sensor_dev_attr_temp1_input.dev_attr.attr, 1896 &sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -1622,6 +1913,10 @@ static struct attribute *hwmon_attributes[] = {
1622 &sensor_dev_attr_power1_cap_max.dev_attr.attr, 1913 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
1623 &sensor_dev_attr_power1_cap_min.dev_attr.attr, 1914 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
1624 &sensor_dev_attr_power1_cap.dev_attr.attr, 1915 &sensor_dev_attr_power1_cap.dev_attr.attr,
1916 &sensor_dev_attr_freq1_input.dev_attr.attr,
1917 &sensor_dev_attr_freq1_label.dev_attr.attr,
1918 &sensor_dev_attr_freq2_input.dev_attr.attr,
1919 &sensor_dev_attr_freq2_label.dev_attr.attr,
1625 NULL 1920 NULL
1626}; 1921};
1627 1922
@@ -1686,7 +1981,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
1686 effective_mode &= ~S_IWUSR; 1981 effective_mode &= ~S_IWUSR;
1687 1982
1688 if ((adev->flags & AMD_IS_APU) && 1983 if ((adev->flags & AMD_IS_APU) &&
1689 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 1984 (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
1985 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
1690 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| 1986 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
1691 attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) 1987 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
1692 return 0; 1988 return 0;
@@ -1712,6 +2008,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
1712 attr == &sensor_dev_attr_in1_label.dev_attr.attr)) 2008 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
1713 return 0; 2009 return 0;
1714 2010
2011 /* no mclk on APUs */
2012 if ((adev->flags & AMD_IS_APU) &&
2013 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
2014 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
2015 return 0;
2016
1715 return effective_mode; 2017 return effective_mode;
1716} 2018}
1717 2019
@@ -2008,6 +2310,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
2008 2310
2009int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 2311int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2010{ 2312{
2313 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2011 int ret; 2314 int ret;
2012 2315
2013 if (adev->pm.sysfs_initialized) 2316 if (adev->pm.sysfs_initialized)
@@ -2069,6 +2372,25 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2069 DRM_ERROR("failed to create device file pp_dpm_mclk\n"); 2372 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
2070 return ret; 2373 return ret;
2071 } 2374 }
2375 if (adev->asic_type >= CHIP_VEGA10) {
2376 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
2377 if (ret) {
2378 DRM_ERROR("failed to create device file pp_dpm_socclk\n");
2379 return ret;
2380 }
2381 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
2382 if (ret) {
2383 DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
2384 return ret;
2385 }
2386 }
2387 if (adev->asic_type >= CHIP_VEGA20) {
2388 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
2389 if (ret) {
2390 DRM_ERROR("failed to create device file pp_dpm_fclk\n");
2391 return ret;
2392 }
2393 }
2072 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); 2394 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
2073 if (ret) { 2395 if (ret) {
2074 DRM_ERROR("failed to create device file pp_dpm_pcie\n"); 2396 DRM_ERROR("failed to create device file pp_dpm_pcie\n");
@@ -2091,12 +2413,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2091 "pp_power_profile_mode\n"); 2413 "pp_power_profile_mode\n");
2092 return ret; 2414 return ret;
2093 } 2415 }
2094 ret = device_create_file(adev->dev, 2416 if (hwmgr->od_enabled) {
2095 &dev_attr_pp_od_clk_voltage); 2417 ret = device_create_file(adev->dev,
2096 if (ret) { 2418 &dev_attr_pp_od_clk_voltage);
2097 DRM_ERROR("failed to create device file " 2419 if (ret) {
2098 "pp_od_clk_voltage\n"); 2420 DRM_ERROR("failed to create device file "
2099 return ret; 2421 "pp_od_clk_voltage\n");
2422 return ret;
2423 }
2100 } 2424 }
2101 ret = device_create_file(adev->dev, 2425 ret = device_create_file(adev->dev,
2102 &dev_attr_gpu_busy_percent); 2426 &dev_attr_gpu_busy_percent);
@@ -2105,12 +2429,31 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2105 "gpu_busy_level\n"); 2429 "gpu_busy_level\n");
2106 return ret; 2430 return ret;
2107 } 2431 }
2432 /* PCIe Perf counters won't work on APU nodes */
2433 if (!(adev->flags & AMD_IS_APU)) {
2434 ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
2435 if (ret) {
2436 DRM_ERROR("failed to create device file pcie_bw\n");
2437 return ret;
2438 }
2439 }
2108 ret = amdgpu_debugfs_pm_init(adev); 2440 ret = amdgpu_debugfs_pm_init(adev);
2109 if (ret) { 2441 if (ret) {
2110 DRM_ERROR("Failed to register debugfs file for dpm!\n"); 2442 DRM_ERROR("Failed to register debugfs file for dpm!\n");
2111 return ret; 2443 return ret;
2112 } 2444 }
2113 2445
2446 if ((adev->asic_type >= CHIP_VEGA10) &&
2447 !(adev->flags & AMD_IS_APU)) {
2448 ret = device_create_file(adev->dev,
2449 &dev_attr_ppfeatures);
2450 if (ret) {
2451 DRM_ERROR("failed to create device file "
2452 "ppfeatures\n");
2453 return ret;
2454 }
2455 }
2456
2114 adev->pm.sysfs_initialized = true; 2457 adev->pm.sysfs_initialized = true;
2115 2458
2116 return 0; 2459 return 0;
@@ -2118,6 +2461,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2118 2461
2119void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 2462void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
2120{ 2463{
2464 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2465
2121 if (adev->pm.dpm_enabled == 0) 2466 if (adev->pm.dpm_enabled == 0)
2122 return; 2467 return;
2123 2468
@@ -2133,14 +2478,26 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
2133 2478
2134 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); 2479 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
2135 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); 2480 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
2481 if (adev->asic_type >= CHIP_VEGA10) {
2482 device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
2483 device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
2484 }
2136 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); 2485 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
2486 if (adev->asic_type >= CHIP_VEGA20)
2487 device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
2137 device_remove_file(adev->dev, &dev_attr_pp_sclk_od); 2488 device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
2138 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 2489 device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
2139 device_remove_file(adev->dev, 2490 device_remove_file(adev->dev,
2140 &dev_attr_pp_power_profile_mode); 2491 &dev_attr_pp_power_profile_mode);
2141 device_remove_file(adev->dev, 2492 if (hwmgr->od_enabled)
2142 &dev_attr_pp_od_clk_voltage); 2493 device_remove_file(adev->dev,
2494 &dev_attr_pp_od_clk_voltage);
2143 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); 2495 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
2496 if (!(adev->flags & AMD_IS_APU))
2497 device_remove_file(adev->dev, &dev_attr_pcie_bw);
2498 if ((adev->asic_type >= CHIP_VEGA10) &&
2499 !(adev->flags & AMD_IS_APU))
2500 device_remove_file(adev->dev, &dev_attr_ppfeatures);
2144} 2501}
2145 2502
2146void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) 2503void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8fab0d637ee5..d87f165e3a23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -67,9 +67,6 @@ static int psp_sw_init(void *handle)
67 67
68 psp->adev = adev; 68 psp->adev = adev;
69 69
70 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
71 return 0;
72
73 ret = psp_init_microcode(psp); 70 ret = psp_init_microcode(psp);
74 if (ret) { 71 if (ret) {
75 DRM_ERROR("Failed to load psp firmware!\n"); 72 DRM_ERROR("Failed to load psp firmware!\n");
@@ -83,9 +80,6 @@ static int psp_sw_fini(void *handle)
83{ 80{
84 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 81 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
85 82
86 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
87 return 0;
88
89 release_firmware(adev->psp.sos_fw); 83 release_firmware(adev->psp.sos_fw);
90 adev->psp.sos_fw = NULL; 84 adev->psp.sos_fw = NULL;
91 release_firmware(adev->psp.asd_fw); 85 release_firmware(adev->psp.asd_fw);
@@ -140,13 +134,24 @@ psp_cmd_submit_buf(struct psp_context *psp,
140 while (*((unsigned int *)psp->fence_buf) != index) 134 while (*((unsigned int *)psp->fence_buf) != index)
141 msleep(1); 135 msleep(1);
142 136
 143 /* the status field must be 0 after FW is loaded */ 137 /* In some cases, the psp response status is not 0 even when there is
 144 if (ucode && psp->cmd_buf_mem->resp.status) { 138 * no problem while the command is submitted. Some versions of the PSP
 145 DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n", 139 * FW don't write 0 to that field.
 146 psp->cmd_buf_mem->resp.status, ucode->ucode_id); 140 * So here we only print a warning instead of an error during psp
 147 return -EINVAL; 141 * initialization, to avoid breaking hw_init, and we don't
 142 * return -EINVAL.
143 */
144 if (psp->cmd_buf_mem->resp.status) {
145 if (ucode)
146 DRM_WARN("failed to load ucode id (%d) ",
147 ucode->ucode_id);
148 DRM_WARN("psp command failed and response status is (%d)\n",
149 psp->cmd_buf_mem->resp.status);
148 } 150 }
149 151
152 /* get xGMI session id from response buffer */
153 cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;
154
150 if (ucode) { 155 if (ucode) {
151 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 156 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
152 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 157 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
@@ -495,6 +500,98 @@ static int psp_hw_start(struct psp_context *psp)
495 return 0; 500 return 0;
496} 501}
497 502
503static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
504 enum psp_gfx_fw_type *type)
505{
506 switch (ucode->ucode_id) {
507 case AMDGPU_UCODE_ID_SDMA0:
508 *type = GFX_FW_TYPE_SDMA0;
509 break;
510 case AMDGPU_UCODE_ID_SDMA1:
511 *type = GFX_FW_TYPE_SDMA1;
512 break;
513 case AMDGPU_UCODE_ID_CP_CE:
514 *type = GFX_FW_TYPE_CP_CE;
515 break;
516 case AMDGPU_UCODE_ID_CP_PFP:
517 *type = GFX_FW_TYPE_CP_PFP;
518 break;
519 case AMDGPU_UCODE_ID_CP_ME:
520 *type = GFX_FW_TYPE_CP_ME;
521 break;
522 case AMDGPU_UCODE_ID_CP_MEC1:
523 *type = GFX_FW_TYPE_CP_MEC;
524 break;
525 case AMDGPU_UCODE_ID_CP_MEC1_JT:
526 *type = GFX_FW_TYPE_CP_MEC_ME1;
527 break;
528 case AMDGPU_UCODE_ID_CP_MEC2:
529 *type = GFX_FW_TYPE_CP_MEC;
530 break;
531 case AMDGPU_UCODE_ID_CP_MEC2_JT:
532 *type = GFX_FW_TYPE_CP_MEC_ME2;
533 break;
534 case AMDGPU_UCODE_ID_RLC_G:
535 *type = GFX_FW_TYPE_RLC_G;
536 break;
537 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
538 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
539 break;
540 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
541 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
542 break;
543 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
544 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
545 break;
546 case AMDGPU_UCODE_ID_SMC:
547 *type = GFX_FW_TYPE_SMU;
548 break;
549 case AMDGPU_UCODE_ID_UVD:
550 *type = GFX_FW_TYPE_UVD;
551 break;
552 case AMDGPU_UCODE_ID_UVD1:
553 *type = GFX_FW_TYPE_UVD1;
554 break;
555 case AMDGPU_UCODE_ID_VCE:
556 *type = GFX_FW_TYPE_VCE;
557 break;
558 case AMDGPU_UCODE_ID_VCN:
559 *type = GFX_FW_TYPE_VCN;
560 break;
561 case AMDGPU_UCODE_ID_DMCU_ERAM:
562 *type = GFX_FW_TYPE_DMCU_ERAM;
563 break;
564 case AMDGPU_UCODE_ID_DMCU_INTV:
565 *type = GFX_FW_TYPE_DMCU_ISR;
566 break;
567 case AMDGPU_UCODE_ID_MAXIMUM:
568 default:
569 return -EINVAL;
570 }
571
572 return 0;
573}
574
575static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
576 struct psp_gfx_cmd_resp *cmd)
577{
578 int ret;
579 uint64_t fw_mem_mc_addr = ucode->mc_addr;
580
581 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
582
583 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
584 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
585 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
586 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
587
588 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
589 if (ret)
590 DRM_ERROR("Unknown firmware type\n");
591
592 return ret;
593}
594
498static int psp_np_fw_load(struct psp_context *psp) 595static int psp_np_fw_load(struct psp_context *psp)
499{ 596{
500 int i, ret; 597 int i, ret;
@@ -516,7 +613,7 @@ static int psp_np_fw_load(struct psp_context *psp)
516 /*skip ucode loading in SRIOV VF */ 613 /*skip ucode loading in SRIOV VF */
517 continue; 614 continue;
518 615
519 ret = psp_prep_cmd_buf(ucode, psp->cmd); 616 ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
520 if (ret) 617 if (ret)
521 return ret; 618 return ret;
522 619
@@ -541,7 +638,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
541 struct psp_context *psp = &adev->psp; 638 struct psp_context *psp = &adev->psp;
542 639
543 if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) { 640 if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
544 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 641 psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
545 goto skip_memalloc; 642 goto skip_memalloc;
546 } 643 }
547 644
@@ -618,10 +715,6 @@ static int psp_hw_init(void *handle)
618 int ret; 715 int ret;
619 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 716 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
620 717
621
622 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
623 return 0;
624
625 mutex_lock(&adev->firmware.mutex); 718 mutex_lock(&adev->firmware.mutex);
626 /* 719 /*
627 * This sequence is just used on hw_init only once, no need on 720 * This sequence is just used on hw_init only once, no need on
@@ -651,9 +744,6 @@ static int psp_hw_fini(void *handle)
651 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 744 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
652 struct psp_context *psp = &adev->psp; 745 struct psp_context *psp = &adev->psp;
653 746
654 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
655 return 0;
656
657 if (adev->gmc.xgmi.num_physical_nodes > 1 && 747 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
658 psp->xgmi_context.initialized == 1) 748 psp->xgmi_context.initialized == 1)
659 psp_xgmi_terminate(psp); 749 psp_xgmi_terminate(psp);
@@ -682,9 +772,6 @@ static int psp_suspend(void *handle)
682 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 772 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
683 struct psp_context *psp = &adev->psp; 773 struct psp_context *psp = &adev->psp;
684 774
685 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
686 return 0;
687
688 if (adev->gmc.xgmi.num_physical_nodes > 1 && 775 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
689 psp->xgmi_context.initialized == 1) { 776 psp->xgmi_context.initialized == 1) {
690 ret = psp_xgmi_terminate(psp); 777 ret = psp_xgmi_terminate(psp);
@@ -709,9 +796,6 @@ static int psp_resume(void *handle)
709 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 796 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
710 struct psp_context *psp = &adev->psp; 797 struct psp_context *psp = &adev->psp;
711 798
712 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
713 return 0;
714
715 DRM_INFO("PSP is resuming...\n"); 799 DRM_INFO("PSP is resuming...\n");
716 800
717 mutex_lock(&adev->firmware.mutex); 801 mutex_lock(&adev->firmware.mutex);
@@ -747,11 +831,6 @@ static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
747{ 831{
748 struct amdgpu_firmware_info *ucode = NULL; 832 struct amdgpu_firmware_info *ucode = NULL;
749 833
750 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
751 DRM_INFO("firmware is not loaded by PSP\n");
752 return true;
753 }
754
755 if (!adev->firmware.fw_size) 834 if (!adev->firmware.fw_size)
756 return false; 835 return false;
757 836
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 3ee573b4016e..2ef98cc755d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -65,8 +65,6 @@ struct psp_funcs
65 int (*init_microcode)(struct psp_context *psp); 65 int (*init_microcode)(struct psp_context *psp);
66 int (*bootloader_load_sysdrv)(struct psp_context *psp); 66 int (*bootloader_load_sysdrv)(struct psp_context *psp);
67 int (*bootloader_load_sos)(struct psp_context *psp); 67 int (*bootloader_load_sos)(struct psp_context *psp);
68 int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode,
69 struct psp_gfx_cmd_resp *cmd);
70 int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type); 68 int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
71 int (*ring_create)(struct psp_context *psp, 69 int (*ring_create)(struct psp_context *psp,
72 enum psp_ring_type ring_type); 70 enum psp_ring_type ring_type);
@@ -176,7 +174,6 @@ struct psp_xgmi_topology_info {
176 struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES]; 174 struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
177}; 175};
178 176
179#define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
180#define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type)) 177#define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type))
181#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type)) 178#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
182#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type)) 179#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index d87e828a084b..d7fae2676269 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -131,7 +131,7 @@ struct amdgpu_ring_funcs {
131 void (*emit_ib)(struct amdgpu_ring *ring, 131 void (*emit_ib)(struct amdgpu_ring *ring,
132 struct amdgpu_job *job, 132 struct amdgpu_job *job,
133 struct amdgpu_ib *ib, 133 struct amdgpu_ib *ib,
134 bool ctx_switch); 134 uint32_t flags);
135 void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, 135 void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
136 uint64_t seq, unsigned flags); 136 uint64_t seq, unsigned flags);
137 void (*emit_pipeline_sync)(struct amdgpu_ring *ring); 137 void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -229,7 +229,7 @@ struct amdgpu_ring {
229#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) 229#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
230#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) 230#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
231#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) 231#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
232#define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c))) 232#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
233#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) 233#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
234#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) 234#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
235#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) 235#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 626abca770a0..d3ca2424b5fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -76,9 +76,10 @@ TRACE_EVENT(amdgpu_mm_wreg,
76); 76);
77 77
78TRACE_EVENT(amdgpu_iv, 78TRACE_EVENT(amdgpu_iv,
79 TP_PROTO(struct amdgpu_iv_entry *iv), 79 TP_PROTO(unsigned ih, struct amdgpu_iv_entry *iv),
80 TP_ARGS(iv), 80 TP_ARGS(ih, iv),
81 TP_STRUCT__entry( 81 TP_STRUCT__entry(
82 __field(unsigned, ih)
82 __field(unsigned, client_id) 83 __field(unsigned, client_id)
83 __field(unsigned, src_id) 84 __field(unsigned, src_id)
84 __field(unsigned, ring_id) 85 __field(unsigned, ring_id)
@@ -90,6 +91,7 @@ TRACE_EVENT(amdgpu_iv,
90 __array(unsigned, src_data, 4) 91 __array(unsigned, src_data, 4)
91 ), 92 ),
92 TP_fast_assign( 93 TP_fast_assign(
94 __entry->ih = ih;
93 __entry->client_id = iv->client_id; 95 __entry->client_id = iv->client_id;
94 __entry->src_id = iv->src_id; 96 __entry->src_id = iv->src_id;
95 __entry->ring_id = iv->ring_id; 97 __entry->ring_id = iv->ring_id;
@@ -103,8 +105,9 @@ TRACE_EVENT(amdgpu_iv,
103 __entry->src_data[2] = iv->src_data[2]; 105 __entry->src_data[2] = iv->src_data[2];
104 __entry->src_data[3] = iv->src_data[3]; 106 __entry->src_data[3] = iv->src_data[3];
105 ), 107 ),
106 TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x", 108 TP_printk("ih:%u client_id:%u src_id:%u ring:%u vmid:%u "
107 __entry->client_id, __entry->src_id, 109 "timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x",
110 __entry->ih, __entry->client_id, __entry->src_id,
108 __entry->ring_id, __entry->vmid, 111 __entry->ring_id, __entry->vmid,
109 __entry->timestamp, __entry->pasid, 112 __entry->timestamp, __entry->pasid,
110 __entry->src_data[0], __entry->src_data[1], 113 __entry->src_data[0], __entry->src_data[1],
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c91ec3101d00..b852abb9db0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1546,7 +1546,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
1546 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, 1546 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1547 .io_mem_free = &amdgpu_ttm_io_mem_free, 1547 .io_mem_free = &amdgpu_ttm_io_mem_free,
1548 .io_mem_pfn = amdgpu_ttm_io_mem_pfn, 1548 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1549 .access_memory = &amdgpu_ttm_access_memory 1549 .access_memory = &amdgpu_ttm_access_memory,
1550 .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1550}; 1551};
1551 1552
1552/* 1553/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 98a1b2ce2b9d..c021b114c8a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1035,7 +1035,7 @@ out:
1035void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, 1035void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
1036 struct amdgpu_job *job, 1036 struct amdgpu_job *job,
1037 struct amdgpu_ib *ib, 1037 struct amdgpu_ib *ib,
1038 bool ctx_switch) 1038 uint32_t flags)
1039{ 1039{
1040 amdgpu_ring_write(ring, VCE_CMD_IB); 1040 amdgpu_ring_write(ring, VCE_CMD_IB);
1041 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); 1041 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 50293652af14..30ea54dd9117 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -66,7 +66,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
66int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); 66int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
67int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); 67int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
68void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, 68void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
69 struct amdgpu_ib *ib, bool ctx_switch); 69 struct amdgpu_ib *ib, uint32_t flags);
70void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, 70void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
71 unsigned flags); 71 unsigned flags);
72int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); 72int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e73d152659a2..0bc6f553dc08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -623,6 +623,28 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
623 list_add(&entry->tv.head, validated); 623 list_add(&entry->tv.head, validated);
624} 624}
625 625
626void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
627{
628 struct amdgpu_bo *abo;
629 struct amdgpu_vm_bo_base *bo_base;
630
631 if (!amdgpu_bo_is_amdgpu_bo(bo))
632 return;
633
634 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
635 return;
636
637 abo = ttm_to_amdgpu_bo(bo);
638 if (!abo->parent)
639 return;
640 for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
641 struct amdgpu_vm *vm = bo_base->vm;
642
643 if (abo->tbo.resv == vm->root.base.bo->tbo.resv)
644 vm->bulk_moveable = false;
645 }
646
647}
626/** 648/**
627 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU 649 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
628 * 650 *
@@ -799,9 +821,16 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
799 addr += ats_entries * 8; 821 addr += ats_entries * 8;
800 } 822 }
801 823
802 if (entries) 824 if (entries) {
825 uint64_t value = 0;
826
827 /* Workaround for fault priority problem on GMC9 */
828 if (level == AMDGPU_VM_PTB && adev->asic_type >= CHIP_VEGA10)
829 value = AMDGPU_PTE_EXECUTABLE;
830
803 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0, 831 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
804 entries, 0, 0); 832 entries, 0, value);
833 }
805 834
806 amdgpu_ring_pad_ib(ring, &job->ibs[0]); 835 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
807 836
@@ -847,9 +876,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
847 bp->size = amdgpu_vm_bo_size(adev, level); 876 bp->size = amdgpu_vm_bo_size(adev, level);
848 bp->byte_align = AMDGPU_GPU_PAGE_SIZE; 877 bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
849 bp->domain = AMDGPU_GEM_DOMAIN_VRAM; 878 bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
850 if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
851 adev->flags & AMD_IS_APU)
852 bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
853 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain); 879 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
854 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | 880 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
855 AMDGPU_GEM_CREATE_CPU_GTT_USWC; 881 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -1506,20 +1532,27 @@ error:
1506} 1532}
1507 1533
1508/** 1534/**
1509 * amdgpu_vm_update_huge - figure out parameters for PTE updates 1535 * amdgpu_vm_update_flags - figure out flags for PTE updates
1510 * 1536 *
1511 * Make sure to set the right flags for the PTEs at the desired level. 1537 * Make sure to set the right flags for the PTEs at the desired level.
1512 */ 1538 */
1513static void amdgpu_vm_update_huge(struct amdgpu_pte_update_params *params, 1539static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
1514 struct amdgpu_bo *bo, unsigned level, 1540 struct amdgpu_bo *bo, unsigned level,
1515 uint64_t pe, uint64_t addr, 1541 uint64_t pe, uint64_t addr,
1516 unsigned count, uint32_t incr, 1542 unsigned count, uint32_t incr,
1517 uint64_t flags) 1543 uint64_t flags)
1518 1544
1519{ 1545{
1520 if (level != AMDGPU_VM_PTB) { 1546 if (level != AMDGPU_VM_PTB) {
1521 flags |= AMDGPU_PDE_PTE; 1547 flags |= AMDGPU_PDE_PTE;
1522 amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags); 1548 amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
1549
1550 } else if (params->adev->asic_type >= CHIP_VEGA10 &&
1551 !(flags & AMDGPU_PTE_VALID) &&
1552 !(flags & AMDGPU_PTE_PRT)) {
1553
1554 /* Workaround for fault priority problem on GMC9 */
1555 flags |= AMDGPU_PTE_EXECUTABLE;
1523 } 1556 }
1524 1557
1525 amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags); 1558 amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags);
@@ -1676,9 +1709,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1676 uint64_t upd_end = min(entry_end, frag_end); 1709 uint64_t upd_end = min(entry_end, frag_end);
1677 unsigned nptes = (upd_end - frag_start) >> shift; 1710 unsigned nptes = (upd_end - frag_start) >> shift;
1678 1711
1679 amdgpu_vm_update_huge(params, pt, cursor.level, 1712 amdgpu_vm_update_flags(params, pt, cursor.level,
1680 pe_start, dst, nptes, incr, 1713 pe_start, dst, nptes, incr,
1681 flags | AMDGPU_PTE_FRAG(frag)); 1714 flags | AMDGPU_PTE_FRAG(frag));
1682 1715
1683 pe_start += nptes * 8; 1716 pe_start += nptes * 8;
1684 dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift; 1717 dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
@@ -3006,7 +3039,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
3006 } 3039 }
3007 DRM_DEBUG_DRIVER("VM update mode is %s\n", 3040 DRM_DEBUG_DRIVER("VM update mode is %s\n",
3008 vm->use_cpu_for_update ? "CPU" : "SDMA"); 3041 vm->use_cpu_for_update ? "CPU" : "SDMA");
3009 WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)), 3042 WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
3010 "CPU update of VM recommended only for large BAR system\n"); 3043 "CPU update of VM recommended only for large BAR system\n");
3011 vm->last_update = NULL; 3044 vm->last_update = NULL;
3012 3045
@@ -3136,7 +3169,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
3136 vm->pte_support_ats = pte_support_ats; 3169 vm->pte_support_ats = pte_support_ats;
3137 DRM_DEBUG_DRIVER("VM update mode is %s\n", 3170 DRM_DEBUG_DRIVER("VM update mode is %s\n",
3138 vm->use_cpu_for_update ? "CPU" : "SDMA"); 3171 vm->use_cpu_for_update ? "CPU" : "SDMA");
3139 WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)), 3172 WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
3140 "CPU update of VM recommended only for large BAR system\n"); 3173 "CPU update of VM recommended only for large BAR system\n");
3141 3174
3142 if (vm->pasid) { 3175 if (vm->pasid) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index e8dcfd59fc93..81ff8177f092 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -363,4 +363,6 @@ int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
363 363
364void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key); 364void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
365 365
366void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
367
366#endif 368#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 8a8bc60cb6b4..dac187454b33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -40,26 +40,40 @@ void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
40 return &hive->device_list; 40 return &hive->device_list;
41} 41}
42 42
43struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) 43struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
44{ 44{
45 int i; 45 int i;
46 struct amdgpu_hive_info *tmp; 46 struct amdgpu_hive_info *tmp;
47 47
48 if (!adev->gmc.xgmi.hive_id) 48 if (!adev->gmc.xgmi.hive_id)
49 return NULL; 49 return NULL;
50
51 mutex_lock(&xgmi_mutex);
52
50 for (i = 0 ; i < hive_count; ++i) { 53 for (i = 0 ; i < hive_count; ++i) {
51 tmp = &xgmi_hives[i]; 54 tmp = &xgmi_hives[i];
52 if (tmp->hive_id == adev->gmc.xgmi.hive_id) 55 if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
56 if (lock)
57 mutex_lock(&tmp->hive_lock);
58 mutex_unlock(&xgmi_mutex);
53 return tmp; 59 return tmp;
60 }
54 } 61 }
55 if (i >= AMDGPU_MAX_XGMI_HIVE) 62 if (i >= AMDGPU_MAX_XGMI_HIVE) {
63 mutex_unlock(&xgmi_mutex);
56 return NULL; 64 return NULL;
65 }
57 66
 58 /* initialize a new hive if it does not exist */ 67 /* initialize a new hive if it does not exist */
59 tmp = &xgmi_hives[hive_count++]; 68 tmp = &xgmi_hives[hive_count++];
60 tmp->hive_id = adev->gmc.xgmi.hive_id; 69 tmp->hive_id = adev->gmc.xgmi.hive_id;
61 INIT_LIST_HEAD(&tmp->device_list); 70 INIT_LIST_HEAD(&tmp->device_list);
62 mutex_init(&tmp->hive_lock); 71 mutex_init(&tmp->hive_lock);
72 mutex_init(&tmp->reset_lock);
73 if (lock)
74 mutex_lock(&tmp->hive_lock);
75
76 mutex_unlock(&xgmi_mutex);
63 77
64 return tmp; 78 return tmp;
65} 79}
@@ -111,10 +125,14 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
111 return ret; 125 return ret;
112 } 126 }
113 127
114 mutex_lock(&xgmi_mutex); 128 hive = amdgpu_get_xgmi_hive(adev, 1);
115 hive = amdgpu_get_xgmi_hive(adev); 129 if (!hive) {
116 if (!hive) 130 ret = -EINVAL;
 131 dev_err(adev->dev,
 132 "XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
133 adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
117 goto exit; 134 goto exit;
135 }
118 136
119 hive_topology = &hive->topology_info; 137 hive_topology = &hive->topology_info;
120 138
@@ -142,8 +160,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
142 break; 160 break;
143 } 161 }
144 162
163 mutex_unlock(&hive->hive_lock);
145exit: 164exit:
146 mutex_unlock(&xgmi_mutex);
147 return ret; 165 return ret;
148} 166}
149 167
@@ -154,15 +172,14 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
154 if (!adev->gmc.xgmi.supported) 172 if (!adev->gmc.xgmi.supported)
155 return; 173 return;
156 174
157 mutex_lock(&xgmi_mutex); 175 hive = amdgpu_get_xgmi_hive(adev, 1);
158
159 hive = amdgpu_get_xgmi_hive(adev);
160 if (!hive) 176 if (!hive)
161 goto exit; 177 return;
162 178
163 if (!(hive->number_devices--)) 179 if (!(hive->number_devices--)) {
164 mutex_destroy(&hive->hive_lock); 180 mutex_destroy(&hive->hive_lock);
165 181 mutex_destroy(&hive->reset_lock);
166exit: 182 } else {
167 mutex_unlock(&xgmi_mutex); 183 mutex_unlock(&hive->hive_lock);
184 }
168} 185}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 6151eb9c8ad3..14bc60664159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -29,10 +29,11 @@ struct amdgpu_hive_info {
29 struct list_head device_list; 29 struct list_head device_list;
30 struct psp_xgmi_topology_info topology_info; 30 struct psp_xgmi_topology_info topology_info;
31 int number_devices; 31 int number_devices;
32 struct mutex hive_lock; 32 struct mutex hive_lock,
33 reset_lock;
33}; 34};
34 35
35struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev); 36struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
36int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); 37int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
37int amdgpu_xgmi_add_device(struct amdgpu_device *adev); 38int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
38void amdgpu_xgmi_remove_device(struct amdgpu_device *adev); 39void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
deleted file mode 100644
index 86e14c754dd4..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ /dev/null
@@ -1,6844 +0,0 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <drm/drmP.h>
26#include "amdgpu.h"
27#include "amdgpu_pm.h"
28#include "amdgpu_ucode.h"
29#include "cikd.h"
30#include "amdgpu_dpm.h"
31#include "ci_dpm.h"
32#include "gfx_v7_0.h"
33#include "atom.h"
34#include "amd_pcie.h"
35#include <linux/seq_file.h>
36
37#include "smu/smu_7_0_1_d.h"
38#include "smu/smu_7_0_1_sh_mask.h"
39
40#include "dce/dce_8_0_d.h"
41#include "dce/dce_8_0_sh_mask.h"
42
43#include "bif/bif_4_1_d.h"
44#include "bif/bif_4_1_sh_mask.h"
45
46#include "gca/gfx_7_2_d.h"
47#include "gca/gfx_7_2_sh_mask.h"
48
49#include "gmc/gmc_7_1_d.h"
50#include "gmc/gmc_7_1_sh_mask.h"
51
52MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
53MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
54MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
55MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
56
57#define MC_CG_ARB_FREQ_F0 0x0a
58#define MC_CG_ARB_FREQ_F1 0x0b
59#define MC_CG_ARB_FREQ_F2 0x0c
60#define MC_CG_ARB_FREQ_F3 0x0d
61
62#define SMC_RAM_END 0x40000
63
64#define VOLTAGE_SCALE 4
65#define VOLTAGE_VID_OFFSET_SCALE1 625
66#define VOLTAGE_VID_OFFSET_SCALE2 100
67
68static const struct amd_pm_funcs ci_dpm_funcs;
69
70static const struct ci_pt_defaults defaults_hawaii_xt =
71{
72 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
73 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
74 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
75};
76
77static const struct ci_pt_defaults defaults_hawaii_pro =
78{
79 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
80 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
81 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
82};
83
84static const struct ci_pt_defaults defaults_bonaire_xt =
85{
86 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
87 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
88 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
89};
90
91#if 0
92static const struct ci_pt_defaults defaults_bonaire_pro =
93{
94 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
95 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
96 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
97};
98#endif
99
100static const struct ci_pt_defaults defaults_saturn_xt =
101{
102 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
103 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
104 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
105};
106
107#if 0
108static const struct ci_pt_defaults defaults_saturn_pro =
109{
110 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
111 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
112 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
113};
114#endif
115
116static const struct ci_pt_config_reg didt_config_ci[] =
117{
118 { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119 { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120 { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
121 { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
122 { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
123 { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
124 { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
125 { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
126 { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127 { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128 { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
129 { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
130 { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
131 { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
132 { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
133 { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
134 { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
135 { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136 { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137 { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138 { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
139 { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
140 { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
141 { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
142 { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
143 { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
144 { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145 { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
146 { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
147 { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
148 { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
149 { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
150 { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
151 { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
152 { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
153 { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
154 { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
155 { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
156 { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
157 { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
158 { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
159 { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
160 { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
161 { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
162 { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
163 { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
164 { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
165 { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
166 { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
167 { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
168 { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
169 { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
170 { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
171 { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
172 { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
173 { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
174 { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
175 { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
176 { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
177 { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
178 { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
179 { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
180 { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
181 { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
182 { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
183 { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
184 { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
185 { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
186 { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
187 { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
188 { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
189 { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
190 { 0xFFFFFFFF }
191};
192
193static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
194{
195 return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
196}
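/*
 * The module index lives in byte 2 of BIOS scratch register 4; it is
 * presumably written by the VBIOS at POST and is later used to select
 * the MC register tables matching the fitted memory modules.
 */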
197
198#define MC_CG_ARB_FREQ_F0 0x0a
199#define MC_CG_ARB_FREQ_F1 0x0b
200#define MC_CG_ARB_FREQ_F2 0x0c
201#define MC_CG_ARB_FREQ_F3 0x0d
202
203static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
204 u32 arb_freq_src, u32 arb_freq_dest)
205{
206 u32 mc_arb_dram_timing;
207 u32 mc_arb_dram_timing2;
208 u32 burst_time;
209 u32 mc_cg_config;
210
211 switch (arb_freq_src) {
212 case MC_CG_ARB_FREQ_F0:
213 mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
214 mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
215 burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
216 MC_ARB_BURST_TIME__STATE0__SHIFT;
217 break;
218 case MC_CG_ARB_FREQ_F1:
219 mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING_1);
220 mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
221 burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
222 MC_ARB_BURST_TIME__STATE1__SHIFT;
223 break;
224 default:
225 return -EINVAL;
226 }
227
228 switch (arb_freq_dest) {
229 case MC_CG_ARB_FREQ_F0:
230 WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
231 WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
232 WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
233 ~MC_ARB_BURST_TIME__STATE0_MASK);
234 break;
235 case MC_CG_ARB_FREQ_F1:
236 WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
237 WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
238 WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
239 ~MC_ARB_BURST_TIME__STATE1_MASK);
240 break;
241 default:
242 return -EINVAL;
243 }
244
245 mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
246 WREG32(mmMC_CG_CONFIG, mc_cg_config);
247 WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
248 ~MC_ARB_CG__CG_ARB_REQ_MASK);
249
250 return 0;
251}
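/*
 * The MC keeps several shadow copies of its arbitration registers
 * (MC_CG_ARB_FREQ_F0..F3), one per memory P-state.  The helper above
 * copies the DRAM timing and burst-time fields from the source set to
 * the destination set, then asks the MC to switch to the destination
 * via MC_ARB_CG.  During setup it is typically invoked as, e.g.:
 *
 *	ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0,
 *				    MC_CG_ARB_FREQ_F1);
 *
 * Note that only F0 and F1 are handled here; F2/F3 fall through to
 * -EINVAL.
 */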
252
253static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
254{
255 u8 mc_para_index;
256
257 if (memory_clock < 10000)
258 mc_para_index = 0;
259 else if (memory_clock >= 80000)
260 mc_para_index = 0x0f;
261 else
262 mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
263 return mc_para_index;
264}
265
266static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
267{
268 u8 mc_para_index;
269
270 if (strobe_mode) {
271 if (memory_clock < 12500)
272 mc_para_index = 0x00;
273 else if (memory_clock > 47500)
274 mc_para_index = 0x0f;
275 else
276 mc_para_index = (u8)((memory_clock - 10000) / 2500);
277 } else {
278 if (memory_clock < 65000)
279 mc_para_index = 0x00;
280 else if (memory_clock > 135000)
281 mc_para_index = 0x0f;
282 else
283 mc_para_index = (u8)((memory_clock - 60000) / 5000);
284 }
285 return mc_para_index;
286}
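/*
 * In both helpers above memory_clock is in 10 kHz units (30000 ==
 * 300 MHz), as elsewhere in this file.  The returned 4-bit index picks
 * one of 16 MC parameter sets, e.g. for DDR3 a 300 MHz clock yields
 * (30000 - 10000) / 5000 + 1 == 5, while a 1 GHz GDDR5 clock in
 * non-strobe mode yields (100000 - 60000) / 5000 == 8.
 */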
287
288static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
289 u32 max_voltage_steps,
290 struct atom_voltage_table *voltage_table)
291{
292 unsigned int i, diff;
293
294 if (voltage_table->count <= max_voltage_steps)
295 return;
296
297 diff = voltage_table->count - max_voltage_steps;
298
299 for (i = 0; i < max_voltage_steps; i++)
300 voltage_table->entries[i] = voltage_table->entries[i + diff];
301
302 voltage_table->count = max_voltage_steps;
303}
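/*
 * Trimming drops the *lowest* voltage entries: entries [diff..count)
 * are shifted down, so the table keeps the max_voltage_steps highest
 * voltages that the state table can actually express.
 */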
304
305static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
306 struct atom_voltage_table_entry *voltage_table,
307 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
308static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
309static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
310 u32 target_tdp);
311static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
312static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
313
314static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
315 PPSMC_Msg msg, u32 parameter);
316static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
317static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
318
319static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
320{
321 struct ci_power_info *pi = adev->pm.dpm.priv;
322
323 return pi;
324}
325
326static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
327{
328 struct ci_ps *ps = rps->ps_priv;
329
330 return ps;
331}
332
333static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
334{
335 struct ci_power_info *pi = ci_get_pi(adev);
336
337 switch (adev->pdev->device) {
338 case 0x6649:
339 case 0x6650:
340 case 0x6651:
341 case 0x6658:
342 case 0x665C:
343 case 0x665D:
344 default:
345 pi->powertune_defaults = &defaults_bonaire_xt;
346 break;
347 case 0x6640:
348 case 0x6641:
349 case 0x6646:
350 case 0x6647:
351 pi->powertune_defaults = &defaults_saturn_xt;
352 break;
353 case 0x67B8:
354 case 0x67B0:
355 pi->powertune_defaults = &defaults_hawaii_xt;
356 break;
357 case 0x67BA:
358 case 0x67B1:
359 pi->powertune_defaults = &defaults_hawaii_pro;
360 break;
361 case 0x67A0:
362 case 0x67A1:
363 case 0x67A2:
364 case 0x67A8:
365 case 0x67A9:
366 case 0x67AA:
367 case 0x67B9:
368 case 0x67BE:
369 pi->powertune_defaults = &defaults_bonaire_xt;
370 break;
371 }
372
373 pi->dte_tj_offset = 0;
374
375 pi->caps_power_containment = true;
376 pi->caps_cac = false;
377 pi->caps_sq_ramping = false;
378 pi->caps_db_ramping = false;
379 pi->caps_td_ramping = false;
380 pi->caps_tcp_ramping = false;
381
382 if (pi->caps_power_containment) {
383 pi->caps_cac = true;
384 if (adev->asic_type == CHIP_HAWAII)
385 pi->enable_bapm_feature = false;
386 else
387 pi->enable_bapm_feature = true;
388 pi->enable_tdc_limit_feature = true;
389 pi->enable_pkg_pwr_tracking_feature = true;
390 }
391}
392
393static u8 ci_convert_to_vid(u16 vddc)
394{
395 return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
396}
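/*
 * With VOLTAGE_SCALE == 4 this is the SVI2-style VID mapping: vddc in
 * mV is rescaled to 0.25 mV units, 6200 corresponds to the 1.55 V
 * ceiling and 25 to the 6.25 mV step size, so e.g. 1100 mV gives
 * (6200 - 4400) / 25 == VID 72.
 */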
397
398static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
399{
400 struct ci_power_info *pi = ci_get_pi(adev);
401 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
402 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
403 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
404 u32 i;
405
406 if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
407 return -EINVAL;
408 if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
409 return -EINVAL;
410 if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
411 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
412 return -EINVAL;
413
414 for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
415 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
416 lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
417 hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
418 hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
419 } else {
420 lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
421 hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
422 }
423 }
424 return 0;
425}
426
427static int ci_populate_vddc_vid(struct amdgpu_device *adev)
428{
429 struct ci_power_info *pi = ci_get_pi(adev);
430 u8 *vid = pi->smc_powertune_table.VddCVid;
431 u32 i;
432
433 if (pi->vddc_voltage_table.count > 8)
434 return -EINVAL;
435
436 for (i = 0; i < pi->vddc_voltage_table.count; i++)
437 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
438
439 return 0;
440}
441
442static int ci_populate_svi_load_line(struct amdgpu_device *adev)
443{
444 struct ci_power_info *pi = ci_get_pi(adev);
445 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
446
447 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
448 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
449 pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
450 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
451
452 return 0;
453}
454
455static int ci_populate_tdc_limit(struct amdgpu_device *adev)
456{
457 struct ci_power_info *pi = ci_get_pi(adev);
458 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
459 u16 tdc_limit;
460
461 tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
462 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
463 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
464 pt_defaults->tdc_vddc_throttle_release_limit_perc;
465 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
466
467 return 0;
468}
469
470static int ci_populate_dw8(struct amdgpu_device *adev)
471{
472 struct ci_power_info *pi = ci_get_pi(adev);
473 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
474 int ret;
475
476 ret = amdgpu_ci_read_smc_sram_dword(adev,
477 SMU7_FIRMWARE_HEADER_LOCATION +
478 offsetof(SMU7_Firmware_Header, PmFuseTable) +
479 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
480 (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
481 pi->sram_end);
482 if (ret)
483 return -EINVAL;
484 else
485 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
486
487 return 0;
488}
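/*
 * Note: on a successful SRAM read the TdcWaterfallCtl value just read
 * is immediately overwritten with the powertune default, so the read
 * above effectively only verifies that the PM fuse table is reachable.
 */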
489
490static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
491{
492 struct ci_power_info *pi = ci_get_pi(adev);
493
494 if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
495 (adev->pm.dpm.fan.fan_output_sensitivity == 0))
496 adev->pm.dpm.fan.fan_output_sensitivity =
497 adev->pm.dpm.fan.default_fan_output_sensitivity;
498
499 pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
500 cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
501
502 return 0;
503}
504
505static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
506{
507 struct ci_power_info *pi = ci_get_pi(adev);
508 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
509 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
510 int i, min, max;
511
512 min = max = hi_vid[0];
513 for (i = 0; i < 8; i++) {
514 if (0 != hi_vid[i]) {
515 if (min > hi_vid[i])
516 min = hi_vid[i];
517 if (max < hi_vid[i])
518 max = hi_vid[i];
519 }
520
521 if (0 != lo_vid[i]) {
522 if (min > lo_vid[i])
523 min = lo_vid[i];
524 if (max < lo_vid[i])
525 max = lo_vid[i];
526 }
527 }
528
529 if ((min == 0) || (max == 0))
530 return -EINVAL;
531 pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
532 pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
533
534 return 0;
535}
536
537static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
538{
539 struct ci_power_info *pi = ci_get_pi(adev);
540 u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
541 u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
542 struct amdgpu_cac_tdp_table *cac_tdp_table =
543 adev->pm.dpm.dyn_state.cac_tdp_table;
544
545 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
546 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
547
548 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
549 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
550
551 return 0;
552}
553
554static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
555{
556 struct ci_power_info *pi = ci_get_pi(adev);
557 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
558 SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
559 struct amdgpu_cac_tdp_table *cac_tdp_table =
560 adev->pm.dpm.dyn_state.cac_tdp_table;
561 struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
562 int i, j, k;
563 const u16 *def1;
564 const u16 *def2;
565
566 dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
567 dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
568
569 dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
570 dpm_table->GpuTjMax =
571 (u8)(pi->thermal_temp_setting.temperature_high / 1000);
572 dpm_table->GpuTjHyst = 8;
573
574 dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
575
576 if (ppm) {
577 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
578 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
579 } else {
580 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
581 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
582 }
583
584 dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
585 def1 = pt_defaults->bapmti_r;
586 def2 = pt_defaults->bapmti_rc;
587
588 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
589 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
590 for (k = 0; k < SMU7_DTE_SINKS; k++) {
591 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
592 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
593 def1++;
594 def2++;
595 }
596 }
597 }
598
599 return 0;
600}
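/*
 * The SMU expects power and temperature fields in unsigned 8.8 fixed
 * point, hence the "* 256" conversions above: a 150 W TDP is encoded
 * as 38400.  The extra "/ 1000" for PPM_PkgPwrLimit suggests
 * ppm->dgpu_tdp is stored in mW.
 */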
601
602static int ci_populate_pm_base(struct amdgpu_device *adev)
603{
604 struct ci_power_info *pi = ci_get_pi(adev);
605 u32 pm_fuse_table_offset;
606 int ret;
607
608 if (pi->caps_power_containment) {
609 ret = amdgpu_ci_read_smc_sram_dword(adev,
610 SMU7_FIRMWARE_HEADER_LOCATION +
611 offsetof(SMU7_Firmware_Header, PmFuseTable),
612 &pm_fuse_table_offset, pi->sram_end);
613 if (ret)
614 return ret;
615 ret = ci_populate_bapm_vddc_vid_sidd(adev);
616 if (ret)
617 return ret;
618 ret = ci_populate_vddc_vid(adev);
619 if (ret)
620 return ret;
621 ret = ci_populate_svi_load_line(adev);
622 if (ret)
623 return ret;
624 ret = ci_populate_tdc_limit(adev);
625 if (ret)
626 return ret;
627 ret = ci_populate_dw8(adev);
628 if (ret)
629 return ret;
630 ret = ci_populate_fuzzy_fan(adev);
631 if (ret)
632 return ret;
633 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
634 if (ret)
635 return ret;
636 ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
637 if (ret)
638 return ret;
639 ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
640 (u8 *)&pi->smc_powertune_table,
641 sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
642 if (ret)
643 return ret;
644 }
645
646 return 0;
647}
648
649static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
650{
651 struct ci_power_info *pi = ci_get_pi(adev);
652 u32 data;
653
654 if (pi->caps_sq_ramping) {
655 data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
656 if (enable)
657 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
658 else
659 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
660 WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
661 }
662
663 if (pi->caps_db_ramping) {
664 data = RREG32_DIDT(ixDIDT_DB_CTRL0);
665 if (enable)
666 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
667 else
668 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
669 WREG32_DIDT(ixDIDT_DB_CTRL0, data);
670 }
671
672 if (pi->caps_td_ramping) {
673 data = RREG32_DIDT(ixDIDT_TD_CTRL0);
674 if (enable)
675 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
676 else
677 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
678 WREG32_DIDT(ixDIDT_TD_CTRL0, data);
679 }
680
681 if (pi->caps_tcp_ramping) {
682 data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
683 if (enable)
684 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
685 else
686 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
687 WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
688 }
689}
690
691static int ci_program_pt_config_registers(struct amdgpu_device *adev,
692 const struct ci_pt_config_reg *cac_config_regs)
693{
694 const struct ci_pt_config_reg *config_regs = cac_config_regs;
695 u32 data;
696 u32 cache = 0;
697
698 if (config_regs == NULL)
699 return -EINVAL;
700
701 while (config_regs->offset != 0xFFFFFFFF) {
702 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
703 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
704 } else {
705 switch (config_regs->type) {
706 case CISLANDS_CONFIGREG_SMC_IND:
707 data = RREG32_SMC(config_regs->offset);
708 break;
709 case CISLANDS_CONFIGREG_DIDT_IND:
710 data = RREG32_DIDT(config_regs->offset);
711 break;
712 default:
713 data = RREG32(config_regs->offset);
714 break;
715 }
716
717 data &= ~config_regs->mask;
718 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
719 data |= cache;
720
721 switch (config_regs->type) {
722 case CISLANDS_CONFIGREG_SMC_IND:
723 WREG32_SMC(config_regs->offset, data);
724 break;
725 case CISLANDS_CONFIGREG_DIDT_IND:
726 WREG32_DIDT(config_regs->offset, data);
727 break;
728 default:
729 WREG32(config_regs->offset, data);
730 break;
731 }
732 cache = 0;
733 }
734 config_regs++;
735 }
736 return 0;
737}
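/*
 * CISLANDS_CONFIGREG_CACHE rows don't touch the hardware; their shifted
 * values accumulate in 'cache' and are OR'ed into the next non-cache
 * write, so a single register update can be assembled from several
 * table rows.  The table ends at the 0xFFFFFFFF offset sentinel.
 */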
738
739static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
740{
741 struct ci_power_info *pi = ci_get_pi(adev);
742 int ret;
743
744 if (pi->caps_sq_ramping || pi->caps_db_ramping ||
745 pi->caps_td_ramping || pi->caps_tcp_ramping) {
746 amdgpu_gfx_rlc_enter_safe_mode(adev);
747
748 if (enable) {
749 ret = ci_program_pt_config_registers(adev, didt_config_ci);
750 if (ret) {
751 amdgpu_gfx_rlc_exit_safe_mode(adev);
752 return ret;
753 }
754 }
755
756 ci_do_enable_didt(adev, enable);
757
758 amdgpu_gfx_rlc_exit_safe_mode(adev);
759 }
760
761 return 0;
762}
763
764static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
765{
766 struct ci_power_info *pi = ci_get_pi(adev);
767 PPSMC_Result smc_result;
768 int ret = 0;
769
770 if (enable) {
771 pi->power_containment_features = 0;
772 if (pi->caps_power_containment) {
773 if (pi->enable_bapm_feature) {
774 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
775 if (smc_result != PPSMC_Result_OK)
776 ret = -EINVAL;
777 else
778 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
779 }
780
781 if (pi->enable_tdc_limit_feature) {
782 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
783 if (smc_result != PPSMC_Result_OK)
784 ret = -EINVAL;
785 else
786 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
787 }
788
789 if (pi->enable_pkg_pwr_tracking_feature) {
790 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
791 if (smc_result != PPSMC_Result_OK) {
792 ret = -EINVAL;
793 } else {
794 struct amdgpu_cac_tdp_table *cac_tdp_table =
795 adev->pm.dpm.dyn_state.cac_tdp_table;
796 u32 default_pwr_limit =
797 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
798
799 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
800
801 ci_set_power_limit(adev, default_pwr_limit);
802 }
803 }
804 }
805 } else {
806 if (pi->caps_power_containment && pi->power_containment_features) {
807 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
808 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);
809
810 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
811 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
812
813 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
814 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
815 pi->power_containment_features = 0;
816 }
817 }
818
819 return ret;
820}
821
822static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
823{
824 struct ci_power_info *pi = ci_get_pi(adev);
825 PPSMC_Result smc_result;
826 int ret = 0;
827
828 if (pi->caps_cac) {
829 if (enable) {
830 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
831 if (smc_result != PPSMC_Result_OK) {
832 ret = -EINVAL;
833 pi->cac_enabled = false;
834 } else {
835 pi->cac_enabled = true;
836 }
837 } else if (pi->cac_enabled) {
838 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
839 pi->cac_enabled = false;
840 }
841 }
842
843 return ret;
844}
845
846static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
847 bool enable)
848{
849 struct ci_power_info *pi = ci_get_pi(adev);
850 PPSMC_Result smc_result = PPSMC_Result_OK;
851
852 if (pi->thermal_sclk_dpm_enabled) {
853 if (enable)
854 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
855 else
856 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
857 }
858
859 if (smc_result == PPSMC_Result_OK)
860 return 0;
861 else
862 return -EINVAL;
863}
864
865static int ci_power_control_set_level(struct amdgpu_device *adev)
866{
867 struct ci_power_info *pi = ci_get_pi(adev);
868 struct amdgpu_cac_tdp_table *cac_tdp_table =
869 adev->pm.dpm.dyn_state.cac_tdp_table;
870 s32 adjust_percent;
871 s32 target_tdp;
872 int ret = 0;
873 bool adjust_polarity = false; /* ??? */
874
875 if (pi->caps_power_containment) {
876 adjust_percent = adjust_polarity ?
877 adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
878 target_tdp = ((100 + adjust_percent) *
879 (s32)cac_tdp_table->configurable_tdp) / 100;
880
881 ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
882 }
883
884 return ret;
885}
886
887static void ci_dpm_powergate_uvd(void *handle, bool gate)
888{
889 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
890 struct ci_power_info *pi = ci_get_pi(adev);
891
892 pi->uvd_power_gated = gate;
893
894 if (gate) {
895 /* stop the UVD block */
896 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
897 AMD_PG_STATE_GATE);
898 ci_update_uvd_dpm(adev, gate);
899 } else {
900 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
901 AMD_PG_STATE_UNGATE);
902 ci_update_uvd_dpm(adev, gate);
903 }
904}
905
906static bool ci_dpm_vblank_too_short(void *handle)
907{
908 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
909 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
910 u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
911
912 /* disable mclk switching if the refresh is >120Hz, even if the
913 * blanking period would allow it
914 */
915 if (amdgpu_dpm_get_vrefresh(adev) > 120)
916 return true;
917
918 if (vblank_time < switch_limit)
919 return true;
920 else
921 return false;
922
923}
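/*
 * amdgpu_dpm_get_vblank_time() reports the blanking interval in
 * microseconds; mclk switching is only allowed when that interval is
 * long enough to hide the switch (~450 us on GDDR5, presumably because
 * retraining takes longer there, vs ~300 us otherwise).
 */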
924
925static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
926 struct amdgpu_ps *rps)
927{
928 struct ci_ps *ps = ci_get_ps(rps);
929 struct ci_power_info *pi = ci_get_pi(adev);
930 struct amdgpu_clock_and_voltage_limits *max_limits;
931 bool disable_mclk_switching;
932 u32 sclk, mclk;
933 int i;
934
935 if (rps->vce_active) {
936 rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
937 rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
938 } else {
939 rps->evclk = 0;
940 rps->ecclk = 0;
941 }
942
943 if ((adev->pm.dpm.new_active_crtc_count > 1) ||
944 ci_dpm_vblank_too_short(adev))
945 disable_mclk_switching = true;
946 else
947 disable_mclk_switching = false;
948
949 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
950 pi->battery_state = true;
951 else
952 pi->battery_state = false;
953
954 if (adev->pm.ac_power)
955 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
956 else
957 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
958
959 if (adev->pm.ac_power == false) {
960 for (i = 0; i < ps->performance_level_count; i++) {
961 if (ps->performance_levels[i].mclk > max_limits->mclk)
962 ps->performance_levels[i].mclk = max_limits->mclk;
963 if (ps->performance_levels[i].sclk > max_limits->sclk)
964 ps->performance_levels[i].sclk = max_limits->sclk;
965 }
966 }
967
968 /* XXX validate the min clocks required for display */
969
970 if (disable_mclk_switching) {
971 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
972 sclk = ps->performance_levels[0].sclk;
973 } else {
974 mclk = ps->performance_levels[0].mclk;
975 sclk = ps->performance_levels[0].sclk;
976 }
977
978 if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
979 sclk = adev->pm.pm_display_cfg.min_core_set_clock;
980
981 if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
982 mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
983
984 if (rps->vce_active) {
985 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
986 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
987 if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
988 mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
989 }
990
991 ps->performance_levels[0].sclk = sclk;
992 ps->performance_levels[0].mclk = mclk;
993
994 if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
995 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
996
997 if (disable_mclk_switching) {
998 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
999 ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
1000 } else {
1001 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
1002 ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
1003 }
1004}
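/*
 * When mclk switching is disabled, level 0 is pinned to the highest
 * mclk in the state (and the fixups above keep the levels monotonic),
 * so the memory clock never has to change while scanout is active.
 */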
1005
1006static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
1007 int min_temp, int max_temp)
1008{
1009 int low_temp = 0 * 1000;
1010 int high_temp = 255 * 1000;
1011 u32 tmp;
1012
1013 if (low_temp < min_temp)
1014 low_temp = min_temp;
1015 if (high_temp > max_temp)
1016 high_temp = max_temp;
1017 if (high_temp < low_temp) {
1018 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1019 return -EINVAL;
1020 }
1021
1022 tmp = RREG32_SMC(ixCG_THERMAL_INT);
1023 tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
1024 tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
1025		((low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT);
1026 WREG32_SMC(ixCG_THERMAL_INT, tmp);
1027
1028#if 0
1029 /* XXX: need to figure out how to handle this properly */
1030 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1031 tmp &= DIG_THERM_DPM_MASK;
1032 tmp |= DIG_THERM_DPM(high_temp / 1000);
1033 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1034#endif
1035
1036 adev->pm.dpm.thermal.min_temp = low_temp;
1037 adev->pm.dpm.thermal.max_temp = high_temp;
1038 return 0;
1039}
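/*
 * min_temp/max_temp are in millidegrees Celsius; the range is clamped
 * to 0..255 C because DIG_THERM_INTH/INTL are 8-bit fields holding
 * whole degrees.
 */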
1040
1041static int ci_thermal_enable_alert(struct amdgpu_device *adev,
1042 bool enable)
1043{
1044 u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
1045 PPSMC_Result result;
1046
1047 if (enable) {
1048 thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1049 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
1050 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1051 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
1052 if (result != PPSMC_Result_OK) {
1053 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1054 return -EINVAL;
1055 }
1056 } else {
1057 thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1058 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
1059 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1060 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
1061 if (result != PPSMC_Result_OK) {
1062 DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
1063 return -EINVAL;
1064 }
1065 }
1066
1067 return 0;
1068}
1069
1070static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
1071{
1072 struct ci_power_info *pi = ci_get_pi(adev);
1073 u32 tmp;
1074
1075 if (pi->fan_ctrl_is_in_default_mode) {
1076 tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
1077 >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1078 pi->fan_ctrl_default_mode = tmp;
1079 tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
1080 >> CG_FDO_CTRL2__TMIN__SHIFT;
1081 pi->t_min = tmp;
1082 pi->fan_ctrl_is_in_default_mode = false;
1083 }
1084
1085 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1086 tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
1087 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1088
1089 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1090 tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1091 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1092}
1093
1094static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
1095{
1096 struct ci_power_info *pi = ci_get_pi(adev);
1097 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1098 u32 duty100;
1099 u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
1100 u16 fdo_min, slope1, slope2;
1101 u32 reference_clock, tmp;
1102 int ret;
1103 u64 tmp64;
1104
1105 if (!pi->fan_table_start) {
1106 adev->pm.dpm.fan.ucode_fan_control = false;
1107 return 0;
1108 }
1109
1110 duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1111 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1112
1113 if (duty100 == 0) {
1114 adev->pm.dpm.fan.ucode_fan_control = false;
1115 return 0;
1116 }
1117
1118 tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
1119 do_div(tmp64, 10000);
1120 fdo_min = (u16)tmp64;
1121
1122 t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
1123 t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
1124
1125 pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
1126 pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
1127
1128 slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
1129 slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
1130
1131 fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
1132 fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
1133 fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
1134
1135 fan_table.Slope1 = cpu_to_be16(slope1);
1136 fan_table.Slope2 = cpu_to_be16(slope2);
1137
1138 fan_table.FdoMin = cpu_to_be16(fdo_min);
1139
1140 fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
1141
1142 fan_table.HystUp = cpu_to_be16(1);
1143
1144 fan_table.HystSlope = cpu_to_be16(1);
1145
1146 fan_table.TempRespLim = cpu_to_be16(5);
1147
1148 reference_clock = amdgpu_asic_get_xclk(adev);
1149
1150 fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
1151 reference_clock) / 1600);
1152
1153 fan_table.FdoMax = cpu_to_be16((u16)duty100);
1154
1155 tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
1156 >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
1157 fan_table.TempSrc = (uint8_t)tmp;
1158
1159 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1160 pi->fan_table_start,
1161 (u8 *)(&fan_table),
1162 sizeof(fan_table),
1163 pi->sram_end);
1164
1165 if (ret) {
1166 DRM_ERROR("Failed to load fan table to the SMC.");
1167 adev->pm.dpm.fan.ucode_fan_control = false;
1168 }
1169
1170 return 0;
1171}
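/*
 * Fan table arithmetic: the pwm_* and t_* trip points are stored in
 * 0.01 % and 0.01 C units, so the "(50 + x) / 100" terms are rounded
 * conversions to whole units, and the "16 *" factor packs the PWM/temp
 * slopes into what appears to be the SMU's 4-bit-fraction fixed-point
 * slope format.
 */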
1172
1173static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
1174{
1175 struct ci_power_info *pi = ci_get_pi(adev);
1176 PPSMC_Result ret;
1177
1178 if (pi->caps_od_fuzzy_fan_control_support) {
1179 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1180 PPSMC_StartFanControl,
1181 FAN_CONTROL_FUZZY);
1182 if (ret != PPSMC_Result_OK)
1183 return -EINVAL;
1184 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1185 PPSMC_MSG_SetFanPwmMax,
1186 adev->pm.dpm.fan.default_max_fan_pwm);
1187 if (ret != PPSMC_Result_OK)
1188 return -EINVAL;
1189 } else {
1190 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1191 PPSMC_StartFanControl,
1192 FAN_CONTROL_TABLE);
1193 if (ret != PPSMC_Result_OK)
1194 return -EINVAL;
1195 }
1196
1197 pi->fan_is_controlled_by_smc = true;
1198 return 0;
1199}
1200
1201
1202static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
1203{
1204 PPSMC_Result ret;
1205 struct ci_power_info *pi = ci_get_pi(adev);
1206
1207 ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
1208 if (ret == PPSMC_Result_OK) {
1209 pi->fan_is_controlled_by_smc = false;
1210 return 0;
1211 } else {
1212 return -EINVAL;
1213 }
1214}
1215
1216static int ci_dpm_get_fan_speed_percent(void *handle,
1217 u32 *speed)
1218{
1219 u32 duty, duty100;
1220 u64 tmp64;
1221 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1222
1223 if (adev->pm.no_fan)
1224 return -ENOENT;
1225
1226 duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1227 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1228 duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
1229 >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
1230
1231 if (duty100 == 0)
1232 return -EINVAL;
1233
1234 tmp64 = (u64)duty * 100;
1235 do_div(tmp64, duty100);
1236 *speed = (u32)tmp64;
1237
1238 if (*speed > 100)
1239 *speed = 100;
1240
1241 return 0;
1242}
1243
1244static int ci_dpm_set_fan_speed_percent(void *handle,
1245 u32 speed)
1246{
1247 u32 tmp;
1248 u32 duty, duty100;
1249 u64 tmp64;
1250 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1251 struct ci_power_info *pi = ci_get_pi(adev);
1252
1253 if (adev->pm.no_fan)
1254 return -ENOENT;
1255
1256 if (pi->fan_is_controlled_by_smc)
1257 return -EINVAL;
1258
1259 if (speed > 100)
1260 return -EINVAL;
1261
1262 duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1263 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1264
1265 if (duty100 == 0)
1266 return -EINVAL;
1267
1268 tmp64 = (u64)speed * duty100;
1269 do_div(tmp64, 100);
1270 duty = (u32)tmp64;
1271
1272 tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
1273 tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
1274 WREG32_SMC(ixCG_FDO_CTRL0, tmp);
1275
1276 return 0;
1277}
1278
1279static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
1280{
1281 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1282
1283 switch (mode) {
1284 case AMD_FAN_CTRL_NONE:
1285 if (adev->pm.dpm.fan.ucode_fan_control)
1286 ci_fan_ctrl_stop_smc_fan_control(adev);
1287 ci_dpm_set_fan_speed_percent(adev, 100);
1288 break;
1289 case AMD_FAN_CTRL_MANUAL:
1290 if (adev->pm.dpm.fan.ucode_fan_control)
1291 ci_fan_ctrl_stop_smc_fan_control(adev);
1292 break;
1293 case AMD_FAN_CTRL_AUTO:
1294 if (adev->pm.dpm.fan.ucode_fan_control)
1295 ci_thermal_start_smc_fan_control(adev);
1296 break;
1297 default:
1298 break;
1299 }
1300}
1301
1302static u32 ci_dpm_get_fan_control_mode(void *handle)
1303{
1304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1305 struct ci_power_info *pi = ci_get_pi(adev);
1306
1307 if (pi->fan_is_controlled_by_smc)
1308 return AMD_FAN_CTRL_AUTO;
1309 else
1310 return AMD_FAN_CTRL_MANUAL;
1311}
1312
1313#if 0
1314static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
1315 u32 *speed)
1316{
1317 u32 tach_period;
1318 u32 xclk = amdgpu_asic_get_xclk(adev);
1319
1320 if (adev->pm.no_fan)
1321 return -ENOENT;
1322
1323 if (adev->pm.fan_pulses_per_revolution == 0)
1324 return -ENOENT;
1325
1326 tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
1327 >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
1328 if (tach_period == 0)
1329 return -ENOENT;
1330
1331 *speed = 60 * xclk * 10000 / tach_period;
1332
1333 return 0;
1334}
1335
1336static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
1337 u32 speed)
1338{
1339 u32 tach_period, tmp;
1340 u32 xclk = amdgpu_asic_get_xclk(adev);
1341
1342 if (adev->pm.no_fan)
1343 return -ENOENT;
1344
1345 if (adev->pm.fan_pulses_per_revolution == 0)
1346 return -ENOENT;
1347
1348 if ((speed < adev->pm.fan_min_rpm) ||
1349 (speed > adev->pm.fan_max_rpm))
1350 return -EINVAL;
1351
1352 if (adev->pm.dpm.fan.ucode_fan_control)
1353 ci_fan_ctrl_stop_smc_fan_control(adev);
1354
1355 tach_period = 60 * xclk * 10000 / (8 * speed);
1356 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
1357 tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
1358	WREG32_SMC(ixCG_TACH_CTRL, tmp);
1359
1360 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1361
1362 return 0;
1363}
1364#endif
1365
1366static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1367{
1368 struct ci_power_info *pi = ci_get_pi(adev);
1369 u32 tmp;
1370
1371 if (!pi->fan_ctrl_is_in_default_mode) {
1372 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1373 tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1374 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1375
1376 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1377 tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1378 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1379 pi->fan_ctrl_is_in_default_mode = true;
1380 }
1381}
1382
1383static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1384{
1385 if (adev->pm.dpm.fan.ucode_fan_control) {
1386 ci_fan_ctrl_start_smc_fan_control(adev);
1387 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1388 }
1389}
1390
1391static void ci_thermal_initialize(struct amdgpu_device *adev)
1392{
1393 u32 tmp;
1394
1395 if (adev->pm.fan_pulses_per_revolution) {
1396 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1397 tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1398 << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1399 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1400 }
1401
1402 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1403 tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1404 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1405}
1406
1407static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1408{
1409 int ret;
1410
1411 ci_thermal_initialize(adev);
1412 ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1413 if (ret)
1414 return ret;
1415 ret = ci_thermal_enable_alert(adev, true);
1416 if (ret)
1417 return ret;
1418 if (adev->pm.dpm.fan.ucode_fan_control) {
1419 ret = ci_thermal_setup_fan_table(adev);
1420 if (ret)
1421 return ret;
1422 ci_thermal_start_smc_fan_control(adev);
1423 }
1424
1425 return 0;
1426}
1427
1428static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1429{
1430 if (!adev->pm.no_fan)
1431 ci_fan_ctrl_set_default_mode(adev);
1432}
1433
1434static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1435 u16 reg_offset, u32 *value)
1436{
1437 struct ci_power_info *pi = ci_get_pi(adev);
1438
1439 return amdgpu_ci_read_smc_sram_dword(adev,
1440 pi->soft_regs_start + reg_offset,
1441 value, pi->sram_end);
1442}
1443
1444static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1445 u16 reg_offset, u32 value)
1446{
1447 struct ci_power_info *pi = ci_get_pi(adev);
1448
1449 return amdgpu_ci_write_smc_sram_dword(adev,
1450 pi->soft_regs_start + reg_offset,
1451 value, pi->sram_end);
1452}
1453
1454static void ci_init_fps_limits(struct amdgpu_device *adev)
1455{
1456 struct ci_power_info *pi = ci_get_pi(adev);
1457 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1458
1459 if (pi->caps_fps) {
1460 u16 tmp;
1461
1462 tmp = 45;
1463 table->FpsHighT = cpu_to_be16(tmp);
1464
1465 tmp = 30;
1466 table->FpsLowT = cpu_to_be16(tmp);
1467 }
1468}
1469
1470static int ci_update_sclk_t(struct amdgpu_device *adev)
1471{
1472 struct ci_power_info *pi = ci_get_pi(adev);
1473 int ret = 0;
1474 u32 low_sclk_interrupt_t = 0;
1475
1476 if (pi->caps_sclk_throttle_low_notification) {
1477 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1478
1479 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1480 pi->dpm_table_start +
1481 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1482 (u8 *)&low_sclk_interrupt_t,
1483 sizeof(u32), pi->sram_end);
1484
1485 }
1486
1487 return ret;
1488}
1489
1490static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1491{
1492 struct ci_power_info *pi = ci_get_pi(adev);
1493 u16 leakage_id, virtual_voltage_id;
1494 u16 vddc, vddci;
1495 int i;
1496
1497 pi->vddc_leakage.count = 0;
1498 pi->vddci_leakage.count = 0;
1499
1500 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1501 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1502 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1503 if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1504 continue;
1505 if (vddc != 0 && vddc != virtual_voltage_id) {
1506 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1507 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1508 pi->vddc_leakage.count++;
1509 }
1510 }
1511 } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1512 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1513 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1514 if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1515 virtual_voltage_id,
1516 leakage_id) == 0) {
1517 if (vddc != 0 && vddc != virtual_voltage_id) {
1518 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1519 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1520 pi->vddc_leakage.count++;
1521 }
1522 if (vddci != 0 && vddci != virtual_voltage_id) {
1523 pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1524 pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1525 pi->vddci_leakage.count++;
1526 }
1527 }
1528 }
1529 }
1530}
1531
1532static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1533{
1534 struct ci_power_info *pi = ci_get_pi(adev);
1535 bool want_thermal_protection;
1536 enum amdgpu_dpm_event_src dpm_event_src;
1537 u32 tmp;
1538
1539 switch (sources) {
1540 case 0:
1541 default:
1542 want_thermal_protection = false;
1543 break;
1544 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1545 want_thermal_protection = true;
1546 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1547 break;
1548 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1549 want_thermal_protection = true;
1550 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1551 break;
1552 case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1553 (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1554 want_thermal_protection = true;
1555 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1556 break;
1557 }
1558
1559 if (want_thermal_protection) {
1560#if 0
1561 /* XXX: need to figure out how to handle this properly */
1562 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1563 tmp &= DPM_EVENT_SRC_MASK;
1564 tmp |= DPM_EVENT_SRC(dpm_event_src);
1565 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1566#endif
1567
1568 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1569 if (pi->thermal_protection)
1570 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1571 else
1572 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1573 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1574 } else {
1575 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1576 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1577 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1578 }
1579}
1580
1581static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1582 enum amdgpu_dpm_auto_throttle_src source,
1583 bool enable)
1584{
1585 struct ci_power_info *pi = ci_get_pi(adev);
1586
1587 if (enable) {
1588 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1589 pi->active_auto_throttle_sources |= 1 << source;
1590 ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1591 }
1592 } else {
1593 if (pi->active_auto_throttle_sources & (1 << source)) {
1594 pi->active_auto_throttle_sources &= ~(1 << source);
1595 ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1596 }
1597 }
1598}
1599
1600static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1601{
1602 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1603 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1604}
1605
1606static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1607{
1608 struct ci_power_info *pi = ci_get_pi(adev);
1609 PPSMC_Result smc_result;
1610
1611 if (!pi->need_update_smu7_dpm_table)
1612 return 0;
1613
1614 if ((!pi->sclk_dpm_key_disabled) &&
1615 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1616 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1617 if (smc_result != PPSMC_Result_OK)
1618 return -EINVAL;
1619 }
1620
1621 if ((!pi->mclk_dpm_key_disabled) &&
1622 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1623 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1624 if (smc_result != PPSMC_Result_OK)
1625 return -EINVAL;
1626 }
1627
1628 pi->need_update_smu7_dpm_table = 0;
1629 return 0;
1630}
1631
1632static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1633{
1634 struct ci_power_info *pi = ci_get_pi(adev);
1635 PPSMC_Result smc_result;
1636
1637 if (enable) {
1638 if (!pi->sclk_dpm_key_disabled) {
1639 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1640 if (smc_result != PPSMC_Result_OK)
1641 return -EINVAL;
1642 }
1643
1644 if (!pi->mclk_dpm_key_disabled) {
1645 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1646 if (smc_result != PPSMC_Result_OK)
1647 return -EINVAL;
1648
1649 WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1650 ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1651
1652 WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1653 WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1654 WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1655
1656 udelay(10);
1657
1658 WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1659 WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1660 WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1661 }
1662 } else {
1663 if (!pi->sclk_dpm_key_disabled) {
1664 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1665 if (smc_result != PPSMC_Result_OK)
1666 return -EINVAL;
1667 }
1668
1669 if (!pi->mclk_dpm_key_disabled) {
1670 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1671 if (smc_result != PPSMC_Result_OK)
1672 return -EINVAL;
1673 }
1674 }
1675
1676 return 0;
1677}
1678
1679static int ci_start_dpm(struct amdgpu_device *adev)
1680{
1681 struct ci_power_info *pi = ci_get_pi(adev);
1682 PPSMC_Result smc_result;
1683 int ret;
1684 u32 tmp;
1685
1686 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1687 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1688 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1689
1690 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1691 tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1692 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1693
1694 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1695
1696 WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1697
1698 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1699 if (smc_result != PPSMC_Result_OK)
1700 return -EINVAL;
1701
1702 ret = ci_enable_sclk_mclk_dpm(adev, true);
1703 if (ret)
1704 return ret;
1705
1706 if (!pi->pcie_dpm_key_disabled) {
1707 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1708 if (smc_result != PPSMC_Result_OK)
1709 return -EINVAL;
1710 }
1711
1712 return 0;
1713}
1714
1715static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1716{
1717 struct ci_power_info *pi = ci_get_pi(adev);
1718 PPSMC_Result smc_result;
1719
1720 if (!pi->need_update_smu7_dpm_table)
1721 return 0;
1722
1723 if ((!pi->sclk_dpm_key_disabled) &&
1724 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1725 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1726 if (smc_result != PPSMC_Result_OK)
1727 return -EINVAL;
1728 }
1729
1730 if ((!pi->mclk_dpm_key_disabled) &&
1731 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1732 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1733 if (smc_result != PPSMC_Result_OK)
1734 return -EINVAL;
1735 }
1736
1737 return 0;
1738}
1739
1740static int ci_stop_dpm(struct amdgpu_device *adev)
1741{
1742 struct ci_power_info *pi = ci_get_pi(adev);
1743 PPSMC_Result smc_result;
1744 int ret;
1745 u32 tmp;
1746
1747 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1748 tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1749 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1750
1751 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1752 tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1753 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1754
1755 if (!pi->pcie_dpm_key_disabled) {
1756 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1757 if (smc_result != PPSMC_Result_OK)
1758 return -EINVAL;
1759 }
1760
1761 ret = ci_enable_sclk_mclk_dpm(adev, false);
1762 if (ret)
1763 return ret;
1764
1765 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1766 if (smc_result != PPSMC_Result_OK)
1767 return -EINVAL;
1768
1769 return 0;
1770}
1771
1772static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1773{
1774 u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1775
1776 if (enable)
1777 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1778 else
1779 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1780 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1781}
1782
1783#if 0
1784static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1785 bool ac_power)
1786{
1787 struct ci_power_info *pi = ci_get_pi(adev);
1788 struct amdgpu_cac_tdp_table *cac_tdp_table =
1789 adev->pm.dpm.dyn_state.cac_tdp_table;
1790 u32 power_limit;
1791
1792 if (ac_power)
1793 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1794 else
1795 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1796
1797 ci_set_power_limit(adev, power_limit);
1798
1799 if (pi->caps_automatic_dc_transition) {
1800 if (ac_power)
1801 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1802 else
1803 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1804 }
1805
1806 return 0;
1807}
1808#endif
1809
1810static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1811 PPSMC_Msg msg, u32 parameter)
1812{
1813 WREG32(mmSMC_MSG_ARG_0, parameter);
1814 return amdgpu_ci_send_msg_to_smc(adev, msg);
1815}
1816
1817static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1818 PPSMC_Msg msg, u32 *parameter)
1819{
1820 PPSMC_Result smc_result;
1821
1822 smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1823
1824 if ((smc_result == PPSMC_Result_OK) && parameter)
1825 *parameter = RREG32(mmSMC_MSG_ARG_0);
1826
1827 return smc_result;
1828}
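/*
 * The SMC mailbox protocol used above: the argument is staged in
 * SMC_MSG_ARG_0 before the message is posted, and for query messages
 * the reply is read back from the same register once the SMC has acked
 * the message (see ci_get_average_sclk_freq() below for a user).
 */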
1829
1830static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1831{
1832 struct ci_power_info *pi = ci_get_pi(adev);
1833
1834 if (!pi->sclk_dpm_key_disabled) {
1835 PPSMC_Result smc_result =
1836 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1837 if (smc_result != PPSMC_Result_OK)
1838 return -EINVAL;
1839 }
1840
1841 return 0;
1842}
1843
1844static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1845{
1846 struct ci_power_info *pi = ci_get_pi(adev);
1847
1848 if (!pi->mclk_dpm_key_disabled) {
1849 PPSMC_Result smc_result =
1850 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1851 if (smc_result != PPSMC_Result_OK)
1852 return -EINVAL;
1853 }
1854
1855 return 0;
1856}
1857
1858static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1859{
1860 struct ci_power_info *pi = ci_get_pi(adev);
1861
1862 if (!pi->pcie_dpm_key_disabled) {
1863 PPSMC_Result smc_result =
1864 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1865 if (smc_result != PPSMC_Result_OK)
1866 return -EINVAL;
1867 }
1868
1869 return 0;
1870}
1871
1872static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1873{
1874 struct ci_power_info *pi = ci_get_pi(adev);
1875
1876 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1877 PPSMC_Result smc_result =
1878 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1879 if (smc_result != PPSMC_Result_OK)
1880 return -EINVAL;
1881 }
1882
1883 return 0;
1884}
1885
1886static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1887 u32 target_tdp)
1888{
1889 PPSMC_Result smc_result =
1890 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1891 if (smc_result != PPSMC_Result_OK)
1892 return -EINVAL;
1893 return 0;
1894}
1895
1896#if 0
1897static int ci_set_boot_state(struct amdgpu_device *adev)
1898{
1899 return ci_enable_sclk_mclk_dpm(adev, false);
1900}
1901#endif
1902
1903static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1904{
1905 u32 sclk_freq;
1906 PPSMC_Result smc_result =
1907 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1908 PPSMC_MSG_API_GetSclkFrequency,
1909 &sclk_freq);
1910 if (smc_result != PPSMC_Result_OK)
1911 sclk_freq = 0;
1912
1913 return sclk_freq;
1914}
1915
1916static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1917{
1918 u32 mclk_freq;
1919 PPSMC_Result smc_result =
1920 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1921 PPSMC_MSG_API_GetMclkFrequency,
1922 &mclk_freq);
1923 if (smc_result != PPSMC_Result_OK)
1924 mclk_freq = 0;
1925
1926 return mclk_freq;
1927}
1928
1929static void ci_dpm_start_smc(struct amdgpu_device *adev)
1930{
1931 int i;
1932
1933 amdgpu_ci_program_jump_on_start(adev);
1934 amdgpu_ci_start_smc_clock(adev);
1935 amdgpu_ci_start_smc(adev);
1936 for (i = 0; i < adev->usec_timeout; i++) {
1937 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1938 break;
1939 }
1940}
1941
1942static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1943{
1944 amdgpu_ci_reset_smc(adev);
1945 amdgpu_ci_stop_smc_clock(adev);
1946}
1947
1948static int ci_process_firmware_header(struct amdgpu_device *adev)
1949{
1950 struct ci_power_info *pi = ci_get_pi(adev);
1951 u32 tmp;
1952 int ret;
1953
1954 ret = amdgpu_ci_read_smc_sram_dword(adev,
1955 SMU7_FIRMWARE_HEADER_LOCATION +
1956 offsetof(SMU7_Firmware_Header, DpmTable),
1957 &tmp, pi->sram_end);
1958 if (ret)
1959 return ret;
1960
1961 pi->dpm_table_start = tmp;
1962
1963 ret = amdgpu_ci_read_smc_sram_dword(adev,
1964 SMU7_FIRMWARE_HEADER_LOCATION +
1965 offsetof(SMU7_Firmware_Header, SoftRegisters),
1966 &tmp, pi->sram_end);
1967 if (ret)
1968 return ret;
1969
1970 pi->soft_regs_start = tmp;
1971
1972 ret = amdgpu_ci_read_smc_sram_dword(adev,
1973 SMU7_FIRMWARE_HEADER_LOCATION +
1974 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1975 &tmp, pi->sram_end);
1976 if (ret)
1977 return ret;
1978
1979 pi->mc_reg_table_start = tmp;
1980
1981 ret = amdgpu_ci_read_smc_sram_dword(adev,
1982 SMU7_FIRMWARE_HEADER_LOCATION +
1983 offsetof(SMU7_Firmware_Header, FanTable),
1984 &tmp, pi->sram_end);
1985 if (ret)
1986 return ret;
1987
1988 pi->fan_table_start = tmp;
1989
1990 ret = amdgpu_ci_read_smc_sram_dword(adev,
1991 SMU7_FIRMWARE_HEADER_LOCATION +
1992 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1993 &tmp, pi->sram_end);
1994 if (ret)
1995 return ret;
1996
1997 pi->arb_table_start = tmp;
1998
1999 return 0;
2000}
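
The five reads above all follow the same pattern: fetch one dword at SMU7_FIRMWARE_HEADER_LOCATION plus a field offset and cache it in ci_power_info. A table-driven sketch of the equivalent logic (a hypothetical refactor, not in the driver):

	static int ci_read_header_offsets(struct amdgpu_device *adev)
	{
		struct ci_power_info *pi = ci_get_pi(adev);
		const struct { u32 src; u32 *dst; } map[] = {
			{ offsetof(SMU7_Firmware_Header, DpmTable), &pi->dpm_table_start },
			{ offsetof(SMU7_Firmware_Header, SoftRegisters), &pi->soft_regs_start },
			{ offsetof(SMU7_Firmware_Header, mcRegisterTable), &pi->mc_reg_table_start },
			{ offsetof(SMU7_Firmware_Header, FanTable), &pi->fan_table_start },
			{ offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), &pi->arb_table_start },
		};
		u32 i, tmp;
		int ret;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			/* each header field holds an SMC SRAM offset */
			ret = amdgpu_ci_read_smc_sram_dword(adev,
					SMU7_FIRMWARE_HEADER_LOCATION + map[i].src,
					&tmp, pi->sram_end);
			if (ret)
				return ret;
			*map[i].dst = tmp;
		}

		return 0;
	}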
2001
2002static void ci_read_clock_registers(struct amdgpu_device *adev)
2003{
2004 struct ci_power_info *pi = ci_get_pi(adev);
2005
2006 pi->clock_registers.cg_spll_func_cntl =
2007 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
2008 pi->clock_registers.cg_spll_func_cntl_2 =
2009 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
2010 pi->clock_registers.cg_spll_func_cntl_3 =
2011 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
2012 pi->clock_registers.cg_spll_func_cntl_4 =
2013 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
2014 pi->clock_registers.cg_spll_spread_spectrum =
2015 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2016 pi->clock_registers.cg_spll_spread_spectrum_2 =
2017 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
2018 pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
2019 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
2020 pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
2021 pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
2022 pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
2023 pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
2024 pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
2025 pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
2026 pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
2027}
2028
2029static void ci_init_sclk_t(struct amdgpu_device *adev)
2030{
2031 struct ci_power_info *pi = ci_get_pi(adev);
2032
2033 pi->low_sclk_interrupt_t = 0;
2034}
2035
2036static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2037 bool enable)
2038{
2039 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2040
2041 if (enable)
2042 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2043 else
2044 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2045 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2046}
2047
2048static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2049{
2050 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2051
2052 tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2053
2054 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2055}
2056
2057#if 0
2058static int ci_enter_ulp_state(struct amdgpu_device *adev)
2059{
2060
2061 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2062
2063 udelay(25000);
2064
2065 return 0;
2066}
2067
2068static int ci_exit_ulp_state(struct amdgpu_device *adev)
2069{
2070 int i;
2071
2072 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2073
2074 udelay(7000);
2075
2076 for (i = 0; i < adev->usec_timeout; i++) {
2077 if (RREG32(mmSMC_RESP_0) == 1)
2078 break;
2079 udelay(1000);
2080 }
2081
2082 return 0;
2083}
2084#endif
2085
2086static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2087 bool has_display)
2088{
2089 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2090
2091 return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
2092}
2093
2094static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2095 bool enable)
2096{
2097 struct ci_power_info *pi = ci_get_pi(adev);
2098
2099 if (enable) {
2100 if (pi->caps_sclk_ds) {
2101 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2102 return -EINVAL;
2103 } else {
2104 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2105 return -EINVAL;
2106 }
2107 } else {
2108 if (pi->caps_sclk_ds) {
2109 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2110 return -EINVAL;
2111 }
2112 }
2113
2114 return 0;
2115}
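
Written out, the branch matrix above sends MASTER_DeepSleep_ON only when enabling with caps_sclk_ds set, MASTER_DeepSleep_OFF when enabling without the cap or disabling with it, and nothing when disabling without it. A behavior-preserving condensed form (sketch only):

	if (enable || pi->caps_sclk_ds) {
		PPSMC_Msg msg = (enable && pi->caps_sclk_ds) ?
			PPSMC_MSG_MASTER_DeepSleep_ON : PPSMC_MSG_MASTER_DeepSleep_OFF;

		if (amdgpu_ci_send_msg_to_smc(adev, msg) != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;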
2116
2117static void ci_program_display_gap(struct amdgpu_device *adev)
2118{
2119 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2120 u32 pre_vbi_time_in_us;
2121 u32 frame_time_in_us;
2122 u32 ref_clock = adev->clock.spll.reference_freq;
2123 u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2124 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2125
2126 tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2127 if (adev->pm.dpm.new_active_crtc_count > 0)
2128 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2129 else
2130 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2131 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2132
2133 if (refresh_rate == 0)
2134 refresh_rate = 60;
2135 if (vblank_time == 0xffffffff)
2136 vblank_time = 500;
2137 frame_time_in_us = 1000000 / refresh_rate;
2138 pre_vbi_time_in_us =
2139 frame_time_in_us - 200 - vblank_time;
2140 tmp = pre_vbi_time_in_us * (ref_clock / 100);
2141
2142 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2143 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2144 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2145
2146
2147 ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2148
2149}
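
Concrete numbers make the timing math above easy to check. With the fallbacks in effect (refresh_rate = 60, vblank_time = 500 us), a frame lasts 1000000 / 60 = 16666 us and the pre-VBI window is 16666 - 200 - 500 = 15966 us. Since adev->clock.spll.reference_freq is stored in 10 kHz units, a typical 100 MHz reference gives ref_clock = 10000 and ref_clock / 100 = 100 reference ticks per microsecond, so CG_DISPLAY_GAP_CNTL2 is programmed with 15966 * 100 = 1596600 ticks while the VBlankTimeout soft register gets the remaining 16666 - 15966 = 700 us.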
2150
2151static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2152{
2153 struct ci_power_info *pi = ci_get_pi(adev);
2154 u32 tmp;
2155
2156 if (enable) {
2157 if (pi->caps_sclk_ss_support) {
2158 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2159 tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2160 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2161 }
2162 } else {
2163 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2164 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2165 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2166
2167 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2168 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2169 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2170 }
2171}
2172
2173static void ci_program_sstp(struct amdgpu_device *adev)
2174{
2175 WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2176 ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2177 (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2178}
2179
2180static void ci_enable_display_gap(struct amdgpu_device *adev)
2181{
2182 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2183
2184 tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2185 CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2186 tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2187 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2188
2189 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2190}
2191
2192static void ci_program_vc(struct amdgpu_device *adev)
2193{
2194 u32 tmp;
2195
2196 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2197 tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2198 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2199
2200 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2201 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2202 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2203 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2204 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2205 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2206 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2207 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2208}
2209
2210static void ci_clear_vc(struct amdgpu_device *adev)
2211{
2212 u32 tmp;
2213
2214 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2215 tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2216 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2217
2218 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2219 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2220 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2221 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2222 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2223 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2224 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2225 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2226}
2227
2228static int ci_upload_firmware(struct amdgpu_device *adev)
2229{
2230 int i, ret;
2231
2232 if (amdgpu_ci_is_smc_running(adev)) {
2233 DRM_INFO("smc is running, no need to load smc firmware\n");
2234 return 0;
2235 }
2236
2237 for (i = 0; i < adev->usec_timeout; i++) {
2238 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2239 break;
2240 }
2241 WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2242
2243 amdgpu_ci_stop_smc_clock(adev);
2244 amdgpu_ci_reset_smc(adev);
2245
2246 ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
2247
2248 return ret;
2249
2250}
2251
2252static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2253 struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2254 struct atom_voltage_table *voltage_table)
2255{
2256 u32 i;
2257
2258 if (voltage_dependency_table == NULL)
2259 return -EINVAL;
2260
2261 voltage_table->mask_low = 0;
2262 voltage_table->phase_delay = 0;
2263
2264 voltage_table->count = voltage_dependency_table->count;
2265 for (i = 0; i < voltage_table->count; i++) {
2266 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2267 voltage_table->entries[i].smio_low = 0;
2268 }
2269
2270 return 0;
2271}
2272
2273static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2274{
2275 struct ci_power_info *pi = ci_get_pi(adev);
2276 int ret;
2277
2278 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2279 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2280 VOLTAGE_OBJ_GPIO_LUT,
2281 &pi->vddc_voltage_table);
2282 if (ret)
2283 return ret;
2284 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2285 ret = ci_get_svi2_voltage_table(adev,
2286 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2287 &pi->vddc_voltage_table);
2288 if (ret)
2289 return ret;
2290 }
2291
2292 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2293 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2294 &pi->vddc_voltage_table);
2295
2296 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2297 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2298 VOLTAGE_OBJ_GPIO_LUT,
2299 &pi->vddci_voltage_table);
2300 if (ret)
2301 return ret;
2302 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2303 ret = ci_get_svi2_voltage_table(adev,
2304 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2305 &pi->vddci_voltage_table);
2306 if (ret)
2307 return ret;
2308 }
2309
2310 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2311 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2312 &pi->vddci_voltage_table);
2313
2314 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2315 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2316 VOLTAGE_OBJ_GPIO_LUT,
2317 &pi->mvdd_voltage_table);
2318 if (ret)
2319 return ret;
2320 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2321 ret = ci_get_svi2_voltage_table(adev,
2322 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2323 &pi->mvdd_voltage_table);
2324 if (ret)
2325 return ret;
2326 }
2327
2328 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2329 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2330 &pi->mvdd_voltage_table);
2331
2332 return 0;
2333}
2334
2335static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2336 struct atom_voltage_table_entry *voltage_table,
2337 SMU7_Discrete_VoltageLevel *smc_voltage_table)
2338{
2339 int ret;
2340
2341 ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2342 &smc_voltage_table->StdVoltageHiSidd,
2343 &smc_voltage_table->StdVoltageLoSidd);
2344
2345 if (ret) {
2346 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2347 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2348 }
2349
2350 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2351 smc_voltage_table->StdVoltageHiSidd =
2352 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2353 smc_voltage_table->StdVoltageLoSidd =
2354 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2355}
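
ci_populate_smc_voltage_table also establishes the convention every populate helper below follows: the SMC consumes big-endian tables, so each field is byte-swapped in place with cpu_to_be16()/cpu_to_be32() as the very last step, after all host-order arithmetic is finished. A minimal sketch of that ordering, using a hypothetical two-field level:

	struct example_level { u16 voltage; u32 freq; };	/* hypothetical */

	static void example_pack(struct example_level *lvl, u16 mv, u32 hz)
	{
		/* compute everything in CPU byte order first ... */
		lvl->voltage = mv * VOLTAGE_SCALE;
		lvl->freq = hz;
		/* ... then swap exactly once, immediately before upload;
		 * from here on the host-order values are gone. */
		lvl->voltage = cpu_to_be16(lvl->voltage);
		lvl->freq = cpu_to_be32(lvl->freq);
	}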
2356
2357static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2358 SMU7_Discrete_DpmTable *table)
2359{
2360 struct ci_power_info *pi = ci_get_pi(adev);
2361 unsigned int count;
2362
2363 table->VddcLevelCount = pi->vddc_voltage_table.count;
2364 for (count = 0; count < table->VddcLevelCount; count++) {
2365 ci_populate_smc_voltage_table(adev,
2366 &pi->vddc_voltage_table.entries[count],
2367 &table->VddcLevel[count]);
2368
2369 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2370 table->VddcLevel[count].Smio |=
2371 pi->vddc_voltage_table.entries[count].smio_low;
2372 else
2373 table->VddcLevel[count].Smio = 0;
2374 }
2375 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2376
2377 return 0;
2378}
2379
2380static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2381 SMU7_Discrete_DpmTable *table)
2382{
2383 unsigned int count;
2384 struct ci_power_info *pi = ci_get_pi(adev);
2385
2386 table->VddciLevelCount = pi->vddci_voltage_table.count;
2387 for (count = 0; count < table->VddciLevelCount; count++) {
2388 ci_populate_smc_voltage_table(adev,
2389 &pi->vddci_voltage_table.entries[count],
2390 &table->VddciLevel[count]);
2391
2392 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2393 table->VddciLevel[count].Smio |=
2394 pi->vddci_voltage_table.entries[count].smio_low;
2395 else
2396 table->VddciLevel[count].Smio = 0;
2397 }
2398 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2399
2400 return 0;
2401}
2402
2403static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2404 SMU7_Discrete_DpmTable *table)
2405{
2406 struct ci_power_info *pi = ci_get_pi(adev);
2407 unsigned int count;
2408
2409 table->MvddLevelCount = pi->mvdd_voltage_table.count;
2410 for (count = 0; count < table->MvddLevelCount; count++) {
2411 ci_populate_smc_voltage_table(adev,
2412 &pi->mvdd_voltage_table.entries[count],
2413 &table->MvddLevel[count]);
2414
2415 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2416 table->MvddLevel[count].Smio |=
2417 pi->mvdd_voltage_table.entries[count].smio_low;
2418 else
2419 table->MvddLevel[count].Smio = 0;
2420 }
2421 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2422
2423 return 0;
2424}
2425
2426static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2427 SMU7_Discrete_DpmTable *table)
2428{
2429 int ret;
2430
2431 ret = ci_populate_smc_vddc_table(adev, table);
2432 if (ret)
2433 return ret;
2434
2435 ret = ci_populate_smc_vddci_table(adev, table);
2436 if (ret)
2437 return ret;
2438
2439 ret = ci_populate_smc_mvdd_table(adev, table);
2440 if (ret)
2441 return ret;
2442
2443 return 0;
2444}
2445
2446static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2447 SMU7_Discrete_VoltageLevel *voltage)
2448{
2449 struct ci_power_info *pi = ci_get_pi(adev);
2450 u32 i = 0;
2451
2452 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2453 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2454 if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2455 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2456 break;
2457 }
2458 }
2459
2460 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2461 return -EINVAL;
2462 }
2463
2464	return (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_NONE) ? -EINVAL : 0; /* success only when a level was found */
2465}
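
The return contract here is load-bearing: the only caller in this file, ci_populate_smc_acpi_level() below, treats any nonzero return as "no usable MVDD level" and programs MemoryACPILevel.MinMvdd = 0, so the -EINVAL for the no-MVDD-control case is an expected signal rather than an error path.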
2466
2467static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2468 struct atom_voltage_table_entry *voltage_table,
2469 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2470{
2471 u16 v_index, idx;
2472 bool voltage_found = false;
2473 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2474 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2475
2476 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2477 return -EINVAL;
2478
2479 if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2480 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2481 if (voltage_table->value ==
2482 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2483 voltage_found = true;
2484 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2485 idx = v_index;
2486 else
2487 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2488 *std_voltage_lo_sidd =
2489 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2490 *std_voltage_hi_sidd =
2491 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2492 break;
2493 }
2494 }
2495
2496 if (!voltage_found) {
2497 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2498 if (voltage_table->value <=
2499 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2500 voltage_found = true;
2501 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2502 idx = v_index;
2503 else
2504 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2505 *std_voltage_lo_sidd =
2506 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2507 *std_voltage_hi_sidd =
2508 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2509 break;
2510 }
2511 }
2512 }
2513 }
2514
2515 return 0;
2516}
2517
2518static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2519 const struct amdgpu_phase_shedding_limits_table *limits,
2520 u32 sclk,
2521 u32 *phase_shedding)
2522{
2523 unsigned int i;
2524
2525 *phase_shedding = 1;
2526
2527 for (i = 0; i < limits->count; i++) {
2528 if (sclk < limits->entries[i].sclk) {
2529 *phase_shedding = i;
2530 break;
2531 }
2532 }
2533}
2534
2535static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2536 const struct amdgpu_phase_shedding_limits_table *limits,
2537 u32 mclk,
2538 u32 *phase_shedding)
2539{
2540 unsigned int i;
2541
2542 *phase_shedding = 1;
2543
2544 for (i = 0; i < limits->count; i++) {
2545 if (mclk < limits->entries[i].mclk) {
2546 *phase_shedding = i;
2547 break;
2548 }
2549 }
2550}
2551
2552static int ci_init_arb_table_index(struct amdgpu_device *adev)
2553{
2554 struct ci_power_info *pi = ci_get_pi(adev);
2555 u32 tmp;
2556 int ret;
2557
2558 ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2559 &tmp, pi->sram_end);
2560 if (ret)
2561 return ret;
2562
2563 tmp &= 0x00FFFFFF;
2564 tmp |= MC_CG_ARB_FREQ_F1 << 24;
2565
2566 return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2567 tmp, pi->sram_end);
2568}
2569
2570static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2571 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2572 u32 clock, u32 *voltage)
2573{
2574 u32 i = 0;
2575
2576 if (allowed_clock_voltage_table->count == 0)
2577 return -EINVAL;
2578
2579 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2580 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2581 *voltage = allowed_clock_voltage_table->entries[i].v;
2582 return 0;
2583 }
2584 }
2585
2586 *voltage = allowed_clock_voltage_table->entries[i-1].v;
2587
2588 return 0;
2589}
2590
2591static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2592{
2593 u32 i;
2594 u32 tmp;
2595 u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2596
2597 if (sclk < min)
2598 return 0;
2599
2600 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2601 tmp = sclk >> i;
2602 if (tmp >= min || i == 0)
2603 break;
2604 }
2605
2606 return (u8)i;
2607}
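
A worked example of the divider search: suppose min resolves to 2500 (25 MHz in the driver's 10 kHz clock units) and sclk = 30000 (300 MHz). Counting down from CISLAND_MAX_DEEPSLEEP_DIVIDER_ID, 30000 >> 5 = 937 and 30000 >> 4 = 1875 both fall below min, but 30000 >> 3 = 3750 does not, so the function returns divider ID 3: the deepest deep-sleep divider that still keeps sclk / 8 at or above the floor.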
2608
2609static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2610{
2611 return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2612}
2613
2614static int ci_reset_to_default(struct amdgpu_device *adev)
2615{
2616 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2617 0 : -EINVAL;
2618}
2619
2620static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2621{
2622 u32 tmp;
2623
2624 tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2625
2626 if (tmp == MC_CG_ARB_FREQ_F0)
2627 return 0;
2628
2629 return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2630}
2631
2632static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2633					const u32 engine_clock,
2634					const u32 memory_clock,
2635					u32 *dram_timing2)
2636{
2637	bool patch;
2638	u32 tmp, tmp2;
2639
2640	tmp = RREG32(mmMC_SEQ_MISC0);
2641	patch = (tmp & 0x0000f00) == 0x300;
2642
2643	if (patch &&
2644	    ((adev->pdev->device == 0x67B0) ||
2645	     (adev->pdev->device == 0x67B1))) {
2646		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2647			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2648			*dram_timing2 &= ~0x00ff0000;
2649			*dram_timing2 |= tmp2 << 16;
2650		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2651			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2652			*dram_timing2 &= ~0x00ff0000;
2653			*dram_timing2 |= tmp2 << 16;
2654		}
2655	}
2656}
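
For a concrete case of the patch above: an affected Hawaii board (device ID 0x67B0 or 0x67B1) running engine_clock = 100000 (1 GHz) against memory_clock = 120000 falls into the first window, so tmp2 = ((0x31 * 100000) / 125000 - 1) & 0xff = (39 - 1) & 0xff = 0x26, and bits 23:16 of the DRAM timing 2 value are rewritten to 0x26 before it is handed to the SMC.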
2657
2658static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2659 u32 sclk,
2660 u32 mclk,
2661 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2662{
2663 u32 dram_timing;
2664 u32 dram_timing2;
2665 u32 burst_time;
2666
2667 amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2668
2669 dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
2670 dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2671 burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2672
2673 ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2674
2675 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
2676 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2677 arb_regs->McArbBurstTime = (u8)burst_time;
2678
2679 return 0;
2680}
2681
2682static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2683{
2684 struct ci_power_info *pi = ci_get_pi(adev);
2685 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2686 u32 i, j;
2687 int ret = 0;
2688
2689 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2690
2691 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2692 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2693 ret = ci_populate_memory_timing_parameters(adev,
2694 pi->dpm_table.sclk_table.dpm_levels[i].value,
2695 pi->dpm_table.mclk_table.dpm_levels[j].value,
2696 &arb_regs.entries[i][j]);
2697 if (ret)
2698 break;
2699 }
2700 }
2701
2702 if (ret == 0)
2703 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2704 pi->arb_table_start,
2705 (u8 *)&arb_regs,
2706 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2707 pi->sram_end);
2708
2709 return ret;
2710}
2711
2712static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2713{
2714 struct ci_power_info *pi = ci_get_pi(adev);
2715
2716 if (pi->need_update_smu7_dpm_table == 0)
2717 return 0;
2718
2719 return ci_do_program_memory_timing_parameters(adev);
2720}
2721
2722static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2723 struct amdgpu_ps *amdgpu_boot_state)
2724{
2725 struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2726 struct ci_power_info *pi = ci_get_pi(adev);
2727 u32 level = 0;
2728
2729 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2730 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2731 boot_state->performance_levels[0].sclk) {
2732 pi->smc_state_table.GraphicsBootLevel = level;
2733 break;
2734 }
2735 }
2736
2737 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2738 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2739 boot_state->performance_levels[0].mclk) {
2740 pi->smc_state_table.MemoryBootLevel = level;
2741 break;
2742 }
2743 }
2744}
2745
2746static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2747{
2748 u32 i;
2749 u32 mask_value = 0;
2750
2751 for (i = dpm_table->count; i > 0; i--) {
2752 mask_value = mask_value << 1;
2753 if (dpm_table->dpm_levels[i-1].enabled)
2754 mask_value |= 0x1;
2755 else
2756 mask_value &= 0xFFFFFFFE;
2757 }
2758
2759 return mask_value;
2760}
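
The else branch above clears a bit that the left shift on the previous line already left at zero, so the loop is equivalent to this shorter behavior-preserving form:

	for (i = dpm_table->count; i > 0; i--)
		mask_value = (mask_value << 1) |
			     (dpm_table->dpm_levels[i - 1].enabled ? 1 : 0);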
2761
2762static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2763 SMU7_Discrete_DpmTable *table)
2764{
2765 struct ci_power_info *pi = ci_get_pi(adev);
2766 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2767 u32 i;
2768
2769 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2770 table->LinkLevel[i].PcieGenSpeed =
2771 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2772 table->LinkLevel[i].PcieLaneCount =
2773 amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2774 table->LinkLevel[i].EnabledForActivity = 1;
2775 table->LinkLevel[i].DownT = cpu_to_be32(5);
2776 table->LinkLevel[i].UpT = cpu_to_be32(30);
2777 }
2778
2779 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2780 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2781 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2782}
2783
2784static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2785 SMU7_Discrete_DpmTable *table)
2786{
2787 u32 count;
2788 struct atom_clock_dividers dividers;
2789 int ret = -EINVAL;
2790
2791 table->UvdLevelCount =
2792 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2793
2794 for (count = 0; count < table->UvdLevelCount; count++) {
2795 table->UvdLevel[count].VclkFrequency =
2796 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2797 table->UvdLevel[count].DclkFrequency =
2798 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2799 table->UvdLevel[count].MinVddc =
2800 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2801 table->UvdLevel[count].MinVddcPhases = 1;
2802
2803 ret = amdgpu_atombios_get_clock_dividers(adev,
2804 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2805 table->UvdLevel[count].VclkFrequency, false, &dividers);
2806 if (ret)
2807 return ret;
2808
2809 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2810
2811 ret = amdgpu_atombios_get_clock_dividers(adev,
2812 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2813 table->UvdLevel[count].DclkFrequency, false, &dividers);
2814 if (ret)
2815 return ret;
2816
2817 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2818
2819 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2820 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2821 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2822 }
2823
2824 return ret;
2825}
2826
2827static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2828 SMU7_Discrete_DpmTable *table)
2829{
2830 u32 count;
2831 struct atom_clock_dividers dividers;
2832 int ret = -EINVAL;
2833
2834 table->VceLevelCount =
2835 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2836
2837 for (count = 0; count < table->VceLevelCount; count++) {
2838 table->VceLevel[count].Frequency =
2839 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2840 table->VceLevel[count].MinVoltage =
2841 (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2842 table->VceLevel[count].MinPhases = 1;
2843
2844 ret = amdgpu_atombios_get_clock_dividers(adev,
2845 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2846 table->VceLevel[count].Frequency, false, &dividers);
2847 if (ret)
2848 return ret;
2849
2850 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2851
2852 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2853 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2854 }
2855
2856 return ret;
2857
2858}
2859
2860static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2861 SMU7_Discrete_DpmTable *table)
2862{
2863 u32 count;
2864 struct atom_clock_dividers dividers;
2865 int ret = -EINVAL;
2866
2867 table->AcpLevelCount = (u8)
2868 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2869
2870 for (count = 0; count < table->AcpLevelCount; count++) {
2871 table->AcpLevel[count].Frequency =
2872 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2873 table->AcpLevel[count].MinVoltage =
2874 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2875 table->AcpLevel[count].MinPhases = 1;
2876
2877 ret = amdgpu_atombios_get_clock_dividers(adev,
2878 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2879 table->AcpLevel[count].Frequency, false, &dividers);
2880 if (ret)
2881 return ret;
2882
2883 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2884
2885 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2886 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2887 }
2888
2889 return ret;
2890}
2891
2892static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2893 SMU7_Discrete_DpmTable *table)
2894{
2895 u32 count;
2896 struct atom_clock_dividers dividers;
2897 int ret = -EINVAL;
2898
2899 table->SamuLevelCount =
2900 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2901
2902 for (count = 0; count < table->SamuLevelCount; count++) {
2903 table->SamuLevel[count].Frequency =
2904 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2905 table->SamuLevel[count].MinVoltage =
2906 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2907 table->SamuLevel[count].MinPhases = 1;
2908
2909 ret = amdgpu_atombios_get_clock_dividers(adev,
2910 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2911 table->SamuLevel[count].Frequency, false, &dividers);
2912 if (ret)
2913 return ret;
2914
2915 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2916
2917 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2918 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2919 }
2920
2921 return ret;
2922}
2923
2924static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2925 u32 memory_clock,
2926 SMU7_Discrete_MemoryLevel *mclk,
2927 bool strobe_mode,
2928 bool dll_state_on)
2929{
2930 struct ci_power_info *pi = ci_get_pi(adev);
2931 u32 dll_cntl = pi->clock_registers.dll_cntl;
2932 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2933 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2934 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2935 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2936 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2937 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2938 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2939 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2940 struct atom_mpll_param mpll_param;
2941 int ret;
2942
2943 ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2944 if (ret)
2945 return ret;
2946
2947 mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2948 mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2949
2950 mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2951 MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2952 mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
2953 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2954 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2955
2956 mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2957 mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2958
2959 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2960 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2961 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2962 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2963 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2964 }
2965
2966 if (pi->caps_mclk_ss_support) {
2967 struct amdgpu_atom_ss ss;
2968 u32 freq_nom;
2969 u32 tmp;
2970 u32 reference_clock = adev->clock.mpll.reference_freq;
2971
2972 if (mpll_param.qdr == 1)
2973 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2974 else
2975 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2976
2977 tmp = (freq_nom / reference_clock);
2978 tmp = tmp * tmp;
2979 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2980 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2981 u32 clks = reference_clock * 5 / ss.rate;
2982 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2983
2984 mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2985 mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2986
2987 mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2988 mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2989 }
2990 }
2991
2992 mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2993 mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2994
2995 if (dll_state_on)
2996 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2997 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2998 else
2999 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3000 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3001
3002 mclk->MclkFrequency = memory_clock;
3003 mclk->MpllFuncCntl = mpll_func_cntl;
3004 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
3005 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
3006 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
3007 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
3008 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
3009 mclk->DllCntl = dll_cntl;
3010 mclk->MpllSs1 = mpll_ss1;
3011 mclk->MpllSs2 = mpll_ss2;
3012
3013 return 0;
3014}
3015
3016static int ci_populate_single_memory_level(struct amdgpu_device *adev,
3017 u32 memory_clock,
3018 SMU7_Discrete_MemoryLevel *memory_level)
3019{
3020 struct ci_power_info *pi = ci_get_pi(adev);
3021 int ret;
3022 bool dll_state_on;
3023
3024 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
3025 ret = ci_get_dependency_volt_by_clk(adev,
3026 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3027 memory_clock, &memory_level->MinVddc);
3028 if (ret)
3029 return ret;
3030 }
3031
3032 if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3033 ret = ci_get_dependency_volt_by_clk(adev,
3034 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3035 memory_clock, &memory_level->MinVddci);
3036 if (ret)
3037 return ret;
3038 }
3039
3040 if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3041 ret = ci_get_dependency_volt_by_clk(adev,
3042 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3043 memory_clock, &memory_level->MinMvdd);
3044 if (ret)
3045 return ret;
3046 }
3047
3048 memory_level->MinVddcPhases = 1;
3049
3050 if (pi->vddc_phase_shed_control)
3051 ci_populate_phase_value_based_on_mclk(adev,
3052 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3053 memory_clock,
3054 &memory_level->MinVddcPhases);
3055
3056 memory_level->EnabledForActivity = 1;
3057 memory_level->EnabledForThrottle = 1;
3058 memory_level->UpH = 0;
3059 memory_level->DownH = 100;
3060 memory_level->VoltageDownH = 0;
3061 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3062
3063 memory_level->StutterEnable = false;
3064 memory_level->StrobeEnable = false;
3065 memory_level->EdcReadEnable = false;
3066 memory_level->EdcWriteEnable = false;
3067 memory_level->RttEnable = false;
3068
3069 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3070
3071 if (pi->mclk_stutter_mode_threshold &&
3072 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3073 (!pi->uvd_enabled) &&
3074 (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3075 (adev->pm.dpm.new_active_crtc_count <= 2))
3076 memory_level->StutterEnable = true;
3077
3078 if (pi->mclk_strobe_mode_threshold &&
3079 (memory_clock <= pi->mclk_strobe_mode_threshold))
3080		memory_level->StrobeEnable = true;
3081
3082 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3083 memory_level->StrobeRatio =
3084 ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3085 if (pi->mclk_edc_enable_threshold &&
3086 (memory_clock > pi->mclk_edc_enable_threshold))
3087 memory_level->EdcReadEnable = true;
3088
3089 if (pi->mclk_edc_wr_enable_threshold &&
3090 (memory_clock > pi->mclk_edc_wr_enable_threshold))
3091 memory_level->EdcWriteEnable = true;
3092
3093		if (memory_level->StrobeEnable) {
3094			if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3095			    ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3096				dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3097			else
3098				dll_state_on = (RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1;
3099		} else {
3100			dll_state_on = pi->dll_default_on;
3101		}
3102	} else {
3103		memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3104		dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3105	}
3106
3107 ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3108 if (ret)
3109 return ret;
3110
3111 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3112 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3113 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3114 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3115
3116 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3117 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3118 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3119 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3120 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3121 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3122 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3123 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3124 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3125 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3126 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3127
3128 return 0;
3129}
3130
3131static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3132 SMU7_Discrete_DpmTable *table)
3133{
3134 struct ci_power_info *pi = ci_get_pi(adev);
3135 struct atom_clock_dividers dividers;
3136 SMU7_Discrete_VoltageLevel voltage_level;
3137 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3138 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3139 u32 dll_cntl = pi->clock_registers.dll_cntl;
3140 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3141 int ret;
3142
3143 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3144
3145 if (pi->acpi_vddc)
3146 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3147 else
3148 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3149
3150 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3151
3152 table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3153
3154 ret = amdgpu_atombios_get_clock_dividers(adev,
3155 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3156 table->ACPILevel.SclkFrequency, false, &dividers);
3157 if (ret)
3158 return ret;
3159
3160 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3161 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3162 table->ACPILevel.DeepSleepDivId = 0;
3163
3164 spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3165 spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3166
3167 spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3168 spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3169
3170 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3171 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3172 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3173 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3174 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3175 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3176 table->ACPILevel.CcPwrDynRm = 0;
3177 table->ACPILevel.CcPwrDynRm1 = 0;
3178
3179 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3180 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3181 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3182 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3183 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3184 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3185 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3186 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3187 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3188 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3189 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3190
3191 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3192 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3193
3194 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3195 if (pi->acpi_vddci)
3196 table->MemoryACPILevel.MinVddci =
3197 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3198 else
3199 table->MemoryACPILevel.MinVddci =
3200 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3201 }
3202
3203 if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3204 table->MemoryACPILevel.MinMvdd = 0;
3205 else
3206 table->MemoryACPILevel.MinMvdd =
3207 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3208
3209 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3210 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3211 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3212 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3213
3214 dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3215
3216 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3217 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3218 table->MemoryACPILevel.MpllAdFuncCntl =
3219 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3220 table->MemoryACPILevel.MpllDqFuncCntl =
3221 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3222 table->MemoryACPILevel.MpllFuncCntl =
3223 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3224 table->MemoryACPILevel.MpllFuncCntl_1 =
3225 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3226 table->MemoryACPILevel.MpllFuncCntl_2 =
3227 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3228 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3229 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3230
3231 table->MemoryACPILevel.EnabledForThrottle = 0;
3232 table->MemoryACPILevel.EnabledForActivity = 0;
3233 table->MemoryACPILevel.UpH = 0;
3234 table->MemoryACPILevel.DownH = 100;
3235 table->MemoryACPILevel.VoltageDownH = 0;
3236 table->MemoryACPILevel.ActivityLevel =
3237 cpu_to_be16((u16)pi->mclk_activity_target);
3238
3239 table->MemoryACPILevel.StutterEnable = false;
3240 table->MemoryACPILevel.StrobeEnable = false;
3241 table->MemoryACPILevel.EdcReadEnable = false;
3242 table->MemoryACPILevel.EdcWriteEnable = false;
3243 table->MemoryACPILevel.RttEnable = false;
3244
3245 return 0;
3246}
3247
3248
3249static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3250{
3251 struct ci_power_info *pi = ci_get_pi(adev);
3252 struct ci_ulv_parm *ulv = &pi->ulv;
3253
3254 if (ulv->supported) {
3255 if (enable)
3256 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3257 0 : -EINVAL;
3258 else
3259 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3260 0 : -EINVAL;
3261 }
3262
3263 return 0;
3264}
3265
3266static int ci_populate_ulv_level(struct amdgpu_device *adev,
3267 SMU7_Discrete_Ulv *state)
3268{
3269 struct ci_power_info *pi = ci_get_pi(adev);
3270 u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3271
3272 state->CcPwrDynRm = 0;
3273 state->CcPwrDynRm1 = 0;
3274
3275 if (ulv_voltage == 0) {
3276 pi->ulv.supported = false;
3277 return 0;
3278 }
3279
3280 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3281 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3282 state->VddcOffset = 0;
3283 else
3284 state->VddcOffset =
3285 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3286 } else {
3287 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3288 state->VddcOffsetVid = 0;
3289 else
3290 state->VddcOffsetVid = (u8)
3291 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3292 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3293 }
3294 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3295
3296 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3297 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3298 state->VddcOffset = cpu_to_be16(state->VddcOffset);
3299
3300 return 0;
3301}
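
Assuming the usual SMU7 scale constants (VOLTAGE_VID_OFFSET_SCALE1 = 625 and VOLTAGE_VID_OFFSET_SCALE2 = 100), the SVI2 branch converts a millivolt delta into 6.25 mV VID steps: an ULV voltage 25 mV below the lowest sclk dependency entry yields VddcOffsetVid = 25 * 100 / 625 = 4. The GPIO-controlled branch, by contrast, keeps the offset in raw millivolts.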
3302
3303static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3304 u32 engine_clock,
3305 SMU7_Discrete_GraphicsLevel *sclk)
3306{
3307 struct ci_power_info *pi = ci_get_pi(adev);
3308 struct atom_clock_dividers dividers;
3309 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3310 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3311 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3312 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3313 u32 reference_clock = adev->clock.spll.reference_freq;
3314 u32 reference_divider;
3315 u32 fbdiv;
3316 int ret;
3317
3318 ret = amdgpu_atombios_get_clock_dividers(adev,
3319 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3320 engine_clock, false, &dividers);
3321 if (ret)
3322 return ret;
3323
3324 reference_divider = 1 + dividers.ref_div;
3325 fbdiv = dividers.fb_div & 0x3FFFFFF;
3326
3327 spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3328 spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3329 spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3330
3331 if (pi->caps_sclk_ss_support) {
3332 struct amdgpu_atom_ss ss;
3333 u32 vco_freq = engine_clock * dividers.post_div;
3334
3335 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3336 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3337 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3338 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3339
3340 cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3341 cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3342 cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3343
3344 cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3345 cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3346 }
3347 }
3348
3349 sclk->SclkFrequency = engine_clock;
3350 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3351 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3352 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3353 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3354 sclk->SclkDid = (u8)dividers.post_divider;
3355
3356 return 0;
3357}
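
The spread-spectrum math above follows the standard SPLL recipe: clk_s derives the modulation period from the reference clock and the ATOM table's rate, while clk_v scales the feedback divider by the requested deviation (ss.percentage is expressed in hundredths of a percent, hence the division by 10000). When no ASIC_INTERNAL_ENGINE_SS entry exists for the VCO frequency, both registers simply keep the boot values captured in ci_read_clock_registers().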
3358
3359static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3360 u32 engine_clock,
3361 u16 sclk_activity_level_t,
3362 SMU7_Discrete_GraphicsLevel *graphic_level)
3363{
3364 struct ci_power_info *pi = ci_get_pi(adev);
3365 int ret;
3366
3367 ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3368 if (ret)
3369 return ret;
3370
3371 ret = ci_get_dependency_volt_by_clk(adev,
3372 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3373 engine_clock, &graphic_level->MinVddc);
3374 if (ret)
3375 return ret;
3376
3377 graphic_level->SclkFrequency = engine_clock;
3378
3379 graphic_level->Flags = 0;
3380 graphic_level->MinVddcPhases = 1;
3381
3382 if (pi->vddc_phase_shed_control)
3383 ci_populate_phase_value_based_on_sclk(adev,
3384 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3385 engine_clock,
3386 &graphic_level->MinVddcPhases);
3387
3388 graphic_level->ActivityLevel = sclk_activity_level_t;
3389
3390 graphic_level->CcPwrDynRm = 0;
3391 graphic_level->CcPwrDynRm1 = 0;
3392 graphic_level->EnabledForThrottle = 1;
3393 graphic_level->UpH = 0;
3394 graphic_level->DownH = 0;
3395 graphic_level->VoltageDownH = 0;
3396 graphic_level->PowerThrottle = 0;
3397
3398 if (pi->caps_sclk_ds)
3399 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
3400 CISLAND_MINIMUM_ENGINE_CLOCK);
3401
3402 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3403
3404 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3405 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3406 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3407 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3408 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3409 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3410 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3411 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3412 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3413 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3414 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3415
3416 return 0;
3417}
3418
3419static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3420{
3421 struct ci_power_info *pi = ci_get_pi(adev);
3422 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3423 u32 level_array_address = pi->dpm_table_start +
3424 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3425 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3426 SMU7_MAX_LEVELS_GRAPHICS;
3427 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3428 u32 i, ret;
3429
3430 memset(levels, 0, level_array_size);
3431
3432 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3433 ret = ci_populate_single_graphic_level(adev,
3434 dpm_table->sclk_table.dpm_levels[i].value,
3435 (u16)pi->activity_target[i],
3436 &pi->smc_state_table.GraphicsLevel[i]);
3437 if (ret)
3438 return ret;
3439 if (i > 1)
3440 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3441 if (i == (dpm_table->sclk_table.count - 1))
3442 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3443 PPSMC_DISPLAY_WATERMARK_HIGH;
3444 }
3445 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3446
3447 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3448 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3449 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3450
3451 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3452 (u8 *)levels, level_array_size,
3453 pi->sram_end);
3454 if (ret)
3455 return ret;
3456
3457 return 0;
3458}
3459
3460static int ci_populate_ulv_state(struct amdgpu_device *adev,
3461 SMU7_Discrete_Ulv *ulv_level)
3462{
3463 return ci_populate_ulv_level(adev, ulv_level);
3464}
3465
3466static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3467{
3468 struct ci_power_info *pi = ci_get_pi(adev);
3469 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3470 u32 level_array_address = pi->dpm_table_start +
3471 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3472 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3473 SMU7_MAX_LEVELS_MEMORY;
3474 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3475 u32 i, ret;
3476
3477 memset(levels, 0, level_array_size);
3478
3479 for (i = 0; i < dpm_table->mclk_table.count; i++) {
3480 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3481 return -EINVAL;
3482 ret = ci_populate_single_memory_level(adev,
3483 dpm_table->mclk_table.dpm_levels[i].value,
3484 &pi->smc_state_table.MemoryLevel[i]);
3485 if (ret)
3486 return ret;
3487 }
3488
3489 if ((dpm_table->mclk_table.count >= 2) &&
3490 ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3491 pi->smc_state_table.MemoryLevel[1].MinVddc =
3492 pi->smc_state_table.MemoryLevel[0].MinVddc;
3493 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3494 pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3495 }
3496
3497 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3498
3499 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3500 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3501 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3502
3503 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3504 PPSMC_DISPLAY_WATERMARK_HIGH;
3505
3506 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3507 (u8 *)levels, level_array_size,
3508 pi->sram_end);
3509 if (ret)
3510 return ret;
3511
3512 return 0;
3513}
3514
3515static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3516				      struct ci_single_dpm_table *dpm_table,
3517 u32 count)
3518{
3519 u32 i;
3520
3521 dpm_table->count = count;
3522 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3523 dpm_table->dpm_levels[i].enabled = false;
3524}
3525
3526static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
3527 u32 index, u32 pcie_gen, u32 pcie_lanes)
3528{
3529 dpm_table->dpm_levels[index].value = pcie_gen;
3530 dpm_table->dpm_levels[index].param1 = pcie_lanes;
3531 dpm_table->dpm_levels[index].enabled = true;
3532}
3533
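/*
 * The PCIe DPM table always contains six fixed entries mixing the
 * power-saving and performance gen/lane limits, lowest first.  If only
 * one of the two level sets was provided by the platform, it is copied
 * over the missing one; Bonaire keeps the maximum power-saving lane
 * count even in the lowest entry.
 */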
3534static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3535{
3536 struct ci_power_info *pi = ci_get_pi(adev);
3537
3538 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3539 return -EINVAL;
3540
3541 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3542 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3543 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3544 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3545 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3546 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3547 }
3548
3549 ci_reset_single_dpm_table(adev,
3550 &pi->dpm_table.pcie_speed_table,
3551 SMU7_MAX_LEVELS_LINK);
3552
3553 if (adev->asic_type == CHIP_BONAIRE)
3554 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3555 pi->pcie_gen_powersaving.min,
3556 pi->pcie_lane_powersaving.max);
3557 else
3558 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3559 pi->pcie_gen_powersaving.min,
3560 pi->pcie_lane_powersaving.min);
3561 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3562 pi->pcie_gen_performance.min,
3563 pi->pcie_lane_performance.min);
3564 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3565 pi->pcie_gen_powersaving.min,
3566 pi->pcie_lane_powersaving.max);
3567 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3568 pi->pcie_gen_performance.min,
3569 pi->pcie_lane_performance.max);
3570 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3571 pi->pcie_gen_powersaving.max,
3572 pi->pcie_lane_powersaving.max);
3573 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3574 pi->pcie_gen_performance.max,
3575 pi->pcie_lane_performance.max);
3576
3577 pi->dpm_table.pcie_speed_table.count = 6;
3578
3579 return 0;
3580}
3581
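/*
 * Derive the default sclk/mclk/voltage DPM tables from the power-play
 * dependency tables, de-duplicating consecutive identical clocks and
 * annotating each VDDC level with its CAC leakage value, then stash a
 * pristine copy in golden_dpm_table so overdrive changes can be
 * reverted later.
 */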
3582static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3583{
3584 struct ci_power_info *pi = ci_get_pi(adev);
3585 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3586 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3587 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3588 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3589 struct amdgpu_cac_leakage_table *std_voltage_table =
3590 &adev->pm.dpm.dyn_state.cac_leakage_table;
3591 u32 i;
3592
3593 if (allowed_sclk_vddc_table == NULL)
3594 return -EINVAL;
3595 if (allowed_sclk_vddc_table->count < 1)
3596 return -EINVAL;
3597 if (allowed_mclk_table == NULL)
3598 return -EINVAL;
3599 if (allowed_mclk_table->count < 1)
3600 return -EINVAL;
3601
3602 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3603
3604 ci_reset_single_dpm_table(adev,
3605 &pi->dpm_table.sclk_table,
3606 SMU7_MAX_LEVELS_GRAPHICS);
3607 ci_reset_single_dpm_table(adev,
3608 &pi->dpm_table.mclk_table,
3609 SMU7_MAX_LEVELS_MEMORY);
3610 ci_reset_single_dpm_table(adev,
3611 &pi->dpm_table.vddc_table,
3612 SMU7_MAX_LEVELS_VDDC);
3613 ci_reset_single_dpm_table(adev,
3614 &pi->dpm_table.vddci_table,
3615 SMU7_MAX_LEVELS_VDDCI);
3616 ci_reset_single_dpm_table(adev,
3617 &pi->dpm_table.mvdd_table,
3618 SMU7_MAX_LEVELS_MVDD);
3619
3620 pi->dpm_table.sclk_table.count = 0;
3621 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3622 if ((i == 0) ||
3623 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3624 allowed_sclk_vddc_table->entries[i].clk)) {
3625 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3626 allowed_sclk_vddc_table->entries[i].clk;
3627 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3628				(i == 0);
3629 pi->dpm_table.sclk_table.count++;
3630 }
3631 }
3632
3633 pi->dpm_table.mclk_table.count = 0;
3634 for (i = 0; i < allowed_mclk_table->count; i++) {
3635 if ((i == 0) ||
3636 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3637 allowed_mclk_table->entries[i].clk)) {
3638 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3639 allowed_mclk_table->entries[i].clk;
3640 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3641				(i == 0);
3642 pi->dpm_table.mclk_table.count++;
3643 }
3644 }
3645
3646 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3647 pi->dpm_table.vddc_table.dpm_levels[i].value =
3648 allowed_sclk_vddc_table->entries[i].v;
3649 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3650 std_voltage_table->entries[i].leakage;
3651 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3652 }
3653 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3654
3655 allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3656 if (allowed_mclk_table) {
3657 for (i = 0; i < allowed_mclk_table->count; i++) {
3658 pi->dpm_table.vddci_table.dpm_levels[i].value =
3659 allowed_mclk_table->entries[i].v;
3660 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3661 }
3662 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3663 }
3664
3665 allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3666 if (allowed_mclk_table) {
3667 for (i = 0; i < allowed_mclk_table->count; i++) {
3668 pi->dpm_table.mvdd_table.dpm_levels[i].value =
3669 allowed_mclk_table->entries[i].v;
3670 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3671 }
3672 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3673 }
3674
3675 ci_setup_default_pcie_tables(adev);
3676
3677 /* save a copy of the default DPM table */
3678 memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3679 sizeof(struct ci_dpm_table));
3680
3681 return 0;
3682}
3683
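/*
 * Linear scan for an exact clock match; *boot_level is only written
 * (and 0 returned) when the value exists in the table, otherwise the
 * function returns -EINVAL and leaves *boot_level untouched.
 */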
3684static int ci_find_boot_level(struct ci_single_dpm_table *table,
3685 u32 value, u32 *boot_level)
3686{
3687 u32 i;
3688 int ret = -EINVAL;
3689
3690	for (i = 0; i < table->count; i++) {
3691 if (value == table->dpm_levels[i].value) {
3692 *boot_level = i;
3693 ret = 0;
3694 }
3695 }
3696
3697 return ret;
3698}
3699
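/*
 * Build the complete SMU7_Discrete_DpmTable: default DPM tables, voltage
 * tables, ULV state, graphics/memory/link levels, boot levels and the
 * various sampling intervals, then upload the table from the SystemFlags
 * field onward into SMC SRAM.  The copy size deliberately stops short of
 * the three trailing SMU7_PIDController structures at the end of the
 * table.
 */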
3700static int ci_init_smc_table(struct amdgpu_device *adev)
3701{
3702 struct ci_power_info *pi = ci_get_pi(adev);
3703 struct ci_ulv_parm *ulv = &pi->ulv;
3704 struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3705 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3706 int ret;
3707
3708 ret = ci_setup_default_dpm_tables(adev);
3709 if (ret)
3710 return ret;
3711
3712 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3713 ci_populate_smc_voltage_tables(adev, table);
3714
3715 ci_init_fps_limits(adev);
3716
3717 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3718 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3719
3720 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3721 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3722
3723 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3724 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3725
3726 if (ulv->supported) {
3727 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3728 if (ret)
3729 return ret;
3730 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3731 }
3732
3733 ret = ci_populate_all_graphic_levels(adev);
3734 if (ret)
3735 return ret;
3736
3737 ret = ci_populate_all_memory_levels(adev);
3738 if (ret)
3739 return ret;
3740
3741 ci_populate_smc_link_level(adev, table);
3742
3743 ret = ci_populate_smc_acpi_level(adev, table);
3744 if (ret)
3745 return ret;
3746
3747 ret = ci_populate_smc_vce_level(adev, table);
3748 if (ret)
3749 return ret;
3750
3751 ret = ci_populate_smc_acp_level(adev, table);
3752 if (ret)
3753 return ret;
3754
3755 ret = ci_populate_smc_samu_level(adev, table);
3756 if (ret)
3757 return ret;
3758
3759 ret = ci_do_program_memory_timing_parameters(adev);
3760 if (ret)
3761 return ret;
3762
3763 ret = ci_populate_smc_uvd_level(adev, table);
3764 if (ret)
3765 return ret;
3766
3767 table->UvdBootLevel = 0;
3768 table->VceBootLevel = 0;
3769 table->AcpBootLevel = 0;
3770 table->SamuBootLevel = 0;
3771 table->GraphicsBootLevel = 0;
3772 table->MemoryBootLevel = 0;
3773
3774 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3775 pi->vbios_boot_state.sclk_bootup_value,
3776 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3777
3778 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3779 pi->vbios_boot_state.mclk_bootup_value,
3780 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3781
3782 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3783 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3784 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3785
3786 ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3787
3788 ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3789 if (ret)
3790 return ret;
3791
3792 table->UVDInterval = 1;
3793 table->VCEInterval = 1;
3794 table->ACPInterval = 1;
3795 table->SAMUInterval = 1;
3796 table->GraphicsVoltageChangeEnable = 1;
3797 table->GraphicsThermThrottleEnable = 1;
3798 table->GraphicsInterval = 1;
3799 table->VoltageInterval = 1;
3800 table->ThermalInterval = 1;
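	/*
	 * Thermal limits are kept in millidegrees C by the driver;
	 * dividing by 1000 and scaling by the Q8.8 unit
	 * (CISLANDS_Q88_FORMAT_CONVERSION_UNIT, i.e. 256) converts them
	 * to the SMC's fixed-point format, e.g. 120000 -> 120 * 256 = 0x7800.
	 */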
3801 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3802 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3803 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3804 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3805 table->MemoryVoltageChangeEnable = 1;
3806 table->MemoryInterval = 1;
3807 table->VoltageResponseTime = 0;
3808 table->VddcVddciDelta = 4000;
3809 table->PhaseResponseTime = 0;
3810 table->MemoryThermThrottleEnable = 1;
3811 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3812 table->PCIeGenInterval = 1;
3813 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3814 table->SVI2Enable = 1;
3815 else
3816 table->SVI2Enable = 0;
3817
3818 table->ThermGpio = 17;
3819 table->SclkStepSize = 0x4000;
3820
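	/*
	 * The SMC expects big-endian data: every multi-byte field written
	 * below is byte-swapped in place before the table is copied to
	 * SRAM, so none of these fields may be read back as CPU-endian
	 * afterwards.
	 */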
3821 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3822 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3823 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3824 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3825 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3826 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3827 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3828 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3829 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3830 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3831 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3832 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3833 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3834 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3835
3836 ret = amdgpu_ci_copy_bytes_to_smc(adev,
3837 pi->dpm_table_start +
3838 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3839 (u8 *)&table->SystemFlags,
3840 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3841 pi->sram_end);
3842 if (ret)
3843 return ret;
3844
3845 return 0;
3846}
3847
3848static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3849 struct ci_single_dpm_table *dpm_table,
3850 u32 low_limit, u32 high_limit)
3851{
3852 u32 i;
3853
3854 for (i = 0; i < dpm_table->count; i++) {
3855 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3856 (dpm_table->dpm_levels[i].value > high_limit))
3857 dpm_table->dpm_levels[i].enabled = false;
3858 else
3859 dpm_table->dpm_levels[i].enabled = true;
3860 }
3861}
3862
3863static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3864 u32 speed_low, u32 lanes_low,
3865 u32 speed_high, u32 lanes_high)
3866{
3867 struct ci_power_info *pi = ci_get_pi(adev);
3868 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3869 u32 i, j;
3870
3871 for (i = 0; i < pcie_table->count; i++) {
3872 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3873 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3874 (pcie_table->dpm_levels[i].value > speed_high) ||
3875 (pcie_table->dpm_levels[i].param1 > lanes_high))
3876 pcie_table->dpm_levels[i].enabled = false;
3877 else
3878 pcie_table->dpm_levels[i].enabled = true;
3879 }
3880
3881 for (i = 0; i < pcie_table->count; i++) {
3882 if (pcie_table->dpm_levels[i].enabled) {
3883 for (j = i + 1; j < pcie_table->count; j++) {
3884 if (pcie_table->dpm_levels[j].enabled) {
3885 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3886 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3887 pcie_table->dpm_levels[j].enabled = false;
3888 }
3889 }
3890 }
3891 }
3892}
3893
3894static int ci_trim_dpm_states(struct amdgpu_device *adev,
3895 struct amdgpu_ps *amdgpu_state)
3896{
3897 struct ci_ps *state = ci_get_ps(amdgpu_state);
3898 struct ci_power_info *pi = ci_get_pi(adev);
3899 u32 high_limit_count;
3900
3901 if (state->performance_level_count < 1)
3902 return -EINVAL;
3903
3904 if (state->performance_level_count == 1)
3905 high_limit_count = 0;
3906 else
3907 high_limit_count = 1;
3908
3909 ci_trim_single_dpm_states(adev,
3910 &pi->dpm_table.sclk_table,
3911 state->performance_levels[0].sclk,
3912 state->performance_levels[high_limit_count].sclk);
3913
3914 ci_trim_single_dpm_states(adev,
3915 &pi->dpm_table.mclk_table,
3916 state->performance_levels[0].mclk,
3917 state->performance_levels[high_limit_count].mclk);
3918
3919 ci_trim_pcie_dpm_states(adev,
3920 state->performance_levels[0].pcie_gen,
3921 state->performance_levels[0].pcie_lane,
3922 state->performance_levels[high_limit_count].pcie_gen,
3923 state->performance_levels[high_limit_count].pcie_lane);
3924
3925 return 0;
3926}
3927
3928static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3929{
3930 struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3931 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3932 struct amdgpu_clock_voltage_dependency_table *vddc_table =
3933 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3934 u32 requested_voltage = 0;
3935 u32 i;
3936
3937 if (disp_voltage_table == NULL)
3938 return -EINVAL;
3939 if (!disp_voltage_table->count)
3940 return -EINVAL;
3941
3942 for (i = 0; i < disp_voltage_table->count; i++) {
3943 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3944 requested_voltage = disp_voltage_table->entries[i].v;
3945 }
3946
3947 for (i = 0; i < vddc_table->count; i++) {
3948 if (requested_voltage <= vddc_table->entries[i].v) {
3949 requested_voltage = vddc_table->entries[i].v;
3950 return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3951 PPSMC_MSG_VddC_Request,
3952 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3953 0 : -EINVAL;
3954 }
3955 }
3956
3957 return -EINVAL;
3958}
3959
3960static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3961{
3962 struct ci_power_info *pi = ci_get_pi(adev);
3963 PPSMC_Result result;
3964
3965 ci_apply_disp_minimum_voltage_request(adev);
3966
3967 if (!pi->sclk_dpm_key_disabled) {
3968 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3969 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3970 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3971 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3972 if (result != PPSMC_Result_OK)
3973 return -EINVAL;
3974 }
3975 }
3976
3977 if (!pi->mclk_dpm_key_disabled) {
3978 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3979 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3980 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3981 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3982 if (result != PPSMC_Result_OK)
3983 return -EINVAL;
3984 }
3985 }
3986
3987#if 0
3988 if (!pi->pcie_dpm_key_disabled) {
3989 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3990 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3991 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3992 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3993 if (result != PPSMC_Result_OK)
3994 return -EINVAL;
3995 }
3996 }
3997#endif
3998
3999 return 0;
4000}
4001
4002static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
4003 struct amdgpu_ps *amdgpu_state)
4004{
4005 struct ci_power_info *pi = ci_get_pi(adev);
4006 struct ci_ps *state = ci_get_ps(amdgpu_state);
4007 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
4008 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4009 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
4010 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4011 u32 i;
4012
4013 pi->need_update_smu7_dpm_table = 0;
4014
4015 for (i = 0; i < sclk_table->count; i++) {
4016 if (sclk == sclk_table->dpm_levels[i].value)
4017 break;
4018 }
4019
4020 if (i >= sclk_table->count) {
4021 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4022 } else {
4023 /* XXX check display min clock requirements */
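			/*
			 * Note: this self-comparison is always false, so the
			 * DPMTABLE_UPDATE_SCLK path below is dead code kept
			 * as a placeholder for a real display minimum-clock
			 * check.
			 */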
4024 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
4025 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4026 }
4027
4028 for (i = 0; i < mclk_table->count; i++) {
4029 if (mclk == mclk_table->dpm_levels[i].value)
4030 break;
4031 }
4032
4033 if (i >= mclk_table->count)
4034 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4035
4036 if (adev->pm.dpm.current_active_crtc_count !=
4037 adev->pm.dpm.new_active_crtc_count)
4038 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4039}
4040
4041static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4042 struct amdgpu_ps *amdgpu_state)
4043{
4044 struct ci_power_info *pi = ci_get_pi(adev);
4045 struct ci_ps *state = ci_get_ps(amdgpu_state);
4046 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4047 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4048 struct ci_dpm_table *dpm_table = &pi->dpm_table;
4049 int ret;
4050
4051 if (!pi->need_update_smu7_dpm_table)
4052 return 0;
4053
4054 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4055 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4056
4057 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4058 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4059
4060 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4061 ret = ci_populate_all_graphic_levels(adev);
4062 if (ret)
4063 return ret;
4064 }
4065
4066 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4067 ret = ci_populate_all_memory_levels(adev);
4068 if (ret)
4069 return ret;
4070 }
4071
4072 return 0;
4073}
4074
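/*
 * Enable/disable UVD DPM.  The enable mask is built from the UVD
 * clock-voltage dependency table, highest level first, stopping after
 * the first eligible level when per-level UVD DPM is not supported.
 * While UVD is active the lowest MCLK level is masked off (if it was
 * enabled) and restored again on disable, before the enable/disable
 * message is sent to the SMC.
 */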
4075static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4076{
4077 struct ci_power_info *pi = ci_get_pi(adev);
4078 const struct amdgpu_clock_and_voltage_limits *max_limits;
4079 int i;
4080
4081 if (adev->pm.ac_power)
4082 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4083 else
4084 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4085
4086 if (enable) {
4087 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4088
4089 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4090 if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4091 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4092
4093 if (!pi->caps_uvd_dpm)
4094 break;
4095 }
4096 }
4097
4098 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4099 PPSMC_MSG_UVDDPM_SetEnabledMask,
4100 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4101
4102 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4103 pi->uvd_enabled = true;
4104 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4105 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4106 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4107 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4108 }
4109 } else {
4110 if (pi->uvd_enabled) {
4111 pi->uvd_enabled = false;
4112 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4113 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4114 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4115 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4116 }
4117 }
4118
4119 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4120 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4121 0 : -EINVAL;
4122}
4123
4124static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4125{
4126 struct ci_power_info *pi = ci_get_pi(adev);
4127 const struct amdgpu_clock_and_voltage_limits *max_limits;
4128 int i;
4129
4130 if (adev->pm.ac_power)
4131 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4132 else
4133 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4134
4135 if (enable) {
4136 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4137 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4138 if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4139 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4140
4141 if (!pi->caps_vce_dpm)
4142 break;
4143 }
4144 }
4145
4146 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4147 PPSMC_MSG_VCEDPM_SetEnabledMask,
4148 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4149 }
4150
4151 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4152 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4153 0 : -EINVAL;
4154}
4155
4156#if 0
4157static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4158{
4159 struct ci_power_info *pi = ci_get_pi(adev);
4160 const struct amdgpu_clock_and_voltage_limits *max_limits;
4161 int i;
4162
4163 if (adev->pm.ac_power)
4164 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4165 else
4166 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4167
4168 if (enable) {
4169 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4170 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4171 if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4172 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4173
4174 if (!pi->caps_samu_dpm)
4175 break;
4176 }
4177 }
4178
4179 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4180 PPSMC_MSG_SAMUDPM_SetEnabledMask,
4181 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4182 }
4183 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4184 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4185 0 : -EINVAL;
4186}
4187
4188static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4189{
4190 struct ci_power_info *pi = ci_get_pi(adev);
4191 const struct amdgpu_clock_and_voltage_limits *max_limits;
4192 int i;
4193
4194 if (adev->pm.ac_power)
4195 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4196 else
4197 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4198
4199 if (enable) {
4200 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4201 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4202 if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4203 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4204
4205 if (!pi->caps_acp_dpm)
4206 break;
4207 }
4208 }
4209
4210 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4211 PPSMC_MSG_ACPDPM_SetEnabledMask,
4212 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4213 }
4214
4215 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4216 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4217 0 : -EINVAL;
4218}
4219#endif
4220
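/*
 * Gate/ungate UVD clocks.  On ungate the UVD boot level is chosen
 * (lowest when per-level DPM is available, otherwise the top entry),
 * mirrored into the DPM_TABLE_475 register, and UVD DPM is enabled;
 * on gate UVD DPM is simply disabled.
 */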
4221static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4222{
4223 struct ci_power_info *pi = ci_get_pi(adev);
4224 u32 tmp;
4225 int ret = 0;
4226
4227 if (!gate) {
4228 /* turn the clocks on when decoding */
4229 if (pi->caps_uvd_dpm ||
4230 (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4231 pi->smc_state_table.UvdBootLevel = 0;
4232 else
4233 pi->smc_state_table.UvdBootLevel =
4234 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4235
4236 tmp = RREG32_SMC(ixDPM_TABLE_475);
4237 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4238 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4239 WREG32_SMC(ixDPM_TABLE_475, tmp);
4240 ret = ci_enable_uvd_dpm(adev, true);
4241 } else {
4242 ret = ci_enable_uvd_dpm(adev, false);
4243 if (ret)
4244 return ret;
4245 }
4246
4247 return ret;
4248}
4249
4250static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4251{
4252 u8 i;
4253 u32 min_evclk = 30000; /* ??? */
4254 struct amdgpu_vce_clock_voltage_dependency_table *table =
4255 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4256
4257 for (i = 0; i < table->count; i++) {
4258 if (table->entries[i].evclk >= min_evclk)
4259 return i;
4260 }
4261
4262 return table->count - 1;
4263}
4264
4265static int ci_update_vce_dpm(struct amdgpu_device *adev,
4266 struct amdgpu_ps *amdgpu_new_state,
4267 struct amdgpu_ps *amdgpu_current_state)
4268{
4269 struct ci_power_info *pi = ci_get_pi(adev);
4270 int ret = 0;
4271 u32 tmp;
4272
4273 if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4274 if (amdgpu_new_state->evclk) {
4275 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4276 tmp = RREG32_SMC(ixDPM_TABLE_475);
4277 tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4278 tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4279 WREG32_SMC(ixDPM_TABLE_475, tmp);
4280
4281 ret = ci_enable_vce_dpm(adev, true);
4282 } else {
4283 ret = ci_enable_vce_dpm(adev, false);
4284 if (ret)
4285 return ret;
4286 }
4287 }
4288 return ret;
4289}
4290
4291#if 0
4292static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4293{
4294 return ci_enable_samu_dpm(adev, gate);
4295}
4296
4297static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4298{
4299 struct ci_power_info *pi = ci_get_pi(adev);
4300 u32 tmp;
4301
4302 if (!gate) {
4303 pi->smc_state_table.AcpBootLevel = 0;
4304
4305 tmp = RREG32_SMC(ixDPM_TABLE_475);
4306 tmp &= ~AcpBootLevel_MASK;
4307 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4308 WREG32_SMC(ixDPM_TABLE_475, tmp);
4309 }
4310
4311 return ci_enable_acp_dpm(adev, !gate);
4312}
4313#endif
4314
4315static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4316 struct amdgpu_ps *amdgpu_state)
4317{
4318 struct ci_power_info *pi = ci_get_pi(adev);
4319 int ret;
4320
4321 ret = ci_trim_dpm_states(adev, amdgpu_state);
4322 if (ret)
4323 return ret;
4324
4325 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4326 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4327 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4328 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4329 pi->last_mclk_dpm_enable_mask =
4330 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4331 if (pi->uvd_enabled) {
4332 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4333 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4334 }
4335 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4336 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4337
4338 return 0;
4339}
4340
4341static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4342 u32 level_mask)
4343{
4344 u32 level = 0;
4345
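	/* Callers must pass a non-zero mask or this loop never terminates. */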
4346 while ((level_mask & (1 << level)) == 0)
4347 level++;
4348
4349 return level;
4350}
4351
4353static int ci_dpm_force_performance_level(void *handle,
4354 enum amd_dpm_forced_level level)
4355{
4356 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4357 struct ci_power_info *pi = ci_get_pi(adev);
4358 u32 tmp, levels, i;
4359 int ret;
4360
4361 if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
4362 if ((!pi->pcie_dpm_key_disabled) &&
4363 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4364 levels = 0;
4365 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4366 while (tmp >>= 1)
4367 levels++;
4368 if (levels) {
4369				ret = ci_dpm_force_state_pcie(adev, levels);
4370 if (ret)
4371 return ret;
4372 for (i = 0; i < adev->usec_timeout; i++) {
4373 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4374 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4375 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4376 if (tmp == levels)
4377 break;
4378 udelay(1);
4379 }
4380 }
4381 }
4382 if ((!pi->sclk_dpm_key_disabled) &&
4383 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4384 levels = 0;
4385 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4386 while (tmp >>= 1)
4387 levels++;
4388 if (levels) {
4389 ret = ci_dpm_force_state_sclk(adev, levels);
4390 if (ret)
4391 return ret;
4392 for (i = 0; i < adev->usec_timeout; i++) {
4393 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4394 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4395 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4396 if (tmp == levels)
4397 break;
4398 udelay(1);
4399 }
4400 }
4401 }
4402 if ((!pi->mclk_dpm_key_disabled) &&
4403 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4404 levels = 0;
4405 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4406 while (tmp >>= 1)
4407 levels++;
4408 if (levels) {
4409 ret = ci_dpm_force_state_mclk(adev, levels);
4410 if (ret)
4411 return ret;
4412 for (i = 0; i < adev->usec_timeout; i++) {
4413 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4414 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4415 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4416 if (tmp == levels)
4417 break;
4418 udelay(1);
4419 }
4420 }
4421 }
4422 } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
4423 if ((!pi->sclk_dpm_key_disabled) &&
4424 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4425 levels = ci_get_lowest_enabled_level(adev,
4426 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4427 ret = ci_dpm_force_state_sclk(adev, levels);
4428 if (ret)
4429 return ret;
4430 for (i = 0; i < adev->usec_timeout; i++) {
4431 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4432 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4433 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4434 if (tmp == levels)
4435 break;
4436 udelay(1);
4437 }
4438 }
4439 if ((!pi->mclk_dpm_key_disabled) &&
4440 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4441 levels = ci_get_lowest_enabled_level(adev,
4442 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4443 ret = ci_dpm_force_state_mclk(adev, levels);
4444 if (ret)
4445 return ret;
4446 for (i = 0; i < adev->usec_timeout; i++) {
4447 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4448 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4449 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4450 if (tmp == levels)
4451 break;
4452 udelay(1);
4453 }
4454 }
4455 if ((!pi->pcie_dpm_key_disabled) &&
4456 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4457 levels = ci_get_lowest_enabled_level(adev,
4458 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4459 ret = ci_dpm_force_state_pcie(adev, levels);
4460 if (ret)
4461 return ret;
4462 for (i = 0; i < adev->usec_timeout; i++) {
4463 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4464 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4465 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4466 if (tmp == levels)
4467 break;
4468 udelay(1);
4469 }
4470 }
4471 } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
4472 if (!pi->pcie_dpm_key_disabled) {
4473 PPSMC_Result smc_result;
4474
4475 smc_result = amdgpu_ci_send_msg_to_smc(adev,
4476 PPSMC_MSG_PCIeDPM_UnForceLevel);
4477 if (smc_result != PPSMC_Result_OK)
4478 return -EINVAL;
4479 }
4480 ret = ci_upload_dpm_level_enable_mask(adev);
4481 if (ret)
4482 return ret;
4483 }
4484
4485 adev->pm.dpm.forced_level = level;
4486
4487 return 0;
4488}
4489
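/*
 * Append derived "special" MC registers after the last VBIOS-provided
 * entry: EMRS/MRS (and MRS1) command values are rebuilt by combining the
 * live MC_PMG_CMD_* register contents with the per-level table data, and
 * non-GDDR5 parts gain an extra MC_PMG_AUTO_CMD entry.  j tracks the
 * append position and is bounds-checked against the SMC array size.
 */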
4490static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4491 struct ci_mc_reg_table *table)
4492{
4493 u8 i, j, k;
4494 u32 temp_reg;
4495
4496 for (i = 0, j = table->last; i < table->last; i++) {
4497 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4498 return -EINVAL;
4499		switch (table->mc_reg_address[i].s1) {
4500 case mmMC_SEQ_MISC1:
4501 temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4502 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4503 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4504 for (k = 0; k < table->num_entries; k++) {
4505 table->mc_reg_table_entry[k].mc_data[j] =
4506 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4507 }
4508 j++;
4509
4510 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4511 return -EINVAL;
4512 temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4513 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4514 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4515 for (k = 0; k < table->num_entries; k++) {
4516 table->mc_reg_table_entry[k].mc_data[j] =
4517 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4518 if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4519 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4520 }
4521 j++;
4522
4523 if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4524 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4525 return -EINVAL;
4526 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4527 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4528 for (k = 0; k < table->num_entries; k++) {
4529 table->mc_reg_table_entry[k].mc_data[j] =
4530 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4531 }
4532 j++;
4533 }
4534 break;
4535 case mmMC_SEQ_RESERVE_M:
4536 temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4537 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4538 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4539 for (k = 0; k < table->num_entries; k++) {
4540 table->mc_reg_table_entry[k].mc_data[j] =
4541 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4542 }
4543 j++;
4544 break;
4545 default:
4546 break;
4547 }
4548
4549 }
4550
4551 table->last = j;
4552
4553 return 0;
4554}
4555
4556static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4557{
4558 bool result = true;
4559
4560	switch (in_reg) {
4561 case mmMC_SEQ_RAS_TIMING:
4562 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4563 break;
4564 case mmMC_SEQ_DLL_STBY:
4565 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4566 break;
4567 case mmMC_SEQ_G5PDX_CMD0:
4568 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4569 break;
4570 case mmMC_SEQ_G5PDX_CMD1:
4571 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4572 break;
4573 case mmMC_SEQ_G5PDX_CTRL:
4574 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4575 break;
4576 case mmMC_SEQ_CAS_TIMING:
4577 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4578 break;
4579 case mmMC_SEQ_MISC_TIMING:
4580 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4581 break;
4582 case mmMC_SEQ_MISC_TIMING2:
4583 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4584 break;
4585 case mmMC_SEQ_PMG_DVS_CMD:
4586 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4587 break;
4588 case mmMC_SEQ_PMG_DVS_CTL:
4589 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4590 break;
4591 case mmMC_SEQ_RD_CTL_D0:
4592 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4593 break;
4594 case mmMC_SEQ_RD_CTL_D1:
4595 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4596 break;
4597 case mmMC_SEQ_WR_CTL_D0:
4598 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4599 break;
4600 case mmMC_SEQ_WR_CTL_D1:
4601 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4602 break;
4603 case mmMC_PMG_CMD_EMRS:
4604 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4605 break;
4606 case mmMC_PMG_CMD_MRS:
4607 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4608 break;
4609 case mmMC_PMG_CMD_MRS1:
4610 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4611 break;
4612 case mmMC_SEQ_PMG_TIMING:
4613 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4614 break;
4615 case mmMC_PMG_CMD_MRS2:
4616 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4617 break;
4618 case mmMC_SEQ_WR_CTL_2:
4619 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4620 break;
4621 default:
4622 result = false;
4623 break;
4624 }
4625
4626 return result;
4627}
4628
4629static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4630{
4631 u8 i, j;
4632
4633 for (i = 0; i < table->last; i++) {
4634 for (j = 1; j < table->num_entries; j++) {
4635 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4636 table->mc_reg_table_entry[j].mc_data[i]) {
4637 table->valid_flag |= 1 << i;
4638 break;
4639 }
4640 }
4641 }
4642}
4643
4644static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4645{
4646 u32 i;
4647 u16 address;
4648
4649 for (i = 0; i < table->last; i++) {
4650 table->mc_reg_address[i].s0 =
4651 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4652 address : table->mc_reg_address[i].s1;
4653 }
4654}
4655
4656static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4657 struct ci_mc_reg_table *ci_table)
4658{
4659 u8 i, j;
4660
4661 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4662 return -EINVAL;
4663 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4664 return -EINVAL;
4665
4666 for (i = 0; i < table->last; i++)
4667 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4668
4669 ci_table->last = table->last;
4670
4671 for (i = 0; i < table->num_entries; i++) {
4672 ci_table->mc_reg_table_entry[i].mclk_max =
4673 table->mc_reg_table_entry[i].mclk_max;
4674 for (j = 0; j < table->last; j++)
4675 ci_table->mc_reg_table_entry[i].mc_data[j] =
4676 table->mc_reg_table_entry[i].mc_data[j];
4677 }
4678 ci_table->num_entries = table->num_entries;
4679
4680 return 0;
4681}
4682
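/*
 * Hawaii (0x67B0/0x67B1) memory-sequencer patching: for parts whose
 * MC_SEQ_MISC0 strap (bits 8-11) reads 3, entries with mclk_max of
 * 125000 or 137500 (10 kHz units, i.e. 1.25/1.375 GHz) get fixed-up
 * timing and write-control values, and an MC_SEQ_IO_DEBUG override is
 * applied.
 */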
4683static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4684 struct ci_mc_reg_table *table)
4685{
4686 u8 i, k;
4687 u32 tmp;
4688 bool patch;
4689
4690 tmp = RREG32(mmMC_SEQ_MISC0);
4691	patch = ((tmp & 0x0000f00) == 0x300);
4692
4693 if (patch &&
4694 ((adev->pdev->device == 0x67B0) ||
4695 (adev->pdev->device == 0x67B1))) {
4696 for (i = 0; i < table->last; i++) {
4697 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4698 return -EINVAL;
4699 switch (table->mc_reg_address[i].s1) {
4700 case mmMC_SEQ_MISC1:
4701 for (k = 0; k < table->num_entries; k++) {
4702 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4703 (table->mc_reg_table_entry[k].mclk_max == 137500))
4704 table->mc_reg_table_entry[k].mc_data[i] =
4705 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4706 0x00000007;
4707 }
4708 break;
4709 case mmMC_SEQ_WR_CTL_D0:
4710 for (k = 0; k < table->num_entries; k++) {
4711 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4712 (table->mc_reg_table_entry[k].mclk_max == 137500))
4713 table->mc_reg_table_entry[k].mc_data[i] =
4714 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4715 0x0000D0DD;
4716 }
4717 break;
4718 case mmMC_SEQ_WR_CTL_D1:
4719 for (k = 0; k < table->num_entries; k++) {
4720 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4721 (table->mc_reg_table_entry[k].mclk_max == 137500))
4722 table->mc_reg_table_entry[k].mc_data[i] =
4723 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4724 0x0000D0DD;
4725 }
4726 break;
4727 case mmMC_SEQ_WR_CTL_2:
4728 for (k = 0; k < table->num_entries; k++) {
4729 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4730 (table->mc_reg_table_entry[k].mclk_max == 137500))
4731 table->mc_reg_table_entry[k].mc_data[i] = 0;
4732 }
4733 break;
4734 case mmMC_SEQ_CAS_TIMING:
4735 for (k = 0; k < table->num_entries; k++) {
4736 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4737 table->mc_reg_table_entry[k].mc_data[i] =
4738 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4739 0x000C0140;
4740 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4741 table->mc_reg_table_entry[k].mc_data[i] =
4742 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4743 0x000C0150;
4744 }
4745 break;
4746 case mmMC_SEQ_MISC_TIMING:
4747 for (k = 0; k < table->num_entries; k++) {
4748 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4749 table->mc_reg_table_entry[k].mc_data[i] =
4750 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4751 0x00000030;
4752 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4753 table->mc_reg_table_entry[k].mc_data[i] =
4754 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4755 0x00000035;
4756 }
4757 break;
4758 default:
4759 break;
4760 }
4761 }
4762
4763 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4764 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4765 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4766 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4767 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4768 }
4769
4770 return 0;
4771}
4772
4773static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4774{
4775 struct ci_power_info *pi = ci_get_pi(adev);
4776 struct atom_mc_reg_table *table;
4777 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4778 u8 module_index = ci_get_memory_module_index(adev);
4779 int ret;
4780
4781 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4782 if (!table)
4783 return -ENOMEM;
4784
4785 WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4786 WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4787 WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4788 WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4789 WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4790 WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4791 WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4792 WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4793 WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4794 WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4795 WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4796 WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4797 WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4798 WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4799 WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4800 WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4801 WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4802 WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4803 WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4804 WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4805
4806 ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4807 if (ret)
4808 goto init_mc_done;
4809
4810 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4811 if (ret)
4812 goto init_mc_done;
4813
4814 ci_set_s0_mc_reg_index(ci_table);
4815
4816 ret = ci_register_patching_mc_seq(adev, ci_table);
4817 if (ret)
4818 goto init_mc_done;
4819
4820 ret = ci_set_mc_special_registers(adev, ci_table);
4821 if (ret)
4822 goto init_mc_done;
4823
4824 ci_set_valid_flag(ci_table);
4825
4826init_mc_done:
4827 kfree(table);
4828
4829 return ret;
4830}
4831
4832static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4833 SMU7_Discrete_MCRegisters *mc_reg_table)
4834{
4835 struct ci_power_info *pi = ci_get_pi(adev);
4836 u32 i, j;
4837
4838 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4839 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4840 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4841 return -EINVAL;
4842 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4843 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4844 i++;
4845 }
4846 }
4847
4848 mc_reg_table->last = (u8)i;
4849
4850 return 0;
4851}
4852
4853static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4854 SMU7_Discrete_MCRegisterSet *data,
4855 u32 num_entries, u32 valid_flag)
4856{
4857 u32 i, j;
4858
4859 for (i = 0, j = 0; j < num_entries; j++) {
4860 if (valid_flag & (1 << j)) {
4861 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4862 i++;
4863 }
4864 }
4865}
4866
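/*
 * Pick the first MC register set whose mclk_max covers the requested
 * memory clock, falling back to the last entry when the clock exceeds
 * every mclk_max, and emit only the fields flagged valid.
 */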
4867static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4868 const u32 memory_clock,
4869 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4870{
4871 struct ci_power_info *pi = ci_get_pi(adev);
4872	u32 i;
4873
4874	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4875 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4876 break;
4877 }
4878
4879 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4880 --i;
4881
4882 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4883 mc_reg_table_data, pi->mc_reg_table.last,
4884 pi->mc_reg_table.valid_flag);
4885}
4886
4887static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4888 SMU7_Discrete_MCRegisters *mc_reg_table)
4889{
4890 struct ci_power_info *pi = ci_get_pi(adev);
4891 u32 i;
4892
4893 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4894 ci_convert_mc_reg_table_entry_to_smc(adev,
4895 pi->dpm_table.mclk_table.dpm_levels[i].value,
4896 &mc_reg_table->data[i]);
4897}
4898
4899static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4900{
4901 struct ci_power_info *pi = ci_get_pi(adev);
4902 int ret;
4903
4904 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4905
4906 ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4907 if (ret)
4908 return ret;
4909 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4910
4911 return amdgpu_ci_copy_bytes_to_smc(adev,
4912 pi->mc_reg_table_start,
4913 (u8 *)&pi->smc_mc_reg_table,
4914 sizeof(SMU7_Discrete_MCRegisters),
4915 pi->sram_end);
4916}
4917
4918static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4919{
4920 struct ci_power_info *pi = ci_get_pi(adev);
4921
4922 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4923 return 0;
4924
4925 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4926
4927 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4928
4929 return amdgpu_ci_copy_bytes_to_smc(adev,
4930 pi->mc_reg_table_start +
4931 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4932 (u8 *)&pi->smc_mc_reg_table.data[0],
4933 sizeof(SMU7_Discrete_MCRegisterSet) *
4934 pi->dpm_table.mclk_table.count,
4935 pi->sram_end);
4936}
4937
4938static void ci_enable_voltage_control(struct amdgpu_device *adev)
4939{
4940 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4941
4942 tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4943 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4944}
4945
4946static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4947 struct amdgpu_ps *amdgpu_state)
4948{
4949 struct ci_ps *state = ci_get_ps(amdgpu_state);
4950 int i;
4951 u16 pcie_speed, max_speed = 0;
4952
4953 for (i = 0; i < state->performance_level_count; i++) {
4954 pcie_speed = state->performance_levels[i].pcie_gen;
4955 if (max_speed < pcie_speed)
4956 max_speed = pcie_speed;
4957 }
4958
4959 return max_speed;
4960}
4961
4962static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4963{
4964 u32 speed_cntl = 0;
4965
4966 speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4967 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4968 speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4969
4970 return (u16)speed_cntl;
4971}
4972
4973static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4974{
4975 u32 link_width = 0;
4976
4977 link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
4978 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
4979 link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
4980
4981 switch (link_width) {
4982 case 1:
4983 return 1;
4984 case 2:
4985 return 2;
4986 case 3:
4987 return 4;
4988 case 4:
4989 return 8;
4990 case 0:
4991 case 6:
4992 default:
4993 return 16;
4994 }
4995}
4996
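/*
 * PSPP link-speed handling: when the new state needs a faster link the
 * switch is requested up front via ACPI (dropping to gen2 if the gen3
 * request is refused); a downgrade is merely flagged here and performed
 * after the state change by the notify function below.
 */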
4997static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
4998 struct amdgpu_ps *amdgpu_new_state,
4999 struct amdgpu_ps *amdgpu_current_state)
5000{
5001 struct ci_power_info *pi = ci_get_pi(adev);
5002 enum amdgpu_pcie_gen target_link_speed =
5003 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5004 enum amdgpu_pcie_gen current_link_speed;
5005
5006 if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
5007 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
5008 else
5009 current_link_speed = pi->force_pcie_gen;
5010
5011 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5012 pi->pspp_notify_required = false;
5013 if (target_link_speed > current_link_speed) {
5014 switch (target_link_speed) {
5015#ifdef CONFIG_ACPI
5016 case AMDGPU_PCIE_GEN3:
5017 if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
5018 break;
5019 pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
5020 if (current_link_speed == AMDGPU_PCIE_GEN2)
5021 break;
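			/* gen3 request failed - fall through and try gen2 */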
5022 case AMDGPU_PCIE_GEN2:
5023 if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5024 break;
5025#endif
5026 default:
5027 pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5028 break;
5029 }
5030 } else {
5031 if (target_link_speed < current_link_speed)
5032 pi->pspp_notify_required = true;
5033 }
5034}
5035
5036static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5037 struct amdgpu_ps *amdgpu_new_state,
5038 struct amdgpu_ps *amdgpu_current_state)
5039{
5040 struct ci_power_info *pi = ci_get_pi(adev);
5041 enum amdgpu_pcie_gen target_link_speed =
5042 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5043 u8 request;
5044
5045 if (pi->pspp_notify_required) {
5046 if (target_link_speed == AMDGPU_PCIE_GEN3)
5047 request = PCIE_PERF_REQ_PECI_GEN3;
5048 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5049 request = PCIE_PERF_REQ_PECI_GEN2;
5050 else
5051 request = PCIE_PERF_REQ_PECI_GEN1;
5052
5053 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5054 (ci_get_current_pcie_speed(adev) > 0))
5055 return;
5056
5057#ifdef CONFIG_ACPI
5058 amdgpu_acpi_pcie_performance_request(adev, request, false);
5059#endif
5060 }
5061}
5062
5063static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5064{
5065 struct ci_power_info *pi = ci_get_pi(adev);
5066 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5067 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5068 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5069 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5070 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5071 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5072
5073 if (allowed_sclk_vddc_table == NULL)
5074 return -EINVAL;
5075 if (allowed_sclk_vddc_table->count < 1)
5076 return -EINVAL;
5077 if (allowed_mclk_vddc_table == NULL)
5078 return -EINVAL;
5079 if (allowed_mclk_vddc_table->count < 1)
5080 return -EINVAL;
5081 if (allowed_mclk_vddci_table == NULL)
5082 return -EINVAL;
5083 if (allowed_mclk_vddci_table->count < 1)
5084 return -EINVAL;
5085
5086 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5087 pi->max_vddc_in_pp_table =
5088 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5089
5090 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5091 pi->max_vddci_in_pp_table =
5092 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5093
5094 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5095 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5096 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5097		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
5098 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5099 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5100 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5101 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5102
5103 return 0;
5104}
5105
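/*
 * Replace a virtual leakage voltage ID with the real voltage measured
 * for this part.  The leakage tables are filled in elsewhere from the
 * ATOM tables; values that are not leakage IDs pass through unchanged.
 */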
5106static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5107{
5108 struct ci_power_info *pi = ci_get_pi(adev);
5109 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5110 u32 leakage_index;
5111
5112 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5113 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5114 *vddc = leakage_table->actual_voltage[leakage_index];
5115 break;
5116 }
5117 }
5118}
5119
5120static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5121{
5122 struct ci_power_info *pi = ci_get_pi(adev);
5123 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5124 u32 leakage_index;
5125
5126 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5127 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5128 *vddci = leakage_table->actual_voltage[leakage_index];
5129 break;
5130 }
5131 }
5132}
5133
5134static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5135 struct amdgpu_clock_voltage_dependency_table *table)
5136{
5137 u32 i;
5138
5139 if (table) {
5140 for (i = 0; i < table->count; i++)
5141 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5142 }
5143}
5144
5145static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5146 struct amdgpu_clock_voltage_dependency_table *table)
5147{
5148 u32 i;
5149
5150 if (table) {
5151 for (i = 0; i < table->count; i++)
5152 ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5153 }
5154}
5155
5156static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5157 struct amdgpu_vce_clock_voltage_dependency_table *table)
5158{
5159 u32 i;
5160
5161 if (table) {
5162 for (i = 0; i < table->count; i++)
5163 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5164 }
5165}
5166
5167static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5168 struct amdgpu_uvd_clock_voltage_dependency_table *table)
5169{
5170 u32 i;
5171
5172 if (table) {
5173 for (i = 0; i < table->count; i++)
5174 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5175 }
5176}
5177
5178static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5179 struct amdgpu_phase_shedding_limits_table *table)
5180{
5181 u32 i;
5182
5183 if (table) {
5184 for (i = 0; i < table->count; i++)
5185 ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5186 }
5187}
5188
5189static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5190 struct amdgpu_clock_and_voltage_limits *table)
5191{
5192 if (table) {
5193 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5194 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5195 }
5196}
5197
5198static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5199 struct amdgpu_cac_leakage_table *table)
5200{
5201 u32 i;
5202
5203 if (table) {
5204 for (i = 0; i < table->count; i++)
5205 ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5206 }
5207}
5208
5209static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5210{
5212 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5213 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5214 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5215 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5216 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5217 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5218 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5219 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5220 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5221 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5222 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5223 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5224 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5225 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5226 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5227 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5228 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5229 &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5230 ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5231 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5232 ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5233 &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5234 ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5235 &adev->pm.dpm.dyn_state.cac_leakage_table);
5237}
5238
5239static void ci_update_current_ps(struct amdgpu_device *adev,
5240 struct amdgpu_ps *rps)
5241{
5242 struct ci_ps *new_ps = ci_get_ps(rps);
5243 struct ci_power_info *pi = ci_get_pi(adev);
5244
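	/* cache both the generic amdgpu_ps and its CI-specific payload, then
	 * re-point ps_priv at the cached copy so current_rps stays valid even
	 * if the source state is later freed or reused */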
5245 pi->current_rps = *rps;
5246 pi->current_ps = *new_ps;
5247 pi->current_rps.ps_priv = &pi->current_ps;
5248 adev->pm.dpm.current_ps = &pi->current_rps;
5249}
5250
5251static void ci_update_requested_ps(struct amdgpu_device *adev,
5252 struct amdgpu_ps *rps)
5253{
5254 struct ci_ps *new_ps = ci_get_ps(rps);
5255 struct ci_power_info *pi = ci_get_pi(adev);
5256
5257 pi->requested_rps = *rps;
5258 pi->requested_ps = *new_ps;
5259 pi->requested_rps.ps_priv = &pi->requested_ps;
5260 adev->pm.dpm.requested_ps = &pi->requested_rps;
5261}
5262
5263static int ci_dpm_pre_set_power_state(void *handle)
5264{
5265 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5266 struct ci_power_info *pi = ci_get_pi(adev);
5267 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5268 struct amdgpu_ps *new_ps = &requested_ps;
5269
5270 ci_update_requested_ps(adev, new_ps);
5271
5272 ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5273
5274 return 0;
5275}
5276
5277static void ci_dpm_post_set_power_state(void *handle)
5278{
5279 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5280 struct ci_power_info *pi = ci_get_pi(adev);
5281 struct amdgpu_ps *new_ps = &pi->requested_rps;
5282
5283 ci_update_current_ps(adev, new_ps);
5284}
5285
5287static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5288{
5289 ci_read_clock_registers(adev);
5290 ci_enable_acpi_power_management(adev);
5291 ci_init_sclk_t(adev);
5292}
5293
5294static int ci_dpm_enable(struct amdgpu_device *adev)
5295{
5296 struct ci_power_info *pi = ci_get_pi(adev);
5297 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5298 int ret;
5299
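	/* bring-up order: voltage tables and board setup first, then SMC
	 * firmware upload/start, then the individual DPM, DIDT, CAC and
	 * power-containment features, and finally the thermal controller */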
5300 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5301 ci_enable_voltage_control(adev);
5302 ret = ci_construct_voltage_tables(adev);
5303 if (ret) {
5304 DRM_ERROR("ci_construct_voltage_tables failed\n");
5305 return ret;
5306 }
5307 }
5308 if (pi->caps_dynamic_ac_timing) {
5309 ret = ci_initialize_mc_reg_table(adev);
5310 if (ret)
5311 pi->caps_dynamic_ac_timing = false;
5312 }
5313 if (pi->dynamic_ss)
5314 ci_enable_spread_spectrum(adev, true);
5315 if (pi->thermal_protection)
5316 ci_enable_thermal_protection(adev, true);
5317 ci_program_sstp(adev);
5318 ci_enable_display_gap(adev);
5319 ci_program_vc(adev);
5320 ret = ci_upload_firmware(adev);
5321 if (ret) {
5322 DRM_ERROR("ci_upload_firmware failed\n");
5323 return ret;
5324 }
5325 ret = ci_process_firmware_header(adev);
5326 if (ret) {
5327 DRM_ERROR("ci_process_firmware_header failed\n");
5328 return ret;
5329 }
5330 ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5331 if (ret) {
5332 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5333 return ret;
5334 }
5335 ret = ci_init_smc_table(adev);
5336 if (ret) {
5337 DRM_ERROR("ci_init_smc_table failed\n");
5338 return ret;
5339 }
5340 ret = ci_init_arb_table_index(adev);
5341 if (ret) {
5342 DRM_ERROR("ci_init_arb_table_index failed\n");
5343 return ret;
5344 }
5345 if (pi->caps_dynamic_ac_timing) {
5346 ret = ci_populate_initial_mc_reg_table(adev);
5347 if (ret) {
5348 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5349 return ret;
5350 }
5351 }
5352 ret = ci_populate_pm_base(adev);
5353 if (ret) {
5354 DRM_ERROR("ci_populate_pm_base failed\n");
5355 return ret;
5356 }
5357 ci_dpm_start_smc(adev);
5358 ci_enable_vr_hot_gpio_interrupt(adev);
5359 ret = ci_notify_smc_display_change(adev, false);
5360 if (ret) {
5361 DRM_ERROR("ci_notify_smc_display_change failed\n");
5362 return ret;
5363 }
5364 ci_enable_sclk_control(adev, true);
5365 ret = ci_enable_ulv(adev, true);
5366 if (ret) {
5367 DRM_ERROR("ci_enable_ulv failed\n");
5368 return ret;
5369 }
5370 ret = ci_enable_ds_master_switch(adev, true);
5371 if (ret) {
5372 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5373 return ret;
5374 }
5375 ret = ci_start_dpm(adev);
5376 if (ret) {
5377 DRM_ERROR("ci_start_dpm failed\n");
5378 return ret;
5379 }
5380 ret = ci_enable_didt(adev, true);
5381 if (ret) {
5382 DRM_ERROR("ci_enable_didt failed\n");
5383 return ret;
5384 }
5385 ret = ci_enable_smc_cac(adev, true);
5386 if (ret) {
5387 DRM_ERROR("ci_enable_smc_cac failed\n");
5388 return ret;
5389 }
5390 ret = ci_enable_power_containment(adev, true);
5391 if (ret) {
5392 DRM_ERROR("ci_enable_power_containment failed\n");
5393 return ret;
5394 }
5395
5396 ret = ci_power_control_set_level(adev);
5397 if (ret) {
5398 DRM_ERROR("ci_power_control_set_level failed\n");
5399 return ret;
5400 }
5401
5402 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5403
5404 ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5405 if (ret) {
5406 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5407 return ret;
5408 }
5409
5410 ci_thermal_start_thermal_controller(adev);
5411
5412 ci_update_current_ps(adev, boot_ps);
5413
5414 return 0;
5415}
5416
5417static void ci_dpm_disable(struct amdgpu_device *adev)
5418{
5419 struct ci_power_info *pi = ci_get_pi(adev);
5420 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5421
5422 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5423 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5424 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5425 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5426
5427 ci_dpm_powergate_uvd(adev, true);
5428
5429 if (!amdgpu_ci_is_smc_running(adev))
5430 return;
5431
5432 ci_thermal_stop_thermal_controller(adev);
5433
5434 if (pi->thermal_protection)
5435 ci_enable_thermal_protection(adev, false);
5436 ci_enable_power_containment(adev, false);
5437 ci_enable_smc_cac(adev, false);
5438 ci_enable_didt(adev, false);
5439 ci_enable_spread_spectrum(adev, false);
5440 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5441 ci_stop_dpm(adev);
5442 ci_enable_ds_master_switch(adev, false);
5443 ci_enable_ulv(adev, false);
5444 ci_clear_vc(adev);
5445 ci_reset_to_default(adev);
5446 ci_dpm_stop_smc(adev);
5447 ci_force_switch_to_arb_f0(adev);
5448 ci_enable_thermal_based_sclk_dpm(adev, false);
5449
5450 ci_update_current_ps(adev, boot_ps);
5451}
5452
5453static int ci_dpm_set_power_state(void *handle)
5454{
5455 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5456 struct ci_power_info *pi = ci_get_pi(adev);
5457 struct amdgpu_ps *new_ps = &pi->requested_rps;
5458 struct amdgpu_ps *old_ps = &pi->current_rps;
5459 int ret;
5460
5461 ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5462 if (pi->pcie_performance_request)
5463 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5464 ret = ci_freeze_sclk_mclk_dpm(adev);
5465 if (ret) {
5466 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5467 return ret;
5468 }
5469 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5470 if (ret) {
5471 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5472 return ret;
5473 }
5474 ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5475 if (ret) {
5476 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5477 return ret;
5478 }
5479
5480 ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5481 if (ret) {
5482 DRM_ERROR("ci_update_vce_dpm failed\n");
5483 return ret;
5484 }
5485
5486 ret = ci_update_sclk_t(adev);
5487 if (ret) {
5488 DRM_ERROR("ci_update_sclk_t failed\n");
5489 return ret;
5490 }
5491 if (pi->caps_dynamic_ac_timing) {
5492 ret = ci_update_and_upload_mc_reg_table(adev);
5493 if (ret) {
5494 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5495 return ret;
5496 }
5497 }
5498 ret = ci_program_memory_timing_parameters(adev);
5499 if (ret) {
5500 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5501 return ret;
5502 }
5503 ret = ci_unfreeze_sclk_mclk_dpm(adev);
5504 if (ret) {
5505 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5506 return ret;
5507 }
5508 ret = ci_upload_dpm_level_enable_mask(adev);
5509 if (ret) {
5510 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5511 return ret;
5512 }
5513 if (pi->pcie_performance_request)
5514 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5515
5516 return 0;
5517}
5518
5519#if 0
5520static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5521{
5522 ci_set_boot_state(adev);
5523}
5524#endif
5525
5526static void ci_dpm_display_configuration_changed(void *handle)
5527{
5528 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5529
5530 ci_program_display_gap(adev);
5531}
5532
5533union power_info {
5534 struct _ATOM_POWERPLAY_INFO info;
5535 struct _ATOM_POWERPLAY_INFO_V2 info_2;
5536 struct _ATOM_POWERPLAY_INFO_V3 info_3;
5537 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5538 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5539 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5540};
5541
5542union pplib_clock_info {
5543 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5544 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5545 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5546 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5547 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5548 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5549};
5550
5551union pplib_power_state {
5552 struct _ATOM_PPLIB_STATE v1;
5553 struct _ATOM_PPLIB_STATE_V2 v2;
5554};
5555
5556static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5557 struct amdgpu_ps *rps,
5558 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5559 u8 table_rev)
5560{
5561 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5562 rps->class = le16_to_cpu(non_clock_info->usClassification);
5563 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5564
5565	if (table_rev > ATOM_PPLIB_NONCLOCKINFO_VER1) {
5566 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5567 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5568 } else {
5569 rps->vclk = 0;
5570 rps->dclk = 0;
5571 }
5572
5573 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5574 adev->pm.dpm.boot_ps = rps;
5575 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5576 adev->pm.dpm.uvd_ps = rps;
5577}
5578
5579static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5580 struct amdgpu_ps *rps, int index,
5581 union pplib_clock_info *clock_info)
5582{
5583 struct ci_power_info *pi = ci_get_pi(adev);
5584 struct ci_ps *ps = ci_get_ps(rps);
5585 struct ci_pl *pl = &ps->performance_levels[index];
5586
5587 ps->performance_level_count = index + 1;
5588
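	/* engine/memory clocks are split across a 16-bit low word and an
	 * 8-bit high byte in the ATOM table; units appear to be 10 kHz,
	 * consistent with the /100 MHz conversion in ci_dpm_print_clock_levels */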
5589 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5590 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5591 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5592 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5593
5594 pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5595 pi->sys_pcie_mask,
5596 pi->vbios_boot_state.pcie_gen_bootup_value,
5597 clock_info->ci.ucPCIEGen);
5598 pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5599 pi->vbios_boot_state.pcie_lane_bootup_value,
5600 le16_to_cpu(clock_info->ci.usPCIELane));
5601
5602 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5603 pi->acpi_pcie_gen = pl->pcie_gen;
5604 }
5605
5606 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5607 pi->ulv.supported = true;
5608 pi->ulv.pl = *pl;
5609 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5610 }
5611
5612 /* patch up boot state */
5613 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5614 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5615 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5616 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5617 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5618 }
5619
5620 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5621 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5622 pi->use_pcie_powersaving_levels = true;
5623 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5624 pi->pcie_gen_powersaving.max = pl->pcie_gen;
5625 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5626 pi->pcie_gen_powersaving.min = pl->pcie_gen;
5627 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5628 pi->pcie_lane_powersaving.max = pl->pcie_lane;
5629 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5630 pi->pcie_lane_powersaving.min = pl->pcie_lane;
5631 break;
5632 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5633 pi->use_pcie_performance_levels = true;
5634 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5635 pi->pcie_gen_performance.max = pl->pcie_gen;
5636 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5637 pi->pcie_gen_performance.min = pl->pcie_gen;
5638 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5639 pi->pcie_lane_performance.max = pl->pcie_lane;
5640 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5641 pi->pcie_lane_performance.min = pl->pcie_lane;
5642 break;
5643 default:
5644 break;
5645 }
5646}
5647
5648static int ci_parse_power_table(struct amdgpu_device *adev)
5649{
5650 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5651 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5652 union pplib_power_state *power_state;
5653 int i, j, k, non_clock_array_index, clock_array_index;
5654 union pplib_clock_info *clock_info;
5655 struct _StateArray *state_array;
5656 struct _ClockInfoArray *clock_info_array;
5657 struct _NonClockInfoArray *non_clock_info_array;
5658 union power_info *power_info;
5659 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5660 u16 data_offset;
5661 u8 frev, crev;
5662 u8 *power_state_offset;
5663 struct ci_ps *ps;
5664
5665 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5666 &frev, &crev, &data_offset))
5667 return -EINVAL;
5668 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5669
5670 amdgpu_add_thermal_controller(adev);
5671
5672 state_array = (struct _StateArray *)
5673 (mode_info->atom_context->bios + data_offset +
5674 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5675 clock_info_array = (struct _ClockInfoArray *)
5676 (mode_info->atom_context->bios + data_offset +
5677 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5678 non_clock_info_array = (struct _NonClockInfoArray *)
5679 (mode_info->atom_context->bios + data_offset +
5680 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5681
5682 adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5683 sizeof(struct amdgpu_ps),
5684 GFP_KERNEL);
5685 if (!adev->pm.dpm.ps)
5686 return -ENOMEM;
5687 power_state_offset = (u8 *)state_array->states;
5688 for (i = 0; i < state_array->ucNumEntries; i++) {
5689 u8 *idx;
5690 power_state = (union pplib_power_state *)power_state_offset;
5691 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5692 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5693 &non_clock_info_array->nonClockInfo[non_clock_array_index];
5694		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5695		if (ps == NULL) {
			/* free the ps_priv payloads already attached to earlier states */
			for (j = 0; j < i; j++)
				kfree(adev->pm.dpm.ps[j].ps_priv);
5696			kfree(adev->pm.dpm.ps);
5697			return -ENOMEM;
5698		}
5699 adev->pm.dpm.ps[i].ps_priv = ps;
5700 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5701 non_clock_info,
5702 non_clock_info_array->ucEntrySize);
5703 k = 0;
5704 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5705 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5706 clock_array_index = idx[j];
5707 if (clock_array_index >= clock_info_array->ucNumEntries)
5708 continue;
5709 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5710 break;
5711 clock_info = (union pplib_clock_info *)
5712 ((u8 *)&clock_info_array->clockInfo[0] +
5713 (clock_array_index * clock_info_array->ucEntrySize));
5714 ci_parse_pplib_clock_info(adev,
5715 &adev->pm.dpm.ps[i], k,
5716 clock_info);
5717 k++;
5718 }
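		/* a v2 state entry is 2 header bytes (ucNumDPMLevels and
		 * nonClockInfoIndex) followed by one clock-info index byte
		 * per DPM level */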
5719 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5720 }
5721 adev->pm.dpm.num_ps = state_array->ucNumEntries;
5722
5723 /* fill in the vce power states */
5724 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
5725 u32 sclk, mclk;
5726 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5727 clock_info = (union pplib_clock_info *)
5728 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5729 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5730 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5731 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5732 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5733 adev->pm.dpm.vce_states[i].sclk = sclk;
5734 adev->pm.dpm.vce_states[i].mclk = mclk;
5735 }
5736
5737 return 0;
5738}
5739
5740static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5741 struct ci_vbios_boot_state *boot_state)
5742{
5743 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5744 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5745 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5746 u8 frev, crev;
5747 u16 data_offset;
5748
5749 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5750 &frev, &crev, &data_offset)) {
5751 firmware_info =
5752 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5753 data_offset);
5754 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5755 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5756 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5757 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5758 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5759 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5760 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5761
5762 return 0;
5763 }
5764 return -EINVAL;
5765}
5766
5767static void ci_dpm_fini(struct amdgpu_device *adev)
5768{
5769 int i;
5770
5771 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5772 kfree(adev->pm.dpm.ps[i].ps_priv);
5773 }
5774 kfree(adev->pm.dpm.ps);
5775 kfree(adev->pm.dpm.priv);
5776 kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5777 amdgpu_free_extended_power_table(adev);
5778}
5779
5780/**
5781 * ci_dpm_init_microcode - load ucode images from disk
5782 *
5783 * @adev: amdgpu_device pointer
5784 *
5785 * Use the firmware interface to load the ucode images into
5786 * the driver (the images are not yet programmed into the hw).
5787 * Returns 0 on success, error on failure.
5788 */
5789static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5790{
5791 const char *chip_name;
5792 char fw_name[30];
5793 int err;
5794
5795 DRM_DEBUG("\n");
5796
5797 switch (adev->asic_type) {
5798 case CHIP_BONAIRE:
5799 if ((adev->pdev->revision == 0x80) ||
5800 (adev->pdev->revision == 0x81) ||
5801 (adev->pdev->device == 0x665f))
5802 chip_name = "bonaire_k";
5803 else
5804 chip_name = "bonaire";
5805 break;
5806 case CHIP_HAWAII:
5807 if (adev->pdev->revision == 0x80)
5808 chip_name = "hawaii_k";
5809 else
5810 chip_name = "hawaii";
5811 break;
5812 case CHIP_KAVERI:
5813 case CHIP_KABINI:
5814 case CHIP_MULLINS:
5815	default:
		BUG();
5816	}
5817
5818 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
5819 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5820 if (err)
5821 goto out;
5822 err = amdgpu_ucode_validate(adev->pm.fw);
5823
5824out:
5825 if (err) {
5826 pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
5827 release_firmware(adev->pm.fw);
5828 adev->pm.fw = NULL;
5829 }
5830 return err;
5831}
5832
5833static int ci_dpm_init(struct amdgpu_device *adev)
5834{
5835 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5836 SMU7_Discrete_DpmTable *dpm_table;
5837 struct amdgpu_gpio_rec gpio;
5838 u16 data_offset, size;
5839 u8 frev, crev;
5840 struct ci_power_info *pi;
5841 int ret;
5842
5843 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5844 if (pi == NULL)
5845 return -ENOMEM;
5846 adev->pm.dpm.priv = pi;
5847
5848 pi->sys_pcie_mask =
5849 adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
5850
5851 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5852
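	/* performance/powersaving ranges start out inverted (max at the
	 * lowest value, min at the highest) so the per-state scan in
	 * ci_parse_pplib_clock_info can widen them with simple min/max tests */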
5853 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5854 pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5855 pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5856 pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5857
5858 pi->pcie_lane_performance.max = 0;
5859 pi->pcie_lane_performance.min = 16;
5860 pi->pcie_lane_powersaving.max = 0;
5861 pi->pcie_lane_powersaving.min = 16;
5862
5863 ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5864 if (ret) {
5865 ci_dpm_fini(adev);
5866 return ret;
5867 }
5868
5869 ret = amdgpu_get_platform_caps(adev);
5870 if (ret) {
5871 ci_dpm_fini(adev);
5872 return ret;
5873 }
5874
5875 ret = amdgpu_parse_extended_power_table(adev);
5876 if (ret) {
5877 ci_dpm_fini(adev);
5878 return ret;
5879 }
5880
5881 ret = ci_parse_power_table(adev);
5882 if (ret) {
5883 ci_dpm_fini(adev);
5884 return ret;
5885 }
5886
5887 pi->dll_default_on = false;
5888 pi->sram_end = SMC_RAM_END;
5889
5890 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5891 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5892 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5893 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5894 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5895 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5896 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5897 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5898
5899 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5900
5901 pi->sclk_dpm_key_disabled = 0;
5902 pi->mclk_dpm_key_disabled = 0;
5903 pi->pcie_dpm_key_disabled = 0;
5904 pi->thermal_sclk_dpm_enabled = 0;
5905
5906 if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
5907 pi->caps_sclk_ds = true;
5908 else
5909 pi->caps_sclk_ds = false;
5910
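	/* thresholds are in the driver's 10 kHz clock units, so 40000
	 * corresponds to 400 MHz (assumed, consistent with the rest of
	 * the pptable) */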
5911 pi->mclk_strobe_mode_threshold = 40000;
5912 pi->mclk_stutter_mode_threshold = 40000;
5913 pi->mclk_edc_enable_threshold = 40000;
5914 pi->mclk_edc_wr_enable_threshold = 40000;
5915
5916 ci_initialize_powertune_defaults(adev);
5917
5918 pi->caps_fps = false;
5919
5920 pi->caps_sclk_throttle_low_notification = false;
5921
5922 pi->caps_uvd_dpm = true;
5923 pi->caps_vce_dpm = true;
5924
5925 ci_get_leakage_voltages(adev);
5926 ci_patch_dependency_tables_with_leakage(adev);
5927 ci_set_private_data_variables_based_on_pptable(adev);
5928
5929 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5930 kcalloc(4,
5931 sizeof(struct amdgpu_clock_voltage_dependency_entry),
5932 GFP_KERNEL);
5933 if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5934 ci_dpm_fini(adev);
5935 return -ENOMEM;
5936 }
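	/* built-in fallback table: dispclk (10 kHz units, assumed) mapped to
	 * the minimum vddc in mV, e.g. 36000 (360 MHz) -> 720 mV */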
5937 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5938 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5939 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5940 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5941 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5942 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5943 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5944 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5945 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5946
5947 adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5948 adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5949 adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5950
5951 adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5952 adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5953 adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5954 adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5955
5956 if (adev->asic_type == CHIP_HAWAII) {
5957 pi->thermal_temp_setting.temperature_low = 94500;
5958 pi->thermal_temp_setting.temperature_high = 95000;
5959 pi->thermal_temp_setting.temperature_shutdown = 104000;
5960 } else {
5961 pi->thermal_temp_setting.temperature_low = 99500;
5962 pi->thermal_temp_setting.temperature_high = 100000;
5963 pi->thermal_temp_setting.temperature_shutdown = 104000;
5964 }
5965
5966 pi->uvd_enabled = false;
5967
5968 dpm_table = &pi->smc_state_table;
5969
5970 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5971 if (gpio.valid) {
5972 dpm_table->VRHotGpio = gpio.shift;
5973 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5974 } else {
5975 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5976 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5977 }
5978
5979 gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5980 if (gpio.valid) {
5981 dpm_table->AcDcGpio = gpio.shift;
5982 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5983 } else {
5984 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5985 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5986 }
5987
5988 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
5989 if (gpio.valid) {
5990 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
5991
5992 switch (gpio.shift) {
5993 case 0:
5994 tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5995 tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5996 break;
5997 case 1:
5998 tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5999 tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
6000 break;
6001 case 2:
6002 tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
6003 break;
6004 case 3:
6005 tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
6006 break;
6007 case 4:
6008 tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
6009 break;
6010 default:
6011 DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
6012 break;
6013 }
6014 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
6015 }
6016
6017 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6018 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6019 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6020 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
6021 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6022 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
6023 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6024
6025 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
6026 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
6027 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6028 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
6029 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6030 else
6031 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
6032 }
6033
6034 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6035 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6036 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6037 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6038 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6039 else
6040 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6041 }
6042
6043 pi->vddc_phase_shed_control = true;
6044
6045#if defined(CONFIG_ACPI)
6046 pi->pcie_performance_request =
6047 amdgpu_acpi_is_pcie_performance_request_supported(adev);
6048#else
6049 pi->pcie_performance_request = false;
6050#endif
6051
6052 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6053 &frev, &crev, &data_offset)) {
6054 pi->caps_sclk_ss_support = true;
6055 pi->caps_mclk_ss_support = true;
6056 pi->dynamic_ss = true;
6057 } else {
6058 pi->caps_sclk_ss_support = false;
6059 pi->caps_mclk_ss_support = false;
6060 pi->dynamic_ss = true;
6061 }
6062
6063 if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6064 pi->thermal_protection = true;
6065 else
6066 pi->thermal_protection = false;
6067
6068 pi->caps_dynamic_ac_timing = true;
6069
6070 pi->uvd_power_gated = true;
6071
6072 /* make sure dc limits are valid */
6073 if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6074 (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6075 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6076 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6077
6078 pi->fan_ctrl_is_in_default_mode = true;
6079
6080 return 0;
6081}
6082
6083static void
6084ci_dpm_debugfs_print_current_performance_level(void *handle,
6085 struct seq_file *m)
6086{
6087 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6088 struct ci_power_info *pi = ci_get_pi(adev);
6089 struct amdgpu_ps *rps = &pi->current_rps;
6090 u32 sclk = ci_get_average_sclk_freq(adev);
6091 u32 mclk = ci_get_average_mclk_freq(adev);
6092 u32 activity_percent = 50;
6093 int ret;
6094
6095 ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6096 &activity_percent);
6097
6098 if (ret == 0) {
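		/* AverageGraphicsA looks like a Q8.8 fixed-point fraction:
		 * add 0x80 to round, shift down to an integer percentage,
		 * then clamp at 100 */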
6099 activity_percent += 0x80;
6100 activity_percent >>= 8;
6101 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6102 }
6103
6104 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
6105 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6106 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
6107 sclk, mclk);
6108 seq_printf(m, "GPU load: %u %%\n", activity_percent);
6109}
6110
6111static void ci_dpm_print_power_state(void *handle, void *current_ps)
6112{
6113 struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
6114 struct ci_ps *ps = ci_get_ps(rps);
6115 struct ci_pl *pl;
6116 int i;
6117 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6118
6119 amdgpu_dpm_print_class_info(rps->class, rps->class2);
6120 amdgpu_dpm_print_cap_info(rps->caps);
6121 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6122 for (i = 0; i < ps->performance_level_count; i++) {
6123 pl = &ps->performance_levels[i];
6124 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6125 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6126 }
6127 amdgpu_dpm_print_ps_status(adev, rps);
6128}
6129
6130static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
6131 const struct ci_pl *ci_cpl2)
6132{
6133 return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
6134 (ci_cpl1->sclk == ci_cpl2->sclk) &&
6135 (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
6136 (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
6137}
6138
6139static int ci_check_state_equal(void *handle,
6140 void *current_ps,
6141 void *request_ps,
6142 bool *equal)
6143{
6144 struct ci_ps *ci_cps;
6145 struct ci_ps *ci_rps;
6146 int i;
6147 struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
6148 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
6149 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6150
6151 if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
6152 return -EINVAL;
6153
6154	ci_cps = ci_get_ps(cps);
6155	ci_rps = ci_get_ps(rps);
6156
6157 if (ci_cps == NULL) {
6158 *equal = false;
6159 return 0;
6160 }
6161
6162 if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
6164 *equal = false;
6165 return 0;
6166 }
6167
6168 for (i = 0; i < ci_cps->performance_level_count; i++) {
6169 if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
6170 &(ci_rps->performance_levels[i]))) {
6171 *equal = false;
6172 return 0;
6173 }
6174 }
6175
6176	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
6177 *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
6178 *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
6179
6180 return 0;
6181}
6182
6183static u32 ci_dpm_get_sclk(void *handle, bool low)
6184{
6185 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6186 struct ci_power_info *pi = ci_get_pi(adev);
6187 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6188
6189 if (low)
6190 return requested_state->performance_levels[0].sclk;
6191 else
6192 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6193}
6194
6195static u32 ci_dpm_get_mclk(void *handle, bool low)
6196{
6197 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6198 struct ci_power_info *pi = ci_get_pi(adev);
6199 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6200
6201 if (low)
6202 return requested_state->performance_levels[0].mclk;
6203 else
6204 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6205}
6206
6207/* get temperature in millidegrees */
6208static int ci_dpm_get_temp(void *handle)
6209{
6210 u32 temp;
6211 int actual_temp = 0;
6212 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6213
6214 temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6215 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6216
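	/* CTF_TEMP is a 9-bit field; bit 9 appears to flag an out-of-range
	 * reading, which is clamped to the 255 C maximum */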
6217 if (temp & 0x200)
6218 actual_temp = 255;
6219 else
6220 actual_temp = temp & 0x1ff;
6221
6222 actual_temp = actual_temp * 1000;
6223
6224 return actual_temp;
6225}
6226
6227static int ci_set_temperature_range(struct amdgpu_device *adev)
6228{
6229 int ret;
6230
6231 ret = ci_thermal_enable_alert(adev, false);
6232 if (ret)
6233 return ret;
6234 ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6235 CISLANDS_TEMP_RANGE_MAX);
6236 if (ret)
6237 return ret;
6238	/* re-enable the alert; any failure is propagated to the caller */
6239	return ci_thermal_enable_alert(adev, true);
6242}
6243
6244static int ci_dpm_early_init(void *handle)
6245{
6246 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6247
6248 adev->powerplay.pp_funcs = &ci_dpm_funcs;
6249 adev->powerplay.pp_handle = adev;
6250 ci_dpm_set_irq_funcs(adev);
6251
6252 return 0;
6253}
6254
6255static int ci_dpm_late_init(void *handle)
6256{
6257 int ret;
6258 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6259
6260 if (!adev->pm.dpm_enabled)
6261 return 0;
6262
6263 /* init the sysfs and debugfs files late */
6264 ret = amdgpu_pm_sysfs_init(adev);
6265 if (ret)
6266 return ret;
6267
6268 ret = ci_set_temperature_range(adev);
6269 if (ret)
6270 return ret;
6271
6272 return 0;
6273}
6274
6275static int ci_dpm_sw_init(void *handle)
6276{
6277 int ret;
6278 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6279
6280 ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
6281 &adev->pm.dpm.thermal.irq);
6282 if (ret)
6283 return ret;
6284
6285 ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
6286 &adev->pm.dpm.thermal.irq);
6287 if (ret)
6288 return ret;
6289
6290 /* default to balanced state */
6291 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6292 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
6293 adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
6294 adev->pm.default_sclk = adev->clock.default_sclk;
6295 adev->pm.default_mclk = adev->clock.default_mclk;
6296 adev->pm.current_sclk = adev->clock.default_sclk;
6297 adev->pm.current_mclk = adev->clock.default_mclk;
6298 adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6299
6300 ret = ci_dpm_init_microcode(adev);
6301 if (ret)
6302 return ret;
6303
6304 if (amdgpu_dpm == 0)
6305 return 0;
6306
6307 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6308 mutex_lock(&adev->pm.mutex);
6309 ret = ci_dpm_init(adev);
6310 if (ret)
6311 goto dpm_failed;
6312 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6313 if (amdgpu_dpm == 1)
6314 amdgpu_pm_print_power_states(adev);
6315 mutex_unlock(&adev->pm.mutex);
6316 DRM_INFO("amdgpu: dpm initialized\n");
6317
6318 return 0;
6319
6320dpm_failed:
6321 ci_dpm_fini(adev);
6322 mutex_unlock(&adev->pm.mutex);
6323 DRM_ERROR("amdgpu: dpm initialization failed\n");
6324 return ret;
6325}
6326
6327static int ci_dpm_sw_fini(void *handle)
6328{
6329 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6330
6331 flush_work(&adev->pm.dpm.thermal.work);
6332
6333 mutex_lock(&adev->pm.mutex);
6334 ci_dpm_fini(adev);
6335 mutex_unlock(&adev->pm.mutex);
6336
6337 release_firmware(adev->pm.fw);
6338 adev->pm.fw = NULL;
6339
6340 return 0;
6341}
6342
6343static int ci_dpm_hw_init(void *handle)
6344{
6345 int ret;
6346
6347 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6348
6349 if (!amdgpu_dpm) {
6350 ret = ci_upload_firmware(adev);
6351 if (ret) {
6352 DRM_ERROR("ci_upload_firmware failed\n");
6353 return ret;
6354 }
6355 ci_dpm_start_smc(adev);
6356 return 0;
6357 }
6358
6359 mutex_lock(&adev->pm.mutex);
6360 ci_dpm_setup_asic(adev);
6361 ret = ci_dpm_enable(adev);
6362 if (ret)
6363 adev->pm.dpm_enabled = false;
6364 else
6365 adev->pm.dpm_enabled = true;
6366 mutex_unlock(&adev->pm.mutex);
6367
6368 return ret;
6369}
6370
6371static int ci_dpm_hw_fini(void *handle)
6372{
6373 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6374
6375 if (adev->pm.dpm_enabled) {
6376 mutex_lock(&adev->pm.mutex);
6377 ci_dpm_disable(adev);
6378 mutex_unlock(&adev->pm.mutex);
6379 } else {
6380 ci_dpm_stop_smc(adev);
6381 }
6382
6383 return 0;
6384}
6385
6386static int ci_dpm_suspend(void *handle)
6387{
6388 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6389
6390 if (adev->pm.dpm_enabled) {
6391 mutex_lock(&adev->pm.mutex);
6392 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6393 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
6394 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6395 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
6396 adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
6397 adev->pm.dpm.last_state = adev->pm.dpm.state;
6398 adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
6399 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
6400 mutex_unlock(&adev->pm.mutex);
6401 amdgpu_pm_compute_clocks(adev);
6402
6403 }
6404
6405 return 0;
6406}
6407
6408static int ci_dpm_resume(void *handle)
6409{
6410 int ret;
6411 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6412
6413 if (adev->pm.dpm_enabled) {
6414 /* asic init will reset to the boot state */
6415 mutex_lock(&adev->pm.mutex);
6416 ci_dpm_setup_asic(adev);
6417 ret = ci_dpm_enable(adev);
6418 if (ret)
6419 adev->pm.dpm_enabled = false;
6420 else
6421 adev->pm.dpm_enabled = true;
6422 adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
6423 adev->pm.dpm.state = adev->pm.dpm.last_state;
6424 mutex_unlock(&adev->pm.mutex);
6425 if (adev->pm.dpm_enabled)
6426 amdgpu_pm_compute_clocks(adev);
6427 }
6428 return 0;
6429}
6430
6431static bool ci_dpm_is_idle(void *handle)
6432{
6433 /* XXX */
6434 return true;
6435}
6436
6437static int ci_dpm_wait_for_idle(void *handle)
6438{
6439 /* XXX */
6440 return 0;
6441}
6442
6443static int ci_dpm_soft_reset(void *handle)
6444{
6445 return 0;
6446}
6447
6448static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6449 struct amdgpu_irq_src *source,
6450 unsigned type,
6451 enum amdgpu_interrupt_state state)
6452{
6453 u32 cg_thermal_int;
6454
6455 switch (type) {
6456 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
6457 switch (state) {
6458 case AMDGPU_IRQ_STATE_DISABLE:
6459 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6460 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6461 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6462 break;
6463 case AMDGPU_IRQ_STATE_ENABLE:
6464 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6465 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6466 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6467 break;
6468 default:
6469 break;
6470 }
6471 break;
6472
6473 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
6474 switch (state) {
6475 case AMDGPU_IRQ_STATE_DISABLE:
6476 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6477 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6478 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6479 break;
6480 case AMDGPU_IRQ_STATE_ENABLE:
6481 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6482 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6483 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6484 break;
6485 default:
6486 break;
6487 }
6488 break;
6489
6490 default:
6491 break;
6492 }
6493 return 0;
6494}
6495
6496static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
6497 struct amdgpu_irq_src *source,
6498 struct amdgpu_iv_entry *entry)
6499{
6500 bool queue_thermal = false;
6501
6502 if (entry == NULL)
6503 return -EINVAL;
6504
6505 switch (entry->src_id) {
6506 case 230: /* thermal low to high */
6507 DRM_DEBUG("IH: thermal low to high\n");
6508 adev->pm.dpm.thermal.high_to_low = false;
6509 queue_thermal = true;
6510 break;
6511 case 231: /* thermal high to low */
6512 DRM_DEBUG("IH: thermal high to low\n");
6513 adev->pm.dpm.thermal.high_to_low = true;
6514 queue_thermal = true;
6515 break;
6516 default:
6517 break;
6518 }
6519
6520 if (queue_thermal)
6521 schedule_work(&adev->pm.dpm.thermal.work);
6522
6523 return 0;
6524}
6525
6526static int ci_dpm_set_clockgating_state(void *handle,
6527 enum amd_clockgating_state state)
6528{
6529 return 0;
6530}
6531
6532static int ci_dpm_set_powergating_state(void *handle,
6533 enum amd_powergating_state state)
6534{
6535 return 0;
6536}
6537
6538static int ci_dpm_print_clock_levels(void *handle,
6539 enum pp_clock_type type, char *buf)
6540{
6541 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6542 struct ci_power_info *pi = ci_get_pi(adev);
6543 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
6544 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
6545 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
6546
6547 int i, now, size = 0;
6548 uint32_t clock, pcie_speed;
6549
6550 switch (type) {
6551 case PP_SCLK:
6552 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
6553 clock = RREG32(mmSMC_MSG_ARG_0);
6554
6555 for (i = 0; i < sclk_table->count; i++) {
6556 if (clock > sclk_table->dpm_levels[i].value)
6557 continue;
6558 break;
6559 }
6560 now = i;
6561
6562 for (i = 0; i < sclk_table->count; i++)
6563			size += sprintf(buf + size, "%d: %uMHz %s\n",
6564 i, sclk_table->dpm_levels[i].value / 100,
6565 (i == now) ? "*" : "");
6566 break;
6567 case PP_MCLK:
6568 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
6569 clock = RREG32(mmSMC_MSG_ARG_0);
6570
6571 for (i = 0; i < mclk_table->count; i++) {
6572 if (clock > mclk_table->dpm_levels[i].value)
6573 continue;
6574 break;
6575 }
6576 now = i;
6577
6578 for (i = 0; i < mclk_table->count; i++)
6579			size += sprintf(buf + size, "%d: %uMHz %s\n",
6580 i, mclk_table->dpm_levels[i].value / 100,
6581 (i == now) ? "*" : "");
6582 break;
6583 case PP_PCIE:
6584 pcie_speed = ci_get_current_pcie_speed(adev);
6585 for (i = 0; i < pcie_table->count; i++) {
6586 if (pcie_speed != pcie_table->dpm_levels[i].value)
6587 continue;
6588 break;
6589 }
6590 now = i;
6591
6592 for (i = 0; i < pcie_table->count; i++)
6593 size += sprintf(buf + size, "%d: %s %s\n", i,
6594 (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
6595 (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
6596 (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
6597 (i == now) ? "*" : "");
6598 break;
6599 default:
6600 break;
6601 }
6602
6603 return size;
6604}
6605
6606static int ci_dpm_force_clock_level(void *handle,
6607 enum pp_clock_type type, uint32_t mask)
6608{
6609 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6610 struct ci_power_info *pi = ci_get_pi(adev);
6611
6612 if (adev->pm.dpm.forced_level != AMD_DPM_FORCED_LEVEL_MANUAL)
6613 return -EINVAL;
6614
6615 if (mask == 0)
6616 return -EINVAL;
6617
6618 switch (type) {
6619 case PP_SCLK:
6620 if (!pi->sclk_dpm_key_disabled)
6621 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6622 PPSMC_MSG_SCLKDPM_SetEnabledMask,
6623 pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
6624 break;
6625
6626 case PP_MCLK:
6627 if (!pi->mclk_dpm_key_disabled)
6628 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6629 PPSMC_MSG_MCLKDPM_SetEnabledMask,
6630 pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
6631 break;
6632
6633 case PP_PCIE:
6634 {
6635 uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
6636
6637 if (!pi->pcie_dpm_key_disabled) {
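		/* fls(tmp) == ffs(tmp) only when exactly one mask bit is set:
		 * force that single PCIe level; with several bits set, unforce
		 * and let the SMC arbitrate among the enabled levels */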
6638 if (fls(tmp) != ffs(tmp))
6639 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_UnForceLevel);
6640 else
6641 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6642 PPSMC_MSG_PCIeDPM_ForceLevel,
6643 fls(tmp) - 1);
6644 }
6645 break;
6646 }
6647 default:
6648 break;
6649 }
6650
6651 return 0;
6652}
6653
6654static int ci_dpm_get_sclk_od(void *handle)
6655{
6656 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6657 struct ci_power_info *pi = ci_get_pi(adev);
6658 struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
6659 struct ci_single_dpm_table *golden_sclk_table =
6660 &(pi->golden_dpm_table.sclk_table);
6661 int value;
6662
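	/* overdrive is the percentage increase of the top sclk level over the
	 * golden (stock) table, e.g. 1200 MHz vs. 1000 MHz ->
	 * (1200 - 1000) * 100 / 1000 = 20 */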
6663 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
6664 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
6665 100 /
6666 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6667
6668 return value;
6669}
6670
6671static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
6672{
6673 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6674 struct ci_power_info *pi = ci_get_pi(adev);
6675 struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6676 struct ci_single_dpm_table *golden_sclk_table =
6677 &(pi->golden_dpm_table.sclk_table);
6678
6679 if (value > 20)
6680 value = 20;
6681
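	/* inverse of ci_dpm_get_sclk_od: scale the golden top level up by
	 * value percent, e.g. value = 20 takes 1000 MHz to 1200 MHz */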
6682 ps->performance_levels[ps->performance_level_count - 1].sclk =
6683 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
6684 value / 100 +
6685 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6686
6687 return 0;
6688}
6689
6690static int ci_dpm_get_mclk_od(void *handle)
6691{
6692 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6693 struct ci_power_info *pi = ci_get_pi(adev);
6694 struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
6695 struct ci_single_dpm_table *golden_mclk_table =
6696 &(pi->golden_dpm_table.mclk_table);
6697 int value;
6698
6699 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
6700 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
6701 100 /
6702 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6703
6704 return value;
6705}
6706
6707static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
6708{
6709 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6710 struct ci_power_info *pi = ci_get_pi(adev);
6711 struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6712 struct ci_single_dpm_table *golden_mclk_table =
6713 &(pi->golden_dpm_table.mclk_table);
6714
6715 if (value > 20)
6716 value = 20;
6717
6718 ps->performance_levels[ps->performance_level_count - 1].mclk =
6719 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
6720 value / 100 +
6721 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6722
6723 return 0;
6724}
6725
6726static int ci_dpm_read_sensor(void *handle, int idx,
6727 void *value, int *size)
6728{
6729 u32 activity_percent = 50;
6730 int ret;
6731 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6732
6733 /* size must be at least 4 bytes for all sensors */
6734 if (*size < 4)
6735 return -EINVAL;
6736
6737 switch (idx) {
6738 case AMDGPU_PP_SENSOR_GFX_SCLK:
6739 *((uint32_t *)value) = ci_get_average_sclk_freq(adev);
6740 *size = 4;
6741 return 0;
6742 case AMDGPU_PP_SENSOR_GFX_MCLK:
6743 *((uint32_t *)value) = ci_get_average_mclk_freq(adev);
6744 *size = 4;
6745 return 0;
6746 case AMDGPU_PP_SENSOR_GPU_TEMP:
6747 *((uint32_t *)value) = ci_dpm_get_temp(adev);
6748 *size = 4;
6749 return 0;
6750 case AMDGPU_PP_SENSOR_GPU_LOAD:
6751 ret = ci_read_smc_soft_register(adev,
6752 offsetof(SMU7_SoftRegisters,
6753 AverageGraphicsA),
6754 &activity_percent);
6755 if (ret == 0) {
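		/* same rounded Q8.8 -> percent conversion as in the debugfs
		 * path (see ci_dpm_debugfs_print_current_performance_level) */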
6756 activity_percent += 0x80;
6757 activity_percent >>= 8;
6758 activity_percent =
6759 activity_percent > 100 ? 100 : activity_percent;
6760 }
6761 *((uint32_t *)value) = activity_percent;
6762 *size = 4;
6763 return 0;
6764 default:
6765 return -EINVAL;
6766 }
6767}
6768
6769static int ci_set_powergating_by_smu(void *handle,
6770 uint32_t block_type, bool gate)
6771{
6772 switch (block_type) {
6773 case AMD_IP_BLOCK_TYPE_UVD:
6774 ci_dpm_powergate_uvd(handle, gate);
6775 break;
6776 default:
6777 break;
6778 }
6779 return 0;
6780}
6781
6782static const struct amd_ip_funcs ci_dpm_ip_funcs = {
6783 .name = "ci_dpm",
6784 .early_init = ci_dpm_early_init,
6785 .late_init = ci_dpm_late_init,
6786 .sw_init = ci_dpm_sw_init,
6787 .sw_fini = ci_dpm_sw_fini,
6788 .hw_init = ci_dpm_hw_init,
6789 .hw_fini = ci_dpm_hw_fini,
6790 .suspend = ci_dpm_suspend,
6791 .resume = ci_dpm_resume,
6792 .is_idle = ci_dpm_is_idle,
6793 .wait_for_idle = ci_dpm_wait_for_idle,
6794 .soft_reset = ci_dpm_soft_reset,
6795 .set_clockgating_state = ci_dpm_set_clockgating_state,
6796 .set_powergating_state = ci_dpm_set_powergating_state,
6797};
6798
6799const struct amdgpu_ip_block_version ci_smu_ip_block =
6800{
6801 .type = AMD_IP_BLOCK_TYPE_SMC,
6802 .major = 7,
6803 .minor = 0,
6804 .rev = 0,
6805 .funcs = &ci_dpm_ip_funcs,
6806};
6807
6808static const struct amd_pm_funcs ci_dpm_funcs = {
6809 .pre_set_power_state = &ci_dpm_pre_set_power_state,
6810 .set_power_state = &ci_dpm_set_power_state,
6811 .post_set_power_state = &ci_dpm_post_set_power_state,
6812 .display_configuration_changed = &ci_dpm_display_configuration_changed,
6813 .get_sclk = &ci_dpm_get_sclk,
6814 .get_mclk = &ci_dpm_get_mclk,
6815 .print_power_state = &ci_dpm_print_power_state,
6816 .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
6817 .force_performance_level = &ci_dpm_force_performance_level,
6818 .vblank_too_short = &ci_dpm_vblank_too_short,
6819 .set_powergating_by_smu = &ci_set_powergating_by_smu,
6820 .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
6821 .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
6822 .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
6823 .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
6824 .print_clock_levels = ci_dpm_print_clock_levels,
6825 .force_clock_level = ci_dpm_force_clock_level,
6826 .get_sclk_od = ci_dpm_get_sclk_od,
6827 .set_sclk_od = ci_dpm_set_sclk_od,
6828 .get_mclk_od = ci_dpm_get_mclk_od,
6829 .set_mclk_od = ci_dpm_set_mclk_od,
6830 .check_state_equal = ci_check_state_equal,
6831 .get_vce_clock_state = amdgpu_get_vce_clock_state,
6832 .read_sensor = ci_dpm_read_sensor,
6833};
6834
6835static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
6836 .set = ci_dpm_set_interrupt_state,
6837 .process = ci_dpm_process_interrupt,
6838};
6839
6840static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
6841{
6842 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
6843 adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
6844}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
deleted file mode 100644
index 91be2996ae7c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
+++ /dev/null
@@ -1,349 +0,0 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __CI_DPM_H__
24#define __CI_DPM_H__
25
26#include "amdgpu_atombios.h"
27#include "ppsmc.h"
28
29#define SMU__NUM_SCLK_DPM_STATE 8
30#define SMU__NUM_MCLK_DPM_LEVELS 6
31#define SMU__NUM_LCLK_DPM_LEVELS 8
32#define SMU__NUM_PCIE_DPM_LEVELS 8
33#include "smu7_discrete.h"
34
35#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
36
37#define CISLANDS_UNUSED_GPIO_PIN 0x7F
38
39struct ci_pl {
40 u32 mclk;
41 u32 sclk;
42 enum amdgpu_pcie_gen pcie_gen;
43 u16 pcie_lane;
44};
45
46struct ci_ps {
47 u16 performance_level_count;
48 bool dc_compatible;
49 u32 sclk_t;
50 struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
51};
52
53struct ci_dpm_level {
54 bool enabled;
55 u32 value;
56 u32 param1;
57};
58
59#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
60#define MAX_REGULAR_DPM_NUMBER 8
61#define CISLAND_MINIMUM_ENGINE_CLOCK 800
62
63struct ci_single_dpm_table {
64 u32 count;
65 struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
66};
67
68struct ci_dpm_table {
69 struct ci_single_dpm_table sclk_table;
70 struct ci_single_dpm_table mclk_table;
71 struct ci_single_dpm_table pcie_speed_table;
72 struct ci_single_dpm_table vddc_table;
73 struct ci_single_dpm_table vddci_table;
74 struct ci_single_dpm_table mvdd_table;
75};
76
77struct ci_mc_reg_entry {
78 u32 mclk_max;
79 u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
80};
81
82struct ci_mc_reg_table {
83 u8 last;
84 u8 num_entries;
85 u16 valid_flag;
86 struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
87 SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
88};
89
90struct ci_ulv_parm {
92 bool supported;
93 u32 cg_ulv_parameter;
94 u32 volt_change_delay;
95 struct ci_pl pl;
96};
97
98#define CISLANDS_MAX_LEAKAGE_COUNT 8
99
100struct ci_leakage_voltage {
101 u16 count;
102 u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
103 u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
104};
105
106struct ci_dpm_level_enable_mask {
107 u32 uvd_dpm_enable_mask;
108 u32 vce_dpm_enable_mask;
109 u32 acp_dpm_enable_mask;
110 u32 samu_dpm_enable_mask;
111 u32 sclk_dpm_enable_mask;
112 u32 mclk_dpm_enable_mask;
113 u32 pcie_dpm_enable_mask;
114};
115
116struct ci_vbios_boot_state {
118 u16 mvdd_bootup_value;
119 u16 vddc_bootup_value;
120 u16 vddci_bootup_value;
121 u32 sclk_bootup_value;
122 u32 mclk_bootup_value;
123 u16 pcie_gen_bootup_value;
124 u16 pcie_lane_bootup_value;
125};
126
127struct ci_clock_registers {
128 u32 cg_spll_func_cntl;
129 u32 cg_spll_func_cntl_2;
130 u32 cg_spll_func_cntl_3;
131 u32 cg_spll_func_cntl_4;
132 u32 cg_spll_spread_spectrum;
133 u32 cg_spll_spread_spectrum_2;
134 u32 dll_cntl;
135 u32 mclk_pwrmgt_cntl;
136 u32 mpll_ad_func_cntl;
137 u32 mpll_dq_func_cntl;
138 u32 mpll_func_cntl;
139 u32 mpll_func_cntl_1;
140 u32 mpll_func_cntl_2;
141 u32 mpll_ss1;
142 u32 mpll_ss2;
143};
144
145struct ci_thermal_temperature_setting {
146 s32 temperature_low;
147 s32 temperature_high;
148 s32 temperature_shutdown;
149};
150
151struct ci_pcie_perf_range {
152 u16 max;
153 u16 min;
154};
155
156enum ci_pt_config_reg_type {
157 CISLANDS_CONFIGREG_MMR = 0,
158 CISLANDS_CONFIGREG_SMC_IND,
159 CISLANDS_CONFIGREG_DIDT_IND,
160 CISLANDS_CONFIGREG_CACHE,
161 CISLANDS_CONFIGREG_MAX
162};
163
164#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
165#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
166#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
167
168struct ci_pt_config_reg {
169 u32 offset;
170 u32 mask;
171 u32 shift;
172 u32 value;
173 enum ci_pt_config_reg_type type;
174};
175
176struct ci_pt_defaults {
177 u8 svi_load_line_en;
178 u8 svi_load_line_vddc;
179 u8 tdc_vddc_throttle_release_limit_perc;
180 u8 tdc_mawt;
181 u8 tdc_waterfall_ctl;
182 u8 dte_ambient_temp_base;
183 u32 display_cac;
184 u32 bapm_temp_gradient;
185 u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
186 u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
187};
188
189#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
190#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
191#define DPMTABLE_UPDATE_SCLK 0x00000004
192#define DPMTABLE_UPDATE_MCLK 0x00000008
193
194struct ci_power_info {
195 struct ci_dpm_table dpm_table;
196 struct ci_dpm_table golden_dpm_table;
197 u32 voltage_control;
198 u32 mvdd_control;
199 u32 vddci_control;
200 u32 active_auto_throttle_sources;
201 struct ci_clock_registers clock_registers;
202 u16 acpi_vddc;
203 u16 acpi_vddci;
204 enum amdgpu_pcie_gen force_pcie_gen;
205 enum amdgpu_pcie_gen acpi_pcie_gen;
206 struct ci_leakage_voltage vddc_leakage;
207 struct ci_leakage_voltage vddci_leakage;
208 u16 max_vddc_in_pp_table;
209 u16 min_vddc_in_pp_table;
210 u16 max_vddci_in_pp_table;
211 u16 min_vddci_in_pp_table;
212 u32 mclk_strobe_mode_threshold;
213 u32 mclk_stutter_mode_threshold;
214 u32 mclk_edc_enable_threshold;
215 u32 mclk_edc_wr_enable_threshold;
216 struct ci_vbios_boot_state vbios_boot_state;
217 /* smc offsets */
218 u32 sram_end;
219 u32 dpm_table_start;
220 u32 soft_regs_start;
221 u32 mc_reg_table_start;
222 u32 fan_table_start;
223 u32 arb_table_start;
224 /* smc tables */
225 SMU7_Discrete_DpmTable smc_state_table;
226 SMU7_Discrete_MCRegisters smc_mc_reg_table;
227 SMU7_Discrete_PmFuses smc_powertune_table;
228 /* other stuff */
229 struct ci_mc_reg_table mc_reg_table;
230 struct atom_voltage_table vddc_voltage_table;
231 struct atom_voltage_table vddci_voltage_table;
232 struct atom_voltage_table mvdd_voltage_table;
233 struct ci_ulv_parm ulv;
234 u32 power_containment_features;
235 const struct ci_pt_defaults *powertune_defaults;
236 u32 dte_tj_offset;
237 bool vddc_phase_shed_control;
238 struct ci_thermal_temperature_setting thermal_temp_setting;
239 struct ci_dpm_level_enable_mask dpm_level_enable_mask;
240 u32 need_update_smu7_dpm_table;
241 u32 sclk_dpm_key_disabled;
242 u32 mclk_dpm_key_disabled;
243 u32 pcie_dpm_key_disabled;
244 u32 thermal_sclk_dpm_enabled;
245 struct ci_pcie_perf_range pcie_gen_performance;
246 struct ci_pcie_perf_range pcie_lane_performance;
247 struct ci_pcie_perf_range pcie_gen_powersaving;
248 struct ci_pcie_perf_range pcie_lane_powersaving;
249 u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
250 u32 mclk_activity_target;
251 u32 low_sclk_interrupt_t;
252 u32 last_mclk_dpm_enable_mask;
253 u32 sys_pcie_mask;
254 /* caps */
255 bool caps_power_containment;
256 bool caps_cac;
257 bool caps_sq_ramping;
258 bool caps_db_ramping;
259 bool caps_td_ramping;
260 bool caps_tcp_ramping;
261 bool caps_fps;
262 bool caps_sclk_ds;
263 bool caps_sclk_ss_support;
264 bool caps_mclk_ss_support;
265 bool caps_uvd_dpm;
266 bool caps_vce_dpm;
267 bool caps_samu_dpm;
268 bool caps_acp_dpm;
269 bool caps_automatic_dc_transition;
270 bool caps_sclk_throttle_low_notification;
271 bool caps_dynamic_ac_timing;
272 bool caps_od_fuzzy_fan_control_support;
273 /* flags */
274 bool thermal_protection;
275 bool pcie_performance_request;
276 bool dynamic_ss;
277 bool dll_default_on;
278 bool cac_enabled;
279 bool uvd_enabled;
280 bool battery_state;
281 bool pspp_notify_required;
282 bool enable_bapm_feature;
283 bool enable_tdc_limit_feature;
284 bool enable_pkg_pwr_tracking_feature;
285 bool use_pcie_performance_levels;
286 bool use_pcie_powersaving_levels;
287 bool uvd_power_gated;
288 /* driver states */
289 struct amdgpu_ps current_rps;
290 struct ci_ps current_ps;
291 struct amdgpu_ps requested_rps;
292 struct ci_ps requested_ps;
293 /* fan control */
294 bool fan_ctrl_is_in_default_mode;
295 bool fan_is_controlled_by_smc;
296 u32 t_min;
297 u32 fan_ctrl_default_mode;
298};
299
300#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
301#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1
302#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2
303
304#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256
305
306#define CISLANDS_VRC_DFLT0 0x3FFFC000
307#define CISLANDS_VRC_DFLT1 0x000400
308#define CISLANDS_VRC_DFLT2 0xC00080
309#define CISLANDS_VRC_DFLT3 0xC00200
310#define CISLANDS_VRC_DFLT4 0xC01680
311#define CISLANDS_VRC_DFLT5 0xC00033
312#define CISLANDS_VRC_DFLT6 0xC00033
313#define CISLANDS_VRC_DFLT7 0x3FFFC000
314
315#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035
316#define CISLAND_TARGETACTIVITY_DFLT 30
317#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10
318
319#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
320#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
321#define PCIE_PERF_REQ_PECI_GEN1 2
322#define PCIE_PERF_REQ_PECI_GEN2 3
323#define PCIE_PERF_REQ_PECI_GEN3 4
324
325#define CISLANDS_SSTU_DFLT 0
326#define CISLANDS_SST_DFLT 0x00C8
327
328/* XXX are these ok? */
329#define CISLANDS_TEMP_RANGE_MIN (90 * 1000)
330#define CISLANDS_TEMP_RANGE_MAX (120 * 1000)
331
332int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
333 u32 smc_start_address,
334 const u8 *src, u32 byte_count, u32 limit);
335void amdgpu_ci_start_smc(struct amdgpu_device *adev);
336void amdgpu_ci_reset_smc(struct amdgpu_device *adev);
337int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev);
338void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev);
339void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev);
340bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev);
341PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
342PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev);
343int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
344int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
345 u32 smc_address, u32 *value, u32 limit);
346int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
347 u32 smc_address, u32 value, u32 limit);
348
349#endif
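
Note: the CISLANDS_Q88_FORMAT_CONVERSION_UNIT of 256 above implies values exchanged with the SMC in Q8.8 fixed point (8 integer bits, 8 fractional bits). A minimal sketch of the conversion, assuming unsigned inputs; the helper names are illustrative, not part of the removed driver:

    #include <stdint.h>

    /* Q8.8 helpers; 256 == 1 << 8 is the conversion unit defined above.
     * Names are illustrative only. */
    static inline uint16_t q88_from_uint(uint32_t v)
    {
        return (uint16_t)(v * 256);     /* truncates for v > 255 */
    }

    static inline uint32_t q88_to_uint(uint16_t q)
    {
        return q / 256;                 /* drops the fractional bits */
    }
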
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_smc.c b/drivers/gpu/drm/amd/amdgpu/ci_smc.c
deleted file mode 100644
index b8ba51e045b5..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ci_smc.c
+++ /dev/null
@@ -1,279 +0,0 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include <linux/firmware.h>
26#include <drm/drmP.h>
27#include "amdgpu.h"
28#include "cikd.h"
29#include "ppsmc.h"
30#include "amdgpu_ucode.h"
31#include "ci_dpm.h"
32
33#include "smu/smu_7_0_1_d.h"
34#include "smu/smu_7_0_1_sh_mask.h"
35
36static int ci_set_smc_sram_address(struct amdgpu_device *adev,
37 u32 smc_address, u32 limit)
38{
39 if (smc_address & 3)
40 return -EINVAL;
41 if ((smc_address + 3) > limit)
42 return -EINVAL;
43
44 WREG32(mmSMC_IND_INDEX_0, smc_address);
45 WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
46
47 return 0;
48}
49
50int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
51 u32 smc_start_address,
52 const u8 *src, u32 byte_count, u32 limit)
53{
54 unsigned long flags;
55 u32 data, original_data;
56 u32 addr;
57 u32 extra_shift;
58 int ret = 0;
59
60 if (smc_start_address & 3)
61 return -EINVAL;
62 if ((smc_start_address + byte_count) > limit)
63 return -EINVAL;
64
65 addr = smc_start_address;
66
67 spin_lock_irqsave(&adev->smc_idx_lock, flags);
68 while (byte_count >= 4) {
69 /* SMC address space is BE */
70 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
71
72 ret = ci_set_smc_sram_address(adev, addr, limit);
73 if (ret)
74 goto done;
75
76 WREG32(mmSMC_IND_DATA_0, data);
77
78 src += 4;
79 byte_count -= 4;
80 addr += 4;
81 }
82
83 /* RMW for the final bytes */
84 if (byte_count > 0) {
85 data = 0;
86
87 ret = ci_set_smc_sram_address(adev, addr, limit);
88 if (ret)
89 goto done;
90
91 original_data = RREG32(mmSMC_IND_DATA_0);
92
93 extra_shift = 8 * (4 - byte_count);
94
95 while (byte_count > 0) {
96 data = (data << 8) + *src++;
97 byte_count--;
98 }
99
100 data <<= extra_shift;
101
102 data |= (original_data & ~((~0UL) << extra_shift));
103
104 ret = ci_set_smc_sram_address(adev, addr, limit);
105 if (ret)
106 goto done;
107
108 WREG32(mmSMC_IND_DATA_0, data);
109 }
110
111done:
112 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
113
114 return ret;
115}
116
117void amdgpu_ci_start_smc(struct amdgpu_device *adev)
118{
119 u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
120
121 tmp &= ~SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
122 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
123}
124
125void amdgpu_ci_reset_smc(struct amdgpu_device *adev)
126{
127 u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
128
129 tmp |= SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
130 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
131}
132
133int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev)
134{
135 static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
136
137 return amdgpu_ci_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
138}
139
140void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev)
141{
142 u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
143
144 tmp |= SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
145
146 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
147}
148
149void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev)
150{
151 u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
152
153 tmp &= ~SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
154
155 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
156}
157
158bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev)
159{
160 u32 clk = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
161 u32 pc_c = RREG32_SMC(ixSMC_PC_C);
162
163 if (!(clk & SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK) && (0x20100 <= pc_c))
164 return true;
165
166 return false;
167}
168
169PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
170{
171 u32 tmp;
172 int i;
173
174 if (!amdgpu_ci_is_smc_running(adev))
175 return PPSMC_Result_Failed;
176
177 WREG32(mmSMC_MESSAGE_0, msg);
178
179 for (i = 0; i < adev->usec_timeout; i++) {
180 tmp = RREG32(mmSMC_RESP_0);
181 if (tmp != 0)
182 break;
183 udelay(1);
184 }
185 tmp = RREG32(mmSMC_RESP_0);
186
187 return (PPSMC_Result)tmp;
188}
189
190PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev)
191{
192 u32 tmp;
193 int i;
194
195 if (!amdgpu_ci_is_smc_running(adev))
196 return PPSMC_Result_OK;
197
198 for (i = 0; i < adev->usec_timeout; i++) {
199 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
200 if ((tmp & SMC_SYSCON_CLOCK_CNTL_0__cken_MASK) == 0)
201 break;
202 udelay(1);
203 }
204
205 return PPSMC_Result_OK;
206}
207
208int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
209{
210 const struct smc_firmware_header_v1_0 *hdr;
211 unsigned long flags;
212 u32 ucode_start_address;
213 u32 ucode_size;
214 const u8 *src;
215 u32 data;
216
217 if (!adev->pm.fw)
218 return -EINVAL;
219
220 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
221 amdgpu_ucode_print_smc_hdr(&hdr->header);
222
223 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
224 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
225 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
226 src = (const u8 *)
227 (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
228
229 if (ucode_size & 3)
230 return -EINVAL;
231
232 spin_lock_irqsave(&adev->smc_idx_lock, flags);
233 WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
234 WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK,
235 ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
236 while (ucode_size >= 4) {
237 /* SMC address space is BE */
238 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
239
240 WREG32(mmSMC_IND_DATA_0, data);
241
242 src += 4;
243 ucode_size -= 4;
244 }
245 WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
246 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
247
248 return 0;
249}
250
251int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
252 u32 smc_address, u32 *value, u32 limit)
253{
254 unsigned long flags;
255 int ret;
256
257 spin_lock_irqsave(&adev->smc_idx_lock, flags);
258 ret = ci_set_smc_sram_address(adev, smc_address, limit);
259 if (ret == 0)
260 *value = RREG32(mmSMC_IND_DATA_0);
261 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
262
263 return ret;
264}
265
266int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
267 u32 smc_address, u32 value, u32 limit)
268{
269 unsigned long flags;
270 int ret;
271
272 spin_lock_irqsave(&adev->smc_idx_lock, flags);
273 ret = ci_set_smc_sram_address(adev, smc_address, limit);
274 if (ret == 0)
275 WREG32(mmSMC_IND_DATA_0, value);
276 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
277
278 return ret;
279}
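
Note: the subtlest part of the removed amdgpu_ci_copy_bytes_to_smc() is the tail merge: SMC SRAM is written one big-endian dword at a time, so a final run of 1-3 bytes must be read-modified-written without clobbering the neighbouring bytes. A standalone model of that merge, mirroring the RMW tail path above (merge_tail_bytes is not a driver function):

    #include <stdint.h>

    /* Pack the last 1..3 source bytes big-endian into the high end of the
     * dword and keep the remaining low bytes from the existing word. */
    static uint32_t merge_tail_bytes(uint32_t original, const uint8_t *src,
                                     uint32_t byte_count /* 1..3 */)
    {
        uint32_t extra_shift = 8 * (4 - byte_count);
        uint32_t data = 0;

        while (byte_count--)
            data = (data << 8) + *src++;

        data <<= extra_shift;           /* new bytes land at the top */
        return data | (original & ~((~0UL) << extra_shift));
    }
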
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 71c50d8900e3..07c1f239e9c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1741,6 +1741,69 @@ static bool cik_need_full_reset(struct amdgpu_device *adev)
 	return true;
 }
 
+static void cik_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+			       uint64_t *count1)
+{
+	uint32_t perfctr = 0;
+	uint64_t cnt0_of, cnt1_of;
+	int tmp;
+
+	/* This reports 0 on APUs, so return to avoid writing/reading registers
+	 * that may or may not be different from their GPU counterparts
+	 */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	/* Set the 2 events that we wish to watch, defined above */
+	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
+	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+
+	/* Write to enable desired perf counters */
+	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
+	/* Zero out and enable the perf counters
+	 * Write 0x5:
+	 * Bit 0 = Start all counters(1)
+	 * Bit 2 = Global counter reset enable(1)
+	 */
+	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+	msleep(1000);
+
+	/* Load the shadow and disable the perf counters
+	 * Write 0x2:
+	 * Bit 0 = Stop counters(0)
+	 * Bit 1 = Load the shadow counters(1)
+	 */
+	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+	/* Read register values to get any >32bit overflow */
+	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
+	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
+	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
+
+	/* Get the values and add the overflow */
+	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
+	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
+}
+
+static bool cik_need_reset_on_init(struct amdgpu_device *adev)
+{
+	u32 clock_cntl, pc;
+
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	/* check if the SMC is already running */
+	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	pc = RREG32_SMC(ixSMC_PC_C);
+	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
+	    (0x20100 <= pc))
+		return true;
+
+	return false;
+}
+
 static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
 	.read_disabled_bios = &cik_read_disabled_bios,
@@ -1756,6 +1819,8 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 	.invalidate_hdp = &cik_invalidate_hdp,
 	.need_full_reset = &cik_need_full_reset,
 	.init_doorbell_index = &legacy_doorbell_index_init,
+	.get_pcie_usage = &cik_get_pcie_usage,
+	.need_reset_on_init = &cik_need_reset_on_init,
 };
 
 static int cik_common_early_init(void *handle)
@@ -2005,10 +2070,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
 		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
-		if (amdgpu_dpm == -1)
-			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
-		else
-			amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -2026,10 +2088,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
 		amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
-		if (amdgpu_dpm == -1)
-			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
-		else
-			amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
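
Note: cik_get_pcie_usage() widens each 32-bit hardware counter with the latched overflow field, and the msleep(1000) between the start (0x5) and stop/load-shadow (0x2) writes makes the result roughly a per-second rate. The widening step in isolation (widen_counter is a sketch, not a driver function):

    #include <stdint.h>

    /* Combine the 32-bit running count with the shadowed upper bits that
     * the hardware latches when the counters are stopped. */
    static uint64_t widen_counter(uint32_t low32, uint32_t upper_field)
    {
        return (uint64_t)low32 | ((uint64_t)upper_field << 32);
    }
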
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
index 2a086610f74d..2fcc4b60153c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -24,7 +24,6 @@
 #ifndef __CIK_DPM_H__
 #define __CIK_DPM_H__
 
-extern const struct amdgpu_ip_block_version ci_smu_ip_block;
 extern const struct amdgpu_ip_block_version kv_smu_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 8a8b4967a101..721c757156e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -103,9 +103,9 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
  */
 static int cik_ih_irq_init(struct amdgpu_device *adev)
 {
+	struct amdgpu_ih_ring *ih = &adev->irq.ih;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
-	u64 wptr_off;
 
 	/* disable irqs */
 	cik_ih_disable_interrupts(adev);
@@ -131,9 +131,8 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
 	ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK;
 
 	/* set the writeback address whether it's enabled or not */
-	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
-	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
-	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
 
 	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
 
@@ -183,11 +182,12 @@ static void cik_ih_irq_disable(struct amdgpu_device *adev)
  * Used by cik_irq_process().
  * Returns the value of the wptr.
  */
-static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
+static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
+			   struct amdgpu_ih_ring *ih)
 {
 	u32 wptr, tmp;
 
-	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+	wptr = le32_to_cpu(*ih->wptr_cpu);
 
 	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
 		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
@@ -196,13 +196,13 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
 		 * this should allow us to catchup.
 		 */
 		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
-			 wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
-		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+		ih->rptr = (wptr + 16) & ih->ptr_mask;
 		tmp = RREG32(mmIH_RB_CNTL);
 		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
 		WREG32(mmIH_RB_CNTL, tmp);
 	}
-	return (wptr & adev->irq.ih.ptr_mask);
+	return (wptr & ih->ptr_mask);
 }
 
 /* CIK IV Ring
@@ -237,16 +237,17 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
  * position and also advance the position.
  */
 static void cik_ih_decode_iv(struct amdgpu_device *adev,
+			     struct amdgpu_ih_ring *ih,
 			     struct amdgpu_iv_entry *entry)
 {
 	/* wptr/rptr are in bytes! */
-	u32 ring_index = adev->irq.ih.rptr >> 2;
+	u32 ring_index = ih->rptr >> 2;
 	uint32_t dw[4];
 
-	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
-	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
-	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
-	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
 
 	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 	entry->src_id = dw[0] & 0xff;
@@ -256,7 +257,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
 	entry->pasid = (dw[2] >> 16) & 0xffff;
 
 	/* wptr/rptr are in bytes! */
-	adev->irq.ih.rptr += 16;
+	ih->rptr += 16;
 }
 
 /**
@@ -266,9 +267,10 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
  *
  * Set the IH ring buffer rptr.
  */
-static void cik_ih_set_rptr(struct amdgpu_device *adev)
+static void cik_ih_set_rptr(struct amdgpu_device *adev,
+			    struct amdgpu_ih_ring *ih)
 {
-	WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+	WREG32(mmIH_RB_RPTR, ih->rptr);
 }
 
 static int cik_ih_early_init(void *handle)
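
Note: the signature changes above decouple get_wptr/decode_iv/set_rptr from the single adev->irq.ih instance; the caller now says which ring to service. A sketch of the processing loop this enables; process_one_ring() is hypothetical, and the ->ih_funcs callback table is assumed to match the new signatures:

    /* Hypothetical dispatcher: drain whichever IH ring it is handed. */
    static void process_one_ring(struct amdgpu_device *adev,
                                 struct amdgpu_ih_ring *ih)
    {
        u32 wptr = adev->irq.ih_funcs->get_wptr(adev, ih);

        while (ih->rptr != wptr) {
            struct amdgpu_iv_entry entry;

            adev->irq.ih_funcs->decode_iv(adev, ih, &entry);
            /* ... dispatch entry to the registered source handler ... */
        }
        adev->irq.ih_funcs->set_rptr(adev, ih);
    }
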
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 45795191de1f..189599b694e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -220,7 +220,7 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  bool ctx_switch)
+				  uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 extra_bits = vmid & 0xf;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 9d3ea298e116..61024b9c7a4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -103,9 +103,9 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
  */
 static int cz_ih_irq_init(struct amdgpu_device *adev)
 {
-	int rb_bufsz;
+	struct amdgpu_ih_ring *ih = &adev->irq.ih;
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
-	u64 wptr_off;
+	int rb_bufsz;
 
 	/* disable irqs */
 	cz_ih_disable_interrupts(adev);
@@ -133,9 +133,8 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
 
 	/* set the writeback address whether it's enabled or not */
-	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
-	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
-	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
 
 	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
 
@@ -185,11 +184,12 @@ static void cz_ih_irq_disable(struct amdgpu_device *adev)
  * Used by cz_irq_process(VI).
  * Returns the value of the wptr.
  */
-static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
+static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
+			  struct amdgpu_ih_ring *ih)
 {
 	u32 wptr, tmp;
 
-	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+	wptr = le32_to_cpu(*ih->wptr_cpu);
 
 	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
 		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
@@ -198,13 +198,13 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
 		 * this should allow us to catchup.
 		 */
 		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
-			 wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
-		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+		ih->rptr = (wptr + 16) & ih->ptr_mask;
 		tmp = RREG32(mmIH_RB_CNTL);
 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
 		WREG32(mmIH_RB_CNTL, tmp);
 	}
-	return (wptr & adev->irq.ih.ptr_mask);
+	return (wptr & ih->ptr_mask);
 }
 
 /**
@@ -216,16 +216,17 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
  * position and also advance the position.
  */
 static void cz_ih_decode_iv(struct amdgpu_device *adev,
-			    struct amdgpu_iv_entry *entry)
+			    struct amdgpu_ih_ring *ih,
+			    struct amdgpu_iv_entry *entry)
 {
 	/* wptr/rptr are in bytes! */
-	u32 ring_index = adev->irq.ih.rptr >> 2;
+	u32 ring_index = ih->rptr >> 2;
 	uint32_t dw[4];
 
-	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
-	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
-	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
-	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
 
 	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 	entry->src_id = dw[0] & 0xff;
@@ -235,7 +236,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
 	entry->pasid = (dw[2] >> 16) & 0xffff;
 
 	/* wptr/rptr are in bytes! */
-	adev->irq.ih.rptr += 16;
+	ih->rptr += 16;
 }
 
 /**
@@ -245,9 +246,10 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
  *
  * Set the IH ring buffer rptr.
  */
-static void cz_ih_set_rptr(struct amdgpu_device *adev)
+static void cz_ih_set_rptr(struct amdgpu_device *adev,
+			   struct amdgpu_ih_ring *ih)
 {
-	WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+	WREG32(mmIH_RB_RPTR, ih->rptr);
 }
 
 static int cz_ih_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index fdace004544d..e4cc1d48eaab 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -167,19 +167,6 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
 	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-	if (crtc->primary->fb) {
-		int r;
-		struct amdgpu_bo *abo;
-
-		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
-		r = amdgpu_bo_reserve(abo, true);
-		if (unlikely(r))
-			DRM_ERROR("failed to reserve abo before unpin\n");
-		else {
-			amdgpu_bo_unpin(abo);
-			amdgpu_bo_unreserve(abo);
-		}
-	}
 
 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
 	amdgpu_crtc->encoder = NULL;
@@ -692,7 +679,9 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_crtc_vblank_put(&amdgpu_crtc->base);
-	schedule_work(&works->unpin_work);
+	amdgpu_bo_unref(&works->old_abo);
+	kfree(works->shared);
+	kfree(works);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 1dc3013ea1d5..305276c7e4bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1842,13 +1842,13 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  bool ctx_switch)
+				  uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
-	if (ctx_switch) {
+	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
 		amdgpu_ring_write(ring, 0);
 	}
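
Note: replacing the bool ctx_switch parameter with a uint32_t flags word keeps the emit_ib prototype stable as more per-IB conditions appear; AMDGPU_HAVE_CTX_SWITCH is the only bit exercised in this diff. A sketch of the idiom, with the second flag made up purely for illustration:

    #include <stdint.h>

    #define EXAMPLE_HAVE_CTX_SWITCH (1 << 0)  /* stands in for AMDGPU_HAVE_CTX_SWITCH */
    #define EXAMPLE_FUTURE_FLAG     (1 << 1)  /* hypothetical */

    static void emit_ib_sketch(uint32_t flags)
    {
        if (flags & EXAMPLE_HAVE_CTX_SWITCH) {
            /* insert SWITCH_BUFFER, as gfx_v6_0_ring_emit_ib() does */
        }
        if (flags & EXAMPLE_FUTURE_FLAG) {
            /* further per-IB behaviour without another parameter */
        }
    }
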
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 3a9fb6018c16..7984292f9282 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2228,13 +2228,13 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 				      struct amdgpu_job *job,
 				      struct amdgpu_ib *ib,
-				      bool ctx_switch)
+				      uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
-	if (ctx_switch) {
+	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
 		amdgpu_ring_write(ring, 0);
 	}
@@ -2259,7 +2259,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 					  struct amdgpu_job *job,
 					  struct amdgpu_ib *ib,
-					  bool ctx_switch)
+					  uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 381f593b0cda..a26747681ed6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4233,7 +4233,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	u32 tmp;
 	u32 rb_bufsz;
 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
-	int r;
 
 	/* Set the write pointer delay */
 	WREG32(mmCP_RB_WPTR_DELAY, 0);
@@ -4278,9 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	amdgpu_ring_clear_ring(ring);
 	gfx_v8_0_cp_gfx_start(adev);
 	ring->sched.ready = true;
-	r = amdgpu_ring_test_helper(ring);
 
-	return r;
+	return 0;
 }
 
 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4369,10 +4367,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
 	}
 
-	r = amdgpu_ring_test_helper(kiq_ring);
-	if (r)
-		DRM_ERROR("KCQ enable failed\n");
-	return r;
+	amdgpu_ring_commit(kiq_ring);
+
+	return 0;
 }
 
 static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
@@ -4709,16 +4706,32 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
 	if (r)
 		goto done;
 
-	/* Test KCQs - reversing the order of rings seems to fix ring test failure
-	 * after GPU reset
-	 */
-	for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
+done:
+	return r;
+}
+
+static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
+{
+	int r, i;
+	struct amdgpu_ring *ring;
+
+	/* collect all the ring_tests here, gfx, kiq, compute */
+	ring = &adev->gfx.gfx_ring[0];
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
+		return r;
+
+	ring = &adev->gfx.kiq.ring;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
+		return r;
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-		r = amdgpu_ring_test_helper(ring);
+		amdgpu_ring_test_helper(ring);
 	}
 
-done:
-	return r;
+	return 0;
 }
 
 static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
@@ -4739,6 +4752,11 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
 	r = gfx_v8_0_kcq_resume(adev);
 	if (r)
 		return r;
+
+	r = gfx_v8_0_cp_test_all_rings(adev);
+	if (r)
+		return r;
+
 	gfx_v8_0_enable_gui_idle_interrupt(adev, true);
 
 	return 0;
@@ -5086,6 +5104,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
 	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
 		gfx_v8_0_cp_gfx_resume(adev);
 
+	gfx_v8_0_cp_test_all_rings(adev);
+
 	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
@@ -6027,7 +6047,7 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 				      struct amdgpu_job *job,
 				      struct amdgpu_ib *ib,
-				      bool ctx_switch)
+				      uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
@@ -6059,7 +6079,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 					  struct amdgpu_job *job,
 					  struct amdgpu_ib *ib,
-					  bool ctx_switch)
+					  uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
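
Note: the gfx_v8_0 changes pull the scattered ring tests out of the individual resume paths into one helper that runs after everything is brought up; as written, a compute-ring test failure is not propagated (the loop in gfx_v8_0_cp_test_all_rings() ignores the helper's return value and the function returns 0). The resulting bring-up order, sketched from the hunks above; KIQ bring-up is assumed to happen before the calls shown:

    /* Sketch of the consolidated sequence in gfx_v8_0_cp_resume(). */
    static int cp_resume_sketch(struct amdgpu_device *adev)
    {
        int r;

        r = gfx_v8_0_cp_gfx_resume(adev);       /* no ring test here now */
        if (r)
            return r;
        r = gfx_v8_0_kcq_resume(adev);          /* no KCQ test here now */
        if (r)
            return r;
        /* one place tests gfx, kiq, then the compute rings */
        return gfx_v8_0_cp_test_all_rings(adev);
    }
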
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7556716038d3..262ee3cf6f1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -113,7 +113,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
 };
 
 static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -135,10 +138,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
 };
 
 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -3587,6 +3587,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 {
 	uint32_t data, def;
 
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
+
 	/* It is disabled by HW by default */
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -3651,6 +3653,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
 		}
 	}
+
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
@@ -3968,7 +3972,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 				      struct amdgpu_job *job,
 				      struct amdgpu_ib *ib,
-				      bool ctx_switch)
+				      uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
@@ -4001,7 +4005,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 					  struct amdgpu_job *job,
 					  struct amdgpu_ib *ib,
-					  bool ctx_switch)
+					  uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
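
Note: the MGCG update is now bracketed by RLC safe mode, parking the RLC while the clock-gating registers are rewritten. The bracket pattern in isolation (a sketch of the calls added above, assuming the driver's types):

    static void update_cg_bracketed(struct amdgpu_device *adev, bool enable)
    {
        amdgpu_gfx_rlc_enter_safe_mode(adev);

        /* ... read-modify-write RLC_CGTT_MGCG_OVERRIDE and the related
         * clock-gating registers here ... */

        amdgpu_gfx_rlc_exit_safe_mode(adev);    /* always paired */
    }
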
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1ad7e6b8ed1d..34440672f938 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1471,8 +1471,9 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
 		gmc_v8_0_set_fault_enable_default(adev, false);
 
 	if (printk_ratelimit()) {
-		struct amdgpu_task_info task_info = { 0 };
+		struct amdgpu_task_info task_info;
 
+		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
 		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
 
 		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
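
Note: both forms zero the struct; the switch to memset() here and in gmc_v9_0 below is presumably to quiet compilers that warn about "= { 0 }" when the first member is itself an aggregate. A self-contained illustration with a stand-in type:

    #include <string.h>

    struct demo_info { char name[16]; int pid; };   /* stand-in shape */

    static void zero_both(void)
    {
        struct demo_info a = { 0 };     /* valid C, but can trip
                                         * -Wmissing-field-initializers on
                                         * some compiler versions */
        struct demo_info b;

        memset(&b, 0, sizeof(b));       /* the form the driver uses now */
        (void)a;
        (void)b;
    }
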
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index bacdaef77b6c..9c082f9aea1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -305,6 +305,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 			       struct amdgpu_iv_entry *entry)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
+	bool retry_fault = !!(entry->src_data[1] & 0x80);
 	uint32_t status = 0;
 	u64 addr;
 
@@ -320,13 +321,16 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 	}
 
 	if (printk_ratelimit()) {
-		struct amdgpu_task_info task_info = { 0 };
+		struct amdgpu_task_info task_info;
 
+		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
 		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
 
 		dev_err(adev->dev,
-			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
+			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
+			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
 			entry->vmid_src ? "mmhub" : "gfxhub",
+			retry_fault ? "retry" : "no-retry",
 			entry->src_id, entry->ring_id, entry->vmid,
 			entry->pasid, task_info.process_name, task_info.tgid,
 			task_info.task_name, task_info.pid);
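
Note: the new retry_fault value uses the "!!" idiom to collapse bit 7 of src_data[1] into a clean 0/1 before it feeds the log message. The idiom in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /* Normalize one status bit to a boolean, as retry_fault does above. */
    static bool bit_to_bool(uint32_t word, uint32_t mask)
    {
        return !!(word & mask);         /* e.g. mask = 0x80 */
    }
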
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index a3984d10b604..b1626e1d2f5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -103,9 +103,9 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev)
  */
 static int iceland_ih_irq_init(struct amdgpu_device *adev)
 {
+	struct amdgpu_ih_ring *ih = &adev->irq.ih;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
-	u64 wptr_off;
 
 	/* disable irqs */
 	iceland_ih_disable_interrupts(adev);
@@ -133,9 +133,8 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
 
 	/* set the writeback address whether it's enabled or not */
-	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
-	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
-	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
 
 	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
 
@@ -185,11 +184,12 @@ static void iceland_ih_irq_disable(struct amdgpu_device *adev)
  * Used by cz_irq_process(VI).
  * Returns the value of the wptr.
  */
-static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
+static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
+			       struct amdgpu_ih_ring *ih)
 {
 	u32 wptr, tmp;
 
-	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+	wptr = le32_to_cpu(*ih->wptr_cpu);
 
 	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
 		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
@@ -198,13 +198,13 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
 		 * this should allow us to catchup.
 		 */
 		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
-			 wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
-		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+		ih->rptr = (wptr + 16) & ih->ptr_mask;
 		tmp = RREG32(mmIH_RB_CNTL);
 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
 		WREG32(mmIH_RB_CNTL, tmp);
 	}
-	return (wptr & adev->irq.ih.ptr_mask);
+	return (wptr & ih->ptr_mask);
 }
 
 /**
@@ -216,16 +216,17 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
 * position and also advance the position.
 */
 static void iceland_ih_decode_iv(struct amdgpu_device *adev,
+				 struct amdgpu_ih_ring *ih,
 				 struct amdgpu_iv_entry *entry)
 {
 	/* wptr/rptr are in bytes! */
-	u32 ring_index = adev->irq.ih.rptr >> 2;
+	u32 ring_index = ih->rptr >> 2;
 	uint32_t dw[4];
 
-	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
-	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
-	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
-	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
 
 	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 	entry->src_id = dw[0] & 0xff;
@@ -235,7 +236,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
 	entry->pasid = (dw[2] >> 16) & 0xffff;
 
 	/* wptr/rptr are in bytes! */
-	adev->irq.ih.rptr += 16;
+	ih->rptr += 16;
}
 
 /**
@@ -245,9 +246,10 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
  *
  * Set the IH ring buffer rptr.
  */
-static void iceland_ih_set_rptr(struct amdgpu_device *adev)
+static void iceland_ih_set_rptr(struct amdgpu_device *adev,
+				struct amdgpu_ih_ring *ih)
 {
-	WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+	WREG32(mmIH_RB_RPTR, ih->rptr);
 }
 
 static int iceland_ih_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8cbb4655896a..b11a1c17a7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 		return r;
 	}
 	/* Retrieve checksum from mailbox2 */
-	if (req == IDH_REQ_GPU_INIT_ACCESS) {
+	if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
 		adev->virt.fw_reserve.checksum_key =
 			RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
 					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index accdedd63c98..cc967dbfd631 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -27,13 +27,9 @@
 #include "nbio/nbio_6_1_default.h"
 #include "nbio/nbio_6_1_offset.h"
 #include "nbio/nbio_6_1_sh_mask.h"
+#include "nbio/nbio_6_1_smn.h"
 #include "vega10_enum.h"
 
-#define smnCPM_CONTROL 0x11180460
-#define smnPCIE_CNTL2 0x11180070
-#define smnPCIE_CONFIG_CNTL 0x11180044
-#define smnPCIE_CI_CNTL 0x11180080
-
 static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
 {
 	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -72,7 +68,7 @@ static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
 }
 
 static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
-					  bool use_doorbell, int doorbell_index)
+					  bool use_doorbell, int doorbell_index, int doorbell_size)
 {
 	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
 			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
@@ -81,7 +77,7 @@ static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instan
 
 	if (use_doorbell) {
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
-		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
 	} else
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
 
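
Note: the doorbell-range SIZE field is now supplied by the caller instead of being hardcoded to 2 (the same change repeats in nbio_v7_0 and nbio_v7_4 below), so different asics can reserve differently sized doorbell apertures. REG_SET_FIELD is the usual generated mask/shift helper; a generic model of what such a helper does, with placeholder mask and shift values rather than the real BIF_SDMA0_DOORBELL_RANGE ones:

    #include <stdint.h>

    #define FIELD_SHIFT 2                   /* placeholder values */
    #define FIELD_MASK  (0x1fu << FIELD_SHIFT)

    /* Clear the field with its mask, then OR in the new value at the
     * field's shift, as REG_SET_FIELD does for named register fields. */
    static inline uint32_t set_field(uint32_t reg, uint32_t val)
    {
        return (reg & ~FIELD_MASK) | ((val << FIELD_SHIFT) & FIELD_MASK);
    }
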
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index df34dc79d444..1cdb98ad2db3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -27,13 +27,11 @@
27#include "nbio/nbio_7_0_default.h" 27#include "nbio/nbio_7_0_default.h"
28#include "nbio/nbio_7_0_offset.h" 28#include "nbio/nbio_7_0_offset.h"
29#include "nbio/nbio_7_0_sh_mask.h" 29#include "nbio/nbio_7_0_sh_mask.h"
30#include "nbio/nbio_7_0_smn.h"
30#include "vega10_enum.h" 31#include "vega10_enum.h"
31 32
32#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c 33#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
33 34
34#define smnCPM_CONTROL 0x11180460
35#define smnPCIE_CNTL2 0x11180070
36
37static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev) 35static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
38{ 36{
39 u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); 37 u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -69,7 +67,7 @@ static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
69} 67}
70 68
71static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance, 69static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
72 bool use_doorbell, int doorbell_index) 70 bool use_doorbell, int doorbell_index, int doorbell_size)
73{ 71{
74 u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) : 72 u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
75 SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); 73 SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
@@ -78,7 +76,7 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan
 
 	if (use_doorbell) {
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
-		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
 	} else
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 4cd31a276dcd..e347b407bd03 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -26,13 +26,10 @@
 
 #include "nbio/nbio_7_4_offset.h"
 #include "nbio/nbio_7_4_sh_mask.h"
+#include "nbio/nbio_7_4_0_smn.h"
 
 #define smnNBIF_MGCG_CTRL_LCLK	0x1013a21c
 
-#define smnCPM_CONTROL 0x11180460
-#define smnPCIE_CNTL2 0x11180070
-#define smnPCIE_CI_CNTL 0x11180080
-
 static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
 {
 	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -68,7 +65,7 @@ static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
 }
 
 static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
-					  bool use_doorbell, int doorbell_index)
+					  bool use_doorbell, int doorbell_index, int doorbell_size)
 {
 	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
 			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
@@ -77,7 +74,7 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
 
 	if (use_doorbell) {
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
-		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
 	} else
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 0de00fbe9233..f3a7d207af07 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -191,7 +191,7 @@ enum psp_gfx_fw_type
 	GFX_FW_TYPE_MMSCH			= 19,
 	GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM	= 20,
 	GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM	= 21,
-	GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL	= 22,
+	GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL	= 22,
 	GFX_FW_TYPE_UVD1			= 23,
 	GFX_FW_TYPE_MAX				= 24
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index d78b4306a36f..77c2bc344dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -38,75 +38,6 @@ MODULE_FIRMWARE("amdgpu/raven_asd.bin");
 MODULE_FIRMWARE("amdgpu/picasso_asd.bin");
 MODULE_FIRMWARE("amdgpu/raven2_asd.bin");
 
-static int
-psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
-{
-	switch(ucode->ucode_id) {
-	case AMDGPU_UCODE_ID_SDMA0:
-		*type = GFX_FW_TYPE_SDMA0;
-		break;
-	case AMDGPU_UCODE_ID_SDMA1:
-		*type = GFX_FW_TYPE_SDMA1;
-		break;
-	case AMDGPU_UCODE_ID_CP_CE:
-		*type = GFX_FW_TYPE_CP_CE;
-		break;
-	case AMDGPU_UCODE_ID_CP_PFP:
-		*type = GFX_FW_TYPE_CP_PFP;
-		break;
-	case AMDGPU_UCODE_ID_CP_ME:
-		*type = GFX_FW_TYPE_CP_ME;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC1:
-		*type = GFX_FW_TYPE_CP_MEC;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC1_JT:
-		*type = GFX_FW_TYPE_CP_MEC_ME1;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC2:
-		*type = GFX_FW_TYPE_CP_MEC;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC2_JT:
-		*type = GFX_FW_TYPE_CP_MEC_ME2;
-		break;
-	case AMDGPU_UCODE_ID_RLC_G:
-		*type = GFX_FW_TYPE_RLC_G;
-		break;
-	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
-		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL;
-		break;
-	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
-		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
-		break;
-	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
-		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
-		break;
-	case AMDGPU_UCODE_ID_SMC:
-		*type = GFX_FW_TYPE_SMU;
-		break;
-	case AMDGPU_UCODE_ID_UVD:
-		*type = GFX_FW_TYPE_UVD;
-		break;
-	case AMDGPU_UCODE_ID_VCE:
-		*type = GFX_FW_TYPE_VCE;
-		break;
-	case AMDGPU_UCODE_ID_VCN:
-		*type = GFX_FW_TYPE_VCN;
-		break;
-	case AMDGPU_UCODE_ID_DMCU_ERAM:
-		*type = GFX_FW_TYPE_DMCU_ERAM;
-		break;
-	case AMDGPU_UCODE_ID_DMCU_INTV:
-		*type = GFX_FW_TYPE_DMCU_ISR;
-		break;
-	case AMDGPU_UCODE_ID_MAXIMUM:
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int psp_v10_0_init_microcode(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -158,26 +89,6 @@ out:
 	return err;
 }
 
-static int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
-				  struct psp_gfx_cmd_resp *cmd)
-{
-	int ret;
-	uint64_t fw_mem_mc_addr = ucode->mc_addr;
-
-	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
-
-	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
-	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
-	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
-	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
-
-	ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
-	if (ret)
-		DRM_ERROR("Unknown firmware type\n");
-
-	return ret;
-}
-
 static int psp_v10_0_ring_init(struct psp_context *psp,
 			       enum psp_ring_type ring_type)
 {
@@ -454,7 +365,6 @@ static int psp_v10_0_mode1_reset(struct psp_context *psp)
 
 static const struct psp_funcs psp_v10_0_funcs = {
 	.init_microcode = psp_v10_0_init_microcode,
-	.prep_cmd_buf = psp_v10_0_prep_cmd_buf,
 	.ring_init = psp_v10_0_ring_init,
 	.ring_create = psp_v10_0_ring_create,
 	.ring_stop = psp_v10_0_ring_stop,
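
Aside: the psp_v10_0_get_fw_type()/psp_v10_0_prep_cmd_buf() pair removed above was duplicated almost verbatim in psp_v11_0.c and psp_v3_1.c (see the following hunks), so the series evidently hoists that boilerplate out of the per-ASIC files into common PSP code. One hypothetical way to express the shared ucode-to-firmware-type mapping is a lookup table instead of a switch; the helper below is illustrative only, assumes kernel context for the enums, and is not the code this series actually adds.

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))   /* as in the kernel */

/* Hypothetical table-driven mapping; entries abbreviated. */
struct ucode_fw_map {
	enum AMDGPU_UCODE_ID ucode_id;
	enum psp_gfx_fw_type fw_type;
};

static const struct ucode_fw_map fw_map[] = {
	{ AMDGPU_UCODE_ID_SDMA0,  GFX_FW_TYPE_SDMA0 },
	{ AMDGPU_UCODE_ID_SDMA1,  GFX_FW_TYPE_SDMA1 },
	{ AMDGPU_UCODE_ID_CP_PFP, GFX_FW_TYPE_CP_PFP },
	/* ... one entry per supported AMDGPU_UCODE_ID_* ... */
};

static int map_fw_type(enum AMDGPU_UCODE_ID id, enum psp_gfx_fw_type *type)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(fw_map); i++) {
		if (fw_map[i].ucode_id == id) {
			*type = fw_map[i].fw_type;
			return 0;
		}
	}
	return -EINVAL;   /* unknown firmware type */
}
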
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0c6e7f9b143f..f71384be1f97 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -40,60 +40,6 @@ MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
 /* address block */
 #define smnMP1_FIRMWARE_FLAGS	0x3010024
 
-static int
-psp_v11_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
-{
-	switch (ucode->ucode_id) {
-	case AMDGPU_UCODE_ID_SDMA0:
-		*type = GFX_FW_TYPE_SDMA0;
-		break;
-	case AMDGPU_UCODE_ID_SDMA1:
-		*type = GFX_FW_TYPE_SDMA1;
-		break;
-	case AMDGPU_UCODE_ID_CP_CE:
-		*type = GFX_FW_TYPE_CP_CE;
-		break;
-	case AMDGPU_UCODE_ID_CP_PFP:
-		*type = GFX_FW_TYPE_CP_PFP;
-		break;
-	case AMDGPU_UCODE_ID_CP_ME:
-		*type = GFX_FW_TYPE_CP_ME;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC1:
-		*type = GFX_FW_TYPE_CP_MEC;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC1_JT:
-		*type = GFX_FW_TYPE_CP_MEC_ME1;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC2:
-		*type = GFX_FW_TYPE_CP_MEC;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC2_JT:
-		*type = GFX_FW_TYPE_CP_MEC_ME2;
-		break;
-	case AMDGPU_UCODE_ID_RLC_G:
-		*type = GFX_FW_TYPE_RLC_G;
-		break;
-	case AMDGPU_UCODE_ID_SMC:
-		*type = GFX_FW_TYPE_SMU;
-		break;
-	case AMDGPU_UCODE_ID_UVD:
-		*type = GFX_FW_TYPE_UVD;
-		break;
-	case AMDGPU_UCODE_ID_VCE:
-		*type = GFX_FW_TYPE_VCE;
-		break;
-	case AMDGPU_UCODE_ID_UVD1:
-		*type = GFX_FW_TYPE_UVD1;
-		break;
-	case AMDGPU_UCODE_ID_MAXIMUM:
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int psp_v11_0_init_microcode(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -267,26 +213,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
 	return ret;
 }
 
-static int psp_v11_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
-				  struct psp_gfx_cmd_resp *cmd)
-{
-	int ret;
-	uint64_t fw_mem_mc_addr = ucode->mc_addr;
-
-	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
-
-	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
-	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
-	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
-	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
-
-	ret = psp_v11_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
-	if (ret)
-		DRM_ERROR("Unknown firmware type\n");
-
-	return ret;
-}
-
 static int psp_v11_0_ring_init(struct psp_context *psp,
 			       enum psp_ring_type ring_type)
 {
@@ -753,7 +679,6 @@ static const struct psp_funcs psp_v11_0_funcs = {
 	.init_microcode = psp_v11_0_init_microcode,
 	.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
 	.bootloader_load_sos = psp_v11_0_bootloader_load_sos,
-	.prep_cmd_buf = psp_v11_0_prep_cmd_buf,
 	.ring_init = psp_v11_0_ring_init,
 	.ring_create = psp_v11_0_ring_create,
 	.ring_stop = psp_v11_0_ring_stop,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 79694ff16969..c63de945c021 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -47,57 +47,6 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
 
 static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
 
-static int
-psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
-{
-	switch(ucode->ucode_id) {
-	case AMDGPU_UCODE_ID_SDMA0:
-		*type = GFX_FW_TYPE_SDMA0;
-		break;
-	case AMDGPU_UCODE_ID_SDMA1:
-		*type = GFX_FW_TYPE_SDMA1;
-		break;
-	case AMDGPU_UCODE_ID_CP_CE:
-		*type = GFX_FW_TYPE_CP_CE;
-		break;
-	case AMDGPU_UCODE_ID_CP_PFP:
-		*type = GFX_FW_TYPE_CP_PFP;
-		break;
-	case AMDGPU_UCODE_ID_CP_ME:
-		*type = GFX_FW_TYPE_CP_ME;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC1:
-		*type = GFX_FW_TYPE_CP_MEC;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC1_JT:
-		*type = GFX_FW_TYPE_CP_MEC_ME1;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC2:
-		*type = GFX_FW_TYPE_CP_MEC;
-		break;
-	case AMDGPU_UCODE_ID_CP_MEC2_JT:
-		*type = GFX_FW_TYPE_CP_MEC_ME2;
-		break;
-	case AMDGPU_UCODE_ID_RLC_G:
-		*type = GFX_FW_TYPE_RLC_G;
-		break;
-	case AMDGPU_UCODE_ID_SMC:
-		*type = GFX_FW_TYPE_SMU;
-		break;
-	case AMDGPU_UCODE_ID_UVD:
-		*type = GFX_FW_TYPE_UVD;
-		break;
-	case AMDGPU_UCODE_ID_VCE:
-		*type = GFX_FW_TYPE_VCE;
-		break;
-	case AMDGPU_UCODE_ID_MAXIMUM:
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int psp_v3_1_init_microcode(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -277,26 +226,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 	return ret;
 }
 
-static int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
-				 struct psp_gfx_cmd_resp *cmd)
-{
-	int ret;
-	uint64_t fw_mem_mc_addr = ucode->mc_addr;
-
-	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
-
-	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
-	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
-	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
-	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
-
-	ret = psp_v3_1_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
-	if (ret)
-		DRM_ERROR("Unknown firmware type\n");
-
-	return ret;
-}
-
 static int psp_v3_1_ring_init(struct psp_context *psp,
 			      enum psp_ring_type ring_type)
 {
@@ -615,7 +544,6 @@ static const struct psp_funcs psp_v3_1_funcs = {
 	.init_microcode = psp_v3_1_init_microcode,
 	.bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
 	.bootloader_load_sos = psp_v3_1_bootloader_load_sos,
-	.prep_cmd_buf = psp_v3_1_prep_cmd_buf,
 	.ring_init = psp_v3_1_ring_init,
 	.ring_create = psp_v3_1_ring_create,
 	.ring_stop = psp_v3_1_ring_stop,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 9f3cb2aec7c2..cca3552b36ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -247,7 +247,7 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_job *job,
 				   struct amdgpu_ib *ib,
-				   bool ctx_switch)
+				   uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 1bccc5fe2d9d..0ce8331baeb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -421,7 +421,7 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_job *job,
 				   struct amdgpu_ib *ib,
-				   bool ctx_switch)
+				   uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
@@ -1145,8 +1145,7 @@ static int sdma_v3_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		if (!amdgpu_sriov_vf(adev)) {
 			ring->use_doorbell = true;
-			ring->doorbell_index = (i == 0) ?
-				adev->doorbell_index.sdma_engine0 : adev->doorbell_index.sdma_engine1;
+			ring->doorbell_index = adev->doorbell_index.sdma_engine[i];
 		} else {
 			ring->use_pollmem = true;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index fd0bfe140ee0..127b85983e8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -78,7 +78,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
-	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -96,6 +95,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
 };
@@ -103,6 +103,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
 static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
 };
@@ -499,7 +500,7 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_job *job,
 				   struct amdgpu_ib *ib,
-				   bool ctx_switch)
+				   uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
@@ -833,8 +834,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 				    OFFSET, ring->doorbell_index);
 	WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
 	WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);
-	adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
-					      ring->doorbell_index);
 
 	sdma_v4_0_ring_set_wptr(ring);
 
@@ -1521,9 +1520,7 @@ static int sdma_v4_0_sw_init(void *handle)
 			 ring->use_doorbell?"true":"false");
 
 		/* doorbell size is 2 dwords, get DWORD offset */
-		ring->doorbell_index = (i == 0) ?
-			(adev->doorbell_index.sdma_engine0 << 1)
-			: (adev->doorbell_index.sdma_engine1 << 1);
+		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
 
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
@@ -1542,9 +1539,7 @@ static int sdma_v4_0_sw_init(void *handle)
 		/* paging queue use same doorbell index/routing as gfx queue
 		 * with 0x400 (4096 dwords) offset on second doorbell page
 		 */
-		ring->doorbell_index = (i == 0) ?
-			(adev->doorbell_index.sdma_engine0 << 1)
-			: (adev->doorbell_index.sdma_engine1 << 1);
+		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
 		ring->doorbell_index += 0x400;
 
 		sprintf(ring->name, "page%d", i);
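
Aside: both sdma_v3_0 and sdma_v4_0 now index the doorbell assignment with sdma_engine[i] instead of an (i == 0) ternary over sdma_engine0/sdma_engine1, which is what lets the doorbell rework scale past two engines. A condensed sketch of the two conversions used above, assuming the two-engine layout of these ASICs:

/* SDMA doorbells are 64-bit, i.e. each slot occupies 2 dwords, so the
 * dword offset written to the ring is the slot index shifted by one;
 * the paging queue then lands 0x400 dwords into the second page. */
static unsigned int sdma_gfx_doorbell_dword(const unsigned int sdma_engine[],
                                            int instance)
{
	return sdma_engine[instance] << 1;
}

static unsigned int sdma_page_doorbell_dword(const unsigned int sdma_engine[],
                                             int instance)
{
	return (sdma_engine[instance] << 1) + 0x400;
}
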
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index f8408f88cd37..79c1a9bbcc21 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -47,6 +47,7 @@
47#include "dce/dce_6_0_d.h" 47#include "dce/dce_6_0_d.h"
48#include "uvd/uvd_4_0_d.h" 48#include "uvd/uvd_4_0_d.h"
49#include "bif/bif_3_0_d.h" 49#include "bif/bif_3_0_d.h"
50#include "bif/bif_3_0_sh_mask.h"
50 51
51static const u32 tahiti_golden_registers[] = 52static const u32 tahiti_golden_registers[] =
52{ 53{
@@ -1258,6 +1259,11 @@ static bool si_need_full_reset(struct amdgpu_device *adev)
 	return true;
 }
 
+static bool si_need_reset_on_init(struct amdgpu_device *adev)
+{
+	return false;
+}
+
 static int si_get_pcie_lanes(struct amdgpu_device *adev)
 {
 	u32 link_width_cntl;
@@ -1323,6 +1329,52 @@ static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
 	WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
 }
 
+static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+			      uint64_t *count1)
+{
+	uint32_t perfctr = 0;
+	uint64_t cnt0_of, cnt1_of;
+	int tmp;
+
+	/* This reports 0 on APUs, so return to avoid writing/reading registers
+	 * that may or may not be different from their GPU counterparts
+	 */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	/* Set the 2 events that we wish to watch, defined above */
+	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
+	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+
+	/* Write to enable desired perf counters */
+	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
+	/* Zero out and enable the perf counters
+	 * Write 0x5:
+	 * Bit 0 = Start all counters(1)
+	 * Bit 2 = Global counter reset enable(1)
+	 */
+	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+	msleep(1000);
+
+	/* Load the shadow and disable the perf counters
+	 * Write 0x2:
+	 * Bit 0 = Stop counters(0)
+	 * Bit 1 = Load the shadow counters(1)
+	 */
+	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+	/* Read register values to get any >32bit overflow */
+	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
+	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
+	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
+
+	/* Get the values and add the overflow */
+	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
+	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
+}
+
 static const struct amdgpu_asic_funcs si_asic_funcs =
 {
 	.read_disabled_bios = &si_read_disabled_bios,
@@ -1339,6 +1391,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
 	.flush_hdp = &si_flush_hdp,
 	.invalidate_hdp = &si_invalidate_hdp,
 	.need_full_reset = &si_need_full_reset,
+	.get_pcie_usage = &si_get_pcie_usage,
+	.need_reset_on_init = &si_need_reset_on_init,
 };
 
 static uint32_t si_get_rev_id(struct amdgpu_device *adev)
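
Aside: si_get_pcie_usage() implements the new sysfs PCIe-usage query mentioned in the merge summary: it arms two TXCLK perf events, samples for one second, freezes the counts into their shadow registers (the 0x5/0x2 writes commented above), and then stitches each 32-bit count together with the overflow bits read from COUNTERx_UPPER. A minimal sketch of that last reconstruction step:

#include <stdint.h>

/* Combine a 32-bit shadow count with the overflow bits read from the
 * COUNTERx_UPPER field, as the hunk above does for both counters. */
static uint64_t pcie_counter64(uint32_t count_lo, uint32_t upper_bits)
{
	return (uint64_t)count_lo | ((uint64_t)upper_bits << 32);
}
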
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index b6e473134e19..f15f196684ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -63,7 +63,7 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
 static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
 				struct amdgpu_job *job,
 				struct amdgpu_ib *ib,
-				bool ctx_switch)
+				uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 2938fb9f17cc..8c50c9cab455 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -57,9 +57,9 @@ static void si_ih_disable_interrupts(struct amdgpu_device *adev)
 
 static int si_ih_irq_init(struct amdgpu_device *adev)
 {
+	struct amdgpu_ih_ring *ih = &adev->irq.ih;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
-	u64 wptr_off;
 
 	si_ih_disable_interrupts(adev);
 	WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
@@ -76,9 +76,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
 		     (rb_bufsz << 1) |
 		     IH_WPTR_WRITEBACK_ENABLE;
 
-	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
-	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
-	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
 	WREG32(IH_RB_CNTL, ih_rb_cntl);
 	WREG32(IH_RB_RPTR, 0);
 	WREG32(IH_RB_WPTR, 0);
@@ -100,34 +99,36 @@ static void si_ih_irq_disable(struct amdgpu_device *adev)
 	mdelay(1);
 }
 
-static u32 si_ih_get_wptr(struct amdgpu_device *adev)
+static u32 si_ih_get_wptr(struct amdgpu_device *adev,
+			  struct amdgpu_ih_ring *ih)
 {
 	u32 wptr, tmp;
 
-	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+	wptr = le32_to_cpu(*ih->wptr_cpu);
 
 	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
 		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
 		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
-			wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
-		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+			wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+		ih->rptr = (wptr + 16) & ih->ptr_mask;
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
 		WREG32(IH_RB_CNTL, tmp);
 	}
-	return (wptr & adev->irq.ih.ptr_mask);
+	return (wptr & ih->ptr_mask);
 }
 
 static void si_ih_decode_iv(struct amdgpu_device *adev,
-			    struct amdgpu_iv_entry *entry)
+			    struct amdgpu_ih_ring *ih,
+			    struct amdgpu_iv_entry *entry)
 {
-	u32 ring_index = adev->irq.ih.rptr >> 2;
+	u32 ring_index = ih->rptr >> 2;
 	uint32_t dw[4];
 
-	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
-	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
-	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
-	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
 
 	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 	entry->src_id = dw[0] & 0xff;
@@ -135,12 +136,13 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
 	entry->ring_id = dw[2] & 0xff;
 	entry->vmid = (dw[2] >> 8) & 0xff;
 
-	adev->irq.ih.rptr += 16;
+	ih->rptr += 16;
 }
 
-static void si_ih_set_rptr(struct amdgpu_device *adev)
+static void si_ih_set_rptr(struct amdgpu_device *adev,
+			   struct amdgpu_ih_ring *ih)
 {
-	WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
+	WREG32(IH_RB_RPTR, ih->rptr);
 }
 
 static int si_ih_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8849b74078d6..62d272b4be19 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -43,6 +43,10 @@
43#include "hdp/hdp_4_0_sh_mask.h" 43#include "hdp/hdp_4_0_sh_mask.h"
44#include "smuio/smuio_9_0_offset.h" 44#include "smuio/smuio_9_0_offset.h"
45#include "smuio/smuio_9_0_sh_mask.h" 45#include "smuio/smuio_9_0_sh_mask.h"
46#include "nbio/nbio_7_0_default.h"
47#include "nbio/nbio_7_0_sh_mask.h"
48#include "nbio/nbio_7_0_smn.h"
49#include "mp/mp_9_0_offset.h"
46 50
47#include "soc15.h" 51#include "soc15.h"
48#include "soc15_common.h" 52#include "soc15_common.h"
@@ -385,14 +389,13 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
 
 }
 
-
-static int soc15_asic_reset(struct amdgpu_device *adev)
+static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
 {
 	u32 i;
 
 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
-	dev_info(adev->dev, "GPU reset\n");
+	dev_info(adev->dev, "GPU mode1 reset\n");
 
 	/* disable BM */
 	pci_clear_master(adev->pdev);
@@ -417,6 +420,63 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
+{
+	void *pp_handle = adev->powerplay.pp_handle;
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
+		*cap = false;
+		return -ENOENT;
+	}
+
+	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+}
+
+static int soc15_asic_baco_reset(struct amdgpu_device *adev)
+{
+	void *pp_handle = adev->powerplay.pp_handle;
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+	if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
+		return -ENOENT;
+
+	/* enter BACO state */
+	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+		return -EIO;
+
+	/* exit BACO state */
+	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+		return -EIO;
+
+	dev_info(adev->dev, "GPU BACO reset\n");
+
+	return 0;
+}
+
+static int soc15_asic_reset(struct amdgpu_device *adev)
+{
+	int ret;
+	bool baco_reset;
+
+	switch (adev->asic_type) {
+	case CHIP_VEGA10:
+	case CHIP_VEGA20:
+		soc15_asic_get_baco_capability(adev, &baco_reset);
+		break;
+	default:
+		baco_reset = false;
+		break;
+	}
+
+	if (baco_reset)
+		ret = soc15_asic_baco_reset(adev);
+	else
+		ret = soc15_asic_mode1_reset(adev);
+
+	return ret;
+}
+
 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
 	u32 cntl_reg, u32 status_reg)
 {
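
Aside: soc15_asic_reset() now dispatches between the new BACO path and the renamed mode1 path, which is the "initial support for BACO on vega10/20" item from the merge summary. Note that the switch above ignores the return value of soc15_asic_get_baco_capability() and relies on the helper's error path to clear *cap. A defensive restatement of the selection logic, illustrative only, would initialize the flag locally:

/* Illustrative restatement of the reset-method selection above. */
static bool soc15_want_baco(struct amdgpu_device *adev)
{
	bool cap = false;

	if (adev->asic_type != CHIP_VEGA10 && adev->asic_type != CHIP_VEGA20)
		return false;

	/* the helper sets cap to false and returns -ENOENT when the
	 * powerplay callback is missing; cap also stays false on any
	 * other failure because it was initialized here */
	soc15_asic_get_baco_capability(adev, &cap);
	return cap;
}
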
@@ -535,10 +595,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
-		if (adev->asic_type == CHIP_VEGA20)
-			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
-		else
-			amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+			if (adev->asic_type == CHIP_VEGA20)
+				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+			else
+				amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+		}
 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
 		if (!amdgpu_sriov_vf(adev))
@@ -560,7 +622,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
@@ -601,6 +664,68 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev)
 	/* change this when we implement soft reset */
 	return true;
 }
+static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+				 uint64_t *count1)
+{
+	uint32_t perfctr = 0;
+	uint64_t cnt0_of, cnt1_of;
+	int tmp;
+
+	/* This reports 0 on APUs, so return to avoid writing/reading registers
+	 * that may or may not be different from their GPU counterparts
+	 */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	/* Set the 2 events that we wish to watch, defined above */
+	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
+	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+
+	/* Write to enable desired perf counters */
+	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
+	/* Zero out and enable the perf counters
+	 * Write 0x5:
+	 * Bit 0 = Start all counters(1)
+	 * Bit 2 = Global counter reset enable(1)
+	 */
+	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+	msleep(1000);
+
+	/* Load the shadow and disable the perf counters
+	 * Write 0x2:
+	 * Bit 0 = Stop counters(0)
+	 * Bit 1 = Load the shadow counters(1)
+	 */
+	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+	/* Read register values to get any >32bit overflow */
+	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
+	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
+	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
+
+	/* Get the values and add the overflow */
+	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
+	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
+}
+
+static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
+{
+	u32 sol_reg;
+
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	/* Check the sOS sign-of-life register to confirm the sys driver and
+	 * sOS have already been loaded.
+	 */
+	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+	if (sol_reg)
+		return true;
+
+	return false;
+}
 
 static const struct amdgpu_asic_funcs soc15_asic_funcs =
 {
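
Aside: soc15_need_reset_on_init() is the soc15 half of the "handle kexec properly" item from the merge summary: after a kexec the PSP system driver and sOS can still be resident, which the driver detects through a nonzero "sign of life" scratch register and answers by resetting the ASIC before reinitialization. Condensed into one illustrative helper, the check reads:

/* Restatement of the check above: dGPU only, and a nonzero
 * MP0_SMN_C2PMSG_81 means the PSP sysdriver/sOS survived from a
 * previous kernel (e.g. across kexec), so reset before reuse. */
static bool need_reset_on_init(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81) != 0;
}
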
@@ -617,6 +742,8 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
 	.invalidate_hdp = &soc15_invalidate_hdp,
 	.need_full_reset = &soc15_need_full_reset,
 	.init_doorbell_index = &vega10_doorbell_index_init,
+	.get_pcie_usage = &soc15_get_pcie_usage,
+	.need_reset_on_init = &soc15_need_reset_on_init,
 };
 
 static const struct amdgpu_asic_funcs vega20_asic_funcs =
@@ -634,6 +761,8 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
 	.invalidate_hdp = &soc15_invalidate_hdp,
 	.need_full_reset = &soc15_need_full_reset,
 	.init_doorbell_index = &vega20_doorbell_index_init,
+	.get_pcie_usage = &soc15_get_pcie_usage,
+	.need_reset_on_init = &soc15_need_reset_on_init,
 };
 
 static int soc15_common_early_init(void *handle)
@@ -840,6 +969,22 @@ static int soc15_common_sw_fini(void *handle)
 	return 0;
 }
 
+static void soc15_doorbell_range_init(struct amdgpu_device *adev)
+{
+	int i;
+	struct amdgpu_ring *ring;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		adev->nbio_funcs->sdma_doorbell_range(adev, i,
+			ring->use_doorbell, ring->doorbell_index,
+			adev->doorbell_index.sdma_doorbell_range);
+	}
+
+	adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+					    adev->irq.ih.doorbell_index);
+}
+
 static int soc15_common_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -852,6 +997,12 @@ static int soc15_common_hw_init(void *handle)
 	adev->nbio_funcs->init_registers(adev);
 	/* enable the doorbell aperture */
 	soc15_enable_doorbell_aperture(adev, true);
+	/* HW doorbell routing policy: doorbell writing not
+	 * in SDMA/IH/MM/ACV range will be routed to CP. So
+	 * we need to init SDMA/IH/MM/ACV doorbell range prior
+	 * to CP ip block init and ring test.
+	 */
+	soc15_doorbell_range_init(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 15da06ddeb75..a20b711a6756 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -99,9 +99,9 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
  */
 static int tonga_ih_irq_init(struct amdgpu_device *adev)
 {
-	int rb_bufsz;
 	u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
-	u64 wptr_off;
+	struct amdgpu_ih_ring *ih = &adev->irq.ih;
+	int rb_bufsz;
 
 	/* disable irqs */
 	tonga_ih_disable_interrupts(adev);
@@ -118,10 +118,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
 	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
 
 	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
-	if (adev->irq.ih.use_bus_addr)
-		WREG32(mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8);
-	else
-		WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+	WREG32(mmIH_RB_BASE, ih->gpu_addr >> 8);
 
 	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
 	ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
@@ -136,12 +133,8 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
 	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
 
 	/* set the writeback address whether it's enabled or not */
-	if (adev->irq.ih.use_bus_addr)
-		wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
-	else
-		wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
-	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
-	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
 
 	/* set rptr, wptr to 0 */
 	WREG32(mmIH_RB_RPTR, 0);
@@ -193,14 +186,12 @@ static void tonga_ih_irq_disable(struct amdgpu_device *adev)
  * Used by cz_irq_process(VI).
  * Returns the value of the wptr.
  */
-static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
+static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
+			     struct amdgpu_ih_ring *ih)
 {
 	u32 wptr, tmp;
 
-	if (adev->irq.ih.use_bus_addr)
-		wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]);
-	else
-		wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+	wptr = le32_to_cpu(*ih->wptr_cpu);
 
 	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
 		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
@@ -209,13 +200,13 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
 		 * this should allow us to catchup.
 		 */
 		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
-			wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
-		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+			wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+		ih->rptr = (wptr + 16) & ih->ptr_mask;
 		tmp = RREG32(mmIH_RB_CNTL);
 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
 		WREG32(mmIH_RB_CNTL, tmp);
 	}
-	return (wptr & adev->irq.ih.ptr_mask);
+	return (wptr & ih->ptr_mask);
 }
 
 /**
@@ -227,16 +218,17 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
  * position and also advance the position.
  */
 static void tonga_ih_decode_iv(struct amdgpu_device *adev,
-			       struct amdgpu_iv_entry *entry)
+			       struct amdgpu_ih_ring *ih,
+			       struct amdgpu_iv_entry *entry)
 {
 	/* wptr/rptr are in bytes! */
-	u32 ring_index = adev->irq.ih.rptr >> 2;
+	u32 ring_index = ih->rptr >> 2;
 	uint32_t dw[4];
 
-	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
-	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
-	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
-	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
 
 	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 	entry->src_id = dw[0] & 0xff;
@@ -246,7 +238,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
 	entry->pasid = (dw[2] >> 16) & 0xffff;
 
 	/* wptr/rptr are in bytes! */
-	adev->irq.ih.rptr += 16;
+	ih->rptr += 16;
 }
 
 /**
@@ -256,17 +248,15 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
  *
  * Set the IH ring buffer rptr.
  */
-static void tonga_ih_set_rptr(struct amdgpu_device *adev)
+static void tonga_ih_set_rptr(struct amdgpu_device *adev,
+			      struct amdgpu_ih_ring *ih)
 {
-	if (adev->irq.ih.use_doorbell) {
+	if (ih->use_doorbell) {
 		/* XXX check if swapping is necessary on BE */
-		if (adev->irq.ih.use_bus_addr)
-			adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
-		else
-			adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
-		WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
+		*ih->rptr_cpu = ih->rptr;
+		WDOORBELL32(ih->doorbell_index, ih->rptr);
 	} else {
-		WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+		WREG32(mmIH_RB_RPTR, ih->rptr);
 	}
 }
 
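
Aside: the IH rework threads a struct amdgpu_ih_ring through get_wptr/decode_iv/set_rptr instead of hard-coding adev->irq.ih, which is what allows soc15 parts to run multiple IH rings (see the vega10_ih.c changes below). The overflow recovery in tonga_ih_get_wptr() relies on power-of-two ring sizes: on RB_OVERFLOW the read pointer jumps to one 16-byte entry past the write pointer, dropping the oldest vectors so reading can catch up. A sketch of that wrap arithmetic:

#include <stdint.h>

/* ptr_mask is ring_size_in_bytes - 1 (IH ring sizes are powers of two)
 * and each IH vector is 16 bytes, so this resumes one entry behind the
 * writer, exactly as the overflow branch above computes. */
static uint32_t ih_catch_up_rptr(uint32_t wptr, uint32_t ptr_mask)
{
	return (wptr + 16) & ptr_mask;
}
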
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index d69c8f6daaf8..c4fb58667fd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -511,7 +511,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  bool ctx_switch)
+				  uint32_t flags)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
 	amdgpu_ring_write(ring, ib->gpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index ee8cd06ddc38..52bd8a654734 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -526,7 +526,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  bool ctx_switch)
+				  uint32_t flags)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d4f4a66f8324..c9edddf9f88a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -977,7 +977,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  bool ctx_switch)
+				  uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
@@ -1003,7 +1003,7 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
 				      struct amdgpu_job *job,
 				      struct amdgpu_ib *ib,
-				      bool ctx_switch)
+				      uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index aef924026a28..dc461df48da0 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1272,7 +1272,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  bool ctx_switch)
+				  uint32_t flags)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
@@ -1303,7 +1303,7 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
 				      struct amdgpu_job *job,
 				      struct amdgpu_ib *ib,
-				      bool ctx_switch)
+				      uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 2668effadd27..6ec65cf11112 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -834,7 +834,7 @@ out:
 static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  bool ctx_switch)
+				  uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 9fb34b7d8e03..aadc3e66ebd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -947,7 +947,7 @@ static int vce_v4_0_set_powergating_state(void *handle,
 #endif
 
 static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
-					struct amdgpu_ib *ib, bool ctx_switch)
+					struct amdgpu_ib *ib, uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 89bb2fef90eb..3dbc51f9d3b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1371,7 +1371,7 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
 				      struct amdgpu_job *job,
 				      struct amdgpu_ib *ib,
-				      bool ctx_switch)
+				      uint32_t flags)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
@@ -1531,7 +1531,7 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
 				      struct amdgpu_job *job,
 				      struct amdgpu_ib *ib,
-				      bool ctx_switch)
+				      uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
@@ -1736,7 +1736,7 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6
 static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
 				       struct amdgpu_job *job,
 				       struct amdgpu_ib *ib,
-				       bool ctx_switch)
+				       uint32_t flags)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 2c250b01a903..6d1f804277f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -50,6 +50,22 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 	adev->irq.ih.enabled = true;
+
+	if (adev->irq.ih1.ring_size) {
+		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+					   RB_ENABLE, 1);
+		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+		adev->irq.ih1.enabled = true;
+	}
+
+	if (adev->irq.ih2.ring_size) {
+		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+					   RB_ENABLE, 1);
+		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+		adev->irq.ih2.enabled = true;
+	}
 }
 
 /**
@@ -71,6 +87,53 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
71 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); 87 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
72 adev->irq.ih.enabled = false; 88 adev->irq.ih.enabled = false;
73 adev->irq.ih.rptr = 0; 89 adev->irq.ih.rptr = 0;
90
91 if (adev->irq.ih1.ring_size) {
92 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
93 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
94 RB_ENABLE, 0);
95 WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
96 /* set rptr, wptr to 0 */
97 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
98 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
99 adev->irq.ih1.enabled = false;
100 adev->irq.ih1.rptr = 0;
101 }
102
103 if (adev->irq.ih2.ring_size) {
104 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
105 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
106 RB_ENABLE, 0);
107 WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
108 /* set rptr, wptr to 0 */
109 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
110 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
111 adev->irq.ih2.enabled = false;
112 adev->irq.ih2.rptr = 0;
113 }
114}
115
116static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
117{
118 int rb_bufsz = order_base_2(ih->ring_size / 4);
119
120 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
121 MC_SPACE, ih->use_bus_addr ? 1 : 4);
122 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
123 WPTR_OVERFLOW_CLEAR, 1);
124 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
125 WPTR_OVERFLOW_ENABLE, 1);
126 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
127 /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
128 * value is written to memory
129 */
130 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
131 WPTR_WRITEBACK_ENABLE, 1);
132 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
133 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
134 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
135
136 return ih_rb_cntl;
74} 137}
75 138
76/** 139/**
@@ -86,50 +149,32 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
86 */ 149 */
87static int vega10_ih_irq_init(struct amdgpu_device *adev) 150static int vega10_ih_irq_init(struct amdgpu_device *adev)
88{ 151{
152 struct amdgpu_ih_ring *ih;
89 int ret = 0; 153 int ret = 0;
90 int rb_bufsz;
91 u32 ih_rb_cntl, ih_doorbell_rtpr; 154 u32 ih_rb_cntl, ih_doorbell_rtpr;
92 u32 tmp; 155 u32 tmp;
93 u64 wptr_off;
94 156
95 /* disable irqs */ 157 /* disable irqs */
96 vega10_ih_disable_interrupts(adev); 158 vega10_ih_disable_interrupts(adev);
97 159
98 adev->nbio_funcs->ih_control(adev); 160 adev->nbio_funcs->ih_control(adev);
99 161
100 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); 162 ih = &adev->irq.ih;
101 /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ 163 /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
102 if (adev->irq.ih.use_bus_addr) { 164 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
103 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8); 165 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
104 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
105 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 1);
106 } else {
107 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
108 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (adev->irq.ih.gpu_addr >> 40) & 0xff);
109 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 4);
110 }
111 rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
112 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
113 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
114 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
115 /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
116 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
117 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
118 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
119 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
120
121 if (adev->irq.msi_enabled)
122 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);
123 166
167 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
168 ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
169 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
170 !!adev->irq.msi_enabled);
124 WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); 171 WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
125 172
126 /* set the writeback address whether it's enabled or not */ 173 /* set the writeback address whether it's enabled or not */
127 if (adev->irq.ih.use_bus_addr) 174 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
128 wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4); 175 lower_32_bits(ih->wptr_addr));
129 else 176 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
130 wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); 177 upper_32_bits(ih->wptr_addr) & 0xFFFF);
131 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
132 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
133 178
134 /* set rptr, wptr to 0 */ 179 /* set rptr, wptr to 0 */
135 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); 180 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
@@ -137,17 +182,48 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
137 182
138 ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR); 183 ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
139 if (adev->irq.ih.use_doorbell) { 184 if (adev->irq.ih.use_doorbell) {
140 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR, 185 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
141 OFFSET, adev->irq.ih.doorbell_index); 186 IH_DOORBELL_RPTR, OFFSET,
142 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR, 187 adev->irq.ih.doorbell_index);
188 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
189 IH_DOORBELL_RPTR,
143 ENABLE, 1); 190 ENABLE, 1);
144 } else { 191 } else {
145 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR, 192 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
193 IH_DOORBELL_RPTR,
146 ENABLE, 0); 194 ENABLE, 0);
147 } 195 }
148 WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr); 196 WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
149 adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, 197
150 adev->irq.ih.doorbell_index); 198 ih = &adev->irq.ih1;
199 if (ih->ring_size) {
200 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
201 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
202 (ih->gpu_addr >> 40) & 0xff);
203
204 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
205 ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
206 WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
207
208 /* set rptr, wptr to 0 */
209 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
210 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
211 }
212
213 ih = &adev->irq.ih2;
214 if (ih->ring_size) {
215 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
216 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
217 (ih->gpu_addr >> 40) & 0xff);
218
219 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
219 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
220 ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
221 WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
222
223 /* set rptr, wptr to 0 */
224 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
225 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
226 }
151 227
152 tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); 228 tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
153 tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, 229 tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
@@ -191,32 +267,58 @@ static void vega10_ih_irq_disable(struct amdgpu_device *adev)
191 * ring buffer overflow and deal with it. 267 * ring buffer overflow and deal with it.
192 * Returns the value of the wptr. 268 * Returns the value of the wptr.
193 */ 269 */
194static u32 vega10_ih_get_wptr(struct amdgpu_device *adev) 270static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
271 struct amdgpu_ih_ring *ih)
195{ 272{
196 u32 wptr, tmp; 273 u32 wptr, reg, tmp;
197 274
198 if (adev->irq.ih.use_bus_addr) 275 wptr = le32_to_cpu(*ih->wptr_cpu);
199 wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]); 276
277 if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
278 goto out;
279
280 /* Double check that the overflow wasn't already cleared. */
281
282 if (ih == &adev->irq.ih)
283 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
284 else if (ih == &adev->irq.ih1)
285 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
286 else if (ih == &adev->irq.ih2)
287 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
200 else 288 else
201 wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); 289 BUG();
202 290
203 if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { 291 wptr = RREG32_NO_KIQ(reg);
204 wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); 292 if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
205 293 goto out;
206 /* When a ring buffer overflow happen start parsing interrupt 294
207 * from the last not overwritten vector (wptr + 32). Hopefully 295 wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
208 * this should allow us to catchup. 296
209 */ 297 /* When a ring buffer overflow happens, start parsing interrupts
210 tmp = (wptr + 32) & adev->irq.ih.ptr_mask; 298 * from the last not overwritten vector (wptr + 32). Hopefully
211 dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", 299 * this should allow us to catch up.
212 wptr, adev->irq.ih.rptr, tmp); 300 */
213 adev->irq.ih.rptr = tmp; 301 tmp = (wptr + 32) & ih->ptr_mask;
214 302 dev_warn(adev->dev, "IH ring buffer overflow "
215 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL)); 303 "(0x%08X, 0x%08X, 0x%08X)\n",
216 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); 304 wptr, ih->rptr, tmp);
217 WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp); 305 ih->rptr = tmp;
218 } 306
219 return (wptr & adev->irq.ih.ptr_mask); 307 if (ih == &adev->irq.ih)
308 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
309 else if (ih == &adev->irq.ih1)
310 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
311 else if (ih == &adev->irq.ih2)
312 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
313 else
314 BUG();
315
316 tmp = RREG32_NO_KIQ(reg);
317 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
318 WREG32_NO_KIQ(reg, tmp);
319
320out:
321 return (wptr & ih->ptr_mask);
220} 322}
221 323
222/** 324/**
@@ -228,20 +330,21 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
228 * position and also advance the position. 330 * position and also advance the position.
229 */ 331 */
230static void vega10_ih_decode_iv(struct amdgpu_device *adev, 332static void vega10_ih_decode_iv(struct amdgpu_device *adev,
231 struct amdgpu_iv_entry *entry) 333 struct amdgpu_ih_ring *ih,
334 struct amdgpu_iv_entry *entry)
232{ 335{
233 /* wptr/rptr are in bytes! */ 336 /* wptr/rptr are in bytes! */
234 u32 ring_index = adev->irq.ih.rptr >> 2; 337 u32 ring_index = ih->rptr >> 2;
235 uint32_t dw[8]; 338 uint32_t dw[8];
236 339
237 dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); 340 dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
238 dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); 341 dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
239 dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); 342 dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
240 dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); 343 dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
241 dw[4] = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]); 344 dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
242 dw[5] = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]); 345 dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
243 dw[6] = le32_to_cpu(adev->irq.ih.ring[ring_index + 6]); 346 dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
244 dw[7] = le32_to_cpu(adev->irq.ih.ring[ring_index + 7]); 347 dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
245 348
246 entry->client_id = dw[0] & 0xff; 349 entry->client_id = dw[0] & 0xff;
247 entry->src_id = (dw[0] >> 8) & 0xff; 350 entry->src_id = (dw[0] >> 8) & 0xff;
@@ -257,9 +360,8 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
257 entry->src_data[2] = dw[6]; 360 entry->src_data[2] = dw[6];
258 entry->src_data[3] = dw[7]; 361 entry->src_data[3] = dw[7];
259 362
260
261 /* wptr/rptr are in bytes! */ 363 /* wptr/rptr are in bytes! */
262 adev->irq.ih.rptr += 32; 364 ih->rptr += 32;
263} 365}
264 366
265/** 367/**
@@ -269,37 +371,95 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
269 * 371 *
270 * Set the IH ring buffer rptr. 372 * Set the IH ring buffer rptr.
271 */ 373 */
272static void vega10_ih_set_rptr(struct amdgpu_device *adev) 374static void vega10_ih_set_rptr(struct amdgpu_device *adev,
375 struct amdgpu_ih_ring *ih)
273{ 376{
274 if (adev->irq.ih.use_doorbell) { 377 if (ih->use_doorbell) {
275 /* XXX check if swapping is necessary on BE */ 378 /* XXX check if swapping is necessary on BE */
276 if (adev->irq.ih.use_bus_addr) 379 *ih->rptr_cpu = ih->rptr;
277 adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; 380 WDOORBELL32(ih->doorbell_index, ih->rptr);
278 else 381 } else if (ih == &adev->irq.ih) {
279 adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; 382 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
280 WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr); 383 } else if (ih == &adev->irq.ih1) {
281 } else { 384 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
282 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, adev->irq.ih.rptr); 385 } else if (ih == &adev->irq.ih2) {
386 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
283 } 387 }
284} 388}
285 389
390/**
391 * vega10_ih_self_irq - dispatch work for ring 1 and 2
392 *
393 * @adev: amdgpu_device pointer
394 * @source: irq source
395 * @entry: IV with WPTR update
396 *
397 * Update the WPTR from the IV and schedule work to handle the entries.
398 */
399static int vega10_ih_self_irq(struct amdgpu_device *adev,
400 struct amdgpu_irq_src *source,
401 struct amdgpu_iv_entry *entry)
402{
403 uint32_t wptr = cpu_to_le32(entry->src_data[0]);
404
405 switch (entry->ring_id) {
406 case 1:
407 *adev->irq.ih1.wptr_cpu = wptr;
408 schedule_work(&adev->irq.ih1_work);
409 break;
410 case 2:
411 *adev->irq.ih2.wptr_cpu = wptr;
412 schedule_work(&adev->irq.ih2_work);
413 break;
414 default: break;
415 }
416 return 0;
417}
418
419static const struct amdgpu_irq_src_funcs vega10_ih_self_irq_funcs = {
420 .process = vega10_ih_self_irq,
421};
422
423static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
424{
425 adev->irq.self_irq.num_types = 0;
426 adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs;
427}
428
286static int vega10_ih_early_init(void *handle) 429static int vega10_ih_early_init(void *handle)
287{ 430{
288 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 431 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
289 432
290 vega10_ih_set_interrupt_funcs(adev); 433 vega10_ih_set_interrupt_funcs(adev);
434 vega10_ih_set_self_irq_funcs(adev);
291 return 0; 435 return 0;
292} 436}
293 437
294static int vega10_ih_sw_init(void *handle) 438static int vega10_ih_sw_init(void *handle)
295{ 439{
296 int r;
297 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 440 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
441 int r;
442
443 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
444 &adev->irq.self_irq);
445 if (r)
446 return r;
298 447
299 r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true); 448 r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
300 if (r) 449 if (r)
301 return r; 450 return r;
302 451
452 if (adev->asic_type == CHIP_VEGA10) {
453 r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
454 if (r)
455 return r;
456
457 r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
458 if (r)
459 return r;
460 }
461
462 /* TODO add doorbell for IH1 & IH2 as well */
303 adev->irq.ih.use_doorbell = true; 463 adev->irq.ih.use_doorbell = true;
304 adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; 464 adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
305 465
@@ -313,6 +473,8 @@ static int vega10_ih_sw_fini(void *handle)
313 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 473 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
314 474
315 amdgpu_irq_fini(adev); 475 amdgpu_irq_fini(adev);
476 amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
477 amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
316 amdgpu_ih_ring_fini(adev, &adev->irq.ih); 478 amdgpu_ih_ring_fini(adev, &adev->irq.ih);
317 479
318 return 0; 480 return 0;
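
vega10_ih now threads a struct amdgpu_ih_ring pointer through get_wptr/decode_iv/set_rptr so rings 1 and 2 reuse the ring-0 paths; their wptr updates arrive as IVs on ring 0 and vega10_ih_self_irq() kicks a work item per ring. The overflow recovery itself is unchanged in substance: IVs are 32 bytes, so on RB_OVERFLOW the read pointer resumes 32 bytes past the reported wptr, masked into the ring. A runnable sketch of that arithmetic (values illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ring_size = 256 * 1024;          /* bytes, as in sw_init */
	uint32_t ptr_mask  = ring_size - 1;       /* ring sizes are powers of two */
	uint32_t wptr      = 0x00000120;          /* reported write pointer */

	/* Skip the IV that was just overwritten: one 32-byte entry. */
	uint32_t rptr = (wptr + 32) & ptr_mask;

	printf("IH overflow: resume rptr = 0x%08X\n", rptr);
	return 0;
}
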
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 422674bb3cdf..4b5d60ea3e78 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -70,8 +70,8 @@ void vega10_doorbell_index_init(struct amdgpu_device *adev)
70 adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START; 70 adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START;
71 adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END; 71 adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END;
72 adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0; 72 adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0;
73 adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL64_sDMA_ENGINE0; 73 adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL64_sDMA_ENGINE0;
74 adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL64_sDMA_ENGINE1; 74 adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL64_sDMA_ENGINE1;
75 adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH; 75 adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH;
76 adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1; 76 adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1;
77 adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3; 77 adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3;
@@ -83,5 +83,6 @@ void vega10_doorbell_index_init(struct amdgpu_device *adev)
83 adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7; 83 adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
84 /* In unit of dword doorbell */ 84 /* In unit of dword doorbell */
85 adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1; 85 adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
86 adev->doorbell_index.sdma_doorbell_range = 4;
86} 87}
87 88
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index edce413fda9a..53716c593b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -68,14 +68,14 @@ void vega20_doorbell_index_init(struct amdgpu_device *adev)
68 adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START; 68 adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START;
69 adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END; 69 adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END;
70 adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0; 70 adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0;
71 adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0; 71 adev->doorbell_index.sdma_engine[0] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0;
72 adev->doorbell_index.sdma_engine1 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1; 72 adev->doorbell_index.sdma_engine[1] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1;
73 adev->doorbell_index.sdma_engine2 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2; 73 adev->doorbell_index.sdma_engine[2] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2;
74 adev->doorbell_index.sdma_engine3 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3; 74 adev->doorbell_index.sdma_engine[3] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3;
75 adev->doorbell_index.sdma_engine4 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4; 75 adev->doorbell_index.sdma_engine[4] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4;
76 adev->doorbell_index.sdma_engine5 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5; 76 adev->doorbell_index.sdma_engine[5] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5;
77 adev->doorbell_index.sdma_engine6 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6; 77 adev->doorbell_index.sdma_engine[6] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6;
78 adev->doorbell_index.sdma_engine7 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7; 78 adev->doorbell_index.sdma_engine[7] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7;
79 adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH; 79 adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH;
80 adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1; 80 adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1;
81 adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3; 81 adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3;
@@ -86,5 +86,6 @@ void vega20_doorbell_index_init(struct amdgpu_device *adev)
86 adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5; 86 adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
87 adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7; 87 adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
88 adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1; 88 adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
89 adev->doorbell_index.sdma_doorbell_range = 20;
89} 90}
90 91
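
Both reg_init files above turn the named sdma_engineN fields into an sdma_engine[] array and record a per-engine doorbell allocation (4 doorbells per engine on vega10 with its two engines, 20 on vega20 with eight), so callers can program every engine in a loop instead of special-casing each field. A standalone sketch, with program_sdma_doorbell() as a hypothetical stand-in for the nbio doorbell-range callback:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for adev->nbio_funcs->sdma_doorbell_range(). */
static void program_sdma_doorbell(int engine, unsigned int index,
				  unsigned int range)
{
	printf("SDMA%d: doorbells [%u, %u)\n", engine, index, index + range);
}

int main(void)
{
	unsigned int sdma_engine[2] = { 0xE0, 0xE8 };  /* illustrative indices */
	unsigned int range = 4;                        /* vega10 value above */

	for (int i = 0; i < 2; i++)
		program_sdma_doorbell(i, sdma_engine[i], range);
	return 0;
}
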
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 77e367459101..5e5b42a0744a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -941,6 +941,69 @@ static bool vi_need_full_reset(struct amdgpu_device *adev)
941 } 941 }
942} 942}
943 943
944static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
945 uint64_t *count1)
946{
947 uint32_t perfctr = 0;
948 uint64_t cnt0_of, cnt1_of;
949 int tmp;
950
951 /* This reports 0 on APUs, so return to avoid writing/reading registers
952 * that may or may not be different from their GPU counterparts
953 */
954 if (adev->flags & AMD_IS_APU)
955 return;
956
957 /* Set the 2 events that we wish to watch, defined above */
958 /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
959 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
960 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
961
962 /* Write to enable desired perf counters */
963 WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
964 /* Zero out and enable the perf counters
965 * Write 0x5:
966 * Bit 0 = Start all counters(1)
967 * Bit 2 = Global counter reset enable(1)
968 */
969 WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
970
971 msleep(1000);
972
973 /* Load the shadow and disable the perf counters
974 * Write 0x2:
975 * Bit 0 = Stop counters(0)
976 * Bit 1 = Load the shadow counters(1)
977 */
978 WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
979
980 /* Read register values to get any >32bit overflow */
981 tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
982 cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
983 cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
984
985 /* Get the values and add the overflow */
986 *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
987 *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
988}
989
990static bool vi_need_reset_on_init(struct amdgpu_device *adev)
991{
992 u32 clock_cntl, pc;
993
994 if (adev->flags & AMD_IS_APU)
995 return false;
996
997 /* check if the SMC is already running */
998 clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
999 pc = RREG32_SMC(ixSMC_PC_C);
1000 if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
1001 (0x20100 <= pc))
1002 return true;
1003
1004 return false;
1005}
1006
944static const struct amdgpu_asic_funcs vi_asic_funcs = 1007static const struct amdgpu_asic_funcs vi_asic_funcs =
945{ 1008{
946 .read_disabled_bios = &vi_read_disabled_bios, 1009 .read_disabled_bios = &vi_read_disabled_bios,
@@ -956,6 +1019,8 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
956 .invalidate_hdp = &vi_invalidate_hdp, 1019 .invalidate_hdp = &vi_invalidate_hdp,
957 .need_full_reset = &vi_need_full_reset, 1020 .need_full_reset = &vi_need_full_reset,
958 .init_doorbell_index = &legacy_doorbell_index_init, 1021 .init_doorbell_index = &legacy_doorbell_index_init,
1022 .get_pcie_usage = &vi_get_pcie_usage,
1023 .need_reset_on_init = &vi_need_reset_on_init,
959}; 1024};
960 1025
961#define CZ_REV_BRISTOL(rev) \ 1026#define CZ_REV_BRISTOL(rev) \
@@ -1726,8 +1791,8 @@ void legacy_doorbell_index_init(struct amdgpu_device *adev)
1726 adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6; 1791 adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
1727 adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7; 1792 adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
1728 adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0; 1793 adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
1729 adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0; 1794 adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
1730 adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1; 1795 adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
1731 adev->doorbell_index.ih = AMDGPU_DOORBELL_IH; 1796 adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
1732 adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT; 1797 adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
1733} 1798}
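
vi_get_pcie_usage() above starts two PCIE perf counters, samples for a second, latches the shadow registers, and then rebuilds 64-bit totals from the 32-bit count registers plus their upper overflow fields. A runnable sketch of the final combine step (register values illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t count0_lo = 0x89ABCDEF;  /* PCIE_PERF_COUNT0_TXCLK snapshot */
	uint64_t cnt0_of   = 0x12;        /* COUNTER0_UPPER overflow field */

	/* Same combine as *count0 = low | (overflow << 32) in the patch. */
	uint64_t count0 = (uint64_t)count0_lo | (cnt0_of << 32);

	printf("received msgs = %llu\n", (unsigned long long)count0);
	return 0;
}
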
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index fbf0ee5201c3..c3613604a4f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,8 +4,8 @@
4 4
5config HSA_AMD 5config HSA_AMD
6 bool "HSA kernel driver for AMD GPU devices" 6 bool "HSA kernel driver for AMD GPU devices"
7 depends on DRM_AMDGPU && X86_64 7 depends on DRM_AMDGPU && (X86_64 || ARM64)
8 imply AMD_IOMMU_V2 8 imply AMD_IOMMU_V2 if X86_64
9 select MMU_NOTIFIER 9 select MMU_NOTIFIER
10 help 10 help
11 Enable this if you want to use HSA features on AMD GPU devices. 11 Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index b7bc7d7d048f..5d85ff341385 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
863 return 0; 863 return 0;
864} 864}
865 865
866#ifdef CONFIG_X86_64
866static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, 867static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
867 uint32_t *num_entries, 868 uint32_t *num_entries,
868 struct crat_subtype_iolink *sub_type_hdr) 869 struct crat_subtype_iolink *sub_type_hdr)
@@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
905 906
906 return 0; 907 return 0;
907} 908}
909#endif
908 910
909/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU 911/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
910 * 912 *
@@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
920 struct crat_subtype_generic *sub_type_hdr; 922 struct crat_subtype_generic *sub_type_hdr;
921 int avail_size = *size; 923 int avail_size = *size;
922 int numa_node_id; 924 int numa_node_id;
925#ifdef CONFIG_X86_64
923 uint32_t entries = 0; 926 uint32_t entries = 0;
927#endif
924 int ret = 0; 928 int ret = 0;
925 929
926 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) 930 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
@@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
982 sub_type_hdr->length); 986 sub_type_hdr->length);
983 987
984 /* Fill in Subtype: IO Link */ 988 /* Fill in Subtype: IO Link */
989#ifdef CONFIG_X86_64
985 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, 990 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
986 &entries, 991 &entries,
987 (struct crat_subtype_iolink *)sub_type_hdr); 992 (struct crat_subtype_iolink *)sub_type_hdr);
@@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
992 997
993 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + 998 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
994 sub_type_hdr->length * entries); 999 sub_type_hdr->length * entries);
1000#else
1001 pr_info("IO link not available for non-x86 platforms\n");
1002#endif
995 1003
996 crat_table->num_domains++; 1004 crat_table->num_domains++;
997 } 1005 }
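
The CONFIG_X86_64 guards above keep the CPU IO-link CRAT helper out of the new ARM64 builds; #ifdef is the right form here, since Kconfig bools are defined to 1 when set and entirely absent otherwise. A standalone sketch mirroring the two paths, including the pr_info() fallback:

#include <stdio.h>

#ifdef CONFIG_X86_64
static const char *iolink_status(void) { return "IO links filled"; }
#else
static const char *iolink_status(void) { return "IO link not available"; }
#endif

int main(void)
{
	/* Built without CONFIG_X86_64 defined, this takes the fallback. */
	printf("%s\n", iolink_status());
	return 0;
}
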
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 8018163414ff..932007eb9168 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -23,22 +23,7 @@
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/device.h> 24#include <linux/device.h>
25#include "kfd_priv.h" 25#include "kfd_priv.h"
26 26#include "amdgpu_amdkfd.h"
27static const struct kgd2kfd_calls kgd2kfd = {
28 .exit = kgd2kfd_exit,
29 .probe = kgd2kfd_probe,
30 .device_init = kgd2kfd_device_init,
31 .device_exit = kgd2kfd_device_exit,
32 .interrupt = kgd2kfd_interrupt,
33 .suspend = kgd2kfd_suspend,
34 .resume = kgd2kfd_resume,
35 .quiesce_mm = kgd2kfd_quiesce_mm,
36 .resume_mm = kgd2kfd_resume_mm,
37 .schedule_evict_and_restore_process =
38 kgd2kfd_schedule_evict_and_restore_process,
39 .pre_reset = kgd2kfd_pre_reset,
40 .post_reset = kgd2kfd_post_reset,
41};
42 27
43static int kfd_init(void) 28static int kfd_init(void)
44{ 29{
@@ -91,20 +76,10 @@ static void kfd_exit(void)
91 kfd_chardev_exit(); 76 kfd_chardev_exit();
92} 77}
93 78
94int kgd2kfd_init(unsigned int interface_version, 79int kgd2kfd_init(void)
95 const struct kgd2kfd_calls **g2f)
96{ 80{
97 int err; 81 return kfd_init();
98
99 err = kfd_init();
100 if (err)
101 return err;
102
103 *g2f = &kgd2kfd;
104
105 return 0;
106} 82}
107EXPORT_SYMBOL(kgd2kfd_init);
108 83
109void kgd2kfd_exit(void) 84void kgd2kfd_exit(void)
110{ 85{
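
With amdgpu and amdkfd now in the same module, the kgd2kfd_calls table and its interface_version handshake are dead weight: kgd2kfd_init() collapses to a direct call into kfd_init(), and the remaining entry points become ordinary functions declared in amdgpu_amdkfd.h. A standalone sketch of the old indirect shape versus the new direct one (names illustrative):

#include <stdio.h>

/* Before: the caller received a table of function pointers at init time. */
struct kgd2kfd_calls_example {
	void (*suspend)(void);
};

static void kfd_suspend_impl(void) { printf("suspend\n"); }

static const struct kgd2kfd_calls_example table = {
	.suspend = kfd_suspend_impl,
};

int main(void)
{
	table.suspend();    /* old: indirect call through the ops table */
	kfd_suspend_impl(); /* new: direct call, resolved at link time */
	return 0;
}
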
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 0689d4ccbbc0..12b66330fc6d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -266,14 +266,6 @@ struct kfd_dev {
266 bool pci_atomic_requested; 266 bool pci_atomic_requested;
267}; 267};
268 268
269/* KGD2KFD callbacks */
270void kgd2kfd_exit(void);
271struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
272 struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
273bool kgd2kfd_device_init(struct kfd_dev *kfd,
274 const struct kgd2kfd_shared_resources *gpu_resources);
275void kgd2kfd_device_exit(struct kfd_dev *kfd);
276
277enum kfd_mempool { 269enum kfd_mempool {
278 KFD_MEMPOOL_SYSTEM_CACHEABLE = 1, 270 KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
279 KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2, 271 KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
@@ -541,11 +533,6 @@ struct qcm_process_device {
541/* Approx. time before evicting the process again */ 533/* Approx. time before evicting the process again */
542#define PROCESS_ACTIVE_TIME_MS 10 534#define PROCESS_ACTIVE_TIME_MS 10
543 535
544int kgd2kfd_quiesce_mm(struct mm_struct *mm);
545int kgd2kfd_resume_mm(struct mm_struct *mm);
546int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
547 struct dma_fence *fence);
548
549/* 8 byte handle containing GPU ID in the most significant 4 bytes and 536/* 8 byte handle containing GPU ID in the most significant 4 bytes and
550 * idr_handle in the least significant 4 bytes 537 * idr_handle in the least significant 4 bytes
551 */ 538 */
@@ -800,20 +787,11 @@ int kfd_numa_node_to_apic_id(int numa_node_id);
800/* Interrupts */ 787/* Interrupts */
801int kfd_interrupt_init(struct kfd_dev *dev); 788int kfd_interrupt_init(struct kfd_dev *dev);
802void kfd_interrupt_exit(struct kfd_dev *dev); 789void kfd_interrupt_exit(struct kfd_dev *dev);
803void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
804bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry); 790bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
805bool interrupt_is_wanted(struct kfd_dev *dev, 791bool interrupt_is_wanted(struct kfd_dev *dev,
806 const uint32_t *ih_ring_entry, 792 const uint32_t *ih_ring_entry,
807 uint32_t *patched_ihre, bool *flag); 793 uint32_t *patched_ihre, bool *flag);
808 794
809/* Power Management */
810void kgd2kfd_suspend(struct kfd_dev *kfd);
811int kgd2kfd_resume(struct kfd_dev *kfd);
812
813/* GPU reset */
814int kgd2kfd_pre_reset(struct kfd_dev *kfd);
815int kgd2kfd_post_reset(struct kfd_dev *kfd);
816
817/* amdkfd Apertures */ 795/* amdkfd Apertures */
818int kfd_init_apertures(struct kfd_process *process); 796int kfd_init_apertures(struct kfd_process *process);
819 797
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5f5b2acedbac..09da91644f9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
1093 * the GPU device is not already present in the topology device 1093 * the GPU device is not already present in the topology device
1094 * list then return NULL. This means a new topology device has to 1094 * list then return NULL. This means a new topology device has to
1095 * be created for this GPU. 1095 * be created for this GPU.
1096 * TODO: Rather than assiging @gpu to first topology device withtout
1097 * gpu attached, it will better to have more stringent check.
1098 */ 1096 */
1099static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) 1097static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
1100{ 1098{
@@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
1102 struct kfd_topology_device *out_dev = NULL; 1100 struct kfd_topology_device *out_dev = NULL;
1103 1101
1104 down_write(&topology_lock); 1102 down_write(&topology_lock);
1105 list_for_each_entry(dev, &topology_device_list, list) 1103 list_for_each_entry(dev, &topology_device_list, list) {
1104 /* Discrete GPUs need their own topology device list
1105 * entries. Don't assign them to CPU/APU nodes.
1106 */
1107 if (!gpu->device_info->needs_iommu_device &&
1108 dev->node_props.cpu_cores_count)
1109 continue;
1110
1106 if (!dev->gpu && (dev->node_props.simd_count > 0)) { 1111 if (!dev->gpu && (dev->node_props.simd_count > 0)) {
1107 dev->gpu = gpu; 1112 dev->gpu = gpu;
1108 out_dev = dev; 1113 out_dev = dev;
1109 break; 1114 break;
1110 } 1115 }
1116 }
1111 up_write(&topology_lock); 1117 up_write(&topology_lock);
1112 return out_dev; 1118 return out_dev;
1113} 1119}
@@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
1392 1398
1393static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) 1399static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
1394{ 1400{
1395 const struct cpuinfo_x86 *cpuinfo;
1396 int first_cpu_of_numa_node; 1401 int first_cpu_of_numa_node;
1397 1402
1398 if (!cpumask || cpumask == cpu_none_mask) 1403 if (!cpumask || cpumask == cpu_none_mask)
@@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
1400 first_cpu_of_numa_node = cpumask_first(cpumask); 1405 first_cpu_of_numa_node = cpumask_first(cpumask);
1401 if (first_cpu_of_numa_node >= nr_cpu_ids) 1406 if (first_cpu_of_numa_node >= nr_cpu_ids)
1402 return -1; 1407 return -1;
1403 cpuinfo = &cpu_data(first_cpu_of_numa_node); 1408#ifdef CONFIG_X86_64
1404 1409 return cpu_data(first_cpu_of_numa_node).apicid;
1405 return cpuinfo->apicid; 1410#else
1411 return first_cpu_of_numa_node;
1412#endif
1406} 1413}
1407 1414
1408/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor 1415/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
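
Two behavioral notes on the kfd_topology.c hunks above: discrete GPUs (those without needs_iommu_device) no longer attach to topology nodes that have CPU cores, and on non-x86 the APIC-ID lookup falls back to the first CPU number of the NUMA node, which is unique even though it is not an APIC ID. A sketch of that fallback (the x86 value here is a placeholder, not the real cpu_data() lookup):

#include <stdio.h>

static int numa_node_to_id(int first_cpu_of_numa_node)
{
#ifdef CONFIG_X86_64
	/* real code: cpu_data(first_cpu_of_numa_node).apicid */
	return 0x10 + first_cpu_of_numa_node;   /* placeholder apicid */
#else
	return first_cpu_of_numa_node;          /* stable, non-APIC id */
#endif
}

int main(void)
{
	printf("node id = %d\n", numa_node_to_id(4));
	return 0;
}
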
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8a626d16e8e3..407f733a47ea 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1760,7 +1760,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1760 + caps.min_input_signal * 0x101; 1760 + caps.min_input_signal * 0x101;
1761 1761
1762 if (dc_link_set_backlight_level(dm->backlight_link, 1762 if (dc_link_set_backlight_level(dm->backlight_link,
1763 brightness, 0, 0)) 1763 brightness, 0))
1764 return 0; 1764 return 0;
1765 else 1765 else
1766 return 1; 1766 return 1;
@@ -2284,6 +2284,68 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2284 return r; 2284 return r;
2285} 2285}
2286 2286
2287static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
2288{
2289 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
2290
2291 return offset ? (address + offset * 256) : 0;
2292}
2293
2294static bool fill_plane_dcc_attributes(struct amdgpu_device *adev,
2295 const struct amdgpu_framebuffer *afb,
2296 struct dc_plane_state *plane_state,
2297 uint64_t info)
2298{
2299 struct dc *dc = adev->dm.dc;
2300 struct dc_dcc_surface_param input = {0};
2301 struct dc_surface_dcc_cap output = {0};
2302 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
2303 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
2304 uint64_t dcc_address;
2305
2306 if (!offset)
2307 return false;
2308
2309 if (!dc->cap_funcs.get_dcc_compression_cap)
2310 return false;
2311
2312 input.format = plane_state->format;
2313 input.surface_size.width =
2314 plane_state->plane_size.grph.surface_size.width;
2315 input.surface_size.height =
2316 plane_state->plane_size.grph.surface_size.height;
2317 input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle;
2318
2319 if (plane_state->rotation == ROTATION_ANGLE_0 ||
2320 plane_state->rotation == ROTATION_ANGLE_180)
2321 input.scan = SCAN_DIRECTION_HORIZONTAL;
2322 else if (plane_state->rotation == ROTATION_ANGLE_90 ||
2323 plane_state->rotation == ROTATION_ANGLE_270)
2324 input.scan = SCAN_DIRECTION_VERTICAL;
2325
2326 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
2327 return false;
2328
2329 if (!output.capable)
2330 return false;
2331
2332 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
2333 return false;
2334
2335 plane_state->dcc.enable = 1;
2336 plane_state->dcc.grph.meta_pitch =
2337 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
2338 plane_state->dcc.grph.independent_64b_blks = i64b;
2339
2340 dcc_address = get_dcc_address(afb->address, info);
2341 plane_state->address.grph.meta_addr.low_part =
2342 lower_32_bits(dcc_address);
2343 plane_state->address.grph.meta_addr.high_part =
2344 upper_32_bits(dcc_address);
2345
2346 return true;
2347}
2348
2287static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, 2349static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
2288 struct dc_plane_state *plane_state, 2350 struct dc_plane_state *plane_state,
2289 const struct amdgpu_framebuffer *amdgpu_fb) 2351 const struct amdgpu_framebuffer *amdgpu_fb)
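
get_dcc_address() above recovers the DCC metadata address from the tiling flags: DCC_OFFSET_256B is stored in 256-byte units, so the meta address is base + offset * 256, with 0 meaning no DCC. A runnable sketch of the computation and the high/low split used to fill plane_state->address:

#include <stdint.h>
#include <stdio.h>

static uint64_t get_dcc_address_example(uint64_t address, uint32_t offset_256b)
{
	/* offset is in 256-byte units; 0 means no DCC metadata surface */
	return offset_256b ? address + (uint64_t)offset_256b * 256 : 0;
}

int main(void)
{
	uint64_t dcc = get_dcc_address_example(0x100000000ULL, 0x40);

	printf("meta addr hi=0x%08X lo=0x%08X\n",
	       (uint32_t)(dcc >> 32), (uint32_t)dcc);
	return 0;
}
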
@@ -2336,6 +2398,10 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
2336 return -EINVAL; 2398 return -EINVAL;
2337 } 2399 }
2338 2400
2401 memset(&plane_state->address, 0, sizeof(plane_state->address));
2402 memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
2403 memset(&plane_state->dcc, 0, sizeof(plane_state->dcc));
2404
2339 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 2405 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2340 plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS; 2406 plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
2341 plane_state->plane_size.grph.surface_size.x = 0; 2407 plane_state->plane_size.grph.surface_size.x = 0;
@@ -2367,8 +2433,6 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
2367 plane_state->color_space = COLOR_SPACE_YCBCR709; 2433 plane_state->color_space = COLOR_SPACE_YCBCR709;
2368 } 2434 }
2369 2435
2370 memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
2371
2372 /* Fill GFX8 params */ 2436 /* Fill GFX8 params */
2373 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { 2437 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
2374 unsigned int bankw, bankh, mtaspect, tile_split, num_banks; 2438 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
@@ -2417,6 +2481,9 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
2417 plane_state->tiling_info.gfx9.swizzle = 2481 plane_state->tiling_info.gfx9.swizzle =
2418 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); 2482 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2419 plane_state->tiling_info.gfx9.shaderEnable = 1; 2483 plane_state->tiling_info.gfx9.shaderEnable = 1;
2484
2485 fill_plane_dcc_attributes(adev, amdgpu_fb, plane_state,
2486 tiling_flags);
2420 } 2487 }
2421 2488
2422 plane_state->visible = true; 2489 plane_state->visible = true;
@@ -2580,7 +2647,7 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2580 * according to HDMI spec, we use YCbCr709 and YCbCr601 2647 * according to HDMI spec, we use YCbCr709 and YCbCr601
2581 * respectively 2648 * respectively
2582 */ 2649 */
2583 if (dc_crtc_timing->pix_clk_khz > 27030) { 2650 if (dc_crtc_timing->pix_clk_100hz > 270300) {
2584 if (dc_crtc_timing->flags.Y_ONLY) 2651 if (dc_crtc_timing->flags.Y_ONLY)
2585 color_space = 2652 color_space =
2586 COLOR_SPACE_YCBCR709_LIMITED; 2653 COLOR_SPACE_YCBCR709_LIMITED;
@@ -2623,7 +2690,7 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
2623 if (timing_out->display_color_depth <= COLOR_DEPTH_888) 2690 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2624 return; 2691 return;
2625 do { 2692 do {
2626 normalized_clk = timing_out->pix_clk_khz; 2693 normalized_clk = timing_out->pix_clk_100hz / 10;
2627 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ 2694 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2628 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) 2695 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
2629 normalized_clk /= 2; 2696 normalized_clk /= 2;
@@ -2666,10 +2733,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2666 timing_out->v_border_bottom = 0; 2733 timing_out->v_border_bottom = 0;
2667 /* TODO: un-hardcode */ 2734 /* TODO: un-hardcode */
2668 if (drm_mode_is_420_only(info, mode_in) 2735 if (drm_mode_is_420_only(info, mode_in)
2669 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) 2736 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
2670 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 2737 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
2671 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) 2738 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2672 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) 2739 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
2673 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 2740 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2674 else 2741 else
2675 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 2742 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
@@ -2704,14 +2771,14 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2704 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 2771 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2705 timing_out->v_sync_width = 2772 timing_out->v_sync_width =
2706 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 2773 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2707 timing_out->pix_clk_khz = mode_in->crtc_clock; 2774 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
2708 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 2775 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2709 2776
2710 stream->output_color_space = get_output_color_space(timing_out); 2777 stream->output_color_space = get_output_color_space(timing_out);
2711 2778
2712 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 2779 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
2713 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 2780 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
2714 if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) 2781 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
2715 adjust_colour_depth_from_display_info(timing_out, info); 2782 adjust_colour_depth_from_display_info(timing_out, info);
2716} 2783}
2717 2784
@@ -2832,7 +2899,7 @@ static void set_master_stream(struct dc_stream_state *stream_set[],
2832 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) { 2899 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2833 int refresh_rate = 0; 2900 int refresh_rate = 0;
2834 2901
2835 refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/ 2902 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
2836 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total); 2903 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2837 if (refresh_rate > highest_rfr) { 2904 if (refresh_rate > highest_rfr) {
2838 highest_rfr = refresh_rate; 2905 highest_rfr = refresh_rate;
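
The pixel clock moves from kHz to units of 100 Hz throughout (crtc_clock is in kHz, hence the * 10), gaining a decimal digit for fractional rates such as 59.94 Hz; thresholds and the refresh-rate expression scale to match (27030 becomes 270300, * 1000 becomes * 100). A runnable check of the conversion:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t crtc_clock_khz = 148500;                /* 1080p60 */
	uint32_t pix_clk_100hz  = crtc_clock_khz * 10;   /* kHz -> 100 Hz units */
	uint32_t h_total = 2200, v_total = 1125;

	/* refresh = (pix_clk_100hz * 100) / (h_total * v_total), as above */
	uint32_t refresh = (pix_clk_100hz * 100) / (h_total * v_total);

	printf("pix_clk = %u x 100Hz, refresh ~= %u Hz\n",
	       pix_clk_100hz, refresh);
	return 0;
}
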
@@ -2905,6 +2972,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2905 goto finish; 2972 goto finish;
2906 } 2973 }
2907 2974
2975 stream->dm_stream_context = aconnector;
2976
2908 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { 2977 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
2909 /* Search for preferred mode */ 2978 /* Search for preferred mode */
2910 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 2979 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
@@ -2956,7 +3025,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2956 drm_connector, 3025 drm_connector,
2957 sink); 3026 sink);
2958 3027
2959 update_stream_signal(stream); 3028 update_stream_signal(stream, sink);
2960 3029
2961 if (dm_state && dm_state->freesync_capable) 3030 if (dm_state && dm_state->freesync_capable)
2962 stream->ignore_msa_timing_param = true; 3031 stream->ignore_msa_timing_param = true;
@@ -3532,6 +3601,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3532 struct amdgpu_bo *rbo; 3601 struct amdgpu_bo *rbo;
3533 uint64_t chroma_addr = 0; 3602 uint64_t chroma_addr = 0;
3534 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; 3603 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
3604 uint64_t tiling_flags, dcc_address;
3535 unsigned int awidth; 3605 unsigned int awidth;
3536 uint32_t domain; 3606 uint32_t domain;
3537 int r; 3607 int r;
@@ -3572,6 +3642,9 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3572 DRM_ERROR("%p bind failed\n", rbo); 3642 DRM_ERROR("%p bind failed\n", rbo);
3573 return r; 3643 return r;
3574 } 3644 }
3645
3646 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
3647
3575 amdgpu_bo_unreserve(rbo); 3648 amdgpu_bo_unreserve(rbo);
3576 3649
3577 afb->address = amdgpu_bo_gpu_offset(rbo); 3650 afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -3585,6 +3658,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3585 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 3658 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3586 plane_state->address.grph.addr.low_part = lower_32_bits(afb->address); 3659 plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
3587 plane_state->address.grph.addr.high_part = upper_32_bits(afb->address); 3660 plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
3661
3662 dcc_address =
3663 get_dcc_address(afb->address, tiling_flags);
3664 plane_state->address.grph.meta_addr.low_part =
3665 lower_32_bits(dcc_address);
3666 plane_state->address.grph.meta_addr.high_part =
3667 upper_32_bits(dcc_address);
3588 } else { 3668 } else {
3589 awidth = ALIGN(new_state->fb->width, 64); 3669 awidth = ALIGN(new_state->fb->width, 64);
3590 plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; 3670 plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
@@ -4456,20 +4536,6 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
4456 acrtc->crtc_id); 4536 acrtc->crtc_id);
4457} 4537}
4458 4538
4459struct dc_stream_status *dc_state_get_stream_status(
4460 struct dc_state *state,
4461 struct dc_stream_state *stream)
4462{
4463 uint8_t i;
4464
4465 for (i = 0; i < state->stream_count; i++) {
4466 if (stream == state->streams[i])
4467 return &state->stream_status[i];
4468 }
4469
4470 return NULL;
4471}
4472
4473static void update_freesync_state_on_stream( 4539static void update_freesync_state_on_stream(
4474 struct amdgpu_display_manager *dm, 4540 struct amdgpu_display_manager *dm,
4475 struct dm_crtc_state *new_crtc_state, 4541 struct dm_crtc_state *new_crtc_state,
@@ -4523,12 +4589,12 @@ static void update_freesync_state_on_stream(
4523 TRANSFER_FUNC_UNKNOWN, 4589 TRANSFER_FUNC_UNKNOWN,
4524 &vrr_infopacket); 4590 &vrr_infopacket);
4525 4591
4526 new_crtc_state->freesync_timing_changed = 4592 new_crtc_state->freesync_timing_changed |=
4527 (memcmp(&new_crtc_state->vrr_params.adjust, 4593 (memcmp(&new_crtc_state->vrr_params.adjust,
4528 &vrr_params.adjust, 4594 &vrr_params.adjust,
4529 sizeof(vrr_params.adjust)) != 0); 4595 sizeof(vrr_params.adjust)) != 0);
4530 4596
4531 new_crtc_state->freesync_vrr_info_changed = 4597 new_crtc_state->freesync_vrr_info_changed |=
4532 (memcmp(&new_crtc_state->vrr_infopacket, 4598 (memcmp(&new_crtc_state->vrr_infopacket,
4533 &vrr_infopacket, 4599 &vrr_infopacket,
4534 sizeof(vrr_infopacket)) != 0); 4600 sizeof(vrr_infopacket)) != 0);
@@ -4552,248 +4618,6 @@ static void update_freesync_state_on_stream(
4552 vrr_params.adjust.v_total_max); 4618 vrr_params.adjust.v_total_max);
4553} 4619}
4554 4620
4555/*
4556 * Executes flip
4557 *
4558 * Waits on all BO's fences and for proper vblank count
4559 */
4560static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
4561 struct drm_framebuffer *fb,
4562 uint32_t target,
4563 struct dc_state *state)
4564{
4565 unsigned long flags;
4566 uint64_t timestamp_ns;
4567 uint32_t target_vblank;
4568 int r, vpos, hpos;
4569 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4570 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
4571 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
4572 struct amdgpu_device *adev = crtc->dev->dev_private;
4573 bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
4574 struct dc_flip_addrs addr = { {0} };
4575 /* TODO eliminate or rename surface_update */
4576 struct dc_surface_update surface_updates[1] = { {0} };
4577 struct dc_stream_update stream_update = {0};
4578 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4579 struct dc_stream_status *stream_status;
4580 struct dc_plane_state *surface;
4581
4582
4583 /* Prepare wait for target vblank early - before the fence-waits */
4584 target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
4585 amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
4586
4587 /*
4588 * TODO This might fail and hence better not used, wait
4589 * explicitly on fences instead
4590 * and in general should be called for
4591 * blocking commit to as per framework helpers
4592 */
4593 r = amdgpu_bo_reserve(abo, true);
4594 if (unlikely(r != 0)) {
4595 DRM_ERROR("failed to reserve buffer before flip\n");
4596 WARN_ON(1);
4597 }
4598
4599 /* Wait for all fences on this FB */
4600 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
4601 MAX_SCHEDULE_TIMEOUT) < 0);
4602
4603 amdgpu_bo_unreserve(abo);
4604
4605 /*
4606 * Wait until we're out of the vertical blank period before the one
4607 * targeted by the flip
4608 */
4609 while ((acrtc->enabled &&
4610 (amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
4611 0, &vpos, &hpos, NULL,
4612 NULL, &crtc->hwmode)
4613 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
4614 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
4615 (int)(target_vblank -
4616 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
4617 usleep_range(1000, 1100);
4618 }
4619
4620 /* Flip */
4621 spin_lock_irqsave(&crtc->dev->event_lock, flags);
4622
4623 WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
4624 WARN_ON(!acrtc_state->stream);
4625
4626 addr.address.grph.addr.low_part = lower_32_bits(afb->address);
4627 addr.address.grph.addr.high_part = upper_32_bits(afb->address);
4628 addr.flip_immediate = async_flip;
4629
4630 timestamp_ns = ktime_get_ns();
4631 addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
4632
4633
4634 if (acrtc->base.state->event)
4635 prepare_flip_isr(acrtc);
4636
4637 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4638
4639 stream_status = dc_stream_get_status(acrtc_state->stream);
4640 if (!stream_status) {
4641 DRM_ERROR("No stream status for CRTC: id=%d\n",
4642 acrtc->crtc_id);
4643 return;
4644 }
4645
4646 surface = stream_status->plane_states[0];
4647 surface_updates->surface = surface;
4648
4649 if (!surface) {
4650 DRM_ERROR("No surface for CRTC: id=%d\n",
4651 acrtc->crtc_id);
4652 return;
4653 }
4654 surface_updates->flip_addr = &addr;
4655
4656 if (acrtc_state->stream) {
4657 update_freesync_state_on_stream(
4658 &adev->dm,
4659 acrtc_state,
4660 acrtc_state->stream,
4661 surface,
4662 addr.flip_timestamp_in_us);
4663
4664 if (acrtc_state->freesync_timing_changed)
4665 stream_update.adjust =
4666 &acrtc_state->stream->adjust;
4667
4668 if (acrtc_state->freesync_vrr_info_changed)
4669 stream_update.vrr_infopacket =
4670 &acrtc_state->stream->vrr_infopacket;
4671 }
4672
4673 /* Update surface timing information. */
4674 surface->time.time_elapsed_in_us[surface->time.index] =
4675 addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us;
4676 surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us;
4677 surface->time.index++;
4678 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
4679 surface->time.index = 0;
4680
4681 mutex_lock(&adev->dm.dc_lock);
4682
4683 dc_commit_updates_for_stream(adev->dm.dc,
4684 surface_updates,
4685 1,
4686 acrtc_state->stream,
4687 &stream_update,
4688 &surface_updates->surface,
4689 state);
4690 mutex_unlock(&adev->dm.dc_lock);
4691
4692 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
4693 __func__,
4694 addr.address.grph.addr.high_part,
4695 addr.address.grph.addr.low_part);
4696}
4697
4698/*
4699 * TODO this whole function needs to go
4700 *
4701 * dc_surface_update is needlessly complex. See if we can just replace this
4702 * with a dc_plane_state and follow the atomic model a bit more closely here.
4703 */
4704static bool commit_planes_to_stream(
4705 struct amdgpu_display_manager *dm,
4706 struct dc *dc,
4707 struct dc_plane_state **plane_states,
4708 uint8_t new_plane_count,
4709 struct dm_crtc_state *dm_new_crtc_state,
4710 struct dm_crtc_state *dm_old_crtc_state,
4711 struct dc_state *state)
4712{
4713 /* no need to dynamically allocate this. it's pretty small */
4714 struct dc_surface_update updates[MAX_SURFACES];
4715 struct dc_flip_addrs *flip_addr;
4716 struct dc_plane_info *plane_info;
4717 struct dc_scaling_info *scaling_info;
4718 int i;
4719 struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
4720 struct dc_stream_update *stream_update =
4721 kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
4722 unsigned int abm_level;
4723
4724 if (!stream_update) {
4725 BREAK_TO_DEBUGGER();
4726 return false;
4727 }
4728
4729 flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
4730 GFP_KERNEL);
4731 plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
4732 GFP_KERNEL);
4733 scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
4734 GFP_KERNEL);
4735
4736 if (!flip_addr || !plane_info || !scaling_info) {
4737 kfree(flip_addr);
4738 kfree(plane_info);
4739 kfree(scaling_info);
4740 kfree(stream_update);
4741 return false;
4742 }
4743
4744 memset(updates, 0, sizeof(updates));
4745
4746 stream_update->src = dc_stream->src;
4747 stream_update->dst = dc_stream->dst;
4748 stream_update->out_transfer_func = dc_stream->out_transfer_func;
4749
4750 if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
4751 abm_level = dm_new_crtc_state->abm_level;
4752 stream_update->abm_level = &abm_level;
4753 }
4754
4755 for (i = 0; i < new_plane_count; i++) {
4756 updates[i].surface = plane_states[i];
4757 updates[i].gamma =
4758 (struct dc_gamma *)plane_states[i]->gamma_correction;
4759 updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
4760 flip_addr[i].address = plane_states[i]->address;
4761 flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
4762 plane_info[i].color_space = plane_states[i]->color_space;
4763 plane_info[i].format = plane_states[i]->format;
4764 plane_info[i].plane_size = plane_states[i]->plane_size;
4765 plane_info[i].rotation = plane_states[i]->rotation;
4766 plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
4767 plane_info[i].stereo_format = plane_states[i]->stereo_format;
4768 plane_info[i].tiling_info = plane_states[i]->tiling_info;
4769 plane_info[i].visible = plane_states[i]->visible;
4770 plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
4771 plane_info[i].dcc = plane_states[i]->dcc;
4772 scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
4773 scaling_info[i].src_rect = plane_states[i]->src_rect;
4774 scaling_info[i].dst_rect = plane_states[i]->dst_rect;
4775 scaling_info[i].clip_rect = plane_states[i]->clip_rect;
4776
4777 updates[i].flip_addr = &flip_addr[i];
4778 updates[i].plane_info = &plane_info[i];
4779 updates[i].scaling_info = &scaling_info[i];
4780 }
4781
4782 mutex_lock(&dm->dc_lock);
4783 dc_commit_updates_for_stream(
4784 dc,
4785 updates,
4786 new_plane_count,
4787 dc_stream, stream_update, plane_states, state);
4788 mutex_unlock(&dm->dc_lock);
4789
4790 kfree(flip_addr);
4791 kfree(plane_info);
4792 kfree(scaling_info);
4793 kfree(stream_update);
4794 return true;
4795}
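
The error handling above leans on kfree(NULL) being a no-op: all scratch buffers are allocated up front and released together if any allocation fails, with no tracking of which ones succeeded. A condensed sketch of the idiom with generic names:

	/* Allocate-several, free-all-on-error: kfree(NULL) does nothing, so
	 * one error path covers every partial-failure combination. */
	a = kcalloc(n, sizeof(*a), GFP_KERNEL);
	b = kcalloc(n, sizeof(*b), GFP_KERNEL);
	c = kcalloc(n, sizeof(*c), GFP_KERNEL);
	if (!a || !b || !c) {
		kfree(a);
		kfree(b);
		kfree(c);
		return false;
	}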
4796
4797static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 4621static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4798 struct dc_state *dc_state, 4622 struct dc_state *dc_state,
4799 struct drm_device *dev, 4623 struct drm_device *dev,
@@ -4801,26 +4625,50 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4801 struct drm_crtc *pcrtc, 4625 struct drm_crtc *pcrtc,
4802 bool *wait_for_vblank) 4626 bool *wait_for_vblank)
4803{ 4627{
4804 uint32_t i; 4628 uint32_t i, r;
4629 uint64_t timestamp_ns;
4805 struct drm_plane *plane; 4630 struct drm_plane *plane;
4806 struct drm_plane_state *old_plane_state, *new_plane_state; 4631 struct drm_plane_state *old_plane_state, *new_plane_state;
4807 struct dc_stream_state *dc_stream_attach;
4808 struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
4809 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 4632 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
4810 struct drm_crtc_state *new_pcrtc_state = 4633 struct drm_crtc_state *new_pcrtc_state =
4811 drm_atomic_get_new_crtc_state(state, pcrtc); 4634 drm_atomic_get_new_crtc_state(state, pcrtc);
4812 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 4635 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
4813 struct dm_crtc_state *dm_old_crtc_state = 4636 struct dm_crtc_state *dm_old_crtc_state =
4814 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 4637 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
4815 int planes_count = 0; 4638 int flip_count = 0, planes_count = 0, vpos, hpos;
4816 unsigned long flags; 4639 unsigned long flags;
4640 struct amdgpu_bo *abo;
4641 uint64_t tiling_flags, dcc_address;
4642 struct dc_stream_status *stream_status;
4643 uint32_t target, target_vblank;
4644
4645 struct {
4646 struct dc_surface_update surface_updates[MAX_SURFACES];
4647 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
4648 struct dc_stream_update stream_update;
4649 } *flip;
4650
4651 struct {
4652 struct dc_surface_update surface_updates[MAX_SURFACES];
4653 struct dc_plane_info plane_infos[MAX_SURFACES];
4654 struct dc_scaling_info scaling_infos[MAX_SURFACES];
4655 struct dc_stream_update stream_update;
4656 } *full;
4657
4658 flip = kzalloc(sizeof(*flip), GFP_KERNEL);
4659 full = kzalloc(sizeof(*full), GFP_KERNEL);
4660
4661 if (!flip || !full)
4662 dm_error("Failed to allocate update bundles\n");
4817 4663
4818 /* update planes when needed */ 4664 /* update planes when needed */
4819 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 4665 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4820 struct drm_crtc *crtc = new_plane_state->crtc; 4666 struct drm_crtc *crtc = new_plane_state->crtc;
4821 struct drm_crtc_state *new_crtc_state; 4667 struct drm_crtc_state *new_crtc_state;
4822 struct drm_framebuffer *fb = new_plane_state->fb; 4668 struct drm_framebuffer *fb = new_plane_state->fb;
4669 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
4823 bool pflip_needed; 4670 bool pflip_needed;
4671 struct dc_plane_state *surface, *dc_plane;
4824 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 4672 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
4825 4673
4826 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 4674 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
@@ -4835,72 +4683,205 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4835 if (!new_crtc_state->active) 4683 if (!new_crtc_state->active)
4836 continue; 4684 continue;
4837 4685
4838 pflip_needed = !state->allow_modeset; 4686 pflip_needed = old_plane_state->fb &&
4687 old_plane_state->fb != new_plane_state->fb;
4839 4688
4840 spin_lock_irqsave(&crtc->dev->event_lock, flags); 4689 dc_plane = dm_new_plane_state->dc_state;
4841 if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
4842 DRM_ERROR("%s: acrtc %d, already busy\n",
4843 __func__,
4844 acrtc_attach->crtc_id);
4845 /* In commit tail framework this cannot happen */
4846 WARN_ON(1);
4847 }
4848 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4849
4850 if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
4851 WARN_ON(!dm_new_plane_state->dc_state);
4852
4853 plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
4854 4690
4855 dc_stream_attach = acrtc_state->stream; 4691 if (pflip_needed) {
4856 planes_count++; 4692 /*
4857 4693 * Assume even ONE crtc with immediate flip means
4858 } else if (new_crtc_state->planes_changed) {
4859 /* Assume even ONE crtc with immediate flip means
 4860 * the entire commit can't wait for VBLANK 4694 * the entire commit can't wait for VBLANK
4861 * TODO Check if it's correct 4695 * TODO Check if it's correct
4862 */ 4696 */
4863 *wait_for_vblank = 4697 if (new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
4864 new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ? 4698 *wait_for_vblank = false;
4865 false : true; 4699
4866 4700 /*
 4867 /* TODO: Needs rework for multiplane flip */ 4701 * TODO This might fail, so it is better not used; wait
 4868 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 4702 * explicitly on fences instead,
 4869 drm_crtc_vblank_get(crtc); 4703 * and in general it should be called for a
 4870 4704 * blocking commit, as per the framework helpers
4871 amdgpu_dm_do_flip( 4705 */
4872 crtc, 4706 abo = gem_to_amdgpu_bo(fb->obj[0]);
4873 fb, 4707 r = amdgpu_bo_reserve(abo, true);
4874 (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank, 4708 if (unlikely(r != 0)) {
4875 dc_state); 4709 DRM_ERROR("failed to reserve buffer before flip\n");
4710 WARN_ON(1);
4711 }
4712
4713 /* Wait for all fences on this FB */
4714 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
4715 MAX_SCHEDULE_TIMEOUT) < 0);
4716
4717 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
4718
4719 amdgpu_bo_unreserve(abo);
4720
4721 flip->flip_addrs[flip_count].address.grph.addr.low_part = lower_32_bits(afb->address);
4722 flip->flip_addrs[flip_count].address.grph.addr.high_part = upper_32_bits(afb->address);
4723
4724 dcc_address = get_dcc_address(afb->address, tiling_flags);
4725 flip->flip_addrs[flip_count].address.grph.meta_addr.low_part = lower_32_bits(dcc_address);
4726 flip->flip_addrs[flip_count].address.grph.meta_addr.high_part = upper_32_bits(dcc_address);
4727
4728 flip->flip_addrs[flip_count].flip_immediate =
4729 (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
4730
4731 timestamp_ns = ktime_get_ns();
4732 flip->flip_addrs[flip_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
4733 flip->surface_updates[flip_count].flip_addr = &flip->flip_addrs[flip_count];
4734
4735 stream_status = dc_stream_get_status(acrtc_state->stream);
4736 if (!stream_status) {
4737 DRM_ERROR("No stream status for CRTC: id=%d\n",
4738 acrtc_attach->crtc_id);
4739 continue;
4740 }
4741
4742 surface = stream_status->plane_states[0];
4743 flip->surface_updates[flip_count].surface = surface;
4744 if (!flip->surface_updates[flip_count].surface) {
4745 DRM_ERROR("No surface for CRTC: id=%d\n",
4746 acrtc_attach->crtc_id);
4747 continue;
4748 }
4749
4750 if (acrtc_state->stream)
4751 update_freesync_state_on_stream(
4752 dm,
4753 acrtc_state,
4754 acrtc_state->stream,
4755 surface,
4756 flip->flip_addrs[flip_count].flip_timestamp_in_us);
4757
4758 /* Update surface timing information. */
4759 surface->time.time_elapsed_in_us[surface->time.index] =
4760 flip->flip_addrs[flip_count].flip_timestamp_in_us -
4761 surface->time.prev_update_time_in_us;
4762 surface->time.prev_update_time_in_us = flip->flip_addrs[flip_count].flip_timestamp_in_us;
4763 surface->time.index++;
4764 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
4765 surface->time.index = 0;
4766
4767 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
4768 __func__,
4769 flip->flip_addrs[flip_count].address.grph.addr.high_part,
4770 flip->flip_addrs[flip_count].address.grph.addr.low_part);
4771
4772 flip_count += 1;
4876 } 4773 }
4877 4774
4775 full->surface_updates[planes_count].surface = dc_plane;
4776 if (new_pcrtc_state->color_mgmt_changed) {
4777 full->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
4778 full->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
4779 }
4780
4781
4782 full->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality;
4783 full->scaling_infos[planes_count].src_rect = dc_plane->src_rect;
4784 full->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect;
4785 full->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect;
4786 full->surface_updates[planes_count].scaling_info = &full->scaling_infos[planes_count];
4787
4788
4789 full->plane_infos[planes_count].color_space = dc_plane->color_space;
4790 full->plane_infos[planes_count].format = dc_plane->format;
4791 full->plane_infos[planes_count].plane_size = dc_plane->plane_size;
4792 full->plane_infos[planes_count].rotation = dc_plane->rotation;
4793 full->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror;
4794 full->plane_infos[planes_count].stereo_format = dc_plane->stereo_format;
4795 full->plane_infos[planes_count].tiling_info = dc_plane->tiling_info;
4796 full->plane_infos[planes_count].visible = dc_plane->visible;
4797 full->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha;
4798 full->plane_infos[planes_count].dcc = dc_plane->dcc;
4799 full->surface_updates[planes_count].plane_info = &full->plane_infos[planes_count];
4800
4801 planes_count += 1;
4802
4878 } 4803 }
4879 4804
4880 if (planes_count) { 4805 /*
4881 unsigned long flags; 4806 * TODO: For proper atomic behaviour, we should be calling into DC once with
4807 * all the changes. However, DC refuses to do pageflips and non-pageflip
4808 * changes in the same call. Change DC to respect atomic behaviour,
4809 * hopefully eliminating dc_*_update structs in their entirety.
4810 */
4811 if (flip_count) {
4812 target = (uint32_t)drm_crtc_vblank_count(pcrtc) + *wait_for_vblank;
4813 /* Prepare wait for target vblank early - before the fence-waits */
4814 target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
4815 amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
4882 4816
4883 if (new_pcrtc_state->event) { 4817 /*
4818 * Wait until we're out of the vertical blank period before the one
4819 * targeted by the flip
4820 */
4821 while ((acrtc_attach->enabled &&
4822 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
4823 0, &vpos, &hpos, NULL,
4824 NULL, &pcrtc->hwmode)
4825 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
4826 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
4827 (int)(target_vblank -
4828 amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
4829 usleep_range(1000, 1100);
4830 }
4884 4831
4832 if (acrtc_attach->base.state->event) {
4885 drm_crtc_vblank_get(pcrtc); 4833 drm_crtc_vblank_get(pcrtc);
4886 4834
4887 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 4835 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
4836
4837 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
4888 prepare_flip_isr(acrtc_attach); 4838 prepare_flip_isr(acrtc_attach);
4839
4889 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 4840 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
4890 } 4841 }
4891 4842
4892 dc_stream_attach->abm_level = acrtc_state->abm_level; 4843 if (acrtc_state->stream) {
4893 4844
4894 if (false == commit_planes_to_stream(dm, 4845 if (acrtc_state->freesync_timing_changed)
4895 dm->dc, 4846 flip->stream_update.adjust =
4896 plane_states_constructed, 4847 &acrtc_state->stream->adjust;
4897 planes_count, 4848
4898 acrtc_state, 4849 if (acrtc_state->freesync_vrr_info_changed)
4899 dm_old_crtc_state, 4850 flip->stream_update.vrr_infopacket =
4900 dc_state)) 4851 &acrtc_state->stream->vrr_infopacket;
4901 dm_error("%s: Failed to attach plane!\n", __func__); 4852 }
4902 } else { 4853
 4903 /* TODO BUG: disabling planes on the CRTC should go here. */ 4854 mutex_lock(&dm->dc_lock);
4855 dc_commit_updates_for_stream(dm->dc,
4856 flip->surface_updates,
4857 flip_count,
4858 acrtc_state->stream,
4859 &flip->stream_update,
4860 dc_state);
4861 mutex_unlock(&dm->dc_lock);
4862 }
4863
4864 if (planes_count) {
4865 if (new_pcrtc_state->mode_changed) {
4866 full->stream_update.src = acrtc_state->stream->src;
4867 full->stream_update.dst = acrtc_state->stream->dst;
4868 }
4869
4870 if (new_pcrtc_state->color_mgmt_changed)
4871 full->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
4872
4873 acrtc_state->stream->abm_level = acrtc_state->abm_level;
4874 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
4875 full->stream_update.abm_level = &acrtc_state->abm_level;
4876
4877 mutex_lock(&dm->dc_lock);
4878 dc_commit_updates_for_stream(dm->dc,
4879 full->surface_updates,
4880 planes_count,
4881 acrtc_state->stream,
4882 &full->stream_update,
4883 dc_state);
4884 mutex_unlock(&dm->dc_lock);
4904 } 4885 }
4905} 4886}
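
The rewritten commit path gathers everything handed to DC into two heap-allocated bundles, so the surface updates, flip addresses and stream update share a single lifetime and none of the pointers passed to dc_commit_updates_for_stream() go stale mid-call. A sketch of the bundle shape, with the types and MAX_SURFACES taken from the diff:

	/* One allocation per update class; every pointer stored in
	 * surface_updates points inside the same bundle. */
	struct flip_bundle {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	};

	struct flip_bundle *flip = kzalloc(sizeof(*flip), GFP_KERNEL);

	if (flip) {
		/* Each update entry references its sibling array slot. */
		flip->surface_updates[0].flip_addr = &flip->flip_addrs[0];
		/* ... fill remaining entries, then commit under dm->dc_lock ... */
	}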
4906 4887
@@ -5077,8 +5058,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
5077 dc_stream_get_status(dm_new_crtc_state->stream); 5058 dc_stream_get_status(dm_new_crtc_state->stream);
5078 5059
5079 if (!status) 5060 if (!status)
5080 status = dc_state_get_stream_status(dc_state, 5061 status = dc_stream_get_status_from_state(dc_state,
5081 dm_new_crtc_state->stream); 5062 dm_new_crtc_state->stream);
5082 5063
5083 if (!status) 5064 if (!status)
5084 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); 5065 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
@@ -5087,11 +5068,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
5087 } 5068 }
5088 } 5069 }
5089 5070
5090 /* Handle scaling, underscan, and abm changes*/ 5071 /* Handle connector state changes */
5091 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 5072 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5092 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 5073 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5093 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 5074 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5094 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 5075 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5076 struct dc_surface_update dummy_updates[MAX_SURFACES] = { 0 };
5077 struct dc_stream_update stream_update = { 0 };
5095 struct dc_stream_status *status = NULL; 5078 struct dc_stream_status *status = NULL;
5096 5079
5097 if (acrtc) { 5080 if (acrtc) {
@@ -5103,37 +5086,48 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
5103 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 5086 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
5104 continue; 5087 continue;
5105 5088
5106
5107 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 5089 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5108 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 5090 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5109 5091
5110 /* Skip anything that is not scaling or underscan changes */
5111 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) && 5092 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
5112 (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level)) 5093 (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
5113 continue; 5094 continue;
5114 5095
5115 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 5096 if (is_scaling_state_different(dm_new_con_state, dm_old_con_state)) {
5116 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); 5097 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
5098 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
5117 5099
5118 if (!dm_new_crtc_state->stream) 5100 stream_update.src = dm_new_crtc_state->stream->src;
5119 continue; 5101 stream_update.dst = dm_new_crtc_state->stream->dst;
5102 }
5103
5104 if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
5105 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
5106
5107 stream_update.abm_level = &dm_new_crtc_state->abm_level;
5108 }
5120 5109
5121 status = dc_stream_get_status(dm_new_crtc_state->stream); 5110 status = dc_stream_get_status(dm_new_crtc_state->stream);
5122 WARN_ON(!status); 5111 WARN_ON(!status);
5123 WARN_ON(!status->plane_count); 5112 WARN_ON(!status->plane_count);
5124 5113
5125 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 5114 /*
5115 * TODO: DC refuses to perform stream updates without a dc_surface_update.
5116 * Here we create an empty update on each plane.
5117 * To fix this, DC should permit updating only stream properties.
5118 */
5119 for (j = 0; j < status->plane_count; j++)
5120 dummy_updates[j].surface = status->plane_states[0];
5121
5126 5122
5127 /*TODO How it works with MPO ?*/ 5123 mutex_lock(&dm->dc_lock);
5128 if (!commit_planes_to_stream( 5124 dc_commit_updates_for_stream(dm->dc,
5129 dm, 5125 dummy_updates,
5130 dm->dc, 5126 status->plane_count,
5131 status->plane_states, 5127 dm_new_crtc_state->stream,
5132 status->plane_count, 5128 &stream_update,
5133 dm_new_crtc_state, 5129 dc_state);
5134 to_dm_crtc_state(old_crtc_state), 5130 mutex_unlock(&dm->dc_lock);
5135 dc_state))
5136 dm_error("%s: Failed to update stream scaling!\n", __func__);
5137 } 5131 }
5138 5132
5139 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 5133 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
@@ -5184,18 +5178,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
5184 } 5178 }
5185 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 5179 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5186 5180
5181 /* Signal HW programming completion */
5182 drm_atomic_helper_commit_hw_done(state);
5187 5183
5188 if (wait_for_vblank) 5184 if (wait_for_vblank)
5189 drm_atomic_helper_wait_for_flip_done(dev, state); 5185 drm_atomic_helper_wait_for_flip_done(dev, state);
5190 5186
5191 /*
5192 * FIXME:
5193 * Delay hw_done() until flip_done() is signaled. This is to block
5194 * another commit from freeing the CRTC state while we're still
5195 * waiting on flip_done.
5196 */
5197 drm_atomic_helper_commit_hw_done(state);
5198
5199 drm_atomic_helper_cleanup_planes(dev, state); 5187 drm_atomic_helper_cleanup_planes(dev, state);
5200 5188
5201 /* 5189 /*
@@ -5388,15 +5376,15 @@ static void reset_freesync_config_for_crtc(
5388 sizeof(new_crtc_state->vrr_infopacket)); 5376 sizeof(new_crtc_state->vrr_infopacket));
5389} 5377}
5390 5378
5391static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, 5379static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
5392 struct drm_atomic_state *state, 5380 struct drm_atomic_state *state,
5393 bool enable, 5381 struct drm_crtc *crtc,
5394 bool *lock_and_validation_needed) 5382 struct drm_crtc_state *old_crtc_state,
5383 struct drm_crtc_state *new_crtc_state,
5384 bool enable,
5385 bool *lock_and_validation_needed)
5395{ 5386{
5396 struct dm_atomic_state *dm_state = NULL; 5387 struct dm_atomic_state *dm_state = NULL;
5397 struct drm_crtc *crtc;
5398 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5399 int i;
5400 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 5388 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
5401 struct dc_stream_state *new_stream; 5389 struct dc_stream_state *new_stream;
5402 int ret = 0; 5390 int ret = 0;
@@ -5405,200 +5393,203 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
5405 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 5393 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
5406 * update changed items 5394 * update changed items
5407 */ 5395 */
5408 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 5396 struct amdgpu_crtc *acrtc = NULL;
5409 struct amdgpu_crtc *acrtc = NULL; 5397 struct amdgpu_dm_connector *aconnector = NULL;
5410 struct amdgpu_dm_connector *aconnector = NULL; 5398 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
5411 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 5399 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
5412 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 5400 struct drm_plane_state *new_plane_state = NULL;
5413 struct drm_plane_state *new_plane_state = NULL;
5414
5415 new_stream = NULL;
5416 5401
5417 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 5402 new_stream = NULL;
5418 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5419 acrtc = to_amdgpu_crtc(crtc);
5420 5403
5421 new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary); 5404 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5405 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5406 acrtc = to_amdgpu_crtc(crtc);
5422 5407
5423 if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) { 5408 new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
5424 ret = -EINVAL;
5425 goto fail;
5426 }
5427 5409
5428 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 5410 if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
5411 ret = -EINVAL;
5412 goto fail;
5413 }
5429 5414
5430 /* TODO This hack should go away */ 5415 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
5431 if (aconnector && enable) {
5432 /* Make sure fake sink is created in plug-in scenario */
5433 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
5434 &aconnector->base);
5435 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
5436 &aconnector->base);
5437 5416
5438 if (IS_ERR(drm_new_conn_state)) { 5417 /* TODO This hack should go away */
5439 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 5418 if (aconnector && enable) {
5440 break; 5419 /* Make sure fake sink is created in plug-in scenario */
5441 } 5420 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
5421 &aconnector->base);
5422 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
5423 &aconnector->base);
5442 5424
5443 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 5425 if (IS_ERR(drm_new_conn_state)) {
5444 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 5426 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
5427 goto fail;
5428 }
5445 5429
5446 new_stream = create_stream_for_sink(aconnector, 5430 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
5447 &new_crtc_state->mode, 5431 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
5448 dm_new_conn_state,
5449 dm_old_crtc_state->stream);
5450 5432
5451 /* 5433 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5452 * we can have no stream on ACTION_SET if a display 5434 goto skip_modeset;
5453 * was disconnected during S3, in this case it is not an
5454 * error, the OS will be updated after detection, and
5455 * will do the right thing on next atomic commit
5456 */
5457 5435
5458 if (!new_stream) { 5436 new_stream = create_stream_for_sink(aconnector,
5459 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 5437 &new_crtc_state->mode,
5460 __func__, acrtc->base.base.id); 5438 dm_new_conn_state,
5461 break; 5439 dm_old_crtc_state->stream);
5462 }
5463 5440
5464 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 5441 /*
5442 * we can have no stream on ACTION_SET if a display
5443 * was disconnected during S3, in this case it is not an
5444 * error, the OS will be updated after detection, and
5445 * will do the right thing on next atomic commit
5446 */
5465 5447
5466 if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 5448 if (!new_stream) {
5467 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 5449 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5468 new_crtc_state->mode_changed = false; 5450 __func__, acrtc->base.base.id);
5469 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 5451 ret = -ENOMEM;
5470 new_crtc_state->mode_changed); 5452 goto fail;
5471 }
5472 } 5453 }
5473 5454
5474 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 5455 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
5475 goto next_crtc;
5476 5456
5477 DRM_DEBUG_DRIVER( 5457 if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
5478 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " 5458 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
5479 "planes_changed:%d, mode_changed:%d,active_changed:%d," 5459 new_crtc_state->mode_changed = false;
5480 "connectors_changed:%d\n", 5460 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
5481 acrtc->crtc_id, 5461 new_crtc_state->mode_changed);
5482 new_crtc_state->enable, 5462 }
5483 new_crtc_state->active, 5463 }
5484 new_crtc_state->planes_changed,
5485 new_crtc_state->mode_changed,
5486 new_crtc_state->active_changed,
5487 new_crtc_state->connectors_changed);
5488 5464
5489 /* Remove stream for any changed/disabled CRTC */ 5465 /* mode_changed flag may get updated above, need to check again */
5490 if (!enable) { 5466 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5467 goto skip_modeset;
5491 5468
5492 if (!dm_old_crtc_state->stream) 5469 DRM_DEBUG_DRIVER(
5493 goto next_crtc; 5470 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5471 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
5472 "connectors_changed:%d\n",
5473 acrtc->crtc_id,
5474 new_crtc_state->enable,
5475 new_crtc_state->active,
5476 new_crtc_state->planes_changed,
5477 new_crtc_state->mode_changed,
5478 new_crtc_state->active_changed,
5479 new_crtc_state->connectors_changed);
5494 5480
5495 ret = dm_atomic_get_state(state, &dm_state); 5481 /* Remove stream for any changed/disabled CRTC */
5496 if (ret) 5482 if (!enable) {
5497 goto fail;
5498 5483
5499 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 5484 if (!dm_old_crtc_state->stream)
5500 crtc->base.id); 5485 goto skip_modeset;
5501 5486
5502 /* i.e. reset mode */ 5487 ret = dm_atomic_get_state(state, &dm_state);
5503 if (dc_remove_stream_from_ctx( 5488 if (ret)
5504 dm->dc, 5489 goto fail;
5505 dm_state->context,
5506 dm_old_crtc_state->stream) != DC_OK) {
5507 ret = -EINVAL;
5508 goto fail;
5509 }
5510 5490
5511 dc_stream_release(dm_old_crtc_state->stream); 5491 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
5512 dm_new_crtc_state->stream = NULL; 5492 crtc->base.id);
5513 5493
5514 reset_freesync_config_for_crtc(dm_new_crtc_state); 5494 /* i.e. reset mode */
5495 if (dc_remove_stream_from_ctx(
5496 dm->dc,
5497 dm_state->context,
5498 dm_old_crtc_state->stream) != DC_OK) {
5499 ret = -EINVAL;
5500 goto fail;
5501 }
5515 5502
5516 *lock_and_validation_needed = true; 5503 dc_stream_release(dm_old_crtc_state->stream);
5504 dm_new_crtc_state->stream = NULL;
5517 5505
5518 } else {/* Add stream for any updated/enabled CRTC */ 5506 reset_freesync_config_for_crtc(dm_new_crtc_state);
5519 /*
5520 * Quick fix to prevent NULL pointer on new_stream when
5521 * added MST connectors not found in existing crtc_state in the chained mode
5522 * TODO: need to dig out the root cause of that
5523 */
5524 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
5525 goto next_crtc;
5526 5507
5527 if (modereset_required(new_crtc_state)) 5508 *lock_and_validation_needed = true;
5528 goto next_crtc;
5529 5509
5530 if (modeset_required(new_crtc_state, new_stream, 5510 } else {/* Add stream for any updated/enabled CRTC */
5531 dm_old_crtc_state->stream)) { 5511 /*
5512 * Quick fix to prevent NULL pointer on new_stream when
5513 * added MST connectors not found in existing crtc_state in the chained mode
5514 * TODO: need to dig out the root cause of that
5515 */
5516 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
5517 goto skip_modeset;
5532 5518
5533 WARN_ON(dm_new_crtc_state->stream); 5519 if (modereset_required(new_crtc_state))
5520 goto skip_modeset;
5534 5521
5535 ret = dm_atomic_get_state(state, &dm_state); 5522 if (modeset_required(new_crtc_state, new_stream,
5536 if (ret) 5523 dm_old_crtc_state->stream)) {
5537 goto fail;
5538 5524
5539 dm_new_crtc_state->stream = new_stream; 5525 WARN_ON(dm_new_crtc_state->stream);
5540 5526
5541 dc_stream_retain(new_stream); 5527 ret = dm_atomic_get_state(state, &dm_state);
5528 if (ret)
5529 goto fail;
5542 5530
5543 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n", 5531 dm_new_crtc_state->stream = new_stream;
5544 crtc->base.id);
5545 5532
5546 if (dc_add_stream_to_ctx( 5533 dc_stream_retain(new_stream);
5547 dm->dc,
5548 dm_state->context,
5549 dm_new_crtc_state->stream) != DC_OK) {
5550 ret = -EINVAL;
5551 goto fail;
5552 }
5553 5534
5554 *lock_and_validation_needed = true; 5535 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
5536 crtc->base.id);
5537
5538 if (dc_add_stream_to_ctx(
5539 dm->dc,
5540 dm_state->context,
5541 dm_new_crtc_state->stream) != DC_OK) {
5542 ret = -EINVAL;
5543 goto fail;
5555 } 5544 }
5545
5546 *lock_and_validation_needed = true;
5556 } 5547 }
5548 }
5557 5549
5558next_crtc: 5550skip_modeset:
5559 /* Release extra reference */ 5551 /* Release extra reference */
5560 if (new_stream) 5552 if (new_stream)
5561 dc_stream_release(new_stream); 5553 dc_stream_release(new_stream);
5562 5554
5563 /* 5555 /*
5564 * We want to do dc stream updates that do not require a 5556 * We want to do dc stream updates that do not require a
5565 * full modeset below. 5557 * full modeset below.
5566 */ 5558 */
5567 if (!(enable && aconnector && new_crtc_state->enable && 5559 if (!(enable && aconnector && new_crtc_state->enable &&
5568 new_crtc_state->active)) 5560 new_crtc_state->active))
5569 continue; 5561 return 0;
5570 /* 5562 /*
5571 * Given above conditions, the dc state cannot be NULL because: 5563 * Given above conditions, the dc state cannot be NULL because:
5572 * 1. We're in the process of enabling CRTCs (just been added 5564 * 1. We're in the process of enabling CRTCs (just been added
5573 * to the dc context, or already is on the context) 5565 * to the dc context, or already is on the context)
5574 * 2. Has a valid connector attached, and 5566 * 2. Has a valid connector attached, and
5575 * 3. Is currently active and enabled. 5567 * 3. Is currently active and enabled.
5576 * => The dc stream state currently exists. 5568 * => The dc stream state currently exists.
5577 */ 5569 */
5578 BUG_ON(dm_new_crtc_state->stream == NULL); 5570 BUG_ON(dm_new_crtc_state->stream == NULL);
5579 5571
5580 /* Scaling or underscan settings */ 5572 /* Scaling or underscan settings */
5581 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state)) 5573 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
5582 update_stream_scaling_settings( 5574 update_stream_scaling_settings(
5583 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); 5575 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
5584 5576
5585 /* 5577 /*
5586 * Color management settings. We also update color properties 5578 * Color management settings. We also update color properties
5587 * when a modeset is needed, to ensure it gets reprogrammed. 5579 * when a modeset is needed, to ensure it gets reprogrammed.
5588 */ 5580 */
5589 if (dm_new_crtc_state->base.color_mgmt_changed || 5581 if (dm_new_crtc_state->base.color_mgmt_changed ||
5590 drm_atomic_crtc_needs_modeset(new_crtc_state)) { 5582 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
5591 ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state); 5583 ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
5592 if (ret) 5584 if (ret)
5593 goto fail; 5585 goto fail;
5594 amdgpu_dm_set_ctm(dm_new_crtc_state); 5586 amdgpu_dm_set_ctm(dm_new_crtc_state);
5595 }
5596
5597 /* Update Freesync settings. */
5598 get_freesync_config_for_crtc(dm_new_crtc_state,
5599 dm_new_conn_state);
5600 } 5587 }
5601 5588
5589 /* Update Freesync settings. */
5590 get_freesync_config_for_crtc(dm_new_crtc_state,
5591 dm_new_conn_state);
5592
5602 return ret; 5593 return ret;
5603 5594
5604fail: 5595fail:
@@ -5607,145 +5598,141 @@ fail:
5607 return ret; 5598 return ret;
5608} 5599}
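
dm_update_crtcs_state() previously iterated the atomic state internally; the refactor above turns it into a per-CRTC helper, hoisting the iteration into the caller and turning each `continue` into a `goto skip_modeset` so the extra stream reference is still released. A sketch of the new call shape, mirroring the atomic-check hunk further down:

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state, new_crtc_state,
					   false, /* disable pass */
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}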
5609 5600
5610static int dm_update_planes_state(struct dc *dc, 5601static int dm_update_plane_state(struct dc *dc,
5611 struct drm_atomic_state *state, 5602 struct drm_atomic_state *state,
5612 bool enable, 5603 struct drm_plane *plane,
5613 bool *lock_and_validation_needed) 5604 struct drm_plane_state *old_plane_state,
5605 struct drm_plane_state *new_plane_state,
5606 bool enable,
5607 bool *lock_and_validation_needed)
5614{ 5608{
5615 5609
5616 struct dm_atomic_state *dm_state = NULL; 5610 struct dm_atomic_state *dm_state = NULL;
5617 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 5611 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
5618 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 5612 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5619 struct drm_plane *plane;
5620 struct drm_plane_state *old_plane_state, *new_plane_state;
5621 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 5613 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
5622 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 5614 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
5623 int i ;
5624 /* TODO return page_flip_needed() function */ 5615 /* TODO return page_flip_needed() function */
5625 bool pflip_needed = !state->allow_modeset; 5616 bool pflip_needed = !state->allow_modeset;
5626 int ret = 0; 5617 int ret = 0;
5627 5618
5628 5619
5629 /* Add new planes, in reverse order as DC expectation */ 5620 new_plane_crtc = new_plane_state->crtc;
5630 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 5621 old_plane_crtc = old_plane_state->crtc;
5631 new_plane_crtc = new_plane_state->crtc; 5622 dm_new_plane_state = to_dm_plane_state(new_plane_state);
5632 old_plane_crtc = old_plane_state->crtc; 5623 dm_old_plane_state = to_dm_plane_state(old_plane_state);
5633 dm_new_plane_state = to_dm_plane_state(new_plane_state);
5634 dm_old_plane_state = to_dm_plane_state(old_plane_state);
5635 5624
5636 /*TODO Implement atomic check for cursor plane */ 5625 /*TODO Implement atomic check for cursor plane */
5637 if (plane->type == DRM_PLANE_TYPE_CURSOR) 5626 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5638 continue; 5627 return 0;
5639 5628
5640 /* Remove any changed/removed planes */ 5629 /* Remove any changed/removed planes */
5641 if (!enable) { 5630 if (!enable) {
5642 if (pflip_needed && 5631 if (pflip_needed &&
5643 plane->type != DRM_PLANE_TYPE_OVERLAY) 5632 plane->type != DRM_PLANE_TYPE_OVERLAY)
5644 continue; 5633 return 0;
5645 5634
5646 if (!old_plane_crtc) 5635 if (!old_plane_crtc)
5647 continue; 5636 return 0;
5648 5637
5649 old_crtc_state = drm_atomic_get_old_crtc_state( 5638 old_crtc_state = drm_atomic_get_old_crtc_state(
5650 state, old_plane_crtc); 5639 state, old_plane_crtc);
5651 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 5640 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5652 5641
5653 if (!dm_old_crtc_state->stream) 5642 if (!dm_old_crtc_state->stream)
5654 continue; 5643 return 0;
5655 5644
5656 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 5645 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
5657 plane->base.id, old_plane_crtc->base.id); 5646 plane->base.id, old_plane_crtc->base.id);
5658 5647
5659 ret = dm_atomic_get_state(state, &dm_state); 5648 ret = dm_atomic_get_state(state, &dm_state);
5660 if (ret) 5649 if (ret)
5661 return ret; 5650 return ret;
5662 5651
5663 if (!dc_remove_plane_from_context( 5652 if (!dc_remove_plane_from_context(
5664 dc, 5653 dc,
5665 dm_old_crtc_state->stream, 5654 dm_old_crtc_state->stream,
5666 dm_old_plane_state->dc_state, 5655 dm_old_plane_state->dc_state,
5667 dm_state->context)) { 5656 dm_state->context)) {
5668 5657
 5669 ret = -EINVAL; 5658 ret = -EINVAL;
5670 return ret; 5659 return ret;
5671 } 5660 }
5672 5661
5673 5662
5674 dc_plane_state_release(dm_old_plane_state->dc_state); 5663 dc_plane_state_release(dm_old_plane_state->dc_state);
5675 dm_new_plane_state->dc_state = NULL; 5664 dm_new_plane_state->dc_state = NULL;
5676 5665
5677 *lock_and_validation_needed = true; 5666 *lock_and_validation_needed = true;
5678 5667
5679 } else { /* Add new planes */ 5668 } else { /* Add new planes */
5680 struct dc_plane_state *dc_new_plane_state; 5669 struct dc_plane_state *dc_new_plane_state;
5681 5670
5682 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 5671 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
5683 continue; 5672 return 0;
5684 5673
5685 if (!new_plane_crtc) 5674 if (!new_plane_crtc)
5686 continue; 5675 return 0;
5687 5676
5688 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 5677 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
5689 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 5678 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5690 5679
5691 if (!dm_new_crtc_state->stream) 5680 if (!dm_new_crtc_state->stream)
5692 continue; 5681 return 0;
5693 5682
5694 if (pflip_needed && 5683 if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY)
5695 plane->type != DRM_PLANE_TYPE_OVERLAY) 5684 return 0;
5696 continue;
5697 5685
5698 WARN_ON(dm_new_plane_state->dc_state); 5686 WARN_ON(dm_new_plane_state->dc_state);
5699 5687
5700 dc_new_plane_state = dc_create_plane_state(dc); 5688 dc_new_plane_state = dc_create_plane_state(dc);
5701 if (!dc_new_plane_state) 5689 if (!dc_new_plane_state)
5702 return -ENOMEM; 5690 return -ENOMEM;
5703 5691
5704 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", 5692 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
5705 plane->base.id, new_plane_crtc->base.id); 5693 plane->base.id, new_plane_crtc->base.id);
5706 5694
5707 ret = fill_plane_attributes( 5695 ret = fill_plane_attributes(
5708 new_plane_crtc->dev->dev_private, 5696 new_plane_crtc->dev->dev_private,
5709 dc_new_plane_state, 5697 dc_new_plane_state,
5710 new_plane_state, 5698 new_plane_state,
5711 new_crtc_state); 5699 new_crtc_state);
5712 if (ret) { 5700 if (ret) {
5713 dc_plane_state_release(dc_new_plane_state); 5701 dc_plane_state_release(dc_new_plane_state);
5714 return ret; 5702 return ret;
5715 } 5703 }
5716 5704
5717 ret = dm_atomic_get_state(state, &dm_state); 5705 ret = dm_atomic_get_state(state, &dm_state);
5718 if (ret) { 5706 if (ret) {
5719 dc_plane_state_release(dc_new_plane_state); 5707 dc_plane_state_release(dc_new_plane_state);
5720 return ret; 5708 return ret;
5721 } 5709 }
5722 5710
5723 /* 5711 /*
5724 * Any atomic check errors that occur after this will 5712 * Any atomic check errors that occur after this will
5725 * not need a release. The plane state will be attached 5713 * not need a release. The plane state will be attached
5726 * to the stream, and therefore part of the atomic 5714 * to the stream, and therefore part of the atomic
5727 * state. It'll be released when the atomic state is 5715 * state. It'll be released when the atomic state is
5728 * cleaned. 5716 * cleaned.
5729 */ 5717 */
5730 if (!dc_add_plane_to_context( 5718 if (!dc_add_plane_to_context(
5731 dc, 5719 dc,
5732 dm_new_crtc_state->stream, 5720 dm_new_crtc_state->stream,
5733 dc_new_plane_state, 5721 dc_new_plane_state,
5734 dm_state->context)) { 5722 dm_state->context)) {
5735
5736 dc_plane_state_release(dc_new_plane_state);
5737 return -EINVAL;
5738 }
5739 5723
5740 dm_new_plane_state->dc_state = dc_new_plane_state; 5724 dc_plane_state_release(dc_new_plane_state);
5725 return -EINVAL;
5726 }
5741 5727
5742 /* Tell DC to do a full surface update every time there 5728 dm_new_plane_state->dc_state = dc_new_plane_state;
5743 * is a plane change. Inefficient, but works for now.
5744 */
5745 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
5746 5729
5747 *lock_and_validation_needed = true; 5730 /* Tell DC to do a full surface update every time there
5748 } 5731 * is a plane change. Inefficient, but works for now.
5732 */
5733 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
5734
5735 *lock_and_validation_needed = true;
5749 } 5736 }
5750 5737
5751 5738
@@ -5769,11 +5756,14 @@ dm_determine_update_type_for_commit(struct dc *dc,
5769 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state; 5756 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
5770 struct dc_stream_status *status = NULL; 5757 struct dc_stream_status *status = NULL;
5771 5758
5772 struct dc_surface_update *updates = kzalloc(MAX_SURFACES * sizeof(struct dc_surface_update), GFP_KERNEL); 5759 struct dc_surface_update *updates;
5773 struct dc_plane_state *surface = kzalloc(MAX_SURFACES * sizeof(struct dc_plane_state), GFP_KERNEL); 5760 struct dc_plane_state *surface;
5774 struct dc_stream_update stream_update; 5761 struct dc_stream_update stream_update;
5775 enum surface_update_type update_type = UPDATE_TYPE_FAST; 5762 enum surface_update_type update_type = UPDATE_TYPE_FAST;
5776 5763
5764 updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
5765 surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL);
5766
5777 if (!updates || !surface) { 5767 if (!updates || !surface) {
5778 DRM_ERROR("Plane or surface update failed to allocate"); 5768 DRM_ERROR("Plane or surface update failed to allocate");
5779 /* Set type to FULL to avoid crashing in DC*/ 5769 /* Set type to FULL to avoid crashing in DC*/
@@ -5842,8 +5832,8 @@ dm_determine_update_type_for_commit(struct dc *dc,
5842 goto cleanup; 5832 goto cleanup;
5843 } 5833 }
5844 5834
5845 status = dc_state_get_stream_status(old_dm_state->context, 5835 status = dc_stream_get_status_from_state(old_dm_state->context,
5846 new_dm_crtc_state->stream); 5836 new_dm_crtc_state->stream);
5847 5837
5848 update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, 5838 update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
5849 &stream_update, status); 5839 &stream_update, status);
@@ -5903,6 +5893,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
5903 struct drm_connector_state *old_con_state, *new_con_state; 5893 struct drm_connector_state *old_con_state, *new_con_state;
5904 struct drm_crtc *crtc; 5894 struct drm_crtc *crtc;
5905 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 5895 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5896 struct drm_plane *plane;
5897 struct drm_plane_state *old_plane_state, *new_plane_state;
5906 enum surface_update_type update_type = UPDATE_TYPE_FAST; 5898 enum surface_update_type update_type = UPDATE_TYPE_FAST;
5907 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST; 5899 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
5908 5900
@@ -5921,7 +5913,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
5921 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 5913 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5922 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 5914 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
5923 !new_crtc_state->color_mgmt_changed && 5915 !new_crtc_state->color_mgmt_changed &&
5924 !new_crtc_state->vrr_enabled) 5916 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
5925 continue; 5917 continue;
5926 5918
5927 if (!new_crtc_state->enable) 5919 if (!new_crtc_state->enable)
@@ -5937,27 +5929,47 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
5937 } 5929 }
5938 5930
 5939 /* Remove existing planes if they are modified */ 5931 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
5940 ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed); 5932 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
5941 if (ret) { 5933 ret = dm_update_plane_state(dc, state, plane,
5942 goto fail; 5934 old_plane_state,
5935 new_plane_state,
5936 false,
5937 &lock_and_validation_needed);
5938 if (ret)
5939 goto fail;
5943 } 5940 }
5944 5941
5945 /* Disable all crtcs which require disable */ 5942 /* Disable all crtcs which require disable */
5946 ret = dm_update_crtcs_state(&adev->dm, state, false, &lock_and_validation_needed); 5943 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5947 if (ret) { 5944 ret = dm_update_crtc_state(&adev->dm, state, crtc,
5948 goto fail; 5945 old_crtc_state,
5946 new_crtc_state,
5947 false,
5948 &lock_and_validation_needed);
5949 if (ret)
5950 goto fail;
5949 } 5951 }
5950 5952
5951 /* Enable all crtcs which require enable */ 5953 /* Enable all crtcs which require enable */
5952 ret = dm_update_crtcs_state(&adev->dm, state, true, &lock_and_validation_needed); 5954 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5953 if (ret) { 5955 ret = dm_update_crtc_state(&adev->dm, state, crtc,
5954 goto fail; 5956 old_crtc_state,
5957 new_crtc_state,
5958 true,
5959 &lock_and_validation_needed);
5960 if (ret)
5961 goto fail;
5955 } 5962 }
5956 5963
5957 /* Add new/modified planes */ 5964 /* Add new/modified planes */
5958 ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed); 5965 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
5959 if (ret) { 5966 ret = dm_update_plane_state(dc, state, plane,
5960 goto fail; 5967 old_plane_state,
5968 new_plane_state,
5969 true,
5970 &lock_and_validation_needed);
5971 if (ret)
5972 goto fail;
5961 } 5973 }
5962 5974
5963 /* Run this here since we want to validate the streams we created */ 5975 /* Run this here since we want to validate the streams we created */
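
One subtle change in the amdgpu_dm_atomic_check() hunk above: the fast-path skip now compares the old and new vrr_enabled values instead of testing only the new one, so a commit that turns variable refresh off is no longer skipped:

	/* Skip only when nothing display-relevant changed; comparing old
	 * against new catches VRR enable *and* disable transitions. */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
	    !new_crtc_state->color_mgmt_changed &&
	    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
		continue;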
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 9a7ac58eb18e..cca3e16cda4f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -783,6 +783,45 @@ static ssize_t dtn_log_write(
783 return size; 783 return size;
784} 784}
785 785
786/*
787 * Backlight at this moment. Read only.
788 * As written to display, taking ABM and backlight lut into account.
789 * Ranges from 0x0 to 0x10000 (= 100% PWM)
790 */
791static int current_backlight_read(struct seq_file *m, void *data)
792{
793 struct drm_info_node *node = (struct drm_info_node *)m->private;
794 struct drm_device *dev = node->minor->dev;
795 struct amdgpu_device *adev = dev->dev_private;
796 struct dc *dc = adev->dm.dc;
797 unsigned int backlight = dc_get_current_backlight_pwm(dc);
798
799 seq_printf(m, "0x%x\n", backlight);
800 return 0;
801}
802
803/*
804 * Backlight value that is being approached. Read only.
805 * As written to display, taking ABM and backlight lut into account.
806 * Ranges from 0x0 to 0x10000 (= 100% PWM)
807 */
808static int target_backlight_read(struct seq_file *m, void *data)
809{
810 struct drm_info_node *node = (struct drm_info_node *)m->private;
811 struct drm_device *dev = node->minor->dev;
812 struct amdgpu_device *adev = dev->dev_private;
813 struct dc *dc = adev->dm.dc;
814 unsigned int backlight = dc_get_target_backlight_pwm(dc);
815
816 seq_printf(m, "0x%x\n", backlight);
817 return 0;
818}
819
820static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
821 {"amdgpu_current_backlight_pwm", &current_backlight_read},
822 {"amdgpu_target_backlight_pwm", &target_backlight_read},
823};
824
786int dtn_debugfs_init(struct amdgpu_device *adev) 825int dtn_debugfs_init(struct amdgpu_device *adev)
787{ 826{
788 static const struct file_operations dtn_log_fops = { 827 static const struct file_operations dtn_log_fops = {
@@ -793,9 +832,15 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
793 }; 832 };
794 833
795 struct drm_minor *minor = adev->ddev->primary; 834 struct drm_minor *minor = adev->ddev->primary;
796 struct dentry *root = minor->debugfs_root; 835 struct dentry *ent, *root = minor->debugfs_root;
836 int ret;
837
838 ret = amdgpu_debugfs_add_files(adev, amdgpu_dm_debugfs_list,
839 ARRAY_SIZE(amdgpu_dm_debugfs_list));
840 if (ret)
841 return ret;
797 842
798 struct dentry *ent = debugfs_create_file( 843 ent = debugfs_create_file(
799 "amdgpu_dm_dtn_log", 844 "amdgpu_dm_dtn_log",
800 0644, 845 0644,
801 root, 846 root,
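
The two new entries expose DC's current and target backlight PWM through the DRM debugfs directory. A small userspace sketch for reading them; the debugfs mount point and card index are assumptions that vary per system:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pwm;
		FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_current_backlight_pwm", "r");

		if (!f)
			return 1;
		/* The kernel side prints "0x%x\n"; 0x10000 means 100% PWM. */
		if (fscanf(f, "0x%x", &pwm) == 1)
			printf("current backlight: %u / 65536\n", pwm);
		fclose(f);
		return 0;
	}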
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 39997d977efb..e6ab0186955c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -192,7 +192,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
192 int bpp = 0; 192 int bpp = 0;
193 int pbn = 0; 193 int pbn = 0;
194 194
195 aconnector = stream->sink->priv; 195 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
196 196
197 if (!aconnector || !aconnector->mst_port) 197 if (!aconnector || !aconnector->mst_port)
198 return false; 198 return false;
@@ -205,7 +205,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
205 mst_port = aconnector->port; 205 mst_port = aconnector->port;
206 206
207 if (enable) { 207 if (enable) {
208 clock = stream->timing.pix_clk_khz; 208 clock = stream->timing.pix_clk_100hz / 10;
209 209
210 switch (stream->timing.display_color_depth) { 210 switch (stream->timing.display_color_depth) {
211 211
@@ -284,7 +284,7 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
284 struct drm_dp_mst_topology_mgr *mst_mgr; 284 struct drm_dp_mst_topology_mgr *mst_mgr;
285 int ret; 285 int ret;
286 286
287 aconnector = stream->sink->priv; 287 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
288 288
289 if (!aconnector || !aconnector->mst_port) 289 if (!aconnector || !aconnector->mst_port)
290 return false; 290 return false;
@@ -312,7 +312,7 @@ bool dm_helpers_dp_mst_send_payload_allocation(
312 struct drm_dp_mst_port *mst_port; 312 struct drm_dp_mst_port *mst_port;
313 int ret; 313 int ret;
314 314
315 aconnector = stream->sink->priv; 315 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
316 316
317 if (!aconnector || !aconnector->mst_port) 317 if (!aconnector || !aconnector->mst_port)
318 return false; 318 return false;
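
The clock change above reflects DC's switch of the pixel-clock field from kHz to 100 Hz units; the MST payload math still wants kHz, hence the divide by ten:

	/* pix_clk_100hz counts 100 Hz steps: 148.5 MHz is stored as
	 * 1485000, and 1485000 / 10 == 148500 kHz. */
	int clock = stream->timing.pix_clk_100hz / 10;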
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 24632727e127..748d6ff3e4f3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -35,6 +35,8 @@
35 35
36#include "dc_link_ddc.h" 36#include "dc_link_ddc.h"
37 37
38#include "i2caux_interface.h"
39
38/* #define TRACE_DPCD */ 40/* #define TRACE_DPCD */
39 41
40#ifdef TRACE_DPCD 42#ifdef TRACE_DPCD
@@ -81,80 +83,24 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
81 struct drm_dp_aux_msg *msg) 83 struct drm_dp_aux_msg *msg)
82{ 84{
83 ssize_t result = 0; 85 ssize_t result = 0;
84 enum i2caux_transaction_action action; 86 struct aux_payload payload;
85 enum aux_transaction_type type;
86 87
87 if (WARN_ON(msg->size > 16)) 88 if (WARN_ON(msg->size > 16))
88 return -E2BIG; 89 return -E2BIG;
89 90
90 switch (msg->request & ~DP_AUX_I2C_MOT) { 91 payload.address = msg->address;
91 case DP_AUX_NATIVE_READ: 92 payload.data = msg->buffer;
92 type = AUX_TRANSACTION_TYPE_DP; 93 payload.length = msg->size;
93 action = I2CAUX_TRANSACTION_ACTION_DP_READ; 94 payload.reply = &msg->reply;
94 95 payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
95 result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, 96 payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
96 msg->address, 97 payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
97 &msg->reply, 98 payload.defer_delay = 0;
98 msg->buffer,
99 msg->size,
100 type,
101 action);
102 break;
103 case DP_AUX_NATIVE_WRITE:
104 type = AUX_TRANSACTION_TYPE_DP;
105 action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
106
107 dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
108 msg->address,
109 &msg->reply,
110 msg->buffer,
111 msg->size,
112 type,
113 action);
114 result = msg->size;
115 break;
116 case DP_AUX_I2C_READ:
117 type = AUX_TRANSACTION_TYPE_I2C;
118 if (msg->request & DP_AUX_I2C_MOT)
119 action = I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
120 else
121 action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
122
123 result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
124 msg->address,
125 &msg->reply,
126 msg->buffer,
127 msg->size,
128 type,
129 action);
130 break;
131 case DP_AUX_I2C_WRITE:
132 type = AUX_TRANSACTION_TYPE_I2C;
133 if (msg->request & DP_AUX_I2C_MOT)
134 action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
135 else
136 action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
137
138 dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
139 msg->address,
140 &msg->reply,
141 msg->buffer,
142 msg->size,
143 type,
144 action);
145 result = msg->size;
146 break;
147 default:
148 return -EINVAL;
149 }
150 99
151#ifdef TRACE_DPCD 100 result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, &payload);
152 log_dpcd(msg->request, 101
153 msg->address, 102 if (payload.write)
154 msg->buffer, 103 result = msg->size;
155 msg->size,
156 r == DDC_RESULT_SUCESSFULL);
157#endif
158 104
159 if (result < 0) /* DC doesn't know about kernel error codes */ 105 if (result < 0) /* DC doesn't know about kernel error codes */
160 result = -EIO; 106 result = -EIO;
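
The rewritten dm_dp_aux_transfer() folds the old four-way switch into three bit tests on msg->request. This works because of how the DRM AUX request codes overlap: DP_AUX_NATIVE_WRITE is 0x8, DP_AUX_NATIVE_READ 0x9, DP_AUX_I2C_WRITE 0x0, DP_AUX_I2C_READ 0x1 and DP_AUX_I2C_MOT 0x4.

	/* Bit 3 set => native AUX, clear => i2c-over-aux.
	 * Bit 0 set => read, clear => write.
	 * Bit 2 is the i2c Middle-Of-Transaction flag. */
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;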
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 9d2d6986b983..e8e9eebbae3f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -559,6 +559,58 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
559 pp_funcs->notify_smu_enable_pwe(pp_handle); 559 pp_funcs->notify_smu_enable_pwe(pp_handle);
560} 560}
561 561
562void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
563{
564 const struct dc_context *ctx = pp->dm;
565 struct amdgpu_device *adev = ctx->driver_context;
566 void *pp_handle = adev->powerplay.pp_handle;
567 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
568
569 if (!pp_funcs || !pp_funcs->set_active_display_count)
570 return;
571
572 pp_funcs->set_active_display_count(pp_handle, count);
573}
574
575void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
576{
577 const struct dc_context *ctx = pp->dm;
578 struct amdgpu_device *adev = ctx->driver_context;
579 void *pp_handle = adev->powerplay.pp_handle;
580 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
581
582 if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
583 return;
584
585 pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
586}
587
588void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
589{
590 const struct dc_context *ctx = pp->dm;
591 struct amdgpu_device *adev = ctx->driver_context;
592 void *pp_handle = adev->powerplay.pp_handle;
593 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
594
595 if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
596 return;
597
598 pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
599}
600
601void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
602{
603 const struct dc_context *ctx = pp->dm;
604 struct amdgpu_device *adev = ctx->driver_context;
605 void *pp_handle = adev->powerplay.pp_handle;
606 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
607
608 if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
609 return;
610
611 pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
612}
613
562void dm_pp_get_funcs_rv( 614void dm_pp_get_funcs_rv(
563 struct dc_context *ctx, 615 struct dc_context *ctx,
564 struct pp_smu_funcs_rv *funcs) 616 struct pp_smu_funcs_rv *funcs)
@@ -567,4 +619,9 @@ void dm_pp_get_funcs_rv(
567 funcs->set_display_requirement = pp_rv_set_display_requirement; 619 funcs->set_display_requirement = pp_rv_set_display_requirement;
568 funcs->set_wm_ranges = pp_rv_set_wm_ranges; 620 funcs->set_wm_ranges = pp_rv_set_wm_ranges;
569 funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable; 621 funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
622 funcs->set_display_count = pp_rv_set_active_display_count;
623 funcs->set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
624 funcs->set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
625 funcs->set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
570} 626}
627
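
Each new wrapper follows the same pattern as the existing rv hooks: recover the amdgpu device from the dc_context, return early when the powerplay backend lacks the callback, and forward the raw value. A sketch of how a DC-side caller might consume the filled-in table (the caller is hypothetical; the embedded pp_smu member is assumed from pp_smu.h, and the clock arguments are plain ints whose units these wrappers do not convert):

/* Illustrative caller; pp_smu_funcs_rv layout assumed from pp_smu.h. */
static void notify_smu_display_config(struct pp_smu_funcs_rv *funcs,
				      int display_count, int min_dcfclk)
{
	/* Hooks may be absent on older SMU firmware; guard each call. */
	if (funcs->set_display_count)
		funcs->set_display_count(&funcs->pp_smu, display_count);
	if (funcs->set_min_deep_sleep_dcfclk)
		funcs->set_min_deep_sleep_dcfclk(&funcs->pp_smu, min_dcfclk);
}
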
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index aed538a4d1ba..b8ddb4acccdb 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -23,7 +23,7 @@
23# Makefile for Display Core (dc) component. 23# Makefile for Display Core (dc) component.
24# 24#
25 25
26DC_LIBS = basics bios calcs dce gpio i2caux irq virtual 26DC_LIBS = basics bios calcs dce gpio irq virtual
27 27
28ifdef CONFIG_DRM_AMD_DC_DCN1_0 28ifdef CONFIG_DRM_AMD_DC_DCN1_0
29DC_LIBS += dcn10 dml 29DC_LIBS += dcn10 dml
@@ -41,7 +41,8 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
41include $(AMD_DC) 41include $(AMD_DC)
42 42
43DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ 43DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
44dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o 44dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
45dc_vm_helper.o
45 46
46AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE)) 47AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
47 48
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index c2ab026aee91..a4c97d32e751 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -835,18 +835,6 @@ static enum bp_result bios_parser_enable_crtc(
835 return bp->cmd_tbl.enable_crtc(bp, id, enable); 835 return bp->cmd_tbl.enable_crtc(bp, id, enable);
836} 836}
837 837
838static enum bp_result bios_parser_crtc_source_select(
839 struct dc_bios *dcb,
840 struct bp_crtc_source_select *bp_params)
841{
842 struct bios_parser *bp = BP_FROM_DCB(dcb);
843
844 if (!bp->cmd_tbl.select_crtc_source)
845 return BP_RESULT_FAILURE;
846
847 return bp->cmd_tbl.select_crtc_source(bp, bp_params);
848}
849
850static enum bp_result bios_parser_enable_disp_power_gating( 838static enum bp_result bios_parser_enable_disp_power_gating(
851 struct dc_bios *dcb, 839 struct dc_bios *dcb,
852 enum controller_id controller_id, 840 enum controller_id controller_id,
@@ -2842,8 +2830,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
2842 2830
2843 .program_crtc_timing = bios_parser_program_crtc_timing, /* still use. should probably retire and program directly */ 2831 .program_crtc_timing = bios_parser_program_crtc_timing, /* still use. should probably retire and program directly */
2844 2832
2845 .crtc_source_select = bios_parser_crtc_source_select, /* still use. should probably retire and program directly */
2846
2847 .program_display_engine_pll = bios_parser_program_display_engine_pll, 2833 .program_display_engine_pll = bios_parser_program_display_engine_pll,
2848 2834
2849 .enable_disp_power_gating = bios_parser_enable_disp_power_gating, 2835 .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index c513ab6f3843..a1c56f29cfeb 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1083,18 +1083,6 @@ static enum bp_result bios_parser_enable_crtc(
1083 return bp->cmd_tbl.enable_crtc(bp, id, enable); 1083 return bp->cmd_tbl.enable_crtc(bp, id, enable);
1084} 1084}
1085 1085
1086static enum bp_result bios_parser_crtc_source_select(
1087 struct dc_bios *dcb,
1088 struct bp_crtc_source_select *bp_params)
1089{
1090 struct bios_parser *bp = BP_FROM_DCB(dcb);
1091
1092 if (!bp->cmd_tbl.select_crtc_source)
1093 return BP_RESULT_FAILURE;
1094
1095 return bp->cmd_tbl.select_crtc_source(bp, bp_params);
1096}
1097
1098static enum bp_result bios_parser_enable_disp_power_gating( 1086static enum bp_result bios_parser_enable_disp_power_gating(
1099 struct dc_bios *dcb, 1087 struct dc_bios *dcb,
1100 enum controller_id controller_id, 1088 enum controller_id controller_id,
@@ -1899,8 +1887,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
1899 1887
1900 .is_accelerated_mode = bios_parser_is_accelerated_mode, 1888 .is_accelerated_mode = bios_parser_is_accelerated_mode,
1901 1889
1902 .is_active_display = bios_is_active_display,
1903
1904 .set_scratch_critical_state = bios_parser_set_scratch_critical_state, 1890 .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
1905 1891
1906 1892
@@ -1917,8 +1903,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
1917 1903
1918 .program_crtc_timing = bios_parser_program_crtc_timing, 1904 .program_crtc_timing = bios_parser_program_crtc_timing,
1919 1905
1920 .crtc_source_select = bios_parser_crtc_source_select,
1921
1922 .enable_disp_power_gating = bios_parser_enable_disp_power_gating, 1906 .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
1923 1907
1924 .bios_parser_destroy = firmware_parser_destroy, 1908 .bios_parser_destroy = firmware_parser_destroy,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
index fdda8aa8e303..fce46ab54c54 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -83,101 +83,7 @@ uint32_t bios_get_vga_enabled_displays(
 {
 	uint32_t active_disp = 1;
 
-	if (bios->regs->BIOS_SCRATCH_3) /*follow up with other asic, todo*/
-		active_disp = REG_READ(BIOS_SCRATCH_3) & 0XFFFF;
+	active_disp = REG_READ(BIOS_SCRATCH_3) & 0XFFFF;
 	return active_disp;
 }
 
-bool bios_is_active_display(
-	struct dc_bios *bios,
-	enum signal_type signal,
-	const struct connector_device_tag_info *device_tag)
-{
-	uint32_t active = 0;
-	uint32_t connected = 0;
-	uint32_t bios_scratch_0 = 0;
-	uint32_t bios_scratch_3 = 0;
-
-	switch (signal) {
-	case SIGNAL_TYPE_DVI_SINGLE_LINK:
-	case SIGNAL_TYPE_DVI_DUAL_LINK:
-	case SIGNAL_TYPE_HDMI_TYPE_A:
-	case SIGNAL_TYPE_DISPLAY_PORT:
-	case SIGNAL_TYPE_DISPLAY_PORT_MST:
-	{
-		if (device_tag->dev_id.device_type == DEVICE_TYPE_DFP) {
-			switch (device_tag->dev_id.enum_id) {
-			case 1:
-			{
-				active = ATOM_S3_DFP1_ACTIVE;
-				connected = 0x0008;	//ATOM_DISPLAY_DFP1_CONNECT
-			}
-			break;
-
-			case 2:
-			{
-				active = ATOM_S3_DFP2_ACTIVE;
-				connected = 0x0080;	//ATOM_DISPLAY_DFP2_CONNECT
-			}
-			break;
-
-			case 3:
-			{
-				active = ATOM_S3_DFP3_ACTIVE;
-				connected = 0x0200;	//ATOM_DISPLAY_DFP3_CONNECT
-			}
-			break;
-
-			case 4:
-			{
-				active = ATOM_S3_DFP4_ACTIVE;
-				connected = 0x0400;	//ATOM_DISPLAY_DFP4_CONNECT
-			}
-			break;
-
-			case 5:
-			{
-				active = ATOM_S3_DFP5_ACTIVE;
-				connected = 0x0800;	//ATOM_DISPLAY_DFP5_CONNECT
-			}
-			break;
-
-			case 6:
-			{
-				active = ATOM_S3_DFP6_ACTIVE;
-				connected = 0x0040;	//ATOM_DISPLAY_DFP6_CONNECT
-			}
-			break;
-
-			default:
-				break;
-			}
-		}
-	}
-	break;
-
-	case SIGNAL_TYPE_LVDS:
-	case SIGNAL_TYPE_EDP:
-	{
-		active = ATOM_S3_LCD1_ACTIVE;
-		connected = 0x0002;	//ATOM_DISPLAY_LCD1_CONNECT
-	}
-	break;
-
-	default:
-		break;
-	}
-
-
-	if (bios->regs->BIOS_SCRATCH_0) /*follow up with other asic, todo*/
-		bios_scratch_0 = REG_READ(BIOS_SCRATCH_0);
-	if (bios->regs->BIOS_SCRATCH_3) /*follow up with other asic, todo*/
-		bios_scratch_3 = REG_READ(BIOS_SCRATCH_3);
-
-	bios_scratch_3 &= ATOM_S3_DEVICE_ACTIVE_MASK;
-	if ((active & bios_scratch_3) && (connected & bios_scratch_0))
-		return true;
-
-	return false;
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
index f33cac2147e3..75a29e68fb27 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -35,10 +35,6 @@ bool bios_is_accelerated_mode(struct dc_bios *bios);
35void bios_set_scratch_acc_mode_change(struct dc_bios *bios); 35void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
36void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); 36void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
37uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); 37uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios);
38bool bios_is_active_display(
39 struct dc_bios *bios,
40 enum signal_type signal,
41 const struct connector_device_tag_info *device_tag);
42 38
43#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) 39#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
44 40
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 2bd7cd97e00d..5815983caaf8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -55,7 +55,6 @@ static void init_adjust_display_pll(struct bios_parser *bp);
55static void init_dac_encoder_control(struct bios_parser *bp); 55static void init_dac_encoder_control(struct bios_parser *bp);
56static void init_dac_output_control(struct bios_parser *bp); 56static void init_dac_output_control(struct bios_parser *bp);
57static void init_set_crtc_timing(struct bios_parser *bp); 57static void init_set_crtc_timing(struct bios_parser *bp);
58static void init_select_crtc_source(struct bios_parser *bp);
59static void init_enable_crtc(struct bios_parser *bp); 58static void init_enable_crtc(struct bios_parser *bp);
60static void init_enable_crtc_mem_req(struct bios_parser *bp); 59static void init_enable_crtc_mem_req(struct bios_parser *bp);
61static void init_external_encoder_control(struct bios_parser *bp); 60static void init_external_encoder_control(struct bios_parser *bp);
@@ -73,7 +72,6 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
73 init_dac_encoder_control(bp); 72 init_dac_encoder_control(bp);
74 init_dac_output_control(bp); 73 init_dac_output_control(bp);
75 init_set_crtc_timing(bp); 74 init_set_crtc_timing(bp);
76 init_select_crtc_source(bp);
77 init_enable_crtc(bp); 75 init_enable_crtc(bp);
78 init_enable_crtc_mem_req(bp); 76 init_enable_crtc_mem_req(bp);
79 init_program_clock(bp); 77 init_program_clock(bp);
@@ -964,9 +962,9 @@ static enum bp_result set_pixel_clock_v3(
964 allocation.sPCLKInput.ucPostDiv = 962 allocation.sPCLKInput.ucPostDiv =
965 (uint8_t)bp_params->pixel_clock_post_divider; 963 (uint8_t)bp_params->pixel_clock_post_divider;
966 964
967 /* We need to convert from KHz units into 10KHz units */ 965 /* We need to convert from 100Hz units into 10KHz units */
968 allocation.sPCLKInput.usPixelClock = 966 allocation.sPCLKInput.usPixelClock =
969 cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10)); 967 cpu_to_le16((uint16_t)(bp_params->target_pixel_clock_100hz / 100));
970 968
971 params = (PIXEL_CLOCK_PARAMETERS_V3 *)&allocation.sPCLKInput; 969 params = (PIXEL_CLOCK_PARAMETERS_V3 *)&allocation.sPCLKInput;
972 params->ucTransmitterId = 970 params->ucTransmitterId =
@@ -1042,9 +1040,9 @@ static enum bp_result set_pixel_clock_v5(
1042 (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom( 1040 (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
1043 bp_params->signal_type, false); 1041 bp_params->signal_type, false);
1044 1042
1045 /* We need to convert from KHz units into 10KHz units */ 1043 /* We need to convert from 100Hz units into 10KHz units */
1046 clk.sPCLKInput.usPixelClock = 1044 clk.sPCLKInput.usPixelClock =
1047 cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10)); 1045 cpu_to_le16((uint16_t)(bp_params->target_pixel_clock_100hz / 100));
1048 1046
1049 if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) 1047 if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
1050 clk.sPCLKInput.ucMiscInfo |= 1048 clk.sPCLKInput.ucMiscInfo |=
@@ -1118,9 +1116,9 @@ static enum bp_result set_pixel_clock_v6(
1118 (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom( 1116 (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(
1119 bp_params->signal_type, false); 1117 bp_params->signal_type, false);
1120 1118
1121 /* We need to convert from KHz units into 10KHz units */ 1119 /* We need to convert from 100 Hz units into 10KHz units */
1122 clk.sPCLKInput.ulCrtcPclkFreq.ulPixelClock = 1120 clk.sPCLKInput.ulCrtcPclkFreq.ulPixelClock =
1123 cpu_to_le32(bp_params->target_pixel_clock / 10); 1121 cpu_to_le32(bp_params->target_pixel_clock_100hz / 100);
1124 1122
1125 if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) { 1123 if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) {
1126 clk.sPCLKInput.ucMiscInfo |= 1124 clk.sPCLKInput.ucMiscInfo |=
@@ -1182,8 +1180,7 @@ static enum bp_result set_pixel_clock_v7(
 	clk.ucTransmitterID = bp->cmd_helper->encoder_id_to_atom(dal_graphics_object_id_get_encoder_id(bp_params->encoder_object_id));
 	clk.ucEncoderMode = (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(bp_params->signal_type, false);
 
-	/* We need to convert from KHz units into 10KHz units */
-	clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock * 10);
+	clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock_100hz);
 
 	clk.ucDeepColorRatio = (uint8_t) bp->cmd_helper->transmitter_color_depth_to_atom(bp_params->color_depth);
 
@@ -1899,120 +1896,6 @@ static enum bp_result set_crtc_using_dtd_timing_v3(
1899/******************************************************************************* 1896/*******************************************************************************
1900 ******************************************************************************** 1897 ********************************************************************************
1901 ** 1898 **
1902 ** SELECT CRTC SOURCE
1903 **
1904 ********************************************************************************
1905 *******************************************************************************/
1906
1907static enum bp_result select_crtc_source_v2(
1908 struct bios_parser *bp,
1909 struct bp_crtc_source_select *bp_params);
1910static enum bp_result select_crtc_source_v3(
1911 struct bios_parser *bp,
1912 struct bp_crtc_source_select *bp_params);
1913
1914static void init_select_crtc_source(struct bios_parser *bp)
1915{
1916 switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
1917 case 2:
1918 bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
1919 break;
1920 case 3:
1921 bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
1922 break;
1923 default:
1924 dm_output_to_console("Don't select_crtc_source enable_crtc for v%d\n",
1925 BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source));
1926 bp->cmd_tbl.select_crtc_source = NULL;
1927 break;
1928 }
1929}
1930
1931static enum bp_result select_crtc_source_v2(
1932 struct bios_parser *bp,
1933 struct bp_crtc_source_select *bp_params)
1934{
1935 enum bp_result result = BP_RESULT_FAILURE;
1936 SELECT_CRTC_SOURCE_PARAMETERS_V2 params;
1937 uint8_t atom_controller_id;
1938 uint32_t atom_engine_id;
1939 enum signal_type s = bp_params->signal;
1940
1941 memset(&params, 0, sizeof(params));
1942
1943 /* set controller id */
1944 if (bp->cmd_helper->controller_id_to_atom(
1945 bp_params->controller_id, &atom_controller_id))
1946 params.ucCRTC = atom_controller_id;
1947 else
1948 return BP_RESULT_FAILURE;
1949
1950 /* set encoder id */
1951 if (bp->cmd_helper->engine_bp_to_atom(
1952 bp_params->engine_id, &atom_engine_id))
1953 params.ucEncoderID = (uint8_t)atom_engine_id;
1954 else
1955 return BP_RESULT_FAILURE;
1956
1957 if (SIGNAL_TYPE_EDP == s ||
1958 (SIGNAL_TYPE_DISPLAY_PORT == s &&
1959 SIGNAL_TYPE_LVDS == bp_params->sink_signal))
1960 s = SIGNAL_TYPE_LVDS;
1961
1962 params.ucEncodeMode =
1963 (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
1964 s, bp_params->enable_dp_audio);
1965
1966 if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
1967 result = BP_RESULT_OK;
1968
1969 return result;
1970}
1971
1972static enum bp_result select_crtc_source_v3(
1973 struct bios_parser *bp,
1974 struct bp_crtc_source_select *bp_params)
1975{
1976 bool result = BP_RESULT_FAILURE;
1977 SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
1978 uint8_t atom_controller_id;
1979 uint32_t atom_engine_id;
1980 enum signal_type s = bp_params->signal;
1981
1982 memset(&params, 0, sizeof(params));
1983
1984 if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
1985 &atom_controller_id))
1986 params.ucCRTC = atom_controller_id;
1987 else
1988 return result;
1989
1990 if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
1991 &atom_engine_id))
1992 params.ucEncoderID = (uint8_t)atom_engine_id;
1993 else
1994 return result;
1995
1996 if (SIGNAL_TYPE_EDP == s ||
1997 (SIGNAL_TYPE_DISPLAY_PORT == s &&
1998 SIGNAL_TYPE_LVDS == bp_params->sink_signal))
1999 s = SIGNAL_TYPE_LVDS;
2000
2001 params.ucEncodeMode =
2002 bp->cmd_helper->encoder_mode_bp_to_atom(
2003 s, bp_params->enable_dp_audio);
2004 /* Needed for VBIOS Random Spatial Dithering feature */
2005 params.ucDstBpc = (uint8_t)(bp_params->display_output_bit_depth);
2006
2007 if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
2008 result = BP_RESULT_OK;
2009
2010 return result;
2011}
2012
2013/*******************************************************************************
2014 ********************************************************************************
2015 **
2016 ** ENABLE CRTC 1899 ** ENABLE CRTC
2017 ** 1900 **
2018 ******************************************************************************** 1901 ********************************************************************************
@@ -2164,7 +2047,7 @@ static enum bp_result program_clock_v5(
2164 /* We need to convert from KHz units into 10KHz units */ 2047 /* We need to convert from KHz units into 10KHz units */
2165 params.sPCLKInput.ucPpll = (uint8_t) atom_pll_id; 2048 params.sPCLKInput.ucPpll = (uint8_t) atom_pll_id;
2166 params.sPCLKInput.usPixelClock = 2049 params.sPCLKInput.usPixelClock =
2167 cpu_to_le16((uint16_t) (bp_params->target_pixel_clock / 10)); 2050 cpu_to_le16((uint16_t) (bp_params->target_pixel_clock_100hz / 100));
2168 params.sPCLKInput.ucCRTC = (uint8_t) ATOM_CRTC_INVALID; 2051 params.sPCLKInput.ucCRTC = (uint8_t) ATOM_CRTC_INVALID;
2169 2052
2170 if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) 2053 if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
@@ -2196,7 +2079,7 @@ static enum bp_result program_clock_v6(
2196 /* We need to convert from KHz units into 10KHz units */ 2079 /* We need to convert from KHz units into 10KHz units */
2197 params.sPCLKInput.ucPpll = (uint8_t)atom_pll_id; 2080 params.sPCLKInput.ucPpll = (uint8_t)atom_pll_id;
2198 params.sPCLKInput.ulDispEngClkFreq = 2081 params.sPCLKInput.ulDispEngClkFreq =
2199 cpu_to_le32(bp_params->target_pixel_clock / 10); 2082 cpu_to_le32(bp_params->target_pixel_clock_100hz / 100);
2200 2083
2201 if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) 2084 if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
2202 params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; 2085 params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
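
The recurring edit across these command tables is unit bookkeeping: stream timing now carries pixel clocks in 100Hz units while the legacy ATOM parameter blocks still expect 10kHz, hence the /100 (the v7 table takes 100Hz natively, so its conversion and stale comment go away). A compilable sketch of the arithmetic, helper name ours:

#include <stdint.h>

/* 100Hz -> 10kHz for legacy ATOM tables: 148.5 MHz is 1485000 in
 * 100Hz units and 14850 in 10kHz units. */
static inline uint32_t pix_clk_100hz_to_10khz(uint32_t pix_clk_100hz)
{
	return pix_clk_100hz / 100;
}
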
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
index 94f3d43a7471..ad533775e724 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -71,9 +71,6 @@ struct cmd_tbl {
71 enum bp_result (*set_crtc_timing)( 71 enum bp_result (*set_crtc_timing)(
72 struct bios_parser *bp, 72 struct bios_parser *bp,
73 struct bp_hw_crtc_timing_parameters *bp_params); 73 struct bp_hw_crtc_timing_parameters *bp_params);
74 enum bp_result (*select_crtc_source)(
75 struct bios_parser *bp,
76 struct bp_crtc_source_select *bp_params);
77 enum bp_result (*enable_crtc)( 74 enum bp_result (*enable_crtc)(
78 struct bios_parser *bp, 75 struct bios_parser *bp,
79 enum controller_id controller_id, 76 enum controller_id controller_id,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 2b5dc499a35e..bb2e8105e6ab 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -301,17 +301,17 @@ static enum bp_result set_pixel_clock_v7(
 			cmd_helper->encoder_mode_bp_to_atom(
 				bp_params->signal_type, false);
 
-	/* We need to convert from KHz units into 10KHz units */
-	clk.pixclk_100hz = cpu_to_le32(bp_params->target_pixel_clock *
-			10);
+	clk.pixclk_100hz = cpu_to_le32(bp_params->target_pixel_clock_100hz);
 
 	clk.deep_color_ratio =
 			(uint8_t) bp->cmd_helper->
 			transmitter_color_depth_to_atom(
 				bp_params->color_depth);
-	DC_LOG_BIOS("%s:program display clock = %d"\
-			"colorDepth = %d\n", __func__,\
-			bp_params->target_pixel_clock, bp_params->color_depth);
+
+	DC_LOG_BIOS("%s:program display clock = %d, tg = %d, pll = %d, "\
+			"colorDepth = %d\n", __func__,
+			bp_params->target_pixel_clock_100hz, (int)controller_id,
+			pll_id, bp_params->color_depth);
 
 	if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
 		clk.miscinfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
@@ -463,75 +463,6 @@ static enum bp_result set_crtc_using_dtd_timing_v3(
463/****************************************************************************** 463/******************************************************************************
464 ****************************************************************************** 464 ******************************************************************************
465 ** 465 **
466 ** SELECT CRTC SOURCE
467 **
468 ******************************************************************************
469 *****************************************************************************/
470
471
472static enum bp_result select_crtc_source_v3(
473 struct bios_parser *bp,
474 struct bp_crtc_source_select *bp_params);
475
476static void init_select_crtc_source(struct bios_parser *bp)
477{
478 switch (BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source)) {
479 case 3:
480 bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
481 break;
482 default:
483 dm_output_to_console("Don't select_crtc_source enable_crtc for v%d\n",
484 BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source));
485 bp->cmd_tbl.select_crtc_source = NULL;
486 break;
487 }
488}
489
490
491static enum bp_result select_crtc_source_v3(
492 struct bios_parser *bp,
493 struct bp_crtc_source_select *bp_params)
494{
495 bool result = BP_RESULT_FAILURE;
496 struct select_crtc_source_parameters_v2_3 params;
497 uint8_t atom_controller_id;
498 uint32_t atom_engine_id;
499 enum signal_type s = bp_params->signal;
500
501 memset(&params, 0, sizeof(params));
502
503 if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
504 &atom_controller_id))
505 params.crtc_id = atom_controller_id;
506 else
507 return result;
508
509 if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
510 &atom_engine_id))
511 params.encoder_id = (uint8_t)atom_engine_id;
512 else
513 return result;
514
515 if (s == SIGNAL_TYPE_EDP ||
516 (s == SIGNAL_TYPE_DISPLAY_PORT && bp_params->sink_signal ==
517 SIGNAL_TYPE_LVDS))
518 s = SIGNAL_TYPE_LVDS;
519
520 params.encode_mode =
521 bp->cmd_helper->encoder_mode_bp_to_atom(
522 s, bp_params->enable_dp_audio);
523 /* Needed for VBIOS Random Spatial Dithering feature */
524 params.dst_bpc = (uint8_t)(bp_params->display_output_bit_depth);
525
526 if (EXEC_BIOS_CMD_TABLE(selectcrtc_source, params))
527 result = BP_RESULT_OK;
528
529 return result;
530}
531
532/******************************************************************************
533 ******************************************************************************
534 **
535 ** ENABLE CRTC 466 ** ENABLE CRTC
536 ** 467 **
537 ****************************************************************************** 468 ******************************************************************************
@@ -808,7 +739,6 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
808 739
809 init_set_crtc_timing(bp); 740 init_set_crtc_timing(bp);
810 741
811 init_select_crtc_source(bp);
812 init_enable_crtc(bp); 742 init_enable_crtc(bp);
813 743
814 init_external_encoder_control(bp); 744 init_external_encoder_control(bp);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
index ec1c0c9f3f1d..7a2af24dfe60 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -71,9 +71,6 @@ struct cmd_tbl {
71 enum bp_result (*set_crtc_timing)( 71 enum bp_result (*set_crtc_timing)(
72 struct bios_parser *bp, 72 struct bios_parser *bp,
73 struct bp_hw_crtc_timing_parameters *bp_params); 73 struct bp_hw_crtc_timing_parameters *bp_params);
74 enum bp_result (*select_crtc_source)(
75 struct bios_parser *bp,
76 struct bp_crtc_source_select *bp_params);
77 enum bp_result (*enable_crtc)( 74 enum bp_result (*enable_crtc)(
78 struct bios_parser *bp, 75 struct bios_parser *bp,
79 enum controller_id controller_id, 76 enum controller_id controller_id,
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 95f332ee3e7e..dc85a3c088af 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -30,7 +30,7 @@ else ifneq ($(call cc-option, -mstack-alignment=16),)
30 cc_stack_align := -mstack-alignment=16 30 cc_stack_align := -mstack-alignment=16
31endif 31endif
32 32
33calcs_ccflags := -mhard-float -msse $(cc_stack_align) 33calcs_ccflags := -mhard-float -msse -msse2 $(cc_stack_align)
34 34
35CFLAGS_dcn_calcs.o := $(calcs_ccflags) 35CFLAGS_dcn_calcs.o := $(calcs_ccflags)
36CFLAGS_dcn_calc_auto.o := $(calcs_ccflags) 36CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 9ebe30ba4dab..f3aa7b53d2aa 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -2792,7 +2792,7 @@ static void populate_initial_data(
2792 data->lpt_en[num_displays + 4] = false; 2792 data->lpt_en[num_displays + 4] = false;
2793 data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); 2793 data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
2794 data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); 2794 data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
2795 data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000); 2795 data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_100hz, 10000);
2796 data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); 2796 data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
2797 data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; 2797 data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
2798 data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height); 2798 data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
@@ -2881,7 +2881,7 @@ static void populate_initial_data(
2881 2881
2882 /* Pipes without underlay after */ 2882 /* Pipes without underlay after */
2883 for (i = 0; i < pipe_count; i++) { 2883 for (i = 0; i < pipe_count; i++) {
2884 unsigned int pixel_clock_khz; 2884 unsigned int pixel_clock_100hz;
2885 if (!pipe[i].stream || pipe[i].bottom_pipe) 2885 if (!pipe[i].stream || pipe[i].bottom_pipe)
2886 continue; 2886 continue;
2887 2887
@@ -2890,10 +2890,10 @@ static void populate_initial_data(
2890 data->lpt_en[num_displays + 4] = false; 2890 data->lpt_en[num_displays + 4] = false;
2891 data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); 2891 data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
2892 data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); 2892 data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
2893 pixel_clock_khz = pipe[i].stream->timing.pix_clk_khz; 2893 pixel_clock_100hz = pipe[i].stream->timing.pix_clk_100hz;
2894 if (pipe[i].stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) 2894 if (pipe[i].stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
2895 pixel_clock_khz *= 2; 2895 pixel_clock_100hz *= 2;
2896 data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pixel_clock_khz, 1000); 2896 data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pixel_clock_100hz, 10000);
2897 if (pipe[i].plane_state) { 2897 if (pipe[i].plane_state) {
2898 data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); 2898 data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
2899 data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; 2899 data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
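
dce_calcs follows the same unit migration with a different divisor because the bandwidth formulas want MHz: bw_frc_to_fixed(pix_clk_100hz, 10000) turns, e.g., 1485000 hundred-hertz units into 148.5 MHz, exactly what the old pix_clk_khz / 1000 produced.
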
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
index d0fc54f8fb1c..7d102ac0d61b 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -797,9 +797,40 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
797 else { 797 else {
798 v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0; 798 v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0;
799 } 799 }
800 v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]); 800
801 v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4; 801 do {
802 v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]); 802 v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]);
803 v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4;
804 v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
805
806 if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
807 v->time_for_meta_pte_without_immediate_flip = dcn_bw_max3(
808 v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k],
809 v->extra_latency,
810 v->htotal[k] / v->pixel_clock[k] / 4.0);
811 } else {
812 v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
813 }
814
815 if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) {
816 v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max3((
817 v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bandwidth[k],
818 v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip,
819 v->extra_latency);
820 } else {
821 v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max2(
822 v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip,
823 v->extra_latency - v->time_for_meta_pte_with_immediate_flip);
824 }
825
826 v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
827 v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
828 v->maximum_vstartup = v->maximum_vstartup - 1;
829
830 if (v->lines_for_meta_pte_without_immediate_flip[k] < 8.0 && v->lines_for_meta_and_dpte_row_without_immediate_flip[k] < 16.0)
831 break;
832
833 } while(1);
803 } 834 }
804 v->bw_available_for_immediate_flip = v->return_bw_per_state[i]; 835 v->bw_available_for_immediate_flip = v->return_bw_per_state[i];
805 for (k = 0; k <= v->number_of_active_planes - 1; k++) { 836 for (k = 0; k <= v->number_of_active_planes - 1; k++) {
@@ -814,24 +845,18 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
814 for (k = 0; k <= v->number_of_active_planes - 1; k++) { 845 for (k = 0; k <= v->number_of_active_planes - 1; k++) {
815 if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { 846 if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
816 v->time_for_meta_pte_with_immediate_flip =dcn_bw_max5(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); 847 v->time_for_meta_pte_with_immediate_flip =dcn_bw_max5(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
817 v->time_for_meta_pte_without_immediate_flip =dcn_bw_max3(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
818 } 848 }
819 else { 849 else {
820 v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0; 850 v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
821 v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
822 } 851 }
823 if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) { 852 if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) {
824 v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max5((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency, 2.0 * v->urgent_latency); 853 v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max5((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency, 2.0 * v->urgent_latency);
825 v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max3((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency);
826 } 854 }
827 else { 855 else {
828 v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip); 856 v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip);
829 v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency - v->time_for_meta_pte_without_immediate_flip);
830 } 857 }
831 v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; 858 v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
832 v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
833 v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; 859 v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
834 v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
835 v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k]; 860 v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k];
836 v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k]; 861 v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k];
837 if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) { 862 if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) {
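
The new do/while turns the one-shot prefetch schedule into a search: each pass recomputes the prefetch line times and the PTE/meta row fetch times, then gives back one line of vstartup, until the metadata PTE fetch fits in under 8 line times and the meta+DPTE row fetch in under 16. In outline (helper names ours; bounds from the hunk above):

/* Simplified shape of the added retry loop. */
do {
	recompute_prefetch_line_times(v, k);	/* line_times_for_prefetch, prefetch_bw */
	recompute_pte_and_row_times(v, k);	/* *_without_immediate_flip terms */
	v->maximum_vstartup -= 1;		/* relax vstartup one line per pass */
} while (v->lines_for_meta_pte_without_immediate_flip[k] >= 8.0 ||
	 v->lines_for_meta_and_dpte_row_without_immediate_flip[k] >= 16.0);
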
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 43e4a2be0fa6..12d1842079ae 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -290,41 +290,34 @@ static void pipe_ctx_to_e2e_pipe_params (
290 switch (pipe->plane_state->tiling_info.gfx9.swizzle) { 290 switch (pipe->plane_state->tiling_info.gfx9.swizzle) {
291 /* for 4/8/16 high tiles */ 291 /* for 4/8/16 high tiles */
292 case DC_SW_LINEAR: 292 case DC_SW_LINEAR:
293 input->src.is_display_sw = 1;
294 input->src.macro_tile_size = dm_4k_tile; 293 input->src.macro_tile_size = dm_4k_tile;
295 break; 294 break;
296 case DC_SW_4KB_S: 295 case DC_SW_4KB_S:
297 case DC_SW_4KB_S_X: 296 case DC_SW_4KB_S_X:
298 input->src.is_display_sw = 0;
299 input->src.macro_tile_size = dm_4k_tile; 297 input->src.macro_tile_size = dm_4k_tile;
300 break; 298 break;
301 case DC_SW_64KB_S: 299 case DC_SW_64KB_S:
302 case DC_SW_64KB_S_X: 300 case DC_SW_64KB_S_X:
303 case DC_SW_64KB_S_T: 301 case DC_SW_64KB_S_T:
304 input->src.is_display_sw = 0;
305 input->src.macro_tile_size = dm_64k_tile; 302 input->src.macro_tile_size = dm_64k_tile;
306 break; 303 break;
307 case DC_SW_VAR_S: 304 case DC_SW_VAR_S:
308 case DC_SW_VAR_S_X: 305 case DC_SW_VAR_S_X:
309 input->src.is_display_sw = 0;
310 input->src.macro_tile_size = dm_256k_tile; 306 input->src.macro_tile_size = dm_256k_tile;
311 break; 307 break;
312 308
313 /* For 64bpp 2 high tiles */ 309 /* For 64bpp 2 high tiles */
314 case DC_SW_4KB_D: 310 case DC_SW_4KB_D:
315 case DC_SW_4KB_D_X: 311 case DC_SW_4KB_D_X:
316 input->src.is_display_sw = 1;
317 input->src.macro_tile_size = dm_4k_tile; 312 input->src.macro_tile_size = dm_4k_tile;
318 break; 313 break;
319 case DC_SW_64KB_D: 314 case DC_SW_64KB_D:
320 case DC_SW_64KB_D_X: 315 case DC_SW_64KB_D_X:
321 case DC_SW_64KB_D_T: 316 case DC_SW_64KB_D_T:
322 input->src.is_display_sw = 1;
323 input->src.macro_tile_size = dm_64k_tile; 317 input->src.macro_tile_size = dm_64k_tile;
324 break; 318 break;
325 case DC_SW_VAR_D: 319 case DC_SW_VAR_D:
326 case DC_SW_VAR_D_X: 320 case DC_SW_VAR_D_X:
327 input->src.is_display_sw = 1;
328 input->src.macro_tile_size = dm_256k_tile; 321 input->src.macro_tile_size = dm_256k_tile;
329 break; 322 break;
330 323
@@ -423,7 +416,7 @@ static void pipe_ctx_to_e2e_pipe_params (
423 - pipe->stream->timing.v_addressable 416 - pipe->stream->timing.v_addressable
424 - pipe->stream->timing.v_border_bottom 417 - pipe->stream->timing.v_border_bottom
425 - pipe->stream->timing.v_border_top; 418 - pipe->stream->timing.v_border_top;
426 input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_khz/1000.0; 419 input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_100hz/10000.0;
427 input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start; 420 input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start;
428 input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset; 421 input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset;
429 input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset; 422 input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset;
@@ -670,9 +663,9 @@ static void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
670} 663}
671 664
672static void hack_force_pipe_split(struct dcn_bw_internal_vars *v, 665static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
673 unsigned int pixel_rate_khz) 666 unsigned int pixel_rate_100hz)
674{ 667{
675 float pixel_rate_mhz = pixel_rate_khz / 1000; 668 float pixel_rate_mhz = pixel_rate_100hz / 10000;
676 669
677 /* 670 /*
678 * force enabling pipe split by lower dpp clock for DPM0 to just 671 * force enabling pipe split by lower dpp clock for DPM0 to just
@@ -695,7 +688,7 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
695 688
696 if (context->stream_count == 1 && 689 if (context->stream_count == 1 &&
697 dbg->force_single_disp_pipe_split) 690 dbg->force_single_disp_pipe_split)
698 hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_khz); 691 hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
699} 692}
700 693
701bool dcn_validate_bandwidth( 694bool dcn_validate_bandwidth(
@@ -852,7 +845,7 @@ bool dcn_validate_bandwidth(
852 v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total 845 v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total
853 - v->vactive[input_idx] 846 - v->vactive[input_idx]
854 - pipe->stream->timing.v_front_porch; 847 - pipe->stream->timing.v_front_porch;
855 v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_khz/1000.0; 848 v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_100hz/10000.0;
856 if (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) 849 if (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
857 v->pixel_clock[input_idx] *= 2; 850 v->pixel_clock[input_idx] *= 2;
858 if (!pipe->plane_state) { 851 if (!pipe->plane_state) {
@@ -961,7 +954,7 @@ bool dcn_validate_bandwidth(
961 v->dcc_rate[input_idx] = 1; /*TODO: Worst case? does this change?*/ 954 v->dcc_rate[input_idx] = 1; /*TODO: Worst case? does this change?*/
962 v->output_format[input_idx] = pipe->stream->timing.pixel_encoding == 955 v->output_format[input_idx] = pipe->stream->timing.pixel_encoding ==
963 PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444; 956 PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444;
964 v->output[input_idx] = pipe->stream->sink->sink_signal == 957 v->output[input_idx] = pipe->stream->signal ==
965 SIGNAL_TYPE_HDMI_TYPE_A ? dcn_bw_hdmi : dcn_bw_dp; 958 SIGNAL_TYPE_HDMI_TYPE_A ? dcn_bw_hdmi : dcn_bw_dp;
966 v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc; 959 v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc;
967 if (v->output[input_idx] == dcn_bw_hdmi) { 960 if (v->output[input_idx] == dcn_bw_hdmi) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5fd52094d459..1dabafc12cfe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -384,7 +384,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
384 enum dc_dither_option option) 384 enum dc_dither_option option)
385{ 385{
386 struct bit_depth_reduction_params params; 386 struct bit_depth_reduction_params params;
387 struct dc_link *link = stream->sink->link; 387 struct dc_link *link = stream->link;
388 struct pipe_ctx *pipes = NULL; 388 struct pipe_ctx *pipes = NULL;
389 int i; 389 int i;
390 390
@@ -451,7 +451,7 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
451 pipes, 451 pipes,
452 stream->output_color_space, 452 stream->output_color_space,
453 stream->csc_color_matrix.matrix, 453 stream->csc_color_matrix.matrix,
454 pipes->plane_res.hubp->opp_id); 454 pipes->plane_res.hubp ? pipes->plane_res.hubp->opp_id : 0);
455 ret = true; 455 ret = true;
456 } 456 }
457 } 457 }
@@ -526,9 +526,8 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-		if (pipe->stream && pipe->stream->sink
-			&& pipe->stream->sink->link) {
-			if (pipe->stream->sink->link == link)
+		if (pipe->stream && pipe->stream->link) {
+			if (pipe->stream->link == link)
 				break;
 		}
 	}
@@ -586,9 +585,6 @@ static void destruct(struct dc *dc)
586 if (dc->ctx->gpio_service) 585 if (dc->ctx->gpio_service)
587 dal_gpio_service_destroy(&dc->ctx->gpio_service); 586 dal_gpio_service_destroy(&dc->ctx->gpio_service);
588 587
589 if (dc->ctx->i2caux)
590 dal_i2caux_destroy(&dc->ctx->i2caux);
591
592 if (dc->ctx->created_bios) 588 if (dc->ctx->created_bios)
593 dal_bios_parser_destroy(&dc->ctx->dc_bios); 589 dal_bios_parser_destroy(&dc->ctx->dc_bios);
594 590
@@ -670,6 +666,7 @@ static bool construct(struct dc *dc,
670 dc_ctx->dc = dc; 666 dc_ctx->dc = dc;
671 dc_ctx->asic_id = init_params->asic_id; 667 dc_ctx->asic_id = init_params->asic_id;
672 dc_ctx->dc_sink_id_count = 0; 668 dc_ctx->dc_sink_id_count = 0;
669 dc_ctx->dc_stream_id_count = 0;
673 dc->ctx = dc_ctx; 670 dc->ctx = dc_ctx;
674 671
675 dc->current_state = dc_create_state(); 672 dc->current_state = dc_create_state();
@@ -709,14 +706,6 @@ static bool construct(struct dc *dc,
709 dc_ctx->created_bios = true; 706 dc_ctx->created_bios = true;
710 } 707 }
711 708
712 /* Create I2C AUX */
713 dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
714
715 if (!dc_ctx->i2caux) {
716 ASSERT_CRITICAL(false);
717 goto fail;
718 }
719
720 dc_ctx->perf_trace = dc_perf_trace_create(); 709 dc_ctx->perf_trace = dc_perf_trace_create();
721 if (!dc_ctx->perf_trace) { 710 if (!dc_ctx->perf_trace) {
722 ASSERT_CRITICAL(false); 711 ASSERT_CRITICAL(false);
@@ -840,6 +829,11 @@ alloc_fail:
840 return NULL; 829 return NULL;
841} 830}
842 831
832void dc_init_callbacks(struct dc *dc,
833 const struct dc_callback_init *init_params)
834{
835}
836
843void dc_destroy(struct dc **dc) 837void dc_destroy(struct dc **dc)
844{ 838{
845 destruct(*dc); 839 destruct(*dc);
@@ -1040,7 +1034,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
 	/* Program all planes within new context*/
 	for (i = 0; i < context->stream_count; i++) {
-		const struct dc_sink *sink = context->streams[i]->sink;
+		const struct dc_link *link = context->streams[i]->link;
+		struct dc_stream_status *status;
 
 		if (!context->streams[i]->mode_changed)
 			continue;
@@ -1065,12 +1060,15 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 			}
 		}
 
-		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
+		status = dc_stream_get_status_from_state(context, context->streams[i]);
+		context->streams[i]->out.otg_offset = status->primary_otg_inst;
+
+		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
 				context->streams[i]->timing.h_addressable,
 				context->streams[i]->timing.v_addressable,
 				context->streams[i]->timing.h_total,
 				context->streams[i]->timing.v_total,
-				context->streams[i]->timing.pix_clk_khz);
+				context->streams[i]->timing.pix_clk_100hz / 10);
 	}
 
 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
@@ -1215,6 +1213,12 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
1215 */ 1213 */
1216 update_flags->bits.bpp_change = 1; 1214 update_flags->bits.bpp_change = 1;
1217 1215
1216 if (u->plane_info->plane_size.grph.surface_pitch != u->surface->plane_size.grph.surface_pitch
1217 || u->plane_info->plane_size.video.luma_pitch != u->surface->plane_size.video.luma_pitch
1218 || u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch)
1219 update_flags->bits.plane_size_change = 1;
1220
1221
1218 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, 1222 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
1219 sizeof(union dc_tiling_info)) != 0) { 1223 sizeof(union dc_tiling_info)) != 0) {
1220 update_flags->bits.swizzle_change = 1; 1224 update_flags->bits.swizzle_change = 1;
@@ -1236,7 +1240,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
1236 || update_flags->bits.output_tf_change) 1240 || update_flags->bits.output_tf_change)
1237 return UPDATE_TYPE_FULL; 1241 return UPDATE_TYPE_FULL;
1238 1242
1239 return UPDATE_TYPE_MED; 1243 return update_flags->raw ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST;
1240} 1244}
1241 1245
1242static enum surface_update_type get_scaling_info_update_type( 1246static enum surface_update_type get_scaling_info_update_type(
@@ -1605,7 +1609,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
1605 int surface_count, 1609 int surface_count,
1606 struct dc_stream_state *stream, 1610 struct dc_stream_state *stream,
1607 struct dc_stream_update *stream_update, 1611 struct dc_stream_update *stream_update,
1608 struct dc_plane_state **plane_states,
1609 struct dc_state *state) 1612 struct dc_state *state)
1610{ 1613{
1611 const struct dc_stream_status *stream_status; 1614 const struct dc_stream_status *stream_status;
@@ -1764,6 +1767,26 @@ void dc_resume(struct dc *dc)
1764 core_link_resume(dc->links[i]); 1767 core_link_resume(dc->links[i]);
1765} 1768}
1766 1769
1770unsigned int dc_get_current_backlight_pwm(struct dc *dc)
1771{
1772 struct abm *abm = dc->res_pool->abm;
1773
1774 if (abm)
1775 return abm->funcs->get_current_backlight(abm);
1776
1777 return 0;
1778}
1779
1780unsigned int dc_get_target_backlight_pwm(struct dc *dc)
1781{
1782 struct abm *abm = dc->res_pool->abm;
1783
1784 if (abm)
1785 return abm->funcs->get_target_backlight(abm);
1786
1787 return 0;
1788}
1789
1767bool dc_is_dmcu_initialized(struct dc *dc) 1790bool dc_is_dmcu_initialized(struct dc *dc)
1768{ 1791{
1769 struct dmcu *dmcu = dc->res_pool->dmcu; 1792 struct dmcu *dmcu = dc->res_pool->dmcu;
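
dc_get_current_backlight_pwm() and dc_get_target_backlight_pwm() expose the ABM block's raw PWM counts, returning 0 when no ABM is present. One plausible use (ours, not from this patch) is polling for the end of a brightness ramp:

/* Hypothetical poll: ABM ramps the current level toward the target. */
static bool backlight_ramp_settled(struct dc *dc)
{
	return dc_get_current_backlight_pwm(dc) ==
	       dc_get_target_backlight_pwm(dc);
}
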
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 52deacf39841..3dd5f2717b53 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -43,10 +43,6 @@
43#include "dpcd_defs.h" 43#include "dpcd_defs.h"
44#include "dmcu.h" 44#include "dmcu.h"
45 45
46#include "dce/dce_11_0_d.h"
47#include "dce/dce_11_0_enum.h"
48#include "dce/dce_11_0_sh_mask.h"
49
50#define DC_LOGGER_INIT(logger) 46#define DC_LOGGER_INIT(logger)
51 47
52 48
@@ -789,7 +785,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
789 return false; 785 return false;
790 } 786 }
791 787
792 sink->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock; 788 sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
793 sink->converter_disable_audio = converter_disable_audio; 789 sink->converter_disable_audio = converter_disable_audio;
794 790
795 link->local_sink = sink; 791 link->local_sink = sink;
@@ -1372,7 +1368,7 @@ static void dpcd_configure_panel_mode(
1372static void enable_stream_features(struct pipe_ctx *pipe_ctx) 1368static void enable_stream_features(struct pipe_ctx *pipe_ctx)
1373{ 1369{
1374 struct dc_stream_state *stream = pipe_ctx->stream; 1370 struct dc_stream_state *stream = pipe_ctx->stream;
1375 struct dc_link *link = stream->sink->link; 1371 struct dc_link *link = stream->link;
1376 union down_spread_ctrl old_downspread; 1372 union down_spread_ctrl old_downspread;
1377 union down_spread_ctrl new_downspread; 1373 union down_spread_ctrl new_downspread;
1378 1374
@@ -1397,7 +1393,7 @@ static enum dc_status enable_link_dp(
1397 struct dc_stream_state *stream = pipe_ctx->stream; 1393 struct dc_stream_state *stream = pipe_ctx->stream;
1398 enum dc_status status; 1394 enum dc_status status;
1399 bool skip_video_pattern; 1395 bool skip_video_pattern;
1400 struct dc_link *link = stream->sink->link; 1396 struct dc_link *link = stream->link;
1401 struct dc_link_settings link_settings = {0}; 1397 struct dc_link_settings link_settings = {0};
1402 enum dp_panel_mode panel_mode; 1398 enum dp_panel_mode panel_mode;
1403 1399
@@ -1414,8 +1410,8 @@ static enum dc_status enable_link_dp(
1414 pipe_ctx->clock_source->id, 1410 pipe_ctx->clock_source->id,
1415 &link_settings); 1411 &link_settings);
1416 1412
1417 if (stream->sink->edid_caps.panel_patch.dppowerup_delay > 0) { 1413 if (stream->sink_patches.dppowerup_delay > 0) {
1418 int delay_dp_power_up_in_ms = stream->sink->edid_caps.panel_patch.dppowerup_delay; 1414 int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
1419 1415
1420 msleep(delay_dp_power_up_in_ms); 1416 msleep(delay_dp_power_up_in_ms);
1421 } 1417 }
@@ -1448,7 +1444,7 @@ static enum dc_status enable_link_edp(
1448{ 1444{
1449 enum dc_status status; 1445 enum dc_status status;
1450 struct dc_stream_state *stream = pipe_ctx->stream; 1446 struct dc_stream_state *stream = pipe_ctx->stream;
1451 struct dc_link *link = stream->sink->link; 1447 struct dc_link *link = stream->link;
1452 /*in case it is not on*/ 1448 /*in case it is not on*/
1453 link->dc->hwss.edp_power_control(link, true); 1449 link->dc->hwss.edp_power_control(link, true);
1454 link->dc->hwss.edp_wait_for_hpd_ready(link, true); 1450 link->dc->hwss.edp_wait_for_hpd_ready(link, true);
@@ -1463,7 +1459,7 @@ static enum dc_status enable_link_dp_mst(
1463 struct dc_state *state, 1459 struct dc_state *state,
1464 struct pipe_ctx *pipe_ctx) 1460 struct pipe_ctx *pipe_ctx)
1465{ 1461{
1466 struct dc_link *link = pipe_ctx->stream->sink->link; 1462 struct dc_link *link = pipe_ctx->stream->link;
1467 1463
1468 /* sink signal type after MST branch is MST. Multiple MST sinks 1464 /* sink signal type after MST branch is MST. Multiple MST sinks
1469 * share one link. Link DP PHY is enable or training only once. 1465 * share one link. Link DP PHY is enable or training only once.
@@ -1597,7 +1593,7 @@ static bool i2c_write(struct pipe_ctx *pipe_ctx,
1597 cmd.payloads = &payload; 1593 cmd.payloads = &payload;
1598 1594
1599 if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx, 1595 if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx,
1600 pipe_ctx->stream->sink->link, &cmd)) 1596 pipe_ctx->stream->link, &cmd))
1601 return true; 1597 return true;
1602 1598
1603 return false; 1599 return false;
@@ -1651,7 +1647,7 @@ static void write_i2c_retimer_setting(
1651 else { 1647 else {
1652 i2c_success = 1648 i2c_success =
1653 dal_ddc_service_query_ddc_data( 1649 dal_ddc_service_query_ddc_data(
1654 pipe_ctx->stream->sink->link->ddc, 1650 pipe_ctx->stream->link->ddc,
1655 slave_address, &offset, 1, &value, 1); 1651 slave_address, &offset, 1, &value, 1);
1656 if (!i2c_success) 1652 if (!i2c_success)
1657 /* Write failure */ 1653 /* Write failure */
@@ -1704,7 +1700,7 @@ static void write_i2c_retimer_setting(
1704 else { 1700 else {
1705 i2c_success = 1701 i2c_success =
1706 dal_ddc_service_query_ddc_data( 1702 dal_ddc_service_query_ddc_data(
1707 pipe_ctx->stream->sink->link->ddc, 1703 pipe_ctx->stream->link->ddc,
1708 slave_address, &offset, 1, &value, 1); 1704 slave_address, &offset, 1, &value, 1);
1709 if (!i2c_success) 1705 if (!i2c_success)
1710 /* Write failure */ 1706 /* Write failure */
@@ -1929,7 +1925,7 @@ static void write_i2c_redriver_setting(
1929static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) 1925static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
1930{ 1926{
1931 struct dc_stream_state *stream = pipe_ctx->stream; 1927 struct dc_stream_state *stream = pipe_ctx->stream;
1932 struct dc_link *link = stream->sink->link; 1928 struct dc_link *link = stream->link;
1933 enum dc_color_depth display_color_depth; 1929 enum dc_color_depth display_color_depth;
1934 enum engine_id eng_id; 1930 enum engine_id eng_id;
1935 struct ext_hdmi_settings settings = {0}; 1931 struct ext_hdmi_settings settings = {0};
@@ -1938,12 +1934,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
1938 && (stream->timing.v_addressable == 480); 1934 && (stream->timing.v_addressable == 480);
1939 1935
1940 if (stream->phy_pix_clk == 0) 1936 if (stream->phy_pix_clk == 0)
1941 stream->phy_pix_clk = stream->timing.pix_clk_khz; 1937 stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
1942 if (stream->phy_pix_clk > 340000) 1938 if (stream->phy_pix_clk > 340000)
1943 is_over_340mhz = true; 1939 is_over_340mhz = true;
1944 1940
1945 if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { 1941 if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
1946 unsigned short masked_chip_caps = pipe_ctx->stream->sink->link->chip_caps & 1942 unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps &
1947 EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; 1943 EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
1948 if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { 1944 if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
1949 /* DP159, Retimer settings */ 1945 /* DP159, Retimer settings */
@@ -1964,11 +1960,11 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
1964 1960
1965 if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) 1961 if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
1966 dal_ddc_service_write_scdc_data( 1962 dal_ddc_service_write_scdc_data(
1967 stream->sink->link->ddc, 1963 stream->link->ddc,
1968 stream->phy_pix_clk, 1964 stream->phy_pix_clk,
1969 stream->timing.flags.LTE_340MCSC_SCRAMBLE); 1965 stream->timing.flags.LTE_340MCSC_SCRAMBLE);
1970 1966
1971 memset(&stream->sink->link->cur_link_settings, 0, 1967 memset(&stream->link->cur_link_settings, 0,
1972 sizeof(struct dc_link_settings)); 1968 sizeof(struct dc_link_settings));
1973 1969
1974 display_color_depth = stream->timing.display_color_depth; 1970 display_color_depth = stream->timing.display_color_depth;
@@ -1989,12 +1985,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
1989static void enable_link_lvds(struct pipe_ctx *pipe_ctx) 1985static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
1990{ 1986{
1991 struct dc_stream_state *stream = pipe_ctx->stream; 1987 struct dc_stream_state *stream = pipe_ctx->stream;
1992 struct dc_link *link = stream->sink->link; 1988 struct dc_link *link = stream->link;
1993 1989
1994 if (stream->phy_pix_clk == 0) 1990 if (stream->phy_pix_clk == 0)
1995 stream->phy_pix_clk = stream->timing.pix_clk_khz; 1991 stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
1996 1992
1997 memset(&stream->sink->link->cur_link_settings, 0, 1993 memset(&stream->link->cur_link_settings, 0,
1998 sizeof(struct dc_link_settings)); 1994 sizeof(struct dc_link_settings));
1999 1995
2000 link->link_enc->funcs->enable_lvds_output( 1996 link->link_enc->funcs->enable_lvds_output(
@@ -2067,7 +2063,7 @@ static bool dp_active_dongle_validate_timing(
2067 const struct dc_crtc_timing *timing, 2063 const struct dc_crtc_timing *timing,
2068 const struct dpcd_caps *dpcd_caps) 2064 const struct dpcd_caps *dpcd_caps)
2069{ 2065{
2070 unsigned int required_pix_clk = timing->pix_clk_khz; 2066 unsigned int required_pix_clk_100hz = timing->pix_clk_100hz;
2071 const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps; 2067 const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
2072 2068
2073 switch (dpcd_caps->dongle_type) { 2069 switch (dpcd_caps->dongle_type) {
@@ -2107,9 +2103,9 @@ static bool dp_active_dongle_validate_timing(
2107 2103
2108 /* Check Color Depth and Pixel Clock */ 2104 /* Check Color Depth and Pixel Clock */
2109 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) 2105 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
2110 required_pix_clk /= 2; 2106 required_pix_clk_100hz /= 2;
2111 else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) 2107 else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
2112 required_pix_clk = required_pix_clk * 2 / 3; 2108 required_pix_clk_100hz = required_pix_clk_100hz * 2 / 3;
2113 2109
2114 switch (timing->display_color_depth) { 2110 switch (timing->display_color_depth) {
2115 case COLOR_DEPTH_666: 2111 case COLOR_DEPTH_666:
@@ -2119,12 +2115,12 @@ static bool dp_active_dongle_validate_timing(
2119 case COLOR_DEPTH_101010: 2115 case COLOR_DEPTH_101010:
2120 if (dongle_caps->dp_hdmi_max_bpc < 10) 2116 if (dongle_caps->dp_hdmi_max_bpc < 10)
2121 return false; 2117 return false;
2122 required_pix_clk = required_pix_clk * 10 / 8; 2118 required_pix_clk_100hz = required_pix_clk_100hz * 10 / 8;
2123 break; 2119 break;
2124 case COLOR_DEPTH_121212: 2120 case COLOR_DEPTH_121212:
2125 if (dongle_caps->dp_hdmi_max_bpc < 12) 2121 if (dongle_caps->dp_hdmi_max_bpc < 12)
2126 return false; 2122 return false;
2127 required_pix_clk = required_pix_clk * 12 / 8; 2123 required_pix_clk_100hz = required_pix_clk_100hz * 12 / 8;
2128 break; 2124 break;
2129 2125
2130 case COLOR_DEPTH_141414: 2126 case COLOR_DEPTH_141414:
@@ -2134,7 +2130,7 @@ static bool dp_active_dongle_validate_timing(
2134 return false; 2130 return false;
2135 } 2131 }
2136 2132
2137 if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk) 2133 if (required_pix_clk_100hz > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
2138 return false; 2134 return false;
2139 2135
2140 return true; 2136 return true;
@@ -2145,7 +2141,7 @@ enum dc_status dc_link_validate_mode_timing(
2145 struct dc_link *link, 2141 struct dc_link *link,
2146 const struct dc_crtc_timing *timing) 2142 const struct dc_crtc_timing *timing)
2147{ 2143{
2148 uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk; 2144 uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10;
2149 struct dpcd_caps *dpcd_caps = &link->dpcd_caps; 2145 struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
2150 2146
2151 /* A hack to avoid failing any modes for EDID override feature on 2147 /* A hack to avoid failing any modes for EDID override feature on
@@ -2155,7 +2151,7 @@ enum dc_status dc_link_validate_mode_timing(
2155 return DC_OK; 2151 return DC_OK;
2156 2152
2157 /* Passive Dongle */ 2153 /* Passive Dongle */
2158 if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk) 2154 if (0 != max_pix_clk && timing->pix_clk_100hz > max_pix_clk)
2159 return DC_EXCEED_DONGLE_CAP; 2155 return DC_EXCEED_DONGLE_CAP;
2160 2156
2161 /* Active Dongle*/ 2157 /* Active Dongle*/
@@ -2190,8 +2186,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
2190 2186
2191bool dc_link_set_backlight_level(const struct dc_link *link, 2187bool dc_link_set_backlight_level(const struct dc_link *link,
2192 uint32_t backlight_pwm_u16_16, 2188 uint32_t backlight_pwm_u16_16,
2193 uint32_t frame_ramp, 2189 uint32_t frame_ramp)
2194 const struct dc_stream_state *stream)
2195{ 2190{
2196 struct dc *core_dc = link->ctx->dc; 2191 struct dc *core_dc = link->ctx->dc;
2197 struct abm *abm = core_dc->res_pool->abm; 2192 struct abm *abm = core_dc->res_pool->abm;
@@ -2206,10 +2201,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
2206 (abm->funcs->set_backlight_level_pwm == NULL)) 2201 (abm->funcs->set_backlight_level_pwm == NULL))
2207 return false; 2202 return false;
2208 2203
2209 if (stream)
2210 ((struct dc_stream_state *)stream)->bl_pwm_level =
2211 backlight_pwm_u16_16;
2212
2213 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); 2204 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
2214 2205
2215 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", 2206 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
@@ -2219,7 +2210,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
2219 for (i = 0; i < MAX_PIPES; i++) { 2210 for (i = 0; i < MAX_PIPES; i++) {
2220 if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { 2211 if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
2221 if (core_dc->current_state->res_ctx. 2212 if (core_dc->current_state->res_ctx.
2222 pipe_ctx[i].stream->sink->link 2213 pipe_ctx[i].stream->link
2223 == link) 2214 == link)
2224 /* DMCU -1 for all controller id values, 2215 /* DMCU -1 for all controller id values,
2225 * therefore +1 here 2216 * therefore +1 here
@@ -2279,7 +2270,7 @@ void core_link_resume(struct dc_link *link)
2279static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) 2270static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
2280{ 2271{
2281 struct dc_link_settings *link_settings = 2272 struct dc_link_settings *link_settings =
2282 &stream->sink->link->cur_link_settings; 2273 &stream->link->cur_link_settings;
2283 uint32_t link_rate_in_mbps = 2274 uint32_t link_rate_in_mbps =
2284 link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ; 2275 link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
2285 struct fixed31_32 mbps = dc_fixpt_from_int( 2276 struct fixed31_32 mbps = dc_fixpt_from_int(
@@ -2310,7 +2301,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
2310 uint32_t denominator; 2301 uint32_t denominator;
2311 2302
2312 bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth); 2303 bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
2313 kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk * bpc * 3; 2304 kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3;
2314 2305
2315 /* 2306 /*
2316 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 2307 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -2386,7 +2377,7 @@ static void update_mst_stream_alloc_table(
2386static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) 2377static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
2387{ 2378{
2388 struct dc_stream_state *stream = pipe_ctx->stream; 2379 struct dc_stream_state *stream = pipe_ctx->stream;
2389 struct dc_link *link = stream->sink->link; 2380 struct dc_link *link = stream->link;
2390 struct link_encoder *link_encoder = link->link_enc; 2381 struct link_encoder *link_encoder = link->link_enc;
2391 struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; 2382 struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
2392 struct dp_mst_stream_allocation_table proposed_table = {0}; 2383 struct dp_mst_stream_allocation_table proposed_table = {0};
@@ -2466,7 +2457,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
2466static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) 2457static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
2467{ 2458{
2468 struct dc_stream_state *stream = pipe_ctx->stream; 2459 struct dc_stream_state *stream = pipe_ctx->stream;
2469 struct dc_link *link = stream->sink->link; 2460 struct dc_link *link = stream->link;
2470 struct link_encoder *link_encoder = link->link_enc; 2461 struct link_encoder *link_encoder = link->link_enc;
2471 struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; 2462 struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
2472 struct dp_mst_stream_allocation_table proposed_table = {0}; 2463 struct dp_mst_stream_allocation_table proposed_table = {0};
@@ -2551,8 +2542,8 @@ void core_link_enable_stream(
2551 DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); 2542 DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
2552 2543
2553 if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) { 2544 if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) {
2554 stream->sink->link->link_enc->funcs->setup( 2545 stream->link->link_enc->funcs->setup(
2555 stream->sink->link->link_enc, 2546 stream->link->link_enc,
2556 pipe_ctx->stream->signal); 2547 pipe_ctx->stream->signal);
2557 pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync( 2548 pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
2558 pipe_ctx->stream_res.stream_enc, 2549 pipe_ctx->stream_res.stream_enc,
@@ -2604,7 +2595,7 @@ void core_link_enable_stream(
2604 2595
2605 if (status != DC_OK) { 2596 if (status != DC_OK) {
2606 DC_LOG_WARNING("enabling link %u failed: %d\n", 2597 DC_LOG_WARNING("enabling link %u failed: %d\n",
2607 pipe_ctx->stream->sink->link->link_index, 2598 pipe_ctx->stream->link->link_index,
2608 status); 2599 status);
2609 2600
2610 /* Abort stream enable *unless* the failure was due to 2601 /* Abort stream enable *unless* the failure was due to
@@ -2633,15 +2624,10 @@ void core_link_enable_stream(
2633 allocate_mst_payload(pipe_ctx); 2624 allocate_mst_payload(pipe_ctx);
2634 2625
2635 core_dc->hwss.unblank_stream(pipe_ctx, 2626 core_dc->hwss.unblank_stream(pipe_ctx,
2636 &pipe_ctx->stream->sink->link->cur_link_settings); 2627 &pipe_ctx->stream->link->cur_link_settings);
2637 2628
2638 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 2629 if (dc_is_dp_signal(pipe_ctx->stream->signal))
2639 enable_stream_features(pipe_ctx); 2630 enable_stream_features(pipe_ctx);
2640
2641 dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
2642 pipe_ctx->stream->bl_pwm_level,
2643 0,
2644 pipe_ctx->stream);
2645 } 2631 }
2646 2632
2647} 2633}
@@ -2657,7 +2643,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
2657 2643
2658 core_dc->hwss.disable_stream(pipe_ctx, option); 2644 core_dc->hwss.disable_stream(pipe_ctx, option);
2659 2645
2660 disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal); 2646 disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
2661} 2647}
2662 2648
2663void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) 2649void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
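
The recurring change in dc_link.c is a unit migration: timings now carry pix_clk_100hz (units of 100 Hz) instead of pix_clk_khz, so comparisons against kHz capabilities scale the cap up by 10, as in dp_active_dongle_validate_timing and dc_link_validate_mode_timing above. A small standalone check of that arithmetic (the helper names are made up for illustration):

#include <stdint.h>
#include <assert.h>

/* kHz <-> 100 Hz units: 1 kHz == 10 * 100 Hz. */
static uint32_t khz_to_100hz(uint32_t khz)   { return khz * 10; }
static uint32_t hz100_to_khz(uint32_t hz100) { return hz100 / 10; }

int main(void)
{
	uint32_t pix_clk_100hz = khz_to_100hz(25175); /* 640x480@60: 25.175 MHz */
	uint32_t dongle_max_khz = 340000;             /* dongle cap kept in kHz */

	/* Same shape as the patched comparison: scale the kHz cap up rather
	 * than losing precision by scaling the 100 Hz value down. */
	assert(pix_clk_100hz <= dongle_max_khz * 10);
	assert(hz100_to_khz(pix_clk_100hz) == 25175);
	return 0;
}
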
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 506a97e16956..b7ee63cd8dc7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -33,7 +33,7 @@
33#include "include/vector.h" 33#include "include/vector.h"
34#include "core_types.h" 34#include "core_types.h"
35#include "dc_link_ddc.h" 35#include "dc_link_ddc.h"
36#include "aux_engine.h" 36#include "dce/dce_aux.h"
37 37
38#define AUX_POWER_UP_WA_DELAY 500 38#define AUX_POWER_UP_WA_DELAY 500
39#define I2C_OVER_AUX_DEFER_WA_DELAY 70 39#define I2C_OVER_AUX_DEFER_WA_DELAY 70
@@ -42,7 +42,6 @@
42#define CV_SMART_DONGLE_ADDRESS 0x20 42#define CV_SMART_DONGLE_ADDRESS 0x20
43/* DVI-HDMI dongle slave address for retrieving dongle signature*/ 43/* DVI-HDMI dongle slave address for retrieving dongle signature*/
44#define DVI_HDMI_DONGLE_ADDRESS 0x68 44#define DVI_HDMI_DONGLE_ADDRESS 0x68
45static const int8_t dvi_hdmi_dongle_signature_str[] = "6140063500G";
46struct dvi_hdmi_dongle_signature_data { 45struct dvi_hdmi_dongle_signature_data {
47 int8_t vendor[3];/* "AMD" */ 46 int8_t vendor[3];/* "AMD" */
48 uint8_t version[2]; 47 uint8_t version[2];
@@ -165,43 +164,6 @@ static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
165 164
166} 165}
167 166
168static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
169{
170 struct aux_payloads *payloads;
171
172 payloads = kzalloc(sizeof(struct aux_payloads), GFP_KERNEL);
173
174 if (!payloads)
175 return NULL;
176
177 if (dal_vector_construct(
178 &payloads->payloads, ctx, count, sizeof(struct aux_payload)))
179 return payloads;
180
181 kfree(payloads);
182 return NULL;
183}
184
185static struct aux_payload *dal_ddc_aux_payloads_get(struct aux_payloads *p)
186{
187 return (struct aux_payload *)p->payloads.container;
188}
189
190static uint32_t dal_ddc_aux_payloads_get_count(struct aux_payloads *p)
191{
192 return p->payloads.count;
193}
194
195static void dal_ddc_aux_payloads_destroy(struct aux_payloads **p)
196{
197 if (!p || !*p)
198 return;
199
200 dal_vector_destruct(&(*p)->payloads);
201 kfree(*p);
202 *p = NULL;
203}
204
205#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b)) 167#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
206 168
207void dal_ddc_i2c_payloads_add( 169void dal_ddc_i2c_payloads_add(
@@ -225,27 +187,6 @@ void dal_ddc_i2c_payloads_add(
225 187
226} 188}
227 189
228void dal_ddc_aux_payloads_add(
229 struct aux_payloads *payloads,
230 uint32_t address,
231 uint32_t len,
232 uint8_t *data,
233 bool write)
234{
235 uint32_t payload_size = DEFAULT_AUX_MAX_DATA_SIZE;
236 uint32_t pos;
237
238 for (pos = 0; pos < len; pos += payload_size) {
239 struct aux_payload payload = {
240 .i2c_over_aux = true,
241 .write = write,
242 .address = address,
243 .length = DDC_MIN(payload_size, len - pos),
244 .data = data + pos };
245 dal_vector_append(&payloads->payloads, &payload);
246 }
247}
248
249static void construct( 190static void construct(
250 struct ddc_service *ddc_service, 191 struct ddc_service *ddc_service,
251 struct ddc_service_init_data *init_data) 192 struct ddc_service_init_data *init_data)
@@ -574,32 +515,34 @@ bool dal_ddc_service_query_ddc_data(
574 /*TODO: len of payload data for i2c and aux is uint8!!!!, 515 /*TODO: len of payload data for i2c and aux is uint8!!!!,
575 * but we want to read 256 over i2c!!!!*/ 516 * but we want to read 256 over i2c!!!!*/
576 if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { 517 if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
577 518 struct aux_payload write_payload = {
578 struct aux_payloads *payloads = 519 .i2c_over_aux = true,
579 dal_ddc_aux_payloads_create(ddc->ctx, payloads_num); 520 .write = true,
580 521 .mot = true,
581 struct aux_command command = { 522 .address = address,
582 .payloads = dal_ddc_aux_payloads_get(payloads), 523 .length = write_size,
583 .number_of_payloads = 0, 524 .data = write_buf,
525 .reply = NULL,
584 .defer_delay = get_defer_delay(ddc), 526 .defer_delay = get_defer_delay(ddc),
585 .max_defer_write_retry = 0 }; 527 };
586 528
587 dal_ddc_aux_payloads_add( 529 struct aux_payload read_payload = {
588 payloads, address, write_size, write_buf, true); 530 .i2c_over_aux = true,
589 531 .write = false,
590 dal_ddc_aux_payloads_add( 532 .mot = false,
591 payloads, address, read_size, read_buf, false); 533 .address = address,
592 534 .length = read_size,
593 command.number_of_payloads = 535 .data = read_buf,
594 dal_ddc_aux_payloads_get_count(payloads); 536 .reply = NULL,
537 .defer_delay = get_defer_delay(ddc),
538 };
595 539
596 ret = dal_i2caux_submit_aux_command( 540 ret = dc_link_aux_transfer_with_retries(ddc, &write_payload);
597 ddc->ctx->i2caux,
598 ddc->ddc_pin,
599 &command);
600 541
601 dal_ddc_aux_payloads_destroy(&payloads); 542 if (!ret)
543 return false;
602 544
545 ret = dc_link_aux_transfer_with_retries(ddc, &read_payload);
603 } else { 546 } else {
604 struct i2c_payloads *payloads = 547 struct i2c_payloads *payloads =
605 dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); 548 dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
@@ -631,56 +574,15 @@ bool dal_ddc_service_query_ddc_data(
631} 574}
632 575
633int dc_link_aux_transfer(struct ddc_service *ddc, 576int dc_link_aux_transfer(struct ddc_service *ddc,
634 unsigned int address, 577 struct aux_payload *payload)
635 uint8_t *reply,
636 void *buffer,
637 unsigned int size,
638 enum aux_transaction_type type,
639 enum i2caux_transaction_action action)
640{ 578{
641 struct ddc *ddc_pin = ddc->ddc_pin; 579 return dce_aux_transfer(ddc, payload);
642 struct aux_engine *aux_engine; 580}
643 enum aux_channel_operation_result operation_result; 581
644 struct aux_request_transaction_data aux_req; 582bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
645 struct aux_reply_transaction_data aux_rep; 583 struct aux_payload *payload)
646 uint8_t returned_bytes = 0; 584{
647 int res = -1; 585 return dce_aux_transfer_with_retries(ddc, payload);
648 uint32_t status;
649
650 memset(&aux_req, 0, sizeof(aux_req));
651 memset(&aux_rep, 0, sizeof(aux_rep));
652
653 aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
654 aux_engine->funcs->acquire(aux_engine, ddc_pin);
655
656 aux_req.type = type;
657 aux_req.action = action;
658
659 aux_req.address = address;
660 aux_req.delay = 0;
661 aux_req.length = size;
662 aux_req.data = buffer;
663
664 aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
665 operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
666
667 switch (operation_result) {
668 case AUX_CHANNEL_OPERATION_SUCCEEDED:
669 res = aux_engine->funcs->read_channel_reply(aux_engine, size,
670 buffer, reply,
671 &status);
672 break;
673 case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
674 res = 0;
675 break;
676 case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
677 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
678 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
679 res = -1;
680 break;
681 }
682 aux_engine->funcs->release_engine(aux_engine);
683 return res;
684} 586}
685 587
686/*test only function*/ 588/*test only function*/
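
The dal_ddc_service_query_ddc_data() rewrite above drops the dynamically built payload vector in favor of exactly two fixed transactions: an I2C-over-AUX write with MOT (middle-of-transaction) set to latch the register offset, then a read with MOT cleared to close the transaction. A hedged sketch of that shape with deliberately simplified types — aux_transfer() is a stand-in here, not the real dce_aux entry point:

#include <stdbool.h>
#include <stdint.h>

struct aux_payload {
	bool     i2c_over_aux;
	bool     write;
	bool     mot;      /* keep the I2C transaction open after this payload */
	uint32_t address;
	uint32_t length;
	uint8_t *data;
};

/* Stand-in for dc_link_aux_transfer_with_retries(); assumed to return
 * true on success. */
static bool aux_transfer(const struct aux_payload *p) { (void)p; return true; }

static bool query_ddc(uint32_t addr, uint8_t *wbuf, uint32_t wlen,
		      uint8_t *rbuf, uint32_t rlen)
{
	struct aux_payload write_payload = {
		.i2c_over_aux = true, .write = true,  .mot = true,
		.address = addr, .length = wlen, .data = wbuf,
	};
	struct aux_payload read_payload = {
		.i2c_over_aux = true, .write = false, .mot = false,
		.address = addr, .length = rlen, .data = rbuf,
	};

	/* Write the offset first; bail out early on failure, as the diff does. */
	if (!aux_transfer(&write_payload))
		return false;
	return aux_transfer(&read_payload);
}

int main(void)
{
	uint8_t offset = 0, value = 0;
	return query_ddc(0x50, &offset, 1, &value, 1) ? 0 : 1;
}
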
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 0caacb60b02f..92f565ca1260 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -47,7 +47,7 @@ static void wait_for_training_aux_rd_interval(
47 struct dc_link *link, 47 struct dc_link *link,
48 uint32_t default_wait_in_micro_secs) 48 uint32_t default_wait_in_micro_secs)
49{ 49{
50 union training_aux_rd_interval training_rd_interval; 50 union training_aux_rd_interval training_rd_interval = {0};
51 51
52 /* overwrite the delay if rev > 1.1*/ 52 /* overwrite the delay if rev > 1.1*/
53 if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { 53 if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
@@ -117,6 +117,13 @@ static void dpcd_set_link_settings(
117 core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, 117 core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
118 &downspread.raw, sizeof(downspread)); 118 &downspread.raw, sizeof(downspread));
119 119
120 if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
121 (link->dpcd_caps.link_rate_set >= 1 &&
122 link->dpcd_caps.link_rate_set <= 8)) {
123 core_link_write_dpcd(link, DP_LINK_RATE_SET,
124 &link->dpcd_caps.link_rate_set, 1);
125 }
126
120 DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n", 127 DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
121 __func__, 128 __func__,
122 DP_LINK_BW_SET, 129 DP_LINK_BW_SET,
@@ -1542,7 +1549,7 @@ static uint32_t bandwidth_in_kbps_from_timing(
1542 1549
1543 ASSERT(bits_per_channel != 0); 1550 ASSERT(bits_per_channel != 0);
1544 1551
1545 kbps = timing->pix_clk_khz; 1552 kbps = timing->pix_clk_100hz / 10;
1546 kbps *= bits_per_channel; 1553 kbps *= bits_per_channel;
1547 1554
1548 if (timing->flags.Y_ONLY != 1) { 1555 if (timing->flags.Y_ONLY != 1) {
@@ -1584,7 +1591,7 @@ bool dp_validate_mode_timing(
1584 const struct dc_link_settings *link_setting; 1591 const struct dc_link_settings *link_setting;
1585 1592
1586 /*always DP fail safe mode*/ 1593 /*always DP fail safe mode*/
1587 if (timing->pix_clk_khz == (uint32_t) 25175 && 1594 if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 &&
1588 timing->h_addressable == (uint32_t) 640 && 1595 timing->h_addressable == (uint32_t) 640 &&
1589 timing->v_addressable == (uint32_t) 480) 1596 timing->v_addressable == (uint32_t) 480)
1590 return true; 1597 return true;
@@ -1634,7 +1641,7 @@ void decide_link_settings(struct dc_stream_state *stream,
1634 1641
1635 req_bw = bandwidth_in_kbps_from_timing(&stream->timing); 1642 req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
1636 1643
1637 link = stream->sink->link; 1644 link = stream->link;
1638 1645
1639 /* if preferred is specified through AMDDP, use it, if it's enough 1646 /* if preferred is specified through AMDDP, use it, if it's enough
1640 * to drive the mode 1647 * to drive the mode
@@ -1656,7 +1663,7 @@ void decide_link_settings(struct dc_stream_state *stream,
1656 } 1663 }
1657 1664
1658 /* EDP use the link cap setting */ 1665 /* EDP use the link cap setting */
1659 if (stream->sink->sink_signal == SIGNAL_TYPE_EDP) { 1666 if (link->connector_signal == SIGNAL_TYPE_EDP) {
1660 *link_setting = link->verified_link_cap; 1667 *link_setting = link->verified_link_cap;
1661 return; 1668 return;
1662 } 1669 }
@@ -2002,11 +2009,7 @@ static void handle_automated_test(struct dc_link *link)
2002 dp_test_send_phy_test_pattern(link); 2009 dp_test_send_phy_test_pattern(link);
2003 test_response.bits.ACK = 1; 2010 test_response.bits.ACK = 1;
2004 } 2011 }
2005 if (!test_request.raw) 2012
2006 /* no requests, revert all test signals
2007 * TODO: revert all test signals
2008 */
2009 test_response.bits.ACK = 1;
2010 /* send request acknowledgment */ 2013 /* send request acknowledgment */
2011 if (test_response.bits.ACK) 2014 if (test_response.bits.ACK)
2012 core_link_write_dpcd( 2015 core_link_write_dpcd(
@@ -2493,13 +2496,105 @@ bool detect_dp_sink_caps(struct dc_link *link)
2493 /* TODO save sink caps in link->sink */ 2496 /* TODO save sink caps in link->sink */
2494} 2497}
2495 2498
2499enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
2500{
2501 enum dc_link_rate link_rate;
2502 // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation.
2503 switch (link_rate_in_khz) {
2504 case 1620000:
2505 link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane
2506 break;
2507 case 2160000:
2508 link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane
2509 break;
2510 case 2430000:
2511 link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane
2512 break;
2513 case 2700000:
2514 link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane
2515 break;
2516 case 3240000:
2517 link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2) - 3.24 Gbps/Lane
2518 break;
2519 case 4320000:
2520 link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane
2521 break;
2522 case 5400000:
2523 link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2) - 5.40 Gbps/Lane
2524 break;
2525 case 8100000:
2526 link_rate = LINK_RATE_HIGH3; // Rate_8 (HBR3) - 8.10 Gbps/Lane
2527 break;
2528 default:
2529 link_rate = LINK_RATE_UNKNOWN;
2530 break;
2531 }
2532 return link_rate;
2533}
2534
2496void detect_edp_sink_caps(struct dc_link *link) 2535void detect_edp_sink_caps(struct dc_link *link)
2497{ 2536{
2498 retrieve_link_cap(link); 2537 uint8_t supported_link_rates[16] = {0};
2538 uint32_t entry;
2539 uint32_t link_rate_in_khz;
2540 enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
2541 uint8_t link_rate_set = 0;
2499 2542
2500 if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN) 2543 retrieve_link_cap(link);
2501 link->reported_link_cap.link_rate = LINK_RATE_HIGH2;
2502 2544
2545 if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
2546 // Read DPCD 00010h - 0001Fh 16 bytes at one shot
2547 core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
2548 supported_link_rates, sizeof(supported_link_rates));
2549
2550 link->dpcd_caps.link_rate_set = 0;
2551 for (entry = 0; entry < 16; entry += 2) {
2552 // DPCD register reports per-lane link rate = 16-bit link rate capability
2553 // value X 200 kHz. Need multiplier to find link rate in kHz.
2554 link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
2555 supported_link_rates[entry]) * 200;
2556
2557 if (link_rate_in_khz != 0) {
2558 link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
2559 if (link->reported_link_cap.link_rate < link_rate) {
2560 link->reported_link_cap.link_rate = link_rate;
2561
2562 switch (link_rate) {
2563 case LINK_RATE_LOW:
2564 link_rate_set = 1;
2565 break;
2566 case LINK_RATE_RATE_2:
2567 link_rate_set = 2;
2568 break;
2569 case LINK_RATE_RATE_3:
2570 link_rate_set = 3;
2571 break;
2572 case LINK_RATE_HIGH:
2573 link_rate_set = 4;
2574 break;
2575 case LINK_RATE_RBR2:
2576 link_rate_set = 5;
2577 break;
2578 case LINK_RATE_RATE_6:
2579 link_rate_set = 6;
2580 break;
2581 case LINK_RATE_HIGH2:
2582 link_rate_set = 7;
2583 break;
2584 case LINK_RATE_HIGH3:
2585 link_rate_set = 8;
2586 break;
2587 default:
2588 link_rate_set = 0;
2589 break;
2590 }
2591
2592 if (link->dpcd_caps.link_rate_set < link_rate_set)
2593 link->dpcd_caps.link_rate_set = link_rate_set;
2594 }
2595 }
2596 }
2597 }
2503 link->verified_link_cap = link->reported_link_cap; 2598 link->verified_link_cap = link->reported_link_cap;
2504} 2599}
2505 2600
@@ -2621,7 +2716,7 @@ bool dc_link_dp_set_test_pattern(
2621 memset(&training_pattern, 0, sizeof(training_pattern)); 2716 memset(&training_pattern, 0, sizeof(training_pattern));
2622 2717
2623 for (i = 0; i < MAX_PIPES; i++) { 2718 for (i = 0; i < MAX_PIPES; i++) {
2624 if (pipes[i].stream->sink->link == link) { 2719 if (pipes[i].stream->link == link) {
2625 pipe_ctx = &pipes[i]; 2720 pipe_ctx = &pipes[i];
2626 break; 2721 break;
2627 } 2722 }
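
detect_edp_sink_caps() above decodes DPCD 00010h-0001Fh as eight little-endian 16-bit entries, each in units of 200 kHz, then picks the highest known rate. A quick standalone check of that decoding (the table values below are chosen for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two example entries, low byte first as in the DPCD block:
	 * 0x1FA4 = 8100,  8100  * 200 kHz = 1620000 kHz (RBR, 1.62 Gbps)
	 * 0x6978 = 27000, 27000 * 200 kHz = 5400000 kHz (HBR2, 5.40 Gbps) */
	uint8_t supported_link_rates[16] = { 0xA4, 0x1F, 0x78, 0x69 };

	for (unsigned int entry = 0; entry < 16; entry += 2) {
		uint32_t rate_khz = (supported_link_rates[entry + 1] * 0x100 +
				     supported_link_rates[entry]) * 200;
		if (rate_khz != 0)
			printf("entry %u: %u kHz\n", entry / 2,
			       (unsigned int)rate_khz);
	}
	return 0;
}
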
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 0065ec7d5330..16d441d3af8a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -70,13 +70,12 @@ void dp_enable_link_phy(
70 */ 70 */
71 for (i = 0; i < MAX_PIPES; i++) { 71 for (i = 0; i < MAX_PIPES; i++) {
72 if (pipes[i].stream != NULL && 72 if (pipes[i].stream != NULL &&
73 pipes[i].stream->sink != NULL && 73 pipes[i].stream->link == link) {
74 pipes[i].stream->sink->link == link) {
75 if (pipes[i].clock_source != NULL && 74 if (pipes[i].clock_source != NULL &&
76 pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { 75 pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
77 pipes[i].clock_source = dp_cs; 76 pipes[i].clock_source = dp_cs;
78 pipes[i].stream_res.pix_clk_params.requested_pix_clk = 77 pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
79 pipes[i].stream->timing.pix_clk_khz; 78 pipes[i].stream->timing.pix_clk_100hz;
80 pipes[i].clock_source->funcs->program_pix_clk( 79 pipes[i].clock_source->funcs->program_pix_clk(
81 pipes[i].clock_source, 80 pipes[i].clock_source,
82 &pipes[i].stream_res.pix_clk_params, 81 &pipes[i].stream_res.pix_clk_params,
@@ -279,10 +278,8 @@ void dp_retrain_link_dp_test(struct dc_link *link,
279 for (i = 0; i < MAX_PIPES; i++) { 278 for (i = 0; i < MAX_PIPES; i++) {
280 if (pipes[i].stream != NULL && 279 if (pipes[i].stream != NULL &&
281 !pipes[i].top_pipe && 280 !pipes[i].top_pipe &&
282 pipes[i].stream->sink != NULL && 281 pipes[i].stream->link != NULL &&
283 pipes[i].stream->sink->link != NULL && 282 pipes[i].stream_res.stream_enc != NULL) {
284 pipes[i].stream_res.stream_enc != NULL &&
285 pipes[i].stream->sink->link == link) {
286 udelay(100); 283 udelay(100);
287 284
288 pipes[i].stream_res.stream_enc->funcs->dp_blank( 285 pipes[i].stream_res.stream_enc->funcs->dp_blank(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 76137df74a53..9888bc7659f3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -355,8 +355,8 @@ bool resource_are_streams_timing_synchronizable(
355 != stream2->timing.v_addressable) 355 != stream2->timing.v_addressable)
356 return false; 356 return false;
357 357
358 if (stream1->timing.pix_clk_khz 358 if (stream1->timing.pix_clk_100hz
359 != stream2->timing.pix_clk_khz) 359 != stream2->timing.pix_clk_100hz)
360 return false; 360 return false;
361 361
362 if (stream1->clamping.c_depth != stream2->clamping.c_depth) 362 if (stream1->clamping.c_depth != stream2->clamping.c_depth)
@@ -1559,7 +1559,7 @@ static struct stream_encoder *find_first_free_match_stream_enc_for_link(
1559{ 1559{
1560 int i; 1560 int i;
1561 int j = -1; 1561 int j = -1;
1562 struct dc_link *link = stream->sink->link; 1562 struct dc_link *link = stream->link;
1563 1563
1564 for (i = 0; i < pool->stream_enc_count; i++) { 1564 for (i = 0; i < pool->stream_enc_count; i++) {
1565 if (!res_ctx->is_stream_enc_acquired[i] && 1565 if (!res_ctx->is_stream_enc_acquired[i] &&
@@ -1748,7 +1748,7 @@ static struct dc_stream_state *find_pll_sharable_stream(
1748 if (resource_are_streams_timing_synchronizable( 1748 if (resource_are_streams_timing_synchronizable(
1749 stream_needs_pll, stream_has_pll) 1749 stream_needs_pll, stream_has_pll)
1750 && !dc_is_dp_signal(stream_has_pll->signal) 1750 && !dc_is_dp_signal(stream_has_pll->signal)
1751 && stream_has_pll->sink->link->connector_signal 1751 && stream_has_pll->link->connector_signal
1752 != SIGNAL_TYPE_VIRTUAL) 1752 != SIGNAL_TYPE_VIRTUAL)
1753 return stream_has_pll; 1753 return stream_has_pll;
1754 1754
@@ -1759,7 +1759,7 @@ static struct dc_stream_state *find_pll_sharable_stream(
1759 1759
1760static int get_norm_pix_clk(const struct dc_crtc_timing *timing) 1760static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
1761{ 1761{
1762 uint32_t pix_clk = timing->pix_clk_khz; 1762 uint32_t pix_clk = timing->pix_clk_100hz;
1763 uint32_t normalized_pix_clk = pix_clk; 1763 uint32_t normalized_pix_clk = pix_clk;
1764 1764
1765 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) 1765 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
@@ -1791,10 +1791,10 @@ static void calculate_phy_pix_clks(struct dc_stream_state *stream)
1791 /* update actual pixel clock on all streams */ 1791 /* update actual pixel clock on all streams */
1792 if (dc_is_hdmi_signal(stream->signal)) 1792 if (dc_is_hdmi_signal(stream->signal))
1793 stream->phy_pix_clk = get_norm_pix_clk( 1793 stream->phy_pix_clk = get_norm_pix_clk(
1794 &stream->timing); 1794 &stream->timing) / 10;
1795 else 1795 else
1796 stream->phy_pix_clk = 1796 stream->phy_pix_clk =
1797 stream->timing.pix_clk_khz; 1797 stream->timing.pix_clk_100hz / 10;
1798 1798
1799 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) 1799 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
1800 stream->phy_pix_clk *= 2; 1800 stream->phy_pix_clk *= 2;
@@ -1842,7 +1842,7 @@ enum dc_status resource_map_pool_resources(
1842 &context->res_ctx, pool, stream); 1842 &context->res_ctx, pool, stream);
1843 1843
1844 if (!pipe_ctx->stream_res.stream_enc) 1844 if (!pipe_ctx->stream_res.stream_enc)
1845 return DC_NO_STREAM_ENG_RESOURCE; 1845 return DC_NO_STREAM_ENC_RESOURCE;
1846 1846
1847 update_stream_engine_usage( 1847 update_stream_engine_usage(
1848 &context->res_ctx, pool, 1848 &context->res_ctx, pool,
@@ -1850,7 +1850,7 @@ enum dc_status resource_map_pool_resources(
1850 true); 1850 true);
1851 1851
1852 /* TODO: Add check if ASIC support and EDID audio */ 1852 /* TODO: Add check if ASIC support and EDID audio */
1853 if (!stream->sink->converter_disable_audio && 1853 if (!stream->converter_disable_audio &&
1854 dc_is_audio_capable_signal(pipe_ctx->stream->signal) && 1854 dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
1855 stream->audio_info.mode_count) { 1855 stream->audio_info.mode_count) {
1856 pipe_ctx->stream_res.audio = find_first_free_audio( 1856 pipe_ctx->stream_res.audio = find_first_free_audio(
@@ -2112,7 +2112,7 @@ static void set_avi_info_frame(
2112 itc = true; 2112 itc = true;
2113 itc_value = 1; 2113 itc_value = 1;
2114 2114
2115 support = stream->sink->edid_caps.content_support; 2115 support = stream->content_support;
2116 2116
2117 if (itc) { 2117 if (itc) {
2118 if (!support.bits.valid_content_type) { 2118 if (!support.bits.valid_content_type) {
@@ -2151,8 +2151,8 @@ static void set_avi_info_frame(
2151 2151
2152 /* TODO : We should handle YCC quantization */ 2152 /* TODO : We should handle YCC quantization */
2153 /* but we do not have matrix calculation */ 2153 /* but we do not have matrix calculation */
2154 if (stream->sink->edid_caps.qs_bit == 1 && 2154 if (stream->qs_bit == 1 &&
2155 stream->sink->edid_caps.qy_bit == 1) { 2155 stream->qy_bit == 1) {
2156 if (color_space == COLOR_SPACE_SRGB || 2156 if (color_space == COLOR_SPACE_SRGB ||
2157 color_space == COLOR_SPACE_2020_RGB_FULLRANGE) { 2157 color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
2158 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE; 2158 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
@@ -2596,7 +2596,7 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
2596enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream) 2596enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
2597{ 2597{
2598 struct dc *core_dc = dc; 2598 struct dc *core_dc = dc;
2599 struct dc_link *link = stream->sink->link; 2599 struct dc_link *link = stream->link;
2600 struct timing_generator *tg = core_dc->res_pool->timing_generators[0]; 2600 struct timing_generator *tg = core_dc->res_pool->timing_generators[0];
2601 enum dc_status res = DC_OK; 2601 enum dc_status res = DC_OK;
2602 2602
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 66e5c4623a49..996298c35f42 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -35,20 +35,17 @@
35/******************************************************************************* 35/*******************************************************************************
36 * Private functions 36 * Private functions
37 ******************************************************************************/ 37 ******************************************************************************/
38void update_stream_signal(struct dc_stream_state *stream) 38void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
39{ 39{
40 40 if (sink->sink_signal == SIGNAL_TYPE_NONE)
41 struct dc_sink *dc_sink = stream->sink; 41 stream->signal = stream->link->connector_signal;
42
43 if (dc_sink->sink_signal == SIGNAL_TYPE_NONE)
44 stream->signal = stream->sink->link->connector_signal;
45 else 42 else
46 stream->signal = dc_sink->sink_signal; 43 stream->signal = sink->sink_signal;
47 44
48 if (dc_is_dvi_signal(stream->signal)) { 45 if (dc_is_dvi_signal(stream->signal)) {
49 if (stream->ctx->dc->caps.dual_link_dvi && 46 if (stream->ctx->dc->caps.dual_link_dvi &&
50 stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK && 47 (stream->timing.pix_clk_100hz / 10) > TMDS_MAX_PIXEL_CLOCK &&
51 stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) 48 sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
52 stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK; 49 stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
53 else 50 else
54 stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 51 stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
@@ -61,10 +58,15 @@ static void construct(struct dc_stream_state *stream,
61 uint32_t i = 0; 58 uint32_t i = 0;
62 59
63 stream->sink = dc_sink_data; 60 stream->sink = dc_sink_data;
64 stream->ctx = stream->sink->ctx;
65
66 dc_sink_retain(dc_sink_data); 61 dc_sink_retain(dc_sink_data);
67 62
63 stream->ctx = dc_sink_data->ctx;
64 stream->link = dc_sink_data->link;
65 stream->sink_patches = dc_sink_data->edid_caps.panel_patch;
66 stream->converter_disable_audio = dc_sink_data->converter_disable_audio;
67 stream->qs_bit = dc_sink_data->edid_caps.qs_bit;
68 stream->qy_bit = dc_sink_data->edid_caps.qy_bit;
69
68 /* Copy audio modes */ 70 /* Copy audio modes */
69 /* TODO - Remove this translation */ 71 /* TODO - Remove this translation */
70 for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++) 72 for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++)
@@ -100,11 +102,14 @@ static void construct(struct dc_stream_state *stream,
100 /* EDID CAP translation for HDMI 2.0 */ 102 /* EDID CAP translation for HDMI 2.0 */
101 stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble; 103 stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
102 104
103 update_stream_signal(stream); 105 update_stream_signal(stream, dc_sink_data);
104 106
105 stream->out_transfer_func = dc_create_transfer_func(); 107 stream->out_transfer_func = dc_create_transfer_func();
106 stream->out_transfer_func->type = TF_TYPE_BYPASS; 108 stream->out_transfer_func->type = TF_TYPE_BYPASS;
107 stream->out_transfer_func->ctx = stream->ctx; 109 stream->out_transfer_func->ctx = stream->ctx;
110
111 stream->stream_id = stream->ctx->dc_stream_id_count;
112 stream->ctx->dc_stream_id_count++;
108} 113}
109 114
110static void destruct(struct dc_stream_state *stream) 115static void destruct(struct dc_stream_state *stream)
@@ -155,21 +160,43 @@ struct dc_stream_state *dc_create_stream_for_sink(
155 return stream; 160 return stream;
156} 161}
157 162
158struct dc_stream_status *dc_stream_get_status( 163/**
164 * dc_stream_get_status_from_state - Get stream status from given dc state
165 * @state: DC state to find the stream status in
166 * @stream: The stream to get the stream status for
167 *
168 * The given stream is expected to exist in the given dc state. Otherwise, NULL
169 * will be returned.
170 */
171struct dc_stream_status *dc_stream_get_status_from_state(
172 struct dc_state *state,
159 struct dc_stream_state *stream) 173 struct dc_stream_state *stream)
160{ 174{
161 uint8_t i; 175 uint8_t i;
162 struct dc *dc = stream->ctx->dc;
163 176
164 for (i = 0; i < dc->current_state->stream_count; i++) { 177 for (i = 0; i < state->stream_count; i++) {
165 if (stream == dc->current_state->streams[i]) 178 if (stream == state->streams[i])
166 return &dc->current_state->stream_status[i]; 179 return &state->stream_status[i];
167 } 180 }
168 181
169 return NULL; 182 return NULL;
170} 183}
171 184
172/** 185/**
186 * dc_stream_get_status() - Get current stream status of the given stream state
187 * @stream: The stream to get the stream status for.
188 *
189 * The given stream is expected to exist in dc->current_state. Otherwise, NULL
190 * will be returned.
191 */
192struct dc_stream_status *dc_stream_get_status(
193 struct dc_stream_state *stream)
194{
195 struct dc *dc = stream->ctx->dc;
196 return dc_stream_get_status_from_state(dc->current_state, stream);
197}
198
199/**
173 * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address 200 * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
174 */ 201 */
175bool dc_stream_set_cursor_attributes( 202bool dc_stream_set_cursor_attributes(
@@ -334,16 +361,12 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
334 stream->output_color_space); 361 stream->output_color_space);
335 DC_LOG_DC( 362 DC_LOG_DC(
336 "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n", 363 "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
337 stream->timing.pix_clk_khz, 364 stream->timing.pix_clk_100hz / 10,
338 stream->timing.h_total, 365 stream->timing.h_total,
339 stream->timing.v_total, 366 stream->timing.v_total,
340 stream->timing.pixel_encoding, 367 stream->timing.pixel_encoding,
341 stream->timing.display_color_depth); 368 stream->timing.display_color_depth);
342 DC_LOG_DC( 369 DC_LOG_DC(
343 "\tsink name: %s, serial: %d\n",
344 stream->sink->edid_caps.display_name,
345 stream->sink->edid_caps.serial_number);
346 DC_LOG_DC(
347 "\tlink: %d\n", 370 "\tlink: %d\n",
348 stream->sink->link->link_index); 371 stream->link->link_index);
349} 372}
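
update_stream_signal() above now takes the sink explicitly and keeps the dual-link DVI decision, only restated in 100 Hz units. A reduced sketch of that decision with the dc structures flattened into plain parameters — the TMDS_MAX_PIXEL_CLOCK value below is an assumption for the example, not quoted from the headers:

#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

#define TMDS_MAX_PIXEL_CLOCK 165000 /* kHz; assumed single-link DVI limit */

enum dvi_signal { DVI_SINGLE_LINK, DVI_DUAL_LINK };

/* Mirrors the branch in update_stream_signal(): dual-link only when the
 * ASIC supports it, the clock exceeds the single-link limit, and the
 * sink is not forced to single-link. */
static enum dvi_signal pick_dvi_signal(uint32_t pix_clk_100hz, bool dual_link_ok)
{
	if (dual_link_ok && pix_clk_100hz / 10 > TMDS_MAX_PIXEL_CLOCK)
		return DVI_DUAL_LINK;
	return DVI_SINGLE_LINK;
}

int main(void)
{
	assert(pick_dvi_signal(1485000, true) == DVI_SINGLE_LINK); /* 148.5 MHz */
	assert(pick_dvi_signal(2970000, true) == DVI_DUAL_LINK);   /* 297 MHz */
	return 0;
}
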
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index c60c9b4c3075..ee6bd50f60b8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -40,11 +40,14 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
40 plane_state->ctx = ctx; 40 plane_state->ctx = ctx;
41 41
42 plane_state->gamma_correction = dc_create_gamma(); 42 plane_state->gamma_correction = dc_create_gamma();
43 plane_state->gamma_correction->is_identity = true; 43 if (plane_state->gamma_correction != NULL)
44 plane_state->gamma_correction->is_identity = true;
44 45
45 plane_state->in_transfer_func = dc_create_transfer_func(); 46 plane_state->in_transfer_func = dc_create_transfer_func();
46 plane_state->in_transfer_func->type = TF_TYPE_BYPASS; 47 if (plane_state->in_transfer_func != NULL) {
47 plane_state->in_transfer_func->ctx = ctx; 48 plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
49 plane_state->in_transfer_func->ctx = ctx;
50 }
48} 51}
49 52
50static void destruct(struct dc_plane_state *plane_state) 53static void destruct(struct dc_plane_state *plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
new file mode 100644
index 000000000000..e54b8ac339b2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -0,0 +1,127 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "vm_helper.h"
27
28static void mark_vmid_used(struct vm_helper *vm_helper, unsigned int pos, uint8_t hubp_idx)
29{
30 struct vmid_usage *vmids = &vm_helper->hubp_vmid_usage[hubp_idx];
31
32 vmids->vmid_usage[0] = vmids->vmid_usage[1];
33 vmids->vmid_usage[1] = 1 << pos;
34}
35
36static void add_ptb_to_table(struct vm_helper *vm_helper, unsigned int vmid, uint64_t ptb)
37{
38 vm_helper->ptb_assigned_to_vmid[vmid] = ptb;
39 vm_helper->num_vmids_available--;
40}
41
42static void clear_entry_from_vmid_table(struct vm_helper *vm_helper, unsigned int vmid)
43{
44 vm_helper->ptb_assigned_to_vmid[vmid] = 0;
45 vm_helper->num_vmids_available++;
46}
47
48static void evict_vmids(struct vm_helper *vm_helper)
49{
50 int i;
51 uint16_t ord = 0;
52
53 for (i = 0; i < vm_helper->num_vmid; i++)
54 ord |= vm_helper->hubp_vmid_usage[i].vmid_usage[0] | vm_helper->hubp_vmid_usage[i].vmid_usage[1];
55
56 // At this point any positions with value 0 are unused vmids, evict them
57 for (i = 1; i < vm_helper->num_vmid; i++) {
58 if (!(ord & (1u << i)))
59 clear_entry_from_vmid_table(vm_helper, i);
60 }
61}
62
63// Return value of -1 indicates vmid table uninitialized or ptb does not exist in the table
64static int get_existing_vmid_for_ptb(struct vm_helper *vm_helper, uint64_t ptb)
65{
66 int i;
67
68 for (i = 0; i < vm_helper->num_vmid; i++) {
69 if (vm_helper->ptb_assigned_to_vmid[i] == ptb)
70 return i;
71 }
72
73 return -1;
74}
75
76// Expected to be called only when there's an available vmid
77static int get_next_available_vmid(struct vm_helper *vm_helper)
78{
79 int i;
80
81 for (i = 1; i < vm_helper->num_vmid; i++) {
82 if (vm_helper->ptb_assigned_to_vmid[i] == 0)
83 return i;
84 }
85
86 return -1;
87}
88
89uint8_t get_vmid_for_ptb(struct vm_helper *vm_helper, int64_t ptb, uint8_t hubp_idx)
90{
91 unsigned int vmid = 0;
92 int vmid_exists = -1;
93
94 // Physical address gets vmid 0
95 if (ptb == 0)
96 return 0;
97
98 vmid_exists = get_existing_vmid_for_ptb(vm_helper, ptb);
99
100 if (vmid_exists != -1) {
101 mark_vmid_used(vm_helper, vmid_exists, hubp_idx);
102 vmid = vmid_exists;
103 } else {
104 if (vm_helper->num_vmids_available == 0)
105 evict_vmids(vm_helper);
106
107 vmid = get_next_available_vmid(vm_helper);
108 mark_vmid_used(vm_helper, vmid, hubp_idx);
109 add_ptb_to_table(vm_helper, vmid, ptb);
110 }
111
112 return vmid;
113}
114
115struct vm_helper init_vm_helper(unsigned int num_vmid, unsigned int num_hubp)
116{
117 static uint64_t ptb_assigned_to_vmid[MAX_VMID];
118 static struct vmid_usage hubp_vmid_usage[MAX_HUBP];
119
120 return (struct vm_helper){
121 .num_vmid = num_vmid,
122 .num_hubp = num_hubp,
123 .num_vmids_available = num_vmid - 1,
124 .ptb_assigned_to_vmid = ptb_assigned_to_vmid,
125 .hubp_vmid_usage = hubp_vmid_usage
126 };
127}
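
The new file above is a small allocator that maps page-table-base (PTB) addresses to hardware VMIDs: VMID 0 is reserved for physical addressing, a PTB already in the table reuses its VMID, and stale entries are evicted once the table fills. A hedged usage sketch, assuming the vm_helper.h declarations match the definitions in this file:

/* Hypothetical caller compiled against vm_helper.h. */
#include <stdint.h>
#include <stdio.h>
#include "vm_helper.h"

int main(void)
{
	/* Example sizes: 16 VMIDs and 4 HUBPs; real values come from the ASIC. */
	struct vm_helper helper = init_vm_helper(16, 4);

	int64_t ptb = 0x100000000LL; /* a page-table base address */
	uint8_t vmid = get_vmid_for_ptb(&helper, ptb, 0 /* hubp_idx */);

	/* A second request for the same PTB reuses the cached VMID... */
	printf("vmid=%u reuse=%u\n", (unsigned int)vmid,
	       (unsigned int)get_vmid_for_ptb(&helper, ptb, 0));

	/* ...and ptb == 0 (physical addressing) always maps to VMID 0. */
	printf("phys vmid=%u\n", (unsigned int)get_vmid_for_ptb(&helper, 0, 0));
	return 0;
}
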
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 4b5bbb13ce7f..8391bc39b7a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
39#include "inc/hw/dmcu.h" 39#include "inc/hw/dmcu.h"
40#include "dml/display_mode_lib.h" 40#include "dml/display_mode_lib.h"
41 41
42#define DC_VER "3.2.08" 42#define DC_VER "3.2.15"
43 43
44#define MAX_SURFACES 3 44#define MAX_SURFACES 3
45#define MAX_STREAMS 6 45#define MAX_STREAMS 6
@@ -255,6 +255,7 @@ struct dc_debug_options {
255 bool scl_reset_length10; 255 bool scl_reset_length10;
256 bool hdmi20_disable; 256 bool hdmi20_disable;
257 bool skip_detection_link_training; 257 bool skip_detection_link_training;
258 unsigned int force_fclk_khz;
258}; 259};
259 260
260struct dc_debug_data { 261struct dc_debug_data {
@@ -339,8 +340,13 @@ struct dc_init_data {
339 uint32_t log_mask; 340 uint32_t log_mask;
340}; 341};
341 342
342struct dc *dc_create(const struct dc_init_data *init_params); 343struct dc_callback_init {
344 uint8_t reserved;
345};
343 346
347struct dc *dc_create(const struct dc_init_data *init_params);
348void dc_init_callbacks(struct dc *dc,
349 const struct dc_callback_init *init_params);
344void dc_destroy(struct dc **dc); 350void dc_destroy(struct dc **dc);
345 351
346/******************************************************************************* 352/*******************************************************************************
@@ -440,6 +446,7 @@ union surface_update_flags {
440 uint32_t coeff_reduction_change:1; 446 uint32_t coeff_reduction_change:1;
441 uint32_t output_tf_change:1; 447 uint32_t output_tf_change:1;
442 uint32_t pixel_format_change:1; 448 uint32_t pixel_format_change:1;
449 uint32_t plane_size_change:1;
443 450
444 /* Full updates */ 451 /* Full updates */
445 uint32_t new_plane:1; 452 uint32_t new_plane:1;
@@ -652,6 +659,7 @@ struct dpcd_caps {
652 int8_t branch_dev_name[6]; 659 int8_t branch_dev_name[6];
653 int8_t branch_hw_revision; 660 int8_t branch_hw_revision;
654 int8_t branch_fw_revision[2]; 661 int8_t branch_fw_revision[2];
662 uint8_t link_rate_set;
655 663
656 bool allow_invalid_MSA_timing_param; 664 bool allow_invalid_MSA_timing_param;
657 bool panel_mode_edp; 665 bool panel_mode_edp;
@@ -742,6 +750,9 @@ void dc_set_power_state(
742 struct dc *dc, 750 struct dc *dc,
743 enum dc_acpi_cm_power_state power_state); 751 enum dc_acpi_cm_power_state power_state);
744void dc_resume(struct dc *dc); 752void dc_resume(struct dc *dc);
753unsigned int dc_get_current_backlight_pwm(struct dc *dc);
754unsigned int dc_get_target_backlight_pwm(struct dc *dc);
755
745bool dc_is_dmcu_initialized(struct dc *dc); 756bool dc_is_dmcu_initialized(struct dc *dc);
746 757
747#endif /* DC_INTERFACE_H_ */ 758#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index a8b3cedf9431..78c3b300ec45 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -86,10 +86,6 @@ struct dc_vbios_funcs {
86 86
87 bool (*is_accelerated_mode)( 87 bool (*is_accelerated_mode)(
88 struct dc_bios *bios); 88 struct dc_bios *bios);
89 bool (*is_active_display)(
90 struct dc_bios *bios,
91 enum signal_type signal,
92 const struct connector_device_tag_info *device_tag);
93 void (*set_scratch_critical_state)( 89 void (*set_scratch_critical_state)(
94 struct dc_bios *bios, 90 struct dc_bios *bios,
95 bool state); 91 bool state);
@@ -125,10 +121,6 @@ struct dc_vbios_funcs {
125 enum bp_result (*program_crtc_timing)( 121 enum bp_result (*program_crtc_timing)(
126 struct dc_bios *bios, 122 struct dc_bios *bios,
127 struct bp_hw_crtc_timing_parameters *bp_params); 123 struct bp_hw_crtc_timing_parameters *bp_params);
128
129 enum bp_result (*crtc_source_select)(
130 struct dc_bios *bios,
131 struct bp_crtc_source_select *bp_params);
132 enum bp_result (*program_display_engine_pll)( 124 enum bp_result (*program_display_engine_pll)(
133 struct dc_bios *bios, 125 struct dc_bios *bios,
134 struct bp_pixel_clock_parameters *bp_params); 126 struct bp_pixel_clock_parameters *bp_params);
@@ -145,7 +137,6 @@ struct dc_vbios_funcs {
145}; 137};
146 138
147struct bios_registers { 139struct bios_registers {
148 uint32_t BIOS_SCRATCH_0;
149 uint32_t BIOS_SCRATCH_3; 140 uint32_t BIOS_SCRATCH_3;
150 uint32_t BIOS_SCRATCH_6; 141 uint32_t BIOS_SCRATCH_6;
151}; 142};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index da93ab43f2d8..d4eab33c453b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -46,11 +46,14 @@ enum dc_lane_count {
46 */ 46 */
47enum dc_link_rate { 47enum dc_link_rate {
48 LINK_RATE_UNKNOWN = 0, 48 LINK_RATE_UNKNOWN = 0,
49 LINK_RATE_LOW = 0x06, 49 LINK_RATE_LOW = 0x06, // Rate_1 (RBR) - 1.62 Gbps/Lane
50 LINK_RATE_HIGH = 0x0A, 50 LINK_RATE_RATE_2 = 0x08, // Rate_2 - 2.16 Gbps/Lane
51 LINK_RATE_RBR2 = 0x0C, 51 LINK_RATE_RATE_3 = 0x09, // Rate_3 - 2.43 Gbps/Lane
52 LINK_RATE_HIGH2 = 0x14, 52 LINK_RATE_HIGH = 0x0A, // Rate_4 (HBR) - 2.70 Gbps/Lane
53 LINK_RATE_HIGH3 = 0x1E 53 LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2)- 3.24 Gbps/Lane
54 LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane
55 LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2)- 5.40 Gbps/Lane
56 LINK_RATE_HIGH3 = 0x1E // Rate_8 (HBR3)- 8.10 Gbps/Lane
54}; 57};
55 58
56enum dc_link_spread { 59enum dc_link_spread {
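
The expanded enum follows the DPCD LINK_BW_SET encoding, where each unit of the code is 0.27 Gbps per lane: 0x0A x 0.27 = 2.70 Gbps and 0x1E x 0.27 = 8.10 Gbps, matching the comments above. A one-line sketch of that arithmetic:

/* Per-lane bandwidth in Mbps from a dc_link_rate code: each unit of
 * the code is 270 Mbps (0.27 Gbps), per the DPCD LINK_BW_SET convention.
 */
static inline unsigned int link_rate_to_mbps_per_lane(enum dc_link_rate rate)
{
	return (unsigned int)rate * 270;
}
/* link_rate_to_mbps_per_lane(LINK_RATE_HIGH2) == 5400 (HBR2, 5.40 Gbps) */
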
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 4842d2378bbf..597d38393379 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -29,31 +29,59 @@
29#include "dm_services.h" 29#include "dm_services.h"
30#include <stdarg.h> 30#include <stdarg.h>
31 31
32struct dc_reg_value_masks {
33 uint32_t value;
34 uint32_t mask;
35};
36
37struct dc_reg_sequence {
38 uint32_t addr;
39 struct dc_reg_value_masks value_masks;
40};
41
42static inline void set_reg_field_value_masks(
43 struct dc_reg_value_masks *field_value_mask,
44 uint32_t value,
45 uint32_t mask,
46 uint8_t shift)
47{
48 ASSERT(mask != 0);
49
50 field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
51 field_value_mask->mask = field_value_mask->mask | mask;
52}
53
32uint32_t generic_reg_update_ex(const struct dc_context *ctx, 54uint32_t generic_reg_update_ex(const struct dc_context *ctx,
33 uint32_t addr, uint32_t reg_val, int n, 55 uint32_t addr, uint32_t reg_val, int n,
34 uint8_t shift1, uint32_t mask1, uint32_t field_value1, 56 uint8_t shift1, uint32_t mask1, uint32_t field_value1,
35 ...) 57 ...)
36{ 58{
59 struct dc_reg_value_masks field_value_mask = {0};
37 uint32_t shift, mask, field_value; 60 uint32_t shift, mask, field_value;
38 int i = 1; 61 int i = 1;
39 62
40 va_list ap; 63 va_list ap;
41 va_start(ap, field_value1); 64 va_start(ap, field_value1);
42 65
 43 reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1); 66 /* gather all field values/masks being updated in this register */
67 set_reg_field_value_masks(&field_value_mask,
68 field_value1, mask1, shift1);
44 69
45 while (i < n) { 70 while (i < n) {
46 shift = va_arg(ap, uint32_t); 71 shift = va_arg(ap, uint32_t);
47 mask = va_arg(ap, uint32_t); 72 mask = va_arg(ap, uint32_t);
48 field_value = va_arg(ap, uint32_t); 73 field_value = va_arg(ap, uint32_t);
49 74
50 reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift); 75 set_reg_field_value_masks(&field_value_mask,
76 field_value, mask, shift);
51 i++; 77 i++;
52 } 78 }
53
54 dm_write_reg(ctx, addr, reg_val);
55 va_end(ap); 79 va_end(ap);
56 80
81
82 /* mmio write directly */
83 reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
84 dm_write_reg(ctx, addr, reg_val);
57 return reg_val; 85 return reg_val;
58} 86}
59 87
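
The generic_reg_update_ex() rewrite above folds every shift/mask/value triple into one accumulated value/mask pair and issues a single register write at the end, rather than re-deriving reg_val per field. The same read-modify-write folding, reduced to a standalone sketch:

#include <stdint.h>

/* Accumulate several field updates, then apply them in one write,
 * mirroring set_reg_field_value_masks() plus the final merge above.
 */
struct rmw_acc { uint32_t value, mask; };

static void rmw_add_field(struct rmw_acc *a, uint32_t value,
			  uint32_t mask, uint8_t shift)
{
	a->value = (a->value & ~mask) | (mask & (value << shift));
	a->mask |= mask;
}

static uint32_t rmw_apply(uint32_t reg_val, const struct rmw_acc *a)
{
	return (reg_val & ~a->mask) | a->value; /* single MMIO write here */
}
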
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index e72fce4eca65..da55d623647a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -97,6 +97,8 @@ struct dc_plane_address {
97 union large_integer chroma_dcc_const_color; 97 union large_integer chroma_dcc_const_color;
98 } video_progressive; 98 } video_progressive;
99 }; 99 };
100
101 union large_integer page_table_base;
100}; 102};
101 103
102struct dc_size { 104struct dc_size {
@@ -730,7 +732,7 @@ struct dc_crtc_timing {
730 uint32_t v_front_porch; 732 uint32_t v_front_porch;
731 uint32_t v_sync_width; 733 uint32_t v_sync_width;
732 734
733 uint32_t pix_clk_khz; 735 uint32_t pix_clk_100hz;
734 736
735 uint32_t vic; 737 uint32_t vic;
736 uint32_t hdmi_vic; 738 uint32_t hdmi_vic;
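
pix_clk_khz becomes pix_clk_100hz throughout DC, buying one extra decimal digit of precision for fractional rates (e.g. 148.5 MHz / 1.001). All conversions in the later clk_mgr hunks are plain factors of ten, as in this trivial sketch:

/* 148,500 kHz == 1,485,000 units of 100 Hz; conversions are factors of 10 */
static inline uint32_t khz_to_100hz(uint32_t khz)       { return khz * 10; }
static inline uint32_t hz100_to_khz(uint32_t clk_100hz) { return clk_100hz / 10; }
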
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 29f19d57ff7a..f249ff9be2a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -110,6 +110,7 @@ struct dc_link {
110 union ddi_channel_mapping ddi_channel_mapping; 110 union ddi_channel_mapping ddi_channel_mapping;
111 struct connector_device_tag_info device_tag; 111 struct connector_device_tag_info device_tag;
112 struct dpcd_caps dpcd_caps; 112 struct dpcd_caps dpcd_caps;
113 uint32_t dongle_max_pix_clk;
113 unsigned short chip_caps; 114 unsigned short chip_caps;
114 unsigned int dpcd_sink_count; 115 unsigned int dpcd_sink_count;
115 enum edp_revision edp_revision; 116 enum edp_revision edp_revision;
@@ -146,8 +147,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
146 */ 147 */
147bool dc_link_set_backlight_level(const struct dc_link *dc_link, 148bool dc_link_set_backlight_level(const struct dc_link *dc_link,
148 uint32_t backlight_pwm_u16_16, 149 uint32_t backlight_pwm_u16_16,
149 uint32_t frame_ramp, 150 uint32_t frame_ramp);
150 const struct dc_stream_state *stream);
151 151
152int dc_link_get_backlight_level(const struct dc_link *dc_link); 152int dc_link_get_backlight_level(const struct dc_link *dc_link);
153 153
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index be34d638e15d..7bb1da18c1ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -38,11 +38,6 @@ struct dc_stream_status {
38 int stream_enc_inst; 38 int stream_enc_inst;
39 int plane_count; 39 int plane_count;
40 struct dc_plane_state *plane_states[MAX_SURFACE_NUM]; 40 struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
41
42 /*
43 * link this stream passes through
44 */
45 struct dc_link *link;
46}; 41};
47 42
 48// TODO: References to this need to be removed... 43// TODO: References to this need to be removed...
@@ -51,7 +46,13 @@ struct freesync_context {
51}; 46};
52 47
53struct dc_stream_state { 48struct dc_stream_state {
 49 // sink is deprecated; new code should not reference
50 // this pointer
54 struct dc_sink *sink; 51 struct dc_sink *sink;
52
53 struct dc_link *link;
54 struct dc_panel_patch sink_patches;
55 union display_content_support content_support;
55 struct dc_crtc_timing timing; 56 struct dc_crtc_timing timing;
56 struct dc_crtc_timing_adjust adjust; 57 struct dc_crtc_timing_adjust adjust;
57 struct dc_info_packet vrr_infopacket; 58 struct dc_info_packet vrr_infopacket;
@@ -80,6 +81,9 @@ struct dc_stream_state {
80 enum view_3d_format view_format; 81 enum view_3d_format view_format;
81 82
82 bool ignore_msa_timing_param; 83 bool ignore_msa_timing_param;
84 bool converter_disable_audio;
85 uint8_t qs_bit;
86 uint8_t qy_bit;
83 87
84 unsigned long long periodic_fn_vsync_delta; 88 unsigned long long periodic_fn_vsync_delta;
85 89
@@ -91,7 +95,6 @@ struct dc_stream_state {
91 95
92 /* DMCU info */ 96 /* DMCU info */
93 unsigned int abm_level; 97 unsigned int abm_level;
94 unsigned int bl_pwm_level;
95 98
96 /* from core_stream struct */ 99 /* from core_stream struct */
97 struct dc_context *ctx; 100 struct dc_context *ctx;
@@ -105,6 +108,8 @@ struct dc_stream_state {
105 bool dpms_off; 108 bool dpms_off;
106 bool apply_edp_fast_boot_optimization; 109 bool apply_edp_fast_boot_optimization;
107 110
111 void *dm_stream_context;
112
108 struct dc_cursor_attributes cursor_attributes; 113 struct dc_cursor_attributes cursor_attributes;
109 struct dc_cursor_position cursor_position; 114 struct dc_cursor_position cursor_position;
110 uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode 115 uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
@@ -117,6 +122,18 @@ struct dc_stream_state {
117 /* Computed state bits */ 122 /* Computed state bits */
118 bool mode_changed : 1; 123 bool mode_changed : 1;
119 124
125 /* Output from DC when stream state is committed or altered
126 * DC may only access these values during:
127 * dc_commit_state, dc_commit_state_no_check, dc_commit_streams
128 * values may not change outside of those calls
129 */
130 struct {
131 // For interrupt management, some hardware instance
132 // offsets need to be exposed to DM
133 uint8_t otg_offset;
134 } out;
135
136 uint32_t stream_id;
120}; 137};
121 138
122struct dc_stream_update { 139struct dc_stream_update {
@@ -163,7 +180,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
163 int surface_count, 180 int surface_count,
164 struct dc_stream_state *stream, 181 struct dc_stream_state *stream,
165 struct dc_stream_update *stream_update, 182 struct dc_stream_update *stream_update,
166 struct dc_plane_state **plane_states,
167 struct dc_state *state); 183 struct dc_state *state);
168/* 184/*
169 * Log the current stream state. 185 * Log the current stream state.
@@ -256,11 +272,14 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
256 */ 272 */
257struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); 273struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
258 274
259void update_stream_signal(struct dc_stream_state *stream); 275void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
260 276
261void dc_stream_retain(struct dc_stream_state *dc_stream); 277void dc_stream_retain(struct dc_stream_state *dc_stream);
262void dc_stream_release(struct dc_stream_state *dc_stream); 278void dc_stream_release(struct dc_stream_state *dc_stream);
263 279
280struct dc_stream_status *dc_stream_get_status_from_state(
281 struct dc_state *state,
282 struct dc_stream_state *stream);
264struct dc_stream_status *dc_stream_get_status( 283struct dc_stream_status *dc_stream_get_status(
265 struct dc_stream_state *dc_stream); 284 struct dc_stream_state *dc_stream);
266 285
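
The stream now carries its link pointer directly, with sink kept only for legacy callers, so consumers migrate from stream->sink->link to stream->link (the dce_clk_mgr.c hunks later in this diff do exactly this). A before/after sketch, illustration only:

static struct dc_link *stream_link_sketch(struct dc_stream_state *stream)
{
	/* before: stream->sink->link (sink is now deprecated) */
	return stream->link; /* after: direct pointer added by this patch */
}
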
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 0b20ae23f169..56e7f3dab15a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -97,8 +97,8 @@ struct dc_context {
97 struct dc_bios *dc_bios; 97 struct dc_bios *dc_bios;
98 bool created_bios; 98 bool created_bios;
99 struct gpio_service *gpio_service; 99 struct gpio_service *gpio_service;
100 struct i2caux *i2caux;
101 uint32_t dc_sink_id_count; 100 uint32_t dc_sink_id_count;
101 uint32_t dc_stream_id_count;
102 uint64_t fbc_gpu_addr; 102 uint64_t fbc_gpu_addr;
103}; 103};
104 104
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index aaeb7faac0c4..adbb22224e1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include "dm_services.h" 26#include "dm_services.h"
27#include "core_types.h"
27#include "dce_aux.h" 28#include "dce_aux.h"
28#include "dce/dce_11_0_sh_mask.h" 29#include "dce/dce_11_0_sh_mask.h"
29 30
@@ -41,17 +42,17 @@
41 container_of((ptr), struct aux_engine_dce110, base) 42 container_of((ptr), struct aux_engine_dce110, base)
42 43
43#define FROM_ENGINE(ptr) \ 44#define FROM_ENGINE(ptr) \
44 FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base)) 45 FROM_AUX_ENGINE(container_of((ptr), struct dce_aux, base))
45 46
46#define FROM_AUX_ENGINE_ENGINE(ptr) \ 47#define FROM_AUX_ENGINE_ENGINE(ptr) \
47 container_of((ptr), struct aux_engine, base) 48 container_of((ptr), struct dce_aux, base)
48enum { 49enum {
49 AUX_INVALID_REPLY_RETRY_COUNTER = 1, 50 AUX_INVALID_REPLY_RETRY_COUNTER = 1,
50 AUX_TIMED_OUT_RETRY_COUNTER = 2, 51 AUX_TIMED_OUT_RETRY_COUNTER = 2,
51 AUX_DEFER_RETRY_COUNTER = 6 52 AUX_DEFER_RETRY_COUNTER = 6
52}; 53};
53static void release_engine( 54static void release_engine(
54 struct aux_engine *engine) 55 struct dce_aux *engine)
55{ 56{
56 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); 57 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
57 58
@@ -66,7 +67,7 @@ static void release_engine(
66#define DMCU_CAN_ACCESS_AUX 2 67#define DMCU_CAN_ACCESS_AUX 2
67 68
68static bool is_engine_available( 69static bool is_engine_available(
69 struct aux_engine *engine) 70 struct dce_aux *engine)
70{ 71{
71 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); 72 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
72 73
@@ -79,7 +80,7 @@ static bool is_engine_available(
79 return (field != DMCU_CAN_ACCESS_AUX); 80 return (field != DMCU_CAN_ACCESS_AUX);
80} 81}
81static bool acquire_engine( 82static bool acquire_engine(
82 struct aux_engine *engine) 83 struct dce_aux *engine)
83{ 84{
84 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); 85 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
85 86
@@ -155,7 +156,7 @@ static bool acquire_engine(
155 (0xFF & (address)) 156 (0xFF & (address))
156 157
157static void submit_channel_request( 158static void submit_channel_request(
158 struct aux_engine *engine, 159 struct dce_aux *engine,
159 struct aux_request_transaction_data *request) 160 struct aux_request_transaction_data *request)
160{ 161{
161 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); 162 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
@@ -247,7 +248,7 @@ static void submit_channel_request(
247 REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); 248 REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
248} 249}
249 250
250static int read_channel_reply(struct aux_engine *engine, uint32_t size, 251static int read_channel_reply(struct dce_aux *engine, uint32_t size,
251 uint8_t *buffer, uint8_t *reply_result, 252 uint8_t *buffer, uint8_t *reply_result,
252 uint32_t *sw_status) 253 uint32_t *sw_status)
253{ 254{
@@ -273,7 +274,8 @@ static int read_channel_reply(struct aux_engine *engine, uint32_t size,
273 274
274 REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32); 275 REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
275 reply_result_32 = reply_result_32 >> 4; 276 reply_result_32 = reply_result_32 >> 4;
276 *reply_result = (uint8_t)reply_result_32; 277 if (reply_result != NULL)
278 *reply_result = (uint8_t)reply_result_32;
277 279
278 if (reply_result_32 == 0) { /* ACK */ 280 if (reply_result_32 == 0) { /* ACK */
279 uint32_t i = 0; 281 uint32_t i = 0;
@@ -299,61 +301,8 @@ static int read_channel_reply(struct aux_engine *engine, uint32_t size,
299 return 0; 301 return 0;
300} 302}
301 303
302static void process_channel_reply(
303 struct aux_engine *engine,
304 struct aux_reply_transaction_data *reply)
305{
306 int bytes_replied;
307 uint8_t reply_result;
308 uint32_t sw_status;
309
310 bytes_replied = read_channel_reply(engine, reply->length, reply->data,
311 &reply_result, &sw_status);
312
313 /* in case HPD is LOW, exit AUX transaction */
314 if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
315 reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
316 return;
317 }
318
319 if (bytes_replied < 0) {
320 /* Need to handle an error case...
 321 * Hopefully, the upper layer won't call this function if
 322 * the number of bytes in the reply was 0, because an error
 323 * was surely asserted that should have been handled for the
 324 * hot plug case; this could happen
325 */
326 if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
327 reply->status = AUX_TRANSACTION_REPLY_INVALID;
328 ASSERT_CRITICAL(false);
329 return;
330 }
331 } else {
332
333 switch (reply_result) {
334 case 0: /* ACK */
335 reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
336 break;
337 case 1: /* NACK */
338 reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
339 break;
340 case 2: /* DEFER */
341 reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
342 break;
343 case 4: /* AUX ACK / I2C NACK */
344 reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
345 break;
346 case 8: /* AUX ACK / I2C DEFER */
347 reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
348 break;
349 default:
350 reply->status = AUX_TRANSACTION_REPLY_INVALID;
351 }
352 }
353}
354
355static enum aux_channel_operation_result get_channel_status( 304static enum aux_channel_operation_result get_channel_status(
356 struct aux_engine *engine, 305 struct dce_aux *engine,
357 uint8_t *returned_bytes) 306 uint8_t *returned_bytes)
358{ 307{
359 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); 308 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
@@ -414,469 +363,22 @@ static enum aux_channel_operation_result get_channel_status(
414 return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT; 363 return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
415 } 364 }
416} 365}
417static void process_read_reply(
418 struct aux_engine *engine,
419 struct read_command_context *ctx)
420{
421 engine->funcs->process_channel_reply(engine, &ctx->reply);
422
423 switch (ctx->reply.status) {
424 case AUX_TRANSACTION_REPLY_AUX_ACK:
425 ctx->defer_retry_aux = 0;
426 if (ctx->returned_byte > ctx->current_read_length) {
427 ctx->status =
428 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
429 ctx->operation_succeeded = false;
430 } else if (ctx->returned_byte < ctx->current_read_length) {
431 ctx->current_read_length -= ctx->returned_byte;
432
433 ctx->offset += ctx->returned_byte;
434
435 ++ctx->invalid_reply_retry_aux_on_ack;
436
437 if (ctx->invalid_reply_retry_aux_on_ack >
438 AUX_INVALID_REPLY_RETRY_COUNTER) {
439 ctx->status =
440 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
441 ctx->operation_succeeded = false;
442 }
443 } else {
444 ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
445 ctx->transaction_complete = true;
446 ctx->operation_succeeded = true;
447 }
448 break;
449 case AUX_TRANSACTION_REPLY_AUX_NACK:
450 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
451 ctx->operation_succeeded = false;
452 break;
453 case AUX_TRANSACTION_REPLY_AUX_DEFER:
454 ++ctx->defer_retry_aux;
455
456 if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
457 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
458 ctx->operation_succeeded = false;
459 }
460 break;
461 case AUX_TRANSACTION_REPLY_I2C_DEFER:
462 ctx->defer_retry_aux = 0;
463
464 ++ctx->defer_retry_i2c;
465
466 if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
467 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
468 ctx->operation_succeeded = false;
469 }
470 break;
471 case AUX_TRANSACTION_REPLY_HPD_DISCON:
472 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
473 ctx->operation_succeeded = false;
474 break;
475 default:
476 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
477 ctx->operation_succeeded = false;
478 }
479}
480static void process_read_request(
481 struct aux_engine *engine,
482 struct read_command_context *ctx)
483{
484 enum aux_channel_operation_result operation_result;
485 366
486 engine->funcs->submit_channel_request(engine, &ctx->request);
487
488 operation_result = engine->funcs->get_channel_status(
489 engine, &ctx->returned_byte);
490
491 switch (operation_result) {
492 case AUX_CHANNEL_OPERATION_SUCCEEDED:
493 if (ctx->returned_byte > ctx->current_read_length) {
494 ctx->status =
495 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
496 ctx->operation_succeeded = false;
497 } else {
498 ctx->timed_out_retry_aux = 0;
499 ctx->invalid_reply_retry_aux = 0;
500
501 ctx->reply.length = ctx->returned_byte;
502 ctx->reply.data = ctx->buffer;
503
504 process_read_reply(engine, ctx);
505 }
506 break;
507 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
508 ++ctx->invalid_reply_retry_aux;
509
510 if (ctx->invalid_reply_retry_aux >
511 AUX_INVALID_REPLY_RETRY_COUNTER) {
512 ctx->status =
513 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
514 ctx->operation_succeeded = false;
515 } else
516 udelay(400);
517 break;
518 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
519 ++ctx->timed_out_retry_aux;
520
521 if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
522 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
523 ctx->operation_succeeded = false;
524 } else {
525 /* DP 1.2a, table 2-58:
526 * "S3: AUX Request CMD PENDING:
527 * retry 3 times, with 400usec wait on each"
528 * The HW timeout is set to 550usec,
529 * so we should not wait here
530 */
531 }
532 break;
533 case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
534 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
535 ctx->operation_succeeded = false;
536 break;
537 default:
538 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
539 ctx->operation_succeeded = false;
540 }
541}
542static bool read_command(
543 struct aux_engine *engine,
544 struct i2caux_transaction_request *request,
545 bool middle_of_transaction)
546{
547 struct read_command_context ctx;
548
549 ctx.buffer = request->payload.data;
550 ctx.current_read_length = request->payload.length;
551 ctx.offset = 0;
552 ctx.timed_out_retry_aux = 0;
553 ctx.invalid_reply_retry_aux = 0;
554 ctx.defer_retry_aux = 0;
555 ctx.defer_retry_i2c = 0;
556 ctx.invalid_reply_retry_aux_on_ack = 0;
557 ctx.transaction_complete = false;
558 ctx.operation_succeeded = true;
559
560 if (request->payload.address_space ==
561 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
562 ctx.request.type = AUX_TRANSACTION_TYPE_DP;
563 ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
564 ctx.request.address = request->payload.address;
565 } else if (request->payload.address_space ==
566 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
567 ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
568 ctx.request.action = middle_of_transaction ?
569 I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
570 I2CAUX_TRANSACTION_ACTION_I2C_READ;
571 ctx.request.address = request->payload.address >> 1;
572 } else {
573 /* in DAL2, there was no return in such case */
574 BREAK_TO_DEBUGGER();
575 return false;
576 }
577
578 ctx.request.delay = 0;
579
580 do {
581 memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
582
583 ctx.request.data = ctx.buffer + ctx.offset;
584 ctx.request.length = ctx.current_read_length;
585
586 process_read_request(engine, &ctx);
587
588 request->status = ctx.status;
589
590 if (ctx.operation_succeeded && !ctx.transaction_complete)
591 if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
592 msleep(engine->delay);
593 } while (ctx.operation_succeeded && !ctx.transaction_complete);
594
595 if (request->payload.address_space ==
596 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
597 DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
598 request->payload.address,
599 request->payload.data[0],
600 ctx.operation_succeeded);
601 }
602
603 return ctx.operation_succeeded;
604}
605
606static void process_write_reply(
607 struct aux_engine *engine,
608 struct write_command_context *ctx)
609{
610 engine->funcs->process_channel_reply(engine, &ctx->reply);
611
612 switch (ctx->reply.status) {
613 case AUX_TRANSACTION_REPLY_AUX_ACK:
614 ctx->operation_succeeded = true;
615
616 if (ctx->returned_byte) {
617 ctx->request.action = ctx->mot ?
618 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
619 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
620
621 ctx->current_write_length = 0;
622
623 ++ctx->ack_m_retry;
624
625 if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
626 ctx->status =
627 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
628 ctx->operation_succeeded = false;
629 } else
630 udelay(300);
631 } else {
632 ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
633 ctx->defer_retry_aux = 0;
634 ctx->ack_m_retry = 0;
635 ctx->transaction_complete = true;
636 }
637 break;
638 case AUX_TRANSACTION_REPLY_AUX_NACK:
639 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
640 ctx->operation_succeeded = false;
641 break;
642 case AUX_TRANSACTION_REPLY_AUX_DEFER:
643 ++ctx->defer_retry_aux;
644
645 if (ctx->defer_retry_aux > ctx->max_defer_retry) {
646 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
647 ctx->operation_succeeded = false;
648 }
649 break;
650 case AUX_TRANSACTION_REPLY_I2C_DEFER:
651 ctx->defer_retry_aux = 0;
652 ctx->current_write_length = 0;
653
654 ctx->request.action = ctx->mot ?
655 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
656 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
657
658 ++ctx->defer_retry_i2c;
659
660 if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
661 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
662 ctx->operation_succeeded = false;
663 }
664 break;
665 case AUX_TRANSACTION_REPLY_HPD_DISCON:
666 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
667 ctx->operation_succeeded = false;
668 break;
669 default:
670 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
671 ctx->operation_succeeded = false;
672 }
673}
674static void process_write_request(
675 struct aux_engine *engine,
676 struct write_command_context *ctx)
677{
678 enum aux_channel_operation_result operation_result;
679
680 engine->funcs->submit_channel_request(engine, &ctx->request);
681
682 operation_result = engine->funcs->get_channel_status(
683 engine, &ctx->returned_byte);
684
685 switch (operation_result) {
686 case AUX_CHANNEL_OPERATION_SUCCEEDED:
687 ctx->timed_out_retry_aux = 0;
688 ctx->invalid_reply_retry_aux = 0;
689
690 ctx->reply.length = ctx->returned_byte;
691 ctx->reply.data = ctx->reply_data;
692
693 process_write_reply(engine, ctx);
694 break;
695 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
696 ++ctx->invalid_reply_retry_aux;
697
698 if (ctx->invalid_reply_retry_aux >
699 AUX_INVALID_REPLY_RETRY_COUNTER) {
700 ctx->status =
701 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
702 ctx->operation_succeeded = false;
703 } else
704 udelay(400);
705 break;
706 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
707 ++ctx->timed_out_retry_aux;
708
709 if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
710 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
711 ctx->operation_succeeded = false;
712 } else {
713 /* DP 1.2a, table 2-58:
714 * "S3: AUX Request CMD PENDING:
715 * retry 3 times, with 400usec wait on each"
716 * The HW timeout is set to 550usec,
717 * so we should not wait here
718 */
719 }
720 break;
721 case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
722 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
723 ctx->operation_succeeded = false;
724 break;
725 default:
726 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
727 ctx->operation_succeeded = false;
728 }
729}
730static bool write_command(
731 struct aux_engine *engine,
732 struct i2caux_transaction_request *request,
733 bool middle_of_transaction)
734{
735 struct write_command_context ctx;
736
737 ctx.mot = middle_of_transaction;
738 ctx.buffer = request->payload.data;
739 ctx.current_write_length = request->payload.length;
740 ctx.timed_out_retry_aux = 0;
741 ctx.invalid_reply_retry_aux = 0;
742 ctx.defer_retry_aux = 0;
743 ctx.defer_retry_i2c = 0;
744 ctx.ack_m_retry = 0;
745 ctx.transaction_complete = false;
746 ctx.operation_succeeded = true;
747
748 if (request->payload.address_space ==
749 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
750 ctx.request.type = AUX_TRANSACTION_TYPE_DP;
751 ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
752 ctx.request.address = request->payload.address;
753 } else if (request->payload.address_space ==
754 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
755 ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
756 ctx.request.action = middle_of_transaction ?
757 I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
758 I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
759 ctx.request.address = request->payload.address >> 1;
760 } else {
761 /* in DAL2, there was no return in such case */
762 BREAK_TO_DEBUGGER();
763 return false;
764 }
765
766 ctx.request.delay = 0;
767
768 ctx.max_defer_retry =
769 (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
770 engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
771
772 do {
773 ctx.request.data = ctx.buffer;
774 ctx.request.length = ctx.current_write_length;
775
776 process_write_request(engine, &ctx);
777
778 request->status = ctx.status;
779
780 if (ctx.operation_succeeded && !ctx.transaction_complete)
781 if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
782 msleep(engine->delay);
783 } while (ctx.operation_succeeded && !ctx.transaction_complete);
784
785 if (request->payload.address_space ==
786 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
787 DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
788 request->payload.address,
789 request->payload.data[0],
790 ctx.operation_succeeded);
791 }
792
793 return ctx.operation_succeeded;
794}
795static bool end_of_transaction_command(
796 struct aux_engine *engine,
797 struct i2caux_transaction_request *request)
798{
799 struct i2caux_transaction_request dummy_request;
800 uint8_t dummy_data;
801
802 /* [tcheng] We only need to send the stop (read with MOT = 0)
803 * for I2C-over-Aux, not native AUX
804 */
805
806 if (request->payload.address_space !=
807 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
808 return false;
809
810 dummy_request.operation = request->operation;
811 dummy_request.payload.address_space = request->payload.address_space;
812 dummy_request.payload.address = request->payload.address;
813
814 /*
815 * Add a dummy byte due to some receiver quirk
816 * where one byte is sent along with MOT = 0.
817 * Ideally this should be 0.
818 */
819
820 dummy_request.payload.length = 0;
821 dummy_request.payload.data = &dummy_data;
822
823 if (request->operation == I2CAUX_TRANSACTION_READ)
824 return read_command(engine, &dummy_request, false);
825 else
826 return write_command(engine, &dummy_request, false);
827
 828 /* according to Syed, DoDummyMOT is not needed now */
829}
830static bool submit_request(
831 struct aux_engine *engine,
832 struct i2caux_transaction_request *request,
833 bool middle_of_transaction)
834{
835
836 bool result;
837 bool mot_used = true;
838
839 switch (request->operation) {
840 case I2CAUX_TRANSACTION_READ:
841 result = read_command(engine, request, mot_used);
842 break;
843 case I2CAUX_TRANSACTION_WRITE:
844 result = write_command(engine, request, mot_used);
845 break;
846 default:
847 result = false;
848 }
849
850 /* [tcheng]
 851 * need to send stop for the last transaction to free up the AUX;
 852 * if the above command fails, this would be the last transaction
853 */
854
855 if (!middle_of_transaction || !result)
856 end_of_transaction_command(engine, request);
857
858 /* mask AUX interrupt */
859
860 return result;
861}
862enum i2caux_engine_type get_engine_type( 367enum i2caux_engine_type get_engine_type(
863 const struct aux_engine *engine) 368 const struct dce_aux *engine)
864{ 369{
865 return I2CAUX_ENGINE_TYPE_AUX; 370 return I2CAUX_ENGINE_TYPE_AUX;
866} 371}
867 372
868static bool acquire( 373static bool acquire(
869 struct aux_engine *engine, 374 struct dce_aux *engine,
870 struct ddc *ddc) 375 struct ddc *ddc)
871{ 376{
872 377
873 enum gpio_result result; 378 enum gpio_result result;
874 379
875 if (engine->funcs->is_engine_available) { 380 if (!is_engine_available(engine))
876 /*check whether SW could use the engine*/ 381 return false;
877 if (!engine->funcs->is_engine_available(engine))
878 return false;
879 }
880 382
881 result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, 383 result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
882 GPIO_DDC_CONFIG_TYPE_MODE_AUX); 384 GPIO_DDC_CONFIG_TYPE_MODE_AUX);
@@ -884,7 +386,7 @@ static bool acquire(
884 if (result != GPIO_RESULT_OK) 386 if (result != GPIO_RESULT_OK)
885 return false; 387 return false;
886 388
887 if (!engine->funcs->acquire_engine(engine)) { 389 if (!acquire_engine(engine)) {
888 dal_ddc_close(ddc); 390 dal_ddc_close(ddc);
889 return false; 391 return false;
890 } 392 }
@@ -894,21 +396,7 @@ static bool acquire(
894 return true; 396 return true;
895} 397}
896 398
897static const struct aux_engine_funcs aux_engine_funcs = { 399void dce110_engine_destroy(struct dce_aux **engine)
898 .acquire_engine = acquire_engine,
899 .submit_channel_request = submit_channel_request,
900 .process_channel_reply = process_channel_reply,
901 .read_channel_reply = read_channel_reply,
902 .get_channel_status = get_channel_status,
903 .is_engine_available = is_engine_available,
904 .release_engine = release_engine,
905 .destroy_engine = dce110_engine_destroy,
906 .submit_request = submit_request,
907 .get_engine_type = get_engine_type,
908 .acquire = acquire,
909};
910
911void dce110_engine_destroy(struct aux_engine **engine)
912{ 400{
913 401
914 struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine); 402 struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine);
@@ -917,7 +405,7 @@ void dce110_engine_destroy(struct aux_engine **engine)
917 *engine = NULL; 405 *engine = NULL;
918 406
919} 407}
920struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, 408struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
921 struct dc_context *ctx, 409 struct dc_context *ctx,
922 uint32_t inst, 410 uint32_t inst,
923 uint32_t timeout_period, 411 uint32_t timeout_period,
@@ -927,7 +415,6 @@ struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_eng
927 aux_engine110->base.ctx = ctx; 415 aux_engine110->base.ctx = ctx;
928 aux_engine110->base.delay = 0; 416 aux_engine110->base.delay = 0;
929 aux_engine110->base.max_defer_write_retry = 0; 417 aux_engine110->base.max_defer_write_retry = 0;
930 aux_engine110->base.funcs = &aux_engine_funcs;
931 aux_engine110->base.inst = inst; 418 aux_engine110->base.inst = inst;
932 aux_engine110->timeout_period = timeout_period; 419 aux_engine110->timeout_period = timeout_period;
933 aux_engine110->regs = regs; 420 aux_engine110->regs = regs;
@@ -935,3 +422,101 @@ struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_eng
935 return &aux_engine110->base; 422 return &aux_engine110->base;
936} 423}
937 424
425static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payload *payload)
426{
427 if (payload->i2c_over_aux) {
428 if (payload->write) {
429 if (payload->mot)
430 return I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
431 return I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
432 }
433 if (payload->mot)
434 return I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
435 return I2CAUX_TRANSACTION_ACTION_I2C_READ;
436 }
437 if (payload->write)
438 return I2CAUX_TRANSACTION_ACTION_DP_WRITE;
439 return I2CAUX_TRANSACTION_ACTION_DP_READ;
440}
441
442int dce_aux_transfer(struct ddc_service *ddc,
443 struct aux_payload *payload)
444{
445 struct ddc *ddc_pin = ddc->ddc_pin;
446 struct dce_aux *aux_engine;
447 enum aux_channel_operation_result operation_result;
448 struct aux_request_transaction_data aux_req;
449 struct aux_reply_transaction_data aux_rep;
450 uint8_t returned_bytes = 0;
451 int res = -1;
452 uint32_t status;
453
454 memset(&aux_req, 0, sizeof(aux_req));
455 memset(&aux_rep, 0, sizeof(aux_rep));
456
457 aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
458 acquire(aux_engine, ddc_pin);
459
460 if (payload->i2c_over_aux)
461 aux_req.type = AUX_TRANSACTION_TYPE_I2C;
462 else
463 aux_req.type = AUX_TRANSACTION_TYPE_DP;
464
465 aux_req.action = i2caux_action_from_payload(payload);
466
467 aux_req.address = payload->address;
468 aux_req.delay = payload->defer_delay * 10;
469 aux_req.length = payload->length;
470 aux_req.data = payload->data;
471
472 submit_channel_request(aux_engine, &aux_req);
473 operation_result = get_channel_status(aux_engine, &returned_bytes);
474
475 switch (operation_result) {
476 case AUX_CHANNEL_OPERATION_SUCCEEDED:
477 res = read_channel_reply(aux_engine, payload->length,
478 payload->data, payload->reply,
479 &status);
480 break;
481 case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
482 res = 0;
483 break;
484 case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
485 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
486 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
487 res = -1;
488 break;
489 }
490 release_engine(aux_engine);
491 return res;
492}
493
494#define AUX_RETRY_MAX 7
495
496bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
497 struct aux_payload *payload)
498{
499 int i, ret = 0;
500 uint8_t reply;
501 bool payload_reply = true;
502
503 if (!payload->reply) {
504 payload_reply = false;
505 payload->reply = &reply;
506 }
507
508 for (i = 0; i < AUX_RETRY_MAX; i++) {
509 ret = dce_aux_transfer(ddc, payload);
510
511 if (ret >= 0) {
512 if (*payload->reply == 0) {
513 if (!payload_reply)
514 payload->reply = NULL;
515 return true;
516 }
517 }
518
519 msleep(1);
520 }
521 return false;
522}
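
dce_aux_transfer_with_retries() wraps the raw transfer in up to AUX_RETRY_MAX attempts, substituting a scratch reply byte when the caller passes none and treating reply 0 (AUX ACK) as success. A hedged caller sketch for a one-byte native-AUX read; the address is illustrative and error handling is elided:

static bool dpcd_read_byte_sketch(struct ddc_service *ddc, uint8_t *out)
{
	struct aux_payload payload = {
		.i2c_over_aux = false,  /* native AUX transaction */
		.write        = false,  /* read */
		.mot          = false,
		.address      = 0x0000, /* DPCD_REV, illustrative */
		.data         = out,
		.length       = 1,
		.reply        = NULL,   /* helper supplies a scratch byte */
		.defer_delay  = 0,
	};

	return dce_aux_transfer_with_retries(ddc, &payload);
}
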
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index f7caab85dc80..d27f22c05e4b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -25,7 +25,9 @@
25 25
26#ifndef __DAL_AUX_ENGINE_DCE110_H__ 26#ifndef __DAL_AUX_ENGINE_DCE110_H__
27#define __DAL_AUX_ENGINE_DCE110_H__ 27#define __DAL_AUX_ENGINE_DCE110_H__
28#include "aux_engine.h" 28
29#include "i2caux_interface.h"
30#include "inc/hw/aux_engine.h"
29 31
30#define AUX_COMMON_REG_LIST(id)\ 32#define AUX_COMMON_REG_LIST(id)\
31 SRI(AUX_CONTROL, DP_AUX, id), \ 33 SRI(AUX_CONTROL, DP_AUX, id), \
@@ -75,8 +77,20 @@ enum { /* This is the timeout as defined in DP 1.2a,
75 */ 77 */
76 SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4 78 SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
77}; 79};
80
81struct dce_aux {
82 uint32_t inst;
83 struct ddc *ddc;
84 struct dc_context *ctx;
85 /* following values are expressed in milliseconds */
86 uint32_t delay;
87 uint32_t max_defer_write_retry;
88
89 bool acquire_reset;
90};
91
78struct aux_engine_dce110 { 92struct aux_engine_dce110 {
79 struct aux_engine base; 93 struct dce_aux base;
80 const struct dce110_aux_registers *regs; 94 const struct dce110_aux_registers *regs;
81 struct { 95 struct {
82 uint32_t aux_control; 96 uint32_t aux_control;
@@ -96,16 +110,22 @@ struct aux_engine_dce110_init_data {
96 const struct dce110_aux_registers *regs; 110 const struct dce110_aux_registers *regs;
97}; 111};
98 112
99struct aux_engine *dce110_aux_engine_construct( 113struct dce_aux *dce110_aux_engine_construct(
100 struct aux_engine_dce110 *aux_engine110, 114 struct aux_engine_dce110 *aux_engine110,
101 struct dc_context *ctx, 115 struct dc_context *ctx,
102 uint32_t inst, 116 uint32_t inst,
103 uint32_t timeout_period, 117 uint32_t timeout_period,
104 const struct dce110_aux_registers *regs); 118 const struct dce110_aux_registers *regs);
105 119
106void dce110_engine_destroy(struct aux_engine **engine); 120void dce110_engine_destroy(struct dce_aux **engine);
107 121
108bool dce110_aux_engine_acquire( 122bool dce110_aux_engine_acquire(
109 struct aux_engine *aux_engine, 123 struct dce_aux *aux_engine,
110 struct ddc *ddc); 124 struct ddc *ddc);
125
126int dce_aux_transfer(struct ddc_service *ddc,
127 struct aux_payload *cmd);
128
129bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
130 struct aux_payload *cmd);
111#endif 131#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index afd287f08bc9..3c52a4fc921d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -194,8 +194,8 @@ static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
194 if (pipe_ctx->top_pipe) 194 if (pipe_ctx->top_pipe)
195 continue; 195 continue;
196 196
197 if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) 197 if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 > max_pix_clk)
198 max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; 198 max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
199 199
200 /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS 200 /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS
201 * logic for HBR3 still needs Nominal (0.8V) on VDDC rail 201 * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
@@ -257,7 +257,7 @@ static int dce_set_clock(
257 clk_mgr_dce->dentist_vco_freq_khz / 64); 257 clk_mgr_dce->dentist_vco_freq_khz / 64);
258 258
259 /* Prepare to program display clock*/ 259 /* Prepare to program display clock*/
260 pxl_clk_params.target_pixel_clock = requested_clk_khz; 260 pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
261 pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; 261 pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
262 262
263 if (clk_mgr_dce->dfs_bypass_active) 263 if (clk_mgr_dce->dfs_bypass_active)
@@ -450,6 +450,42 @@ void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
450 } 450 }
451} 451}
452 452
453/**
454 * dce121_clock_patch_xgmi_ss_info() - Save XGMI spread spectrum info
455 * @clk_mgr: clock manager base structure
456 *
457 * Reads from VBIOS the XGMI spread spectrum info and saves it within
458 * the dce clock manager. This operation will overwrite the existing dprefclk
459 * SS values if the vBIOS query succeeds. Otherwise, it does nothing. It also
460 * sets the ->xgmi_enabled flag.
461 */
462void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr)
463{
464 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
465 enum bp_result result;
466 struct spread_spectrum_info info = { { 0 } };
467 struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
468
469 clk_mgr_dce->xgmi_enabled = false;
470
471 result = bp->funcs->get_spread_spectrum_info(bp, AS_SIGNAL_TYPE_XGMI,
472 0, &info);
473 if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) {
474 clk_mgr_dce->xgmi_enabled = true;
475 clk_mgr_dce->ss_on_dprefclk = true;
476 clk_mgr_dce->dprefclk_ss_divider =
477 info.spread_percentage_divider;
478
479 if (info.type.CENTER_MODE == 0) {
480 /* Currently for DP Reference clock we
481 * need only SS percentage for
482 * downspread */
483 clk_mgr_dce->dprefclk_ss_percentage =
484 info.spread_spectrum_percentage;
485 }
486 }
487}
488
453void dce110_fill_display_configs( 489void dce110_fill_display_configs(
454 const struct dc_state *context, 490 const struct dc_state *context,
455 struct dm_pp_display_configuration *pp_display_cfg) 491 struct dm_pp_display_configuration *pp_display_cfg)
@@ -483,18 +519,18 @@ void dce110_fill_display_configs(
483 cfg->src_height = stream->src.height; 519 cfg->src_height = stream->src.height;
484 cfg->src_width = stream->src.width; 520 cfg->src_width = stream->src.width;
485 cfg->ddi_channel_mapping = 521 cfg->ddi_channel_mapping =
486 stream->sink->link->ddi_channel_mapping.raw; 522 stream->link->ddi_channel_mapping.raw;
487 cfg->transmitter = 523 cfg->transmitter =
488 stream->sink->link->link_enc->transmitter; 524 stream->link->link_enc->transmitter;
489 cfg->link_settings.lane_count = 525 cfg->link_settings.lane_count =
490 stream->sink->link->cur_link_settings.lane_count; 526 stream->link->cur_link_settings.lane_count;
491 cfg->link_settings.link_rate = 527 cfg->link_settings.link_rate =
492 stream->sink->link->cur_link_settings.link_rate; 528 stream->link->cur_link_settings.link_rate;
493 cfg->link_settings.link_spread = 529 cfg->link_settings.link_spread =
494 stream->sink->link->cur_link_settings.link_spread; 530 stream->link->cur_link_settings.link_spread;
495 cfg->sym_clock = stream->phy_pix_clk; 531 cfg->sym_clock = stream->phy_pix_clk;
496 /* Round v_refresh*/ 532 /* Round v_refresh*/
497 cfg->v_refresh = stream->timing.pix_clk_khz * 1000; 533 cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
498 cfg->v_refresh /= stream->timing.h_total; 534 cfg->v_refresh /= stream->timing.h_total;
499 cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) 535 cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
500 / stream->timing.v_total; 536 / stream->timing.v_total;
@@ -518,7 +554,7 @@ static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
518 - stream->timing.v_addressable); 554 - stream->timing.v_addressable);
519 555
520 vertical_blank_time = vertical_blank_in_pixels 556 vertical_blank_time = vertical_blank_in_pixels
521 * 1000 / stream->timing.pix_clk_khz; 557 * 10000 / stream->timing.pix_clk_100hz;
522 558
523 if (min_vertical_blank_time > vertical_blank_time) 559 if (min_vertical_blank_time > vertical_blank_time)
524 min_vertical_blank_time = vertical_blank_time; 560 min_vertical_blank_time = vertical_blank_time;
@@ -612,7 +648,7 @@ static void dce11_pplib_apply_display_requirements(
612 648
613 pp_display_cfg->crtc_index = 649 pp_display_cfg->crtc_index =
614 pp_display_cfg->disp_configs[0].pipe_idx; 650 pp_display_cfg->disp_configs[0].pipe_idx;
615 pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz; 651 pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
616 } 652 }
617 653
618 if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) 654 if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -625,11 +661,11 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr,
625{ 661{
626 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); 662 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
627 struct dm_pp_power_level_change_request level_change_req; 663 struct dm_pp_power_level_change_request level_change_req;
628 int unpatched_disp_clk = context->bw.dce.dispclk_khz; 664 int patched_disp_clk = context->bw.dce.dispclk_khz;
629 665
630 /*TODO: W/A for dal3 linux, investigate why this works */ 666 /*TODO: W/A for dal3 linux, investigate why this works */
631 if (!clk_mgr_dce->dfs_bypass_active) 667 if (!clk_mgr_dce->dfs_bypass_active)
632 context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; 668 patched_disp_clk = patched_disp_clk * 115 / 100;
633 669
634 level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); 670 level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
635 /* get max clock state from PPLIB */ 671 /* get max clock state from PPLIB */
@@ -639,13 +675,11 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr,
639 clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; 675 clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
640 } 676 }
641 677
642 if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { 678 if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
643 context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); 679 patched_disp_clk = dce_set_clock(clk_mgr, patched_disp_clk);
644 clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; 680 clk_mgr->clks.dispclk_khz = patched_disp_clk;
645 } 681 }
646 dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); 682 dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
647
648 context->bw.dce.dispclk_khz = unpatched_disp_clk;
649} 683}
650 684
651static void dce11_update_clocks(struct clk_mgr *clk_mgr, 685static void dce11_update_clocks(struct clk_mgr *clk_mgr,
@@ -676,11 +710,11 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
676{ 710{
677 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); 711 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
678 struct dm_pp_power_level_change_request level_change_req; 712 struct dm_pp_power_level_change_request level_change_req;
679 int unpatched_disp_clk = context->bw.dce.dispclk_khz; 713 int patched_disp_clk = context->bw.dce.dispclk_khz;
680 714
681 /*TODO: W/A for dal3 linux, investigate why this works */ 715 /*TODO: W/A for dal3 linux, investigate why this works */
682 if (!clk_mgr_dce->dfs_bypass_active) 716 if (!clk_mgr_dce->dfs_bypass_active)
683 context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; 717 patched_disp_clk = patched_disp_clk * 115 / 100;
684 718
685 level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); 719 level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
686 /* get max clock state from PPLIB */ 720 /* get max clock state from PPLIB */
@@ -690,13 +724,11 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
690 clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; 724 clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
691 } 725 }
692 726
693 if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { 727 if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
694 context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz); 728 patched_disp_clk = dce112_set_clock(clk_mgr, patched_disp_clk);
695 clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; 729 clk_mgr->clks.dispclk_khz = patched_disp_clk;
696 } 730 }
697 dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); 731 dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
698
699 context->bw.dce.dispclk_khz = unpatched_disp_clk;
700} 732}
701 733
702static void dce12_update_clocks(struct clk_mgr *clk_mgr, 734static void dce12_update_clocks(struct clk_mgr *clk_mgr,
@@ -706,17 +738,23 @@ static void dce12_update_clocks(struct clk_mgr *clk_mgr,
706 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); 738 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
707 struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; 739 struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
708 int max_pix_clk = get_max_pixel_clock_for_all_paths(context); 740 int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
709 int unpatched_disp_clk = context->bw.dce.dispclk_khz; 741 int patched_disp_clk = context->bw.dce.dispclk_khz;
710 742
711 /*TODO: W/A for dal3 linux, investigate why this works */ 743 /*TODO: W/A for dal3 linux, investigate why this works */
712 if (!clk_mgr_dce->dfs_bypass_active) 744 if (!clk_mgr_dce->dfs_bypass_active)
713 context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; 745 patched_disp_clk = patched_disp_clk * 115 / 100;
714 746
715 if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { 747 if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
716 clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; 748 clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
717 clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz; 749 /*
718 context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz); 750 * When xGMI is enabled, the display clk needs to be adjusted
719 clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; 751 * with the WAFL link's SS percentage.
752 */
753 if (clk_mgr_dce->xgmi_enabled)
754 patched_disp_clk = clk_mgr_adjust_dp_ref_freq_for_ss(
755 clk_mgr_dce, patched_disp_clk);
756 clock_voltage_req.clocks_in_khz = patched_disp_clk;
757 clk_mgr->clks.dispclk_khz = dce112_set_clock(clk_mgr, patched_disp_clk);
720 758
721 dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); 759 dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
722 } 760 }
@@ -729,8 +767,6 @@ static void dce12_update_clocks(struct clk_mgr *clk_mgr,
729 dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); 767 dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
730 } 768 }
731 dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); 769 dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
732
733 context->bw.dce.dispclk_khz = unpatched_disp_clk;
734} 770}
735 771
736static const struct clk_mgr_funcs dce120_funcs = { 772static const struct clk_mgr_funcs dce120_funcs = {
@@ -882,6 +918,27 @@ struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
882 return &clk_mgr_dce->base; 918 return &clk_mgr_dce->base;
883} 919}
884 920
921struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx)
922{
923 struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce),
924 GFP_KERNEL);
925
926 if (clk_mgr_dce == NULL) {
927 BREAK_TO_DEBUGGER();
928 return NULL;
929 }
930
931 memcpy(clk_mgr_dce->max_clks_by_state, dce120_max_clks_by_state,
932 sizeof(dce120_max_clks_by_state));
933
934 dce_clk_mgr_construct(clk_mgr_dce, ctx, NULL, NULL, NULL);
935
936 clk_mgr_dce->dprefclk_khz = 625000;
937 clk_mgr_dce->base.funcs = &dce120_funcs;
938
939 return &clk_mgr_dce->base;
940}
941
885void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr) 942void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
886{ 943{
887 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr); 944 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);
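
The recurring change across dce_update_clocks(), dce112_update_clocks() and dce12_update_clocks() is one fix: the 115% DFS-bypass workaround now lands in a local patched_disp_clk instead of mutating context->bw.dce.dispclk_khz and restoring it afterwards, which removes the save/restore pair and keeps the committed state read-only. Reduced to a sketch (the set_clock call is elided and the bypass flag is passed in as a stand-in):

static void update_clocks_sketch(struct clk_mgr *clk_mgr,
				 struct dc_state *context,
				 bool dfs_bypass_active, bool safe_to_lower)
{
	/* Work on a copy; the committed state is never written back. */
	int patched_disp_clk = context->bw.dce.dispclk_khz;

	if (!dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100; /* W/A bump */

	if (should_set_clock(safe_to_lower, patched_disp_clk,
			     clk_mgr->clks.dispclk_khz))
		clk_mgr->clks.dispclk_khz = patched_disp_clk;
}
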
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
index 3bceb31d910d..c8f8c442142a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
@@ -94,11 +94,37 @@ struct dce_clk_mgr {
94 * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */ 94 * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
95 int dfs_bypass_disp_clk; 95 int dfs_bypass_disp_clk;
96 96
97 /* Flag for Enabled SS on DPREFCLK */ 97 /**
98 * @ss_on_dprefclk:
99 *
100 * True if spread spectrum is enabled on the DP ref clock.
101 */
98 bool ss_on_dprefclk; 102 bool ss_on_dprefclk;
99 /* DPREFCLK SS percentage (if down-spread enabled) */ 103
104 /**
105 * @xgmi_enabled:
106 *
107 * True if xGMI is enabled. On VG20, both audio and display clocks need
108 * to be adjusted with the WAFL link's SS info if xGMI is enabled.
109 */
110 bool xgmi_enabled;
111
112 /**
113 * @dprefclk_ss_percentage:
114 *
115 * DPREFCLK SS percentage (if down-spread enabled).
116 *
117 * Note that if XGMI is enabled, the SS info (percentage and divider)
118 * from the WAFL link is used instead. This is decided during
119 * dce_clk_mgr initialization.
120 */
100 int dprefclk_ss_percentage; 121 int dprefclk_ss_percentage;
101 /* DPREFCLK SS percentage Divider (100 or 1000) */ 122
123 /**
124 * @dprefclk_ss_divider:
125 *
126 * DPREFCLK SS percentage Divider (100 or 1000).
127 */
102 int dprefclk_ss_divider; 128 int dprefclk_ss_divider;
103 int dprefclk_khz; 129 int dprefclk_khz;
104 130
@@ -163,6 +189,9 @@ struct clk_mgr *dce112_clk_mgr_create(
163 189
164struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx); 190struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
165 191
192struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx);
193void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr);
194
166void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr); 195void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
167 196
168int dentist_get_divider_from_did(int did); 197int dentist_get_divider_from_did(int did);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 723ce80ed89c..c67e90e5c339 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -108,28 +108,28 @@ static const struct spread_spectrum_data *get_ss_data_entry(
108} 108}
109 109
110/** 110/**
111* Function: calculate_fb_and_fractional_fb_divider 111 * Function: calculate_fb_and_fractional_fb_divider
112* 112 *
 113* DESCRIPTION: Calculates feedback and fractional feedback divider values 113 * DESCRIPTION: Calculates feedback and fractional feedback divider values
114* 114 *
115*PARAMETERS: 115 *PARAMETERS:
116* targetPixelClock Desired frequency in 10 KHz 116 * targetPixelClock Desired frequency in 100 Hz
117* ref_divider Reference divider (already known) 117 * ref_divider Reference divider (already known)
118* postDivider Post Divider (already known) 118 * postDivider Post Divider (already known)
119* feedback_divider_param Pointer where to store 119 * feedback_divider_param Pointer where to store
120* calculated feedback divider value 120 * calculated feedback divider value
121* fract_feedback_divider_param Pointer where to store 121 * fract_feedback_divider_param Pointer where to store
122* calculated fract feedback divider value 122 * calculated fract feedback divider value
123* 123 *
124*RETURNS: 124 *RETURNS:
125* It fills the locations pointed by feedback_divider_param 125 * It fills the locations pointed by feedback_divider_param
126* and fract_feedback_divider_param 126 * and fract_feedback_divider_param
127* It returns - true if feedback divider is not 0 127 * It returns - true if feedback divider is not 0
128* - false (should never happen) 128 * - false (should never happen)
129*/ 129 */
130static bool calculate_fb_and_fractional_fb_divider( 130static bool calculate_fb_and_fractional_fb_divider(
131 struct calc_pll_clock_source *calc_pll_cs, 131 struct calc_pll_clock_source *calc_pll_cs,
132 uint32_t target_pix_clk_khz, 132 uint32_t target_pix_clk_100hz,
133 uint32_t ref_divider, 133 uint32_t ref_divider,
134 uint32_t post_divider, 134 uint32_t post_divider,
135 uint32_t *feedback_divider_param, 135 uint32_t *feedback_divider_param,
@@ -138,11 +138,11 @@ static bool calculate_fb_and_fractional_fb_divider(
138 uint64_t feedback_divider; 138 uint64_t feedback_divider;
139 139
140 feedback_divider = 140 feedback_divider =
141 (uint64_t)target_pix_clk_khz * ref_divider * post_divider; 141 (uint64_t)target_pix_clk_100hz * ref_divider * post_divider;
142 feedback_divider *= 10; 142 feedback_divider *= 10;
143 /* additional factor, since we divide by 10 afterwards */ 143 /* additional factor, since we divide by 10 afterwards */
144 feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor); 144 feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
145 feedback_divider = div_u64(feedback_divider, calc_pll_cs->ref_freq_khz); 145 feedback_divider = div_u64(feedback_divider, calc_pll_cs->ref_freq_khz * 10ull);
146 146
147/*Round to the required precision 147/*Round to the required precision
148 * The following code replaces the old code (ullfeedbackDivider + 5)/10 148 * The following code replaces the old code (ullfeedbackDivider + 5)/10
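The hunk above keeps the divider math but feeds it a 100 Hz-unit target; since the PLL reference stays in kHz, the denominator picks up a factor of 10 (1 kHz = 10 x 100 Hz). A self-contained restatement of that computation, assuming a fract_fb_divider_factor of 10 or 100 as used elsewhere in this file:

#include <stdint.h>

/* Returns the feedback divider scaled by fract_factor, rounded to the
 * nearest unit (sketch of the math above, not the driver function).
 */
static uint64_t fb_div_times_fract(uint32_t target_pix_clk_100hz,
                                   uint32_t ref_divider,
                                   uint32_t post_divider,
                                   uint32_t ref_freq_khz,
                                   uint32_t fract_factor)
{
        uint64_t fb = (uint64_t)target_pix_clk_100hz * ref_divider * post_divider;

        fb *= 10;                            /* one extra digit for rounding */
        fb *= fract_factor;
        fb /= (uint64_t)ref_freq_khz * 10;   /* reference: kHz -> 100 Hz units */
        return (fb + 5) / 10;                /* round to nearest */
}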
@@ -195,36 +195,36 @@ static bool calc_fb_divider_checking_tolerance(
195{ 195{
196 uint32_t feedback_divider; 196 uint32_t feedback_divider;
197 uint32_t fract_feedback_divider; 197 uint32_t fract_feedback_divider;
198 uint32_t actual_calculated_clock_khz; 198 uint32_t actual_calculated_clock_100hz;
199 uint32_t abs_err; 199 uint32_t abs_err;
200 uint64_t actual_calc_clk_khz; 200 uint64_t actual_calc_clk_100hz;
201 201
202 calculate_fb_and_fractional_fb_divider( 202 calculate_fb_and_fractional_fb_divider(
203 calc_pll_cs, 203 calc_pll_cs,
204 pll_settings->adjusted_pix_clk, 204 pll_settings->adjusted_pix_clk_100hz,
205 ref_divider, 205 ref_divider,
206 post_divider, 206 post_divider,
207 &feedback_divider, 207 &feedback_divider,
208 &fract_feedback_divider); 208 &fract_feedback_divider);
209 209
210 /*Actual calculated value*/ 210 /*Actual calculated value*/
211 actual_calc_clk_khz = (uint64_t)feedback_divider * 211 actual_calc_clk_100hz = (uint64_t)feedback_divider *
212 calc_pll_cs->fract_fb_divider_factor + 212 calc_pll_cs->fract_fb_divider_factor +
213 fract_feedback_divider; 213 fract_feedback_divider;
214 actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz; 214 actual_calc_clk_100hz *= calc_pll_cs->ref_freq_khz * 10;
215 actual_calc_clk_khz = 215 actual_calc_clk_100hz =
216 div_u64(actual_calc_clk_khz, 216 div_u64(actual_calc_clk_100hz,
217 ref_divider * post_divider * 217 ref_divider * post_divider *
218 calc_pll_cs->fract_fb_divider_factor); 218 calc_pll_cs->fract_fb_divider_factor);
219 219
220 actual_calculated_clock_khz = (uint32_t)(actual_calc_clk_khz); 220 actual_calculated_clock_100hz = (uint32_t)(actual_calc_clk_100hz);
221 221
222 abs_err = (actual_calculated_clock_khz > 222 abs_err = (actual_calculated_clock_100hz >
223 pll_settings->adjusted_pix_clk) 223 pll_settings->adjusted_pix_clk_100hz)
224 ? actual_calculated_clock_khz - 224 ? actual_calculated_clock_100hz -
225 pll_settings->adjusted_pix_clk 225 pll_settings->adjusted_pix_clk_100hz
226 : pll_settings->adjusted_pix_clk - 226 : pll_settings->adjusted_pix_clk_100hz -
227 actual_calculated_clock_khz; 227 actual_calculated_clock_100hz;
228 228
229 if (abs_err <= tolerance) { 229 if (abs_err <= tolerance) {
230 /*found good values*/ 230 /*found good values*/
@@ -233,10 +233,10 @@ static bool calc_fb_divider_checking_tolerance(
233 pll_settings->feedback_divider = feedback_divider; 233 pll_settings->feedback_divider = feedback_divider;
234 pll_settings->fract_feedback_divider = fract_feedback_divider; 234 pll_settings->fract_feedback_divider = fract_feedback_divider;
235 pll_settings->pix_clk_post_divider = post_divider; 235 pll_settings->pix_clk_post_divider = post_divider;
236 pll_settings->calculated_pix_clk = 236 pll_settings->calculated_pix_clk_100hz =
237 actual_calculated_clock_khz; 237 actual_calculated_clock_100hz;
238 pll_settings->vco_freq = 238 pll_settings->vco_freq =
239 actual_calculated_clock_khz * post_divider; 239 actual_calculated_clock_100hz * post_divider / 10;
240 return true; 240 return true;
241 } 241 }
242 return false; 242 return false;
@@ -257,8 +257,8 @@ static bool calc_pll_dividers_in_range(
257 257
258/* This is err_tolerance / 10000 = 0.0025 - acceptable error of 0.25% 258/* This is err_tolerance / 10000 = 0.0025 - acceptable error of 0.25%
259 * This is errorTolerance / 10000 = 0.0001 - acceptable error of 0.01%*/ 259 * This is errorTolerance / 10000 = 0.0001 - acceptable error of 0.01%*/
260 tolerance = (pll_settings->adjusted_pix_clk * err_tolerance) / 260 tolerance = (pll_settings->adjusted_pix_clk_100hz * err_tolerance) /
261 10000; 261 100000;
262 if (tolerance < CALC_PLL_CLK_SRC_ERR_TOLERANCE) 262 if (tolerance < CALC_PLL_CLK_SRC_ERR_TOLERANCE)
263 tolerance = CALC_PLL_CLK_SRC_ERR_TOLERANCE; 263 tolerance = CALC_PLL_CLK_SRC_ERR_TOLERANCE;
264 264
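Most of the remaining hunks in this file follow the same pattern: any constant or limit still held in kHz is multiplied by 10 before it meets a 100 Hz-unit clock. Two illustrative helpers (not part of the driver) make that scaling explicit:

#include <stdint.h>

static inline uint32_t khz_to_100hz(uint32_t khz)    { return khz * 10; }
static inline uint32_t hz100_to_khz(uint32_t hz100)  { return hz100 / 10; }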
@@ -294,7 +294,7 @@ static uint32_t calculate_pixel_clock_pll_dividers(
294 uint32_t min_ref_divider; 294 uint32_t min_ref_divider;
295 uint32_t max_ref_divider; 295 uint32_t max_ref_divider;
296 296
297 if (pll_settings->adjusted_pix_clk == 0) { 297 if (pll_settings->adjusted_pix_clk_100hz == 0) {
298 DC_LOG_ERROR( 298 DC_LOG_ERROR(
299 "%s Bad requested pixel clock", __func__); 299 "%s Bad requested pixel clock", __func__);
300 return MAX_PLL_CALC_ERROR; 300 return MAX_PLL_CALC_ERROR;
@@ -306,21 +306,21 @@ static uint32_t calculate_pixel_clock_pll_dividers(
306 max_post_divider = pll_settings->pix_clk_post_divider; 306 max_post_divider = pll_settings->pix_clk_post_divider;
307 } else { 307 } else {
308 min_post_divider = calc_pll_cs->min_pix_clock_pll_post_divider; 308 min_post_divider = calc_pll_cs->min_pix_clock_pll_post_divider;
309 if (min_post_divider * pll_settings->adjusted_pix_clk < 309 if (min_post_divider * pll_settings->adjusted_pix_clk_100hz <
310 calc_pll_cs->min_vco_khz) { 310 calc_pll_cs->min_vco_khz * 10) {
311 min_post_divider = calc_pll_cs->min_vco_khz / 311 min_post_divider = calc_pll_cs->min_vco_khz * 10 /
312 pll_settings->adjusted_pix_clk; 312 pll_settings->adjusted_pix_clk_100hz;
313 if ((min_post_divider * 313 if ((min_post_divider *
314 pll_settings->adjusted_pix_clk) < 314 pll_settings->adjusted_pix_clk_100hz) <
315 calc_pll_cs->min_vco_khz) 315 calc_pll_cs->min_vco_khz * 10)
316 min_post_divider++; 316 min_post_divider++;
317 } 317 }
318 318
319 max_post_divider = calc_pll_cs->max_pix_clock_pll_post_divider; 319 max_post_divider = calc_pll_cs->max_pix_clock_pll_post_divider;
320 if (max_post_divider * pll_settings->adjusted_pix_clk 320 if (max_post_divider * pll_settings->adjusted_pix_clk_100hz
321 > calc_pll_cs->max_vco_khz) 321 > calc_pll_cs->max_vco_khz * 10)
322 max_post_divider = calc_pll_cs->max_vco_khz / 322 max_post_divider = calc_pll_cs->max_vco_khz * 10 /
323 pll_settings->adjusted_pix_clk; 323 pll_settings->adjusted_pix_clk_100hz;
324 } 324 }
325 325
326/* 2) Find Reference divider ranges 326/* 2) Find Reference divider ranges
@@ -392,47 +392,47 @@ static bool pll_adjust_pix_clk(
392 struct pixel_clk_params *pix_clk_params, 392 struct pixel_clk_params *pix_clk_params,
393 struct pll_settings *pll_settings) 393 struct pll_settings *pll_settings)
394{ 394{
395 uint32_t actual_pix_clk_khz = 0; 395 uint32_t actual_pix_clk_100hz = 0;
396 uint32_t requested_clk_khz = 0; 396 uint32_t requested_clk_100hz = 0;
397 struct bp_adjust_pixel_clock_parameters bp_adjust_pixel_clock_params = { 397 struct bp_adjust_pixel_clock_parameters bp_adjust_pixel_clock_params = {
398 0 }; 398 0 };
399 enum bp_result bp_result; 399 enum bp_result bp_result;
400 switch (pix_clk_params->signal_type) { 400 switch (pix_clk_params->signal_type) {
401 case SIGNAL_TYPE_HDMI_TYPE_A: { 401 case SIGNAL_TYPE_HDMI_TYPE_A: {
402 requested_clk_khz = pix_clk_params->requested_pix_clk; 402 requested_clk_100hz = pix_clk_params->requested_pix_clk_100hz;
403 if (pix_clk_params->pixel_encoding != PIXEL_ENCODING_YCBCR422) { 403 if (pix_clk_params->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
404 switch (pix_clk_params->color_depth) { 404 switch (pix_clk_params->color_depth) {
405 case COLOR_DEPTH_101010: 405 case COLOR_DEPTH_101010:
406 requested_clk_khz = (requested_clk_khz * 5) >> 2; 406 requested_clk_100hz = (requested_clk_100hz * 5) >> 2;
407 break; /* x1.25*/ 407 break; /* x1.25*/
408 case COLOR_DEPTH_121212: 408 case COLOR_DEPTH_121212:
409 requested_clk_khz = (requested_clk_khz * 6) >> 2; 409 requested_clk_100hz = (requested_clk_100hz * 6) >> 2;
410 break; /* x1.5*/ 410 break; /* x1.5*/
411 case COLOR_DEPTH_161616: 411 case COLOR_DEPTH_161616:
412 requested_clk_khz = requested_clk_khz * 2; 412 requested_clk_100hz = requested_clk_100hz * 2;
413 break; /* x2.0*/ 413 break; /* x2.0*/
414 default: 414 default:
415 break; 415 break;
416 } 416 }
417 } 417 }
418 actual_pix_clk_khz = requested_clk_khz; 418 actual_pix_clk_100hz = requested_clk_100hz;
419 } 419 }
420 break; 420 break;
421 421
422 case SIGNAL_TYPE_DISPLAY_PORT: 422 case SIGNAL_TYPE_DISPLAY_PORT:
423 case SIGNAL_TYPE_DISPLAY_PORT_MST: 423 case SIGNAL_TYPE_DISPLAY_PORT_MST:
424 case SIGNAL_TYPE_EDP: 424 case SIGNAL_TYPE_EDP:
425 requested_clk_khz = pix_clk_params->requested_sym_clk; 425 requested_clk_100hz = pix_clk_params->requested_sym_clk * 10;
426 actual_pix_clk_khz = pix_clk_params->requested_pix_clk; 426 actual_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz;
427 break; 427 break;
428 428
429 default: 429 default:
430 requested_clk_khz = pix_clk_params->requested_pix_clk; 430 requested_clk_100hz = pix_clk_params->requested_pix_clk_100hz;
431 actual_pix_clk_khz = pix_clk_params->requested_pix_clk; 431 actual_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz;
432 break; 432 break;
433 } 433 }
434 434
435 bp_adjust_pixel_clock_params.pixel_clock = requested_clk_khz; 435 bp_adjust_pixel_clock_params.pixel_clock = requested_clk_100hz / 10;
436 bp_adjust_pixel_clock_params. 436 bp_adjust_pixel_clock_params.
437 encoder_object_id = pix_clk_params->encoder_object_id; 437 encoder_object_id = pix_clk_params->encoder_object_id;
438 bp_adjust_pixel_clock_params.signal_type = pix_clk_params->signal_type; 438 bp_adjust_pixel_clock_params.signal_type = pix_clk_params->signal_type;
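The HDMI branch above scales the TMDS clock with color depth using integer shifts; a condensed restatement of the multipliers (illustrative, 8 bpc passes through unscaled):

#include <stdint.h>

static uint32_t hdmi_deep_color_clk(uint32_t pix_clk_100hz, int bpc)
{
        switch (bpc) {
        case 10: return (pix_clk_100hz * 5) >> 2;  /* x1.25 */
        case 12: return (pix_clk_100hz * 6) >> 2;  /* x1.5  */
        case 16: return pix_clk_100hz * 2;         /* x2.0  */
        default: return pix_clk_100hz;             /* 8 bpc */
        }
}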
@@ -441,9 +441,9 @@ static bool pll_adjust_pix_clk(
441 bp_result = clk_src->bios->funcs->adjust_pixel_clock( 441 bp_result = clk_src->bios->funcs->adjust_pixel_clock(
442 clk_src->bios, &bp_adjust_pixel_clock_params); 442 clk_src->bios, &bp_adjust_pixel_clock_params);
443 if (bp_result == BP_RESULT_OK) { 443 if (bp_result == BP_RESULT_OK) {
444 pll_settings->actual_pix_clk = actual_pix_clk_khz; 444 pll_settings->actual_pix_clk_100hz = actual_pix_clk_100hz;
445 pll_settings->adjusted_pix_clk = 445 pll_settings->adjusted_pix_clk_100hz =
446 bp_adjust_pixel_clock_params.adjusted_pixel_clock; 446 bp_adjust_pixel_clock_params.adjusted_pixel_clock * 10;
447 pll_settings->reference_divider = 447 pll_settings->reference_divider =
448 bp_adjust_pixel_clock_params.reference_divider; 448 bp_adjust_pixel_clock_params.reference_divider;
449 pll_settings->pix_clk_post_divider = 449 pll_settings->pix_clk_post_divider =
@@ -490,7 +490,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
490 const struct spread_spectrum_data *ss_data = get_ss_data_entry( 490 const struct spread_spectrum_data *ss_data = get_ss_data_entry(
491 clk_src, 491 clk_src,
492 pix_clk_params->signal_type, 492 pix_clk_params->signal_type,
493 pll_settings->adjusted_pix_clk); 493 pll_settings->adjusted_pix_clk_100hz / 10);
494 494
495 if (NULL != ss_data) 495 if (NULL != ss_data)
496 pll_settings->ss_percentage = ss_data->percentage; 496 pll_settings->ss_percentage = ss_data->percentage;
@@ -502,13 +502,13 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
502 * to continue. */ 502 * to continue. */
503 DC_LOG_ERROR( 503 DC_LOG_ERROR(
504 "%s: Failed to adjust pixel clock!!", __func__); 504 "%s: Failed to adjust pixel clock!!", __func__);
505 pll_settings->actual_pix_clk = 505 pll_settings->actual_pix_clk_100hz =
506 pix_clk_params->requested_pix_clk; 506 pix_clk_params->requested_pix_clk_100hz;
507 pll_settings->adjusted_pix_clk = 507 pll_settings->adjusted_pix_clk_100hz =
508 pix_clk_params->requested_pix_clk; 508 pix_clk_params->requested_pix_clk_100hz;
509 509
510 if (dc_is_dp_signal(pix_clk_params->signal_type)) 510 if (dc_is_dp_signal(pix_clk_params->signal_type))
511 pll_settings->adjusted_pix_clk = 100000; 511 pll_settings->adjusted_pix_clk_100hz = 1000000;
512 } 512 }
513 513
514 /* Calculate Dividers */ 514 /* Calculate Dividers */
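The DP fallback above pins the adjusted clock to 1000000 units of 100 Hz, i.e. 100 MHz, matching the 100000 kHz the old code used. A one-line sanity check of the rescaling:

_Static_assert(1000000 * 100 == 100000 * 1000,
               "DP fallback: 100 MHz in both unit systems");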
@@ -533,28 +533,28 @@ static void dce112_get_pix_clk_dividers_helper (
533 struct pll_settings *pll_settings, 533 struct pll_settings *pll_settings,
534 struct pixel_clk_params *pix_clk_params) 534 struct pixel_clk_params *pix_clk_params)
535{ 535{
536 uint32_t actualPixelClockInKHz; 536 uint32_t actual_pixel_clock_100hz;
537 537
538 actualPixelClockInKHz = pix_clk_params->requested_pix_clk; 538 actual_pixel_clock_100hz = pix_clk_params->requested_pix_clk_100hz;
539 /* Calculate Dividers */ 539 /* Calculate Dividers */
540 if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) { 540 if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
541 switch (pix_clk_params->color_depth) { 541 switch (pix_clk_params->color_depth) {
542 case COLOR_DEPTH_101010: 542 case COLOR_DEPTH_101010:
543 actualPixelClockInKHz = (actualPixelClockInKHz * 5) >> 2; 543 actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
544 break; 544 break;
545 case COLOR_DEPTH_121212: 545 case COLOR_DEPTH_121212:
546 actualPixelClockInKHz = (actualPixelClockInKHz * 6) >> 2; 546 actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
547 break; 547 break;
548 case COLOR_DEPTH_161616: 548 case COLOR_DEPTH_161616:
549 actualPixelClockInKHz = actualPixelClockInKHz * 2; 549 actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
550 break; 550 break;
551 default: 551 default:
552 break; 552 break;
553 } 553 }
554 } 554 }
555 pll_settings->actual_pix_clk = actualPixelClockInKHz; 555 pll_settings->actual_pix_clk_100hz = actual_pixel_clock_100hz;
556 pll_settings->adjusted_pix_clk = actualPixelClockInKHz; 556 pll_settings->adjusted_pix_clk_100hz = actual_pixel_clock_100hz;
557 pll_settings->calculated_pix_clk = pix_clk_params->requested_pix_clk; 557 pll_settings->calculated_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz;
558} 558}
559 559
560static uint32_t dce110_get_pix_clk_dividers( 560static uint32_t dce110_get_pix_clk_dividers(
@@ -567,7 +567,7 @@ static uint32_t dce110_get_pix_clk_dividers(
567 DC_LOGGER_INIT(); 567 DC_LOGGER_INIT();
568 568
569 if (pix_clk_params == NULL || pll_settings == NULL 569 if (pix_clk_params == NULL || pll_settings == NULL
570 || pix_clk_params->requested_pix_clk == 0) { 570 || pix_clk_params->requested_pix_clk_100hz == 0) {
571 DC_LOG_ERROR( 571 DC_LOG_ERROR(
572 "%s: Invalid parameters!!\n", __func__); 572 "%s: Invalid parameters!!\n", __func__);
573 return pll_calc_error; 573 return pll_calc_error;
@@ -577,10 +577,10 @@ static uint32_t dce110_get_pix_clk_dividers(
577 577
578 if (cs->id == CLOCK_SOURCE_ID_DP_DTO || 578 if (cs->id == CLOCK_SOURCE_ID_DP_DTO ||
579 cs->id == CLOCK_SOURCE_ID_EXTERNAL) { 579 cs->id == CLOCK_SOURCE_ID_EXTERNAL) {
580 pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz; 580 pll_settings->adjusted_pix_clk_100hz = clk_src->ext_clk_khz * 10;
581 pll_settings->calculated_pix_clk = clk_src->ext_clk_khz; 581 pll_settings->calculated_pix_clk_100hz = clk_src->ext_clk_khz * 10;
582 pll_settings->actual_pix_clk = 582 pll_settings->actual_pix_clk_100hz =
583 pix_clk_params->requested_pix_clk; 583 pix_clk_params->requested_pix_clk_100hz;
584 return 0; 584 return 0;
585 } 585 }
586 586
@@ -599,7 +599,7 @@ static uint32_t dce112_get_pix_clk_dividers(
599 DC_LOGGER_INIT(); 599 DC_LOGGER_INIT();
600 600
601 if (pix_clk_params == NULL || pll_settings == NULL 601 if (pix_clk_params == NULL || pll_settings == NULL
602 || pix_clk_params->requested_pix_clk == 0) { 602 || pix_clk_params->requested_pix_clk_100hz == 0) {
603 DC_LOG_ERROR( 603 DC_LOG_ERROR(
604 "%s: Invalid parameters!!\n", __func__); 604 "%s: Invalid parameters!!\n", __func__);
605 return -1; 605 return -1;
@@ -609,10 +609,10 @@ static uint32_t dce112_get_pix_clk_dividers(
609 609
610 if (cs->id == CLOCK_SOURCE_ID_DP_DTO || 610 if (cs->id == CLOCK_SOURCE_ID_DP_DTO ||
611 cs->id == CLOCK_SOURCE_ID_EXTERNAL) { 611 cs->id == CLOCK_SOURCE_ID_EXTERNAL) {
612 pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz; 612 pll_settings->adjusted_pix_clk_100hz = clk_src->ext_clk_khz * 10;
613 pll_settings->calculated_pix_clk = clk_src->ext_clk_khz; 613 pll_settings->calculated_pix_clk_100hz = clk_src->ext_clk_khz * 10;
614 pll_settings->actual_pix_clk = 614 pll_settings->actual_pix_clk_100hz =
615 pix_clk_params->requested_pix_clk; 615 pix_clk_params->requested_pix_clk_100hz;
616 return -1; 616 return -1;
617 } 617 }
618 618
@@ -714,7 +714,7 @@ static bool enable_spread_spectrum(
714 ss_data = get_ss_data_entry( 714 ss_data = get_ss_data_entry(
715 clk_src, 715 clk_src,
716 signal, 716 signal,
717 pll_settings->calculated_pix_clk); 717 pll_settings->calculated_pix_clk_100hz / 10);
718 718
719/* Pixel clock PLL has been programmed to generate desired pixel clock, 719/* Pixel clock PLL has been programmed to generate desired pixel clock,
720 * now enable SS on pixel clock */ 720 * now enable SS on pixel clock */
@@ -853,7 +853,7 @@ static bool dce110_program_pix_clk(
853 /*ATOMBIOS expects pixel rate adjusted by deep color ratio*/ 853 /*ATOMBIOS expects pixel rate adjusted by deep color ratio*/
854 bp_pc_params.controller_id = pix_clk_params->controller_id; 854 bp_pc_params.controller_id = pix_clk_params->controller_id;
855 bp_pc_params.pll_id = clock_source->id; 855 bp_pc_params.pll_id = clock_source->id;
856 bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk; 856 bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz;
857 bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; 857 bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
858 bp_pc_params.signal_type = pix_clk_params->signal_type; 858 bp_pc_params.signal_type = pix_clk_params->signal_type;
859 859
@@ -903,12 +903,12 @@ static bool dce112_program_pix_clk(
903#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 903#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
904 if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { 904 if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
905 unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; 905 unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
906 unsigned dp_dto_ref_kHz = 700000; 906 unsigned dp_dto_ref_100hz = 7000000;
907 unsigned clock_kHz = pll_settings->actual_pix_clk; 907 unsigned clock_100hz = pll_settings->actual_pix_clk_100hz;
908 908
909 /* Set DTO values: phase = target clock, modulo = reference clock */ 909 /* Set DTO values: phase = target clock, modulo = reference clock */
910 REG_WRITE(PHASE[inst], clock_kHz); 910 REG_WRITE(PHASE[inst], clock_100hz);
911 REG_WRITE(MODULO[inst], dp_dto_ref_kHz); 911 REG_WRITE(MODULO[inst], dp_dto_ref_100hz);
912 912
913 /* Enable DTO */ 913 /* Enable DTO */
914 REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); 914 REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
@@ -927,7 +927,7 @@ static bool dce112_program_pix_clk(
927 /*ATOMBIOS expects pixel rate adjusted by deep color ratio*/ 927 /*ATOMBIOS expects pixel rate adjusted by deep color ratio*/
928 bp_pc_params.controller_id = pix_clk_params->controller_id; 928 bp_pc_params.controller_id = pix_clk_params->controller_id;
929 bp_pc_params.pll_id = clock_source->id; 929 bp_pc_params.pll_id = clock_source->id;
930 bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk; 930 bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz;
931 bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; 931 bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
932 bp_pc_params.signal_type = pix_clk_params->signal_type; 932 bp_pc_params.signal_type = pix_clk_params->signal_type;
933 933
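In the FPGA path above, the DTO output rate follows the usual ref * PHASE / MODULO relation; because MODULO is programmed with the 700 MHz reference itself (7000000 in 100 Hz units), the output simply tracks PHASE. Illustrative form, assuming that standard DTO relation:

#include <stdint.h>

static uint64_t dto_out_100hz(uint64_t ref_100hz, uint64_t phase,
                              uint64_t modulo)
{
        return ref_100hz * phase / modulo;  /* == phase when modulo == ref */
}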
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index dea40b322191..c2926cf19dee 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -51,7 +51,6 @@
51#define PSR_SET_WAITLOOP 0x31 51#define PSR_SET_WAITLOOP 0x31
52#define MCP_INIT_DMCU 0x88 52#define MCP_INIT_DMCU 0x88
53#define MCP_INIT_IRAM 0x89 53#define MCP_INIT_IRAM 0x89
54#define MCP_DMCU_VERSION 0x90
55#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L 54#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
56 55
57static bool dce_dmcu_init(struct dmcu *dmcu) 56static bool dce_dmcu_init(struct dmcu *dmcu)
@@ -317,38 +316,11 @@ static void dce_get_psr_wait_loop(
317} 316}
318 317
319#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 318#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
320static void dcn10_get_dmcu_state(struct dmcu *dmcu)
321{
322 struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
323 uint32_t dmcu_state_offset = 0xf6;
324
325 /* Enable write access to IRAM */
326 REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
327 IRAM_HOST_ACCESS_EN, 1,
328 IRAM_RD_ADDR_AUTO_INC, 1);
329
330 REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
331
332 /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
333 REG_WRITE(DMCU_IRAM_RD_CTRL, dmcu_state_offset);
334
335 /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
336 dmcu->dmcu_state = REG_READ(DMCU_IRAM_RD_DATA);
337
338 /* Disable write access to IRAM to allow dynamic sleep state */
339 REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
340 IRAM_HOST_ACCESS_EN, 0,
341 IRAM_RD_ADDR_AUTO_INC, 0);
342}
343
344static void dcn10_get_dmcu_version(struct dmcu *dmcu) 319static void dcn10_get_dmcu_version(struct dmcu *dmcu)
345{ 320{
346 struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); 321 struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
347 uint32_t dmcu_version_offset = 0xf1; 322 uint32_t dmcu_version_offset = 0xf1;
348 323
349 /* Clear scratch */
350 REG_WRITE(DC_DMCU_SCRATCH, 0);
351
352 /* Enable write access to IRAM */ 324 /* Enable write access to IRAM */
353 REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, 325 REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
354 IRAM_HOST_ACCESS_EN, 1, 326 IRAM_HOST_ACCESS_EN, 1,
@@ -359,85 +331,74 @@ static void dcn10_get_dmcu_version(struct dmcu *dmcu)
359 /* Write address to IRAM_RD_ADDR and read from DATA register */ 331 /* Write address to IRAM_RD_ADDR and read from DATA register */
360 REG_WRITE(DMCU_IRAM_RD_CTRL, dmcu_version_offset); 332 REG_WRITE(DMCU_IRAM_RD_CTRL, dmcu_version_offset);
361 dmcu->dmcu_version.interface_version = REG_READ(DMCU_IRAM_RD_DATA); 333 dmcu->dmcu_version.interface_version = REG_READ(DMCU_IRAM_RD_DATA);
362 dmcu->dmcu_version.year = ((REG_READ(DMCU_IRAM_RD_DATA) << 8) | 334 dmcu->dmcu_version.abm_version = REG_READ(DMCU_IRAM_RD_DATA);
335 dmcu->dmcu_version.psr_version = REG_READ(DMCU_IRAM_RD_DATA);
336 dmcu->dmcu_version.build_version = ((REG_READ(DMCU_IRAM_RD_DATA) << 8) |
363 REG_READ(DMCU_IRAM_RD_DATA)); 337 REG_READ(DMCU_IRAM_RD_DATA));
364 dmcu->dmcu_version.month = REG_READ(DMCU_IRAM_RD_DATA);
365 dmcu->dmcu_version.date = REG_READ(DMCU_IRAM_RD_DATA);
366 338
367 /* Disable write access to IRAM to allow dynamic sleep state */ 339 /* Disable write access to IRAM to allow dynamic sleep state */
368 REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, 340 REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
369 IRAM_HOST_ACCESS_EN, 0, 341 IRAM_HOST_ACCESS_EN, 0,
370 IRAM_RD_ADDR_AUTO_INC, 0); 342 IRAM_RD_ADDR_AUTO_INC, 0);
371
372 /* Send MCP command message to DMCU to get version reply from FW.
373 * We expect this version should match the one in IRAM, otherwise
374 * something is wrong with DMCU and we should fail and disable UC.
375 */
376 REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
377
378 /* Set command to get DMCU version from microcontroller */
379 REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
380 MCP_DMCU_VERSION);
381
382 /* Notify microcontroller of new command */
383 REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
384
385 /* Ensure command has been executed before continuing */
386 REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
387
388 /* Somehow version does not match, so fail and return version 0 */
389 if (dmcu->dmcu_version.interface_version != REG_READ(DC_DMCU_SCRATCH))
390 dmcu->dmcu_version.interface_version = 0;
391} 343}
392 344
393static bool dcn10_dmcu_init(struct dmcu *dmcu) 345static bool dcn10_dmcu_init(struct dmcu *dmcu)
394{ 346{
395 struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); 347 struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
348 bool status = false;
396 349
397 /* DMCU FW should populate the scratch register if running */ 350 /* Definition of DC_DMCU_SCRATCH
398 if (REG_READ(DC_DMCU_SCRATCH) == 0) 351 * 0 : firmware not loaded
399 return false; 352 * 1 : PSP loaded DMCU FW but it is not initialized
400 353 * 2 : Firmware already initialized
401 /* Check state is uninitialized */ 354 */
402 dcn10_get_dmcu_state(dmcu); 355 dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH);
403
404 /* If microcontroller is already initialized, do nothing */
405 if (dmcu->dmcu_state == DMCU_RUNNING)
406 return true;
407
408 /* Retrieve and cache the DMCU firmware version. */
409 dcn10_get_dmcu_version(dmcu);
410
411 /* Check interface version to confirm firmware is loaded and running */
412 if (dmcu->dmcu_version.interface_version == 0)
413 return false;
414 356
415 /* Wait until microcontroller is ready to process interrupt */ 357 switch (dmcu->dmcu_state) {
416 REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); 358 case DMCU_UNLOADED:
359 status = false;
360 break;
361 case DMCU_LOADED_UNINITIALIZED:
362 /* Wait until microcontroller is ready to process interrupt */
363 REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
417 364
418 /* Set initialized ramping boundary value */ 365 /* Set initialized ramping boundary value */
419 REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF); 366 REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF);
420 367
421 /* Set command to initialize microcontroller */ 368 /* Set command to initialize microcontroller */
422 REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, 369 REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
423 MCP_INIT_DMCU); 370 MCP_INIT_DMCU);
424 371
425 /* Notify microcontroller of new command */ 372 /* Notify microcontroller of new command */
426 REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); 373 REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
427 374
428 /* Ensure command has been executed before continuing */ 375 /* Ensure command has been executed before continuing */
429 REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); 376 REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
430 377
431 // Check state is initialized 378 // Check state is initialized
432 dcn10_get_dmcu_state(dmcu); 379 dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH);
433 380
434 // If microcontroller is not in running state, fail 381 // If microcontroller is not in running state, fail
435 if (dmcu->dmcu_state != DMCU_RUNNING) 382 if (dmcu->dmcu_state == DMCU_RUNNING) {
436 return false; 383 /* Retrieve and cache the DMCU firmware version. */
384 dcn10_get_dmcu_version(dmcu);
385 status = true;
386 } else
387 status = false;
437 388
438 return true; 389 break;
390 case DMCU_RUNNING:
391 status = true;
392 break;
393 default:
394 status = false;
395 break;
396 }
397
398 return status;
439} 399}
440 400
401
441static bool dcn10_dmcu_load_iram(struct dmcu *dmcu, 402static bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
442 unsigned int start_offset, 403 unsigned int start_offset,
443 const char *src, 404 const char *src,
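The rewritten init above replaces the IRAM state read with a tri-state handshake in DC_DMCU_SCRATCH. A condensed sketch of the flow (enum values inferred from the comment in the hunk; the kick callback stands in for the MASTER_COMM/MCP_INIT_DMCU sequence):

#include <stdbool.h>

enum dmcu_scratch_state {
        DMCU_UNLOADED = 0,              /* firmware not loaded */
        DMCU_LOADED_UNINITIALIZED = 1,  /* PSP loaded FW, not yet kicked */
        DMCU_RUNNING = 2,               /* firmware initialized */
};

static bool dmcu_init_flow(enum dmcu_scratch_state s,
                           enum dmcu_scratch_state (*kick_mcp_init)(void))
{
        if (s == DMCU_LOADED_UNINITIALIZED)
                s = kick_mcp_init();    /* MCP_INIT_DMCU, then re-read scratch */
        return s == DMCU_RUNNING;       /* version is cached only on success */
}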
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index c83a7f05f14c..956bdf14503f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -133,6 +133,10 @@
133 SR(DCHUB_AGP_TOP), \ 133 SR(DCHUB_AGP_TOP), \
134 BL_REG_LIST() 134 BL_REG_LIST()
135 135
136#define HWSEQ_VG20_REG_LIST() \
137 HWSEQ_DCE120_REG_LIST(),\
138 MMHUB_SR(MC_VM_XGMI_LFB_CNTL)
139
136#define HWSEQ_DCE112_REG_LIST() \ 140#define HWSEQ_DCE112_REG_LIST() \
137 HWSEQ_DCE10_REG_LIST(), \ 141 HWSEQ_DCE10_REG_LIST(), \
138 HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \ 142 HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
@@ -298,6 +302,7 @@ struct dce_hwseq_registers {
298 uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB; 302 uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
299 uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR; 303 uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
300 uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR; 304 uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
305 uint32_t MC_VM_XGMI_LFB_CNTL;
301 uint32_t AZALIA_AUDIO_DTO; 306 uint32_t AZALIA_AUDIO_DTO;
302 uint32_t AZALIA_CONTROLLER_CLOCK_GATING; 307 uint32_t AZALIA_CONTROLLER_CLOCK_GATING;
303}; 308};
@@ -382,6 +387,11 @@ struct dce_hwseq_registers {
382 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ 387 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
383 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) 388 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
384 389
390#define HWSEQ_VG20_MASK_SH_LIST(mask_sh)\
391 HWSEQ_DCE12_MASK_SH_LIST(mask_sh),\
392 HWS_SF(, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION, mask_sh),\
393 HWS_SF(, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION, mask_sh)
394
385#define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\ 395#define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\
386 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\ 396 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
387 HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \ 397 HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
@@ -470,6 +480,8 @@ struct dce_hwseq_registers {
470 type PHYSICAL_PAGE_NUMBER_MSB;\ 480 type PHYSICAL_PAGE_NUMBER_MSB;\
471 type PHYSICAL_PAGE_NUMBER_LSB;\ 481 type PHYSICAL_PAGE_NUMBER_LSB;\
472 type LOGICAL_ADDR; \ 482 type LOGICAL_ADDR; \
483 type PF_LFB_REGION;\
484 type PF_MAX_REGION;\
473 type ENABLE_L1_TLB;\ 485 type ENABLE_L1_TLB;\
474 type SYSTEM_ACCESS_MODE;\ 486 type SYSTEM_ACCESS_MODE;\
475 type LVTMA_BLON;\ 487 type LVTMA_BLON;\
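The new HWS_SF() entries expose PF_LFB_REGION and PF_MAX_REGION as generic shift/mask pairs, so hwseq code can read the xGMI LFB fields without hard-coding the register layout. Conceptually the extraction reduces to (names here are illustrative, not the DC macros):

#include <stdint.h>

static inline uint32_t reg_get_field(uint32_t reg_val, uint32_t mask,
                                     uint32_t shift)
{
        return (reg_val & mask) >> shift;
}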
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 3e18ea84b1f9..314c04a915d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -599,12 +599,12 @@ bool dce110_link_encoder_validate_dvi_output(
599 if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK || 599 if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
600 connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) && 600 connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) &&
601 signal != SIGNAL_TYPE_HDMI_TYPE_A && 601 signal != SIGNAL_TYPE_HDMI_TYPE_A &&
602 crtc_timing->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK) 602 crtc_timing->pix_clk_100hz > (TMDS_MAX_PIXEL_CLOCK * 10))
603 return false; 603 return false;
604 if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK) 604 if (crtc_timing->pix_clk_100hz < (TMDS_MIN_PIXEL_CLOCK * 10))
605 return false; 605 return false;
606 606
607 if (crtc_timing->pix_clk_khz > max_pixel_clock) 607 if (crtc_timing->pix_clk_100hz > (max_pixel_clock * 10))
608 return false; 608 return false;
609 609
610 /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */ 610 /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */
@@ -788,7 +788,7 @@ bool dce110_link_encoder_validate_output_with_stream(
788 case SIGNAL_TYPE_DVI_DUAL_LINK: 788 case SIGNAL_TYPE_DVI_DUAL_LINK:
789 is_valid = dce110_link_encoder_validate_dvi_output( 789 is_valid = dce110_link_encoder_validate_dvi_output(
790 enc110, 790 enc110,
791 stream->sink->link->connector_signal, 791 stream->link->connector_signal,
792 stream->signal, 792 stream->signal,
793 &stream->timing); 793 &stream->timing);
794 break; 794 break;
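The DVI validation now compares a 100 Hz-unit timing against kHz-unit limits by scaling the limits up by 10; an equivalent range check (illustrative):

#include <stdint.h>
#include <stdbool.h>

static bool dvi_pix_clk_in_range(uint32_t pix_clk_100hz,
                                 uint32_t min_khz, uint32_t max_khz)
{
        return pix_clk_100hz >= min_khz * 10 &&
               pix_clk_100hz <= max_khz * 10;
}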
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index cce0d18f91da..1fa2d4fd7a35 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -288,9 +288,18 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
288#endif 288#endif
289 289
290 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); 290 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
291 291 struct dc_crtc_timing hw_crtc_timing = *crtc_timing;
292 if (hw_crtc_timing.flags.INTERLACE) {
293 /* the input timing is in VESA spec format with the INTERLACE flag = 1 */
294 hw_crtc_timing.v_total /= 2;
295 hw_crtc_timing.v_border_top /= 2;
296 hw_crtc_timing.v_addressable /= 2;
297 hw_crtc_timing.v_border_bottom /= 2;
298 hw_crtc_timing.v_front_porch /= 2;
299 hw_crtc_timing.v_sync_width /= 2;
300 }
292 /* set pixel encoding */ 301 /* set pixel encoding */
293 switch (crtc_timing->pixel_encoding) { 302 switch (hw_crtc_timing.pixel_encoding) {
294 case PIXEL_ENCODING_YCBCR422: 303 case PIXEL_ENCODING_YCBCR422:
295 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, 304 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
296 DP_PIXEL_ENCODING_TYPE_YCBCR422); 305 DP_PIXEL_ENCODING_TYPE_YCBCR422);
@@ -299,8 +308,8 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
299 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, 308 REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
300 DP_PIXEL_ENCODING_TYPE_YCBCR444); 309 DP_PIXEL_ENCODING_TYPE_YCBCR444);
301 310
302 if (crtc_timing->flags.Y_ONLY) 311 if (hw_crtc_timing.flags.Y_ONLY)
303 if (crtc_timing->display_color_depth != COLOR_DEPTH_666) 312 if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666)
304 /* HW testing only, no use case yet. 313 /* HW testing only, no use case yet.
305 * Color depth of Y-only could be 314 * Color depth of Y-only could be
306 * 8, 10, 12, 16 bits */ 315 * 8, 10, 12, 16 bits */
@@ -335,7 +344,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
335 344
336 /* set color depth */ 345 /* set color depth */
337 346
338 switch (crtc_timing->display_color_depth) { 347 switch (hw_crtc_timing.display_color_depth) {
339 case COLOR_DEPTH_666: 348 case COLOR_DEPTH_666:
340 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, 349 REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
341 0); 350 0);
@@ -363,7 +372,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
363 372
364 373
365#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 374#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
366 switch (crtc_timing->display_color_depth) { 375 switch (hw_crtc_timing.display_color_depth) {
367 case COLOR_DEPTH_666: 376 case COLOR_DEPTH_666:
368 colorimetry_bpc = 0; 377 colorimetry_bpc = 0;
369 break; 378 break;
@@ -401,9 +410,9 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
401 misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */ 410 misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
402 misc1 = misc1 & ~0x80; /* bit7 = 0*/ 411 misc1 = misc1 & ~0x80; /* bit7 = 0*/
403 dynamic_range_ycbcr = 0; /*bt601*/ 412 dynamic_range_ycbcr = 0; /*bt601*/
404 if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) 413 if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
405 misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ 414 misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
406 else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) 415 else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
407 misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ 416 misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
408 break; 417 break;
409 case COLOR_SPACE_YCBCR709: 418 case COLOR_SPACE_YCBCR709:
@@ -411,9 +420,9 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
411 misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */ 420 misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
412 misc1 = misc1 & ~0x80; /* bit7 = 0*/ 421 misc1 = misc1 & ~0x80; /* bit7 = 0*/
413 dynamic_range_ycbcr = 1; /*bt709*/ 422 dynamic_range_ycbcr = 1; /*bt709*/
414 if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) 423 if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
415 misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ 424 misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
416 else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) 425 else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
417 misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ 426 misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
418 break; 427 break;
419 case COLOR_SPACE_2020_RGB_LIMITEDRANGE: 428 case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
@@ -453,27 +462,27 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
453 */ 462 */
454 if (REG(DP_MSA_TIMING_PARAM1)) 463 if (REG(DP_MSA_TIMING_PARAM1))
455 REG_SET_2(DP_MSA_TIMING_PARAM1, 0, 464 REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
456 DP_MSA_HTOTAL, crtc_timing->h_total, 465 DP_MSA_HTOTAL, hw_crtc_timing.h_total,
457 DP_MSA_VTOTAL, crtc_timing->v_total); 466 DP_MSA_VTOTAL, hw_crtc_timing.v_total);
458#endif 467#endif
459 468
460 /* calculate from VESA timing parameters 469 /* calculate from VESA timing parameters
461 * h_active_start related to leading edge of sync 470 * h_active_start related to leading edge of sync
462 */ 471 */
463 472
464 h_blank = crtc_timing->h_total - crtc_timing->h_border_left - 473 h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left -
465 crtc_timing->h_addressable - crtc_timing->h_border_right; 474 hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right;
466 475
467 h_back_porch = h_blank - crtc_timing->h_front_porch - 476 h_back_porch = h_blank - hw_crtc_timing.h_front_porch -
468 crtc_timing->h_sync_width; 477 hw_crtc_timing.h_sync_width;
469 478
470 /* start at beginning of left border 479 /* start at beginning of left border
471 h_active_start = crtc_timing->h_sync_width + h_back_porch; 480 h_active_start = hw_crtc_timing.h_sync_width + h_back_porch;
472 481
473 482
474 v_active_start = crtc_timing->v_total - crtc_timing->v_border_top - 483 v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top -
475 crtc_timing->v_addressable - crtc_timing->v_border_bottom - 484 hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom -
476 crtc_timing->v_front_porch; 485 hw_crtc_timing.v_front_porch;
477 486
478 487
479#if defined(CONFIG_DRM_AMD_DC_DCN1_0) 488#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -486,21 +495,21 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
486 if (REG(DP_MSA_TIMING_PARAM3)) 495 if (REG(DP_MSA_TIMING_PARAM3))
487 REG_SET_4(DP_MSA_TIMING_PARAM3, 0, 496 REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
488 DP_MSA_HSYNCWIDTH, 497 DP_MSA_HSYNCWIDTH,
489 crtc_timing->h_sync_width, 498 hw_crtc_timing.h_sync_width,
490 DP_MSA_HSYNCPOLARITY, 499 DP_MSA_HSYNCPOLARITY,
491 !crtc_timing->flags.HSYNC_POSITIVE_POLARITY, 500 !hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY,
492 DP_MSA_VSYNCWIDTH, 501 DP_MSA_VSYNCWIDTH,
493 crtc_timing->v_sync_width, 502 hw_crtc_timing.v_sync_width,
494 DP_MSA_VSYNCPOLARITY, 503 DP_MSA_VSYNCPOLARITY,
495 !crtc_timing->flags.VSYNC_POSITIVE_POLARITY); 504 !hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY);
496 505
497 /* HWIDTH includes border or overscan */ 506 /* HWIDTH includes border or overscan */
498 if (REG(DP_MSA_TIMING_PARAM4)) 507 if (REG(DP_MSA_TIMING_PARAM4))
499 REG_SET_2(DP_MSA_TIMING_PARAM4, 0, 508 REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
500 DP_MSA_HWIDTH, crtc_timing->h_border_left + 509 DP_MSA_HWIDTH, hw_crtc_timing.h_border_left +
501 crtc_timing->h_addressable + crtc_timing->h_border_right, 510 hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right,
502 DP_MSA_VHEIGHT, crtc_timing->v_border_top + 511 DP_MSA_VHEIGHT, hw_crtc_timing.v_border_top +
503 crtc_timing->v_addressable + crtc_timing->v_border_bottom); 512 hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom);
504#endif 513#endif
505 } 514 }
506#endif 515#endif
@@ -662,7 +671,7 @@ static void dce110_stream_encoder_dvi_set_stream_attribute(
662 cntl.signal = is_dual_link ? 671 cntl.signal = is_dual_link ?
663 SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK; 672 SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
664 cntl.enable_dp_audio = false; 673 cntl.enable_dp_audio = false;
665 cntl.pixel_clock = crtc_timing->pix_clk_khz; 674 cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10;
666 cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR; 675 cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
667 676
668 if (enc110->base.bp->funcs->encoder_control( 677 if (enc110->base.bp->funcs->encoder_control(
@@ -686,7 +695,7 @@ static void dce110_stream_encoder_lvds_set_stream_attribute(
686 cntl.engine_id = enc110->base.id; 695 cntl.engine_id = enc110->base.id;
687 cntl.signal = SIGNAL_TYPE_LVDS; 696 cntl.signal = SIGNAL_TYPE_LVDS;
688 cntl.enable_dp_audio = false; 697 cntl.enable_dp_audio = false;
689 cntl.pixel_clock = crtc_timing->pix_clk_khz; 698 cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10;
690 cntl.lanes_number = LANE_COUNT_FOUR; 699 cntl.lanes_number = LANE_COUNT_FOUR;
691 700
692 if (enc110->base.bp->funcs->encoder_control( 701 if (enc110->base.bp->funcs->encoder_control(
@@ -1575,6 +1584,14 @@ static void setup_stereo_sync(
1575 REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable); 1584 REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable);
1576} 1585}
1577 1586
1587static void dig_connect_to_otg(
1588 struct stream_encoder *enc,
1589 int tg_inst)
1590{
1591 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
1592
1593 REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst);
1594}
1578 1595
1579static const struct stream_encoder_funcs dce110_str_enc_funcs = { 1596static const struct stream_encoder_funcs dce110_str_enc_funcs = {
1580 .dp_set_stream_attribute = 1597 .dp_set_stream_attribute =
@@ -1609,7 +1626,7 @@ static const struct stream_encoder_funcs dce110_str_enc_funcs = {
1609 .hdmi_audio_disable = dce110_se_hdmi_audio_disable, 1626 .hdmi_audio_disable = dce110_se_hdmi_audio_disable,
1610 .setup_stereo_sync = setup_stereo_sync, 1627 .setup_stereo_sync = setup_stereo_sync,
1611 .set_avmute = dce110_stream_encoder_set_avmute, 1628 .set_avmute = dce110_stream_encoder_set_avmute,
1612 1629 .dig_connect_to_otg = dig_connect_to_otg,
1613}; 1630};
1614 1631
1615void dce110_stream_encoder_construct( 1632void dce110_stream_encoder_construct(
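dig_connect_to_otg() is the new stream_encoder_funcs hook that selects the timing-generator source for a DIG front end from driver code, replacing the VBIOS CrtcSourceSelect path removed from dce110_hw_sequencer.c below. Expected call shape, as used in that file:

/* from apply_single_controller_ctx_to_hw(), for non-virtual signals */
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
                pipe_ctx->stream_res.stream_enc,
                pipe_ctx->stream_res.tg->inst);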
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
index 6c28229c76eb..f9cdf2b5242c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
@@ -199,7 +199,8 @@
199 SE_SF(DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\ 199 SE_SF(DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
200 SE_SF(DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\ 200 SE_SF(DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
201 SE_SF(DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\ 201 SE_SF(DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
202 SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh) 202 SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
203 SE_SF(DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh)
203 204
204#define SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh)\ 205#define SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh)\
205 SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) 206 SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
@@ -284,7 +285,8 @@
284 SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\ 285 SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
285 SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ 286 SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
286 SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\ 287 SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
287 SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh) 288 SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
289 SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh)
288 290
289#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\ 291#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
290 SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) 292 SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)
@@ -494,6 +496,7 @@ struct dce_stream_encoder_shift {
494 uint8_t HDMI_DB_DISABLE; 496 uint8_t HDMI_DB_DISABLE;
495 uint8_t DP_VID_N_MUL; 497 uint8_t DP_VID_N_MUL;
496 uint8_t DP_VID_M_DOUBLE_VALUE_EN; 498 uint8_t DP_VID_M_DOUBLE_VALUE_EN;
499 uint8_t DIG_SOURCE_SELECT;
497}; 500};
498 501
499struct dce_stream_encoder_mask { 502struct dce_stream_encoder_mask {
@@ -624,6 +627,7 @@ struct dce_stream_encoder_mask {
624 uint32_t HDMI_DB_DISABLE; 627 uint32_t HDMI_DB_DISABLE;
625 uint32_t DP_VID_N_MUL; 628 uint32_t DP_VID_N_MUL;
626 uint32_t DP_VID_M_DOUBLE_VALUE_EN; 629 uint32_t DP_VID_M_DOUBLE_VALUE_EN;
630 uint32_t DIG_SOURCE_SELECT;
627}; 631};
628 632
629struct dce110_stream_enc_registers { 633struct dce110_stream_enc_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 6ae51a5dfc04..23044e6723e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -76,6 +76,7 @@
76 76
77#ifndef mmBIOS_SCRATCH_2 77#ifndef mmBIOS_SCRATCH_2
78 #define mmBIOS_SCRATCH_2 0x05CB 78 #define mmBIOS_SCRATCH_2 0x05CB
79 #define mmBIOS_SCRATCH_3 0x05CC
79 #define mmBIOS_SCRATCH_6 0x05CF 80 #define mmBIOS_SCRATCH_6 0x05CF
80#endif 81#endif
81 82
@@ -365,6 +366,7 @@ static const struct dce_abm_mask abm_mask = {
365#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 366#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
366 367
367static const struct bios_registers bios_regs = { 368static const struct bios_registers bios_regs = {
369 .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
368 .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 370 .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
369}; 371};
370 372
@@ -587,7 +589,7 @@ struct output_pixel_processor *dce100_opp_create(
587 return &opp->base; 589 return &opp->base;
588} 590}
589 591
590struct aux_engine *dce100_aux_engine_create( 592struct dce_aux *dce100_aux_engine_create(
591 struct dc_context *ctx, 593 struct dc_context *ctx,
592 uint32_t inst) 594 uint32_t inst)
593{ 595{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 52d50e24a995..7b23239d33fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -62,8 +62,6 @@ static const struct dce110_compressor_reg_offsets reg_offsets[] = {
62} 62}
63}; 63};
64 64
65static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
66
67static uint32_t align_to_chunks_number_per_line(uint32_t pixels) 65static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
68{ 66{
69 return 256 * ((pixels + 255) / 256); 67 return 256 * ((pixels + 255) / 256);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4bf24758217f..db0ef41eb91c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -614,55 +614,6 @@ dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
614 return true; 614 return true;
615} 615}
616 616
617static enum dc_status bios_parser_crtc_source_select(
618 struct pipe_ctx *pipe_ctx)
619{
620 struct dc_bios *dcb;
621 /* call VBIOS table to set CRTC source for the HW
622 * encoder block
623 * note: video bios clears all FMT setting here. */
624 struct bp_crtc_source_select crtc_source_select = {0};
625 const struct dc_sink *sink = pipe_ctx->stream->sink;
626
627 crtc_source_select.engine_id = pipe_ctx->stream_res.stream_enc->id;
628 crtc_source_select.controller_id = pipe_ctx->stream_res.tg->inst + 1;
629 /*TODO: Need to un-hardcode color depth, dp_audio and account for
630 * the case where signal and sink signal is different (translator
631 * encoder)*/
632 crtc_source_select.signal = pipe_ctx->stream->signal;
633 crtc_source_select.enable_dp_audio = false;
634 crtc_source_select.sink_signal = pipe_ctx->stream->signal;
635
636 switch (pipe_ctx->stream->timing.display_color_depth) {
637 case COLOR_DEPTH_666:
638 crtc_source_select.display_output_bit_depth = PANEL_6BIT_COLOR;
639 break;
640 case COLOR_DEPTH_888:
641 crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR;
642 break;
643 case COLOR_DEPTH_101010:
644 crtc_source_select.display_output_bit_depth = PANEL_10BIT_COLOR;
645 break;
646 case COLOR_DEPTH_121212:
647 crtc_source_select.display_output_bit_depth = PANEL_12BIT_COLOR;
648 break;
649 default:
650 BREAK_TO_DEBUGGER();
651 crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR;
652 break;
653 }
654
655 dcb = sink->ctx->dc_bios;
656
657 if (BP_RESULT_OK != dcb->funcs->crtc_source_select(
658 dcb,
659 &crtc_source_select)) {
660 return DC_ERROR_UNEXPECTED;
661 }
662
663 return DC_OK;
664}
665
666void dce110_update_info_frame(struct pipe_ctx *pipe_ctx) 617void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
667{ 618{
668 bool is_hdmi; 619 bool is_hdmi;
@@ -692,10 +643,10 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
692void dce110_enable_stream(struct pipe_ctx *pipe_ctx) 643void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
693{ 644{
694 enum dc_lane_count lane_count = 645 enum dc_lane_count lane_count =
695 pipe_ctx->stream->sink->link->cur_link_settings.lane_count; 646 pipe_ctx->stream->link->cur_link_settings.lane_count;
696 647
697 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 648 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
698 struct dc_link *link = pipe_ctx->stream->sink->link; 649 struct dc_link *link = pipe_ctx->stream->link;
699 650
700 651
701 uint32_t active_total_with_borders; 652 uint32_t active_total_with_borders;
@@ -1000,7 +951,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
1000 951
1001 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); 952 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
1002 953
1003 if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) 954 if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
1004 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ 955 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
1005 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); 956 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
1006 /* un-mute audio */ 957 /* un-mute audio */
@@ -1017,6 +968,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1017 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( 968 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
1018 pipe_ctx->stream_res.stream_enc, true); 969 pipe_ctx->stream_res.stream_enc, true);
1019 if (pipe_ctx->stream_res.audio) { 970 if (pipe_ctx->stream_res.audio) {
971 struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
972
1020 if (option != KEEP_ACQUIRED_RESOURCE || 973 if (option != KEEP_ACQUIRED_RESOURCE ||
1021 !dc->debug.az_endpoint_mute_only) { 974 !dc->debug.az_endpoint_mute_only) {
1022 /*only disable az_endpoint on power down or free*/ 975 /*only disable az_endpoint on power down or free*/
@@ -1036,6 +989,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1036 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); 989 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
1037 pipe_ctx->stream_res.audio = NULL; 990 pipe_ctx->stream_res.audio = NULL;
1038 } 991 }
992 if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
993 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
994 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
1039 995
1040 /* TODO: notify audio driver if audio modes list changed 996 /* TODO: notify audio driver if audio modes list changed
1041 * add audio mode list change flag */ 997 * add audio mode list change flag */
@@ -1048,7 +1004,7 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1048void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option) 1004void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
1049{ 1005{
1050 struct dc_stream_state *stream = pipe_ctx->stream; 1006 struct dc_stream_state *stream = pipe_ctx->stream;
1051 struct dc_link *link = stream->sink->link; 1007 struct dc_link *link = stream->link;
1052 struct dc *dc = pipe_ctx->stream->ctx->dc; 1008 struct dc *dc = pipe_ctx->stream->ctx->dc;
1053 1009
1054 if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) 1010 if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
@@ -1073,11 +1029,11 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
1073{ 1029{
1074 struct encoder_unblank_param params = { { 0 } }; 1030 struct encoder_unblank_param params = { { 0 } };
1075 struct dc_stream_state *stream = pipe_ctx->stream; 1031 struct dc_stream_state *stream = pipe_ctx->stream;
1076 struct dc_link *link = stream->sink->link; 1032 struct dc_link *link = stream->link;
1077 1033
1078 /* only 3 items below are used by unblank */ 1034 /* only 3 items below are used by unblank */
1079 params.pixel_clk_khz = 1035 params.pixel_clk_khz =
1080 pipe_ctx->stream->timing.pix_clk_khz; 1036 pipe_ctx->stream->timing.pix_clk_100hz / 10;
1081 params.link_settings.link_rate = link_settings->link_rate; 1037 params.link_settings.link_rate = link_settings->link_rate;
1082 1038
1083 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 1039 if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1090,7 +1046,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
1090void dce110_blank_stream(struct pipe_ctx *pipe_ctx) 1046void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
1091{ 1047{
1092 struct dc_stream_state *stream = pipe_ctx->stream; 1048 struct dc_stream_state *stream = pipe_ctx->stream;
1093 struct dc_link *link = stream->sink->link; 1049 struct dc_link *link = stream->link;
1094 1050
1095 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { 1051 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1096 link->dc->hwss.edp_backlight_control(link, false); 1052 link->dc->hwss.edp_backlight_control(link, false);
@@ -1163,27 +1119,27 @@ static void build_audio_output(
 		stream->timing.flags.INTERLACE;

 	audio_output->crtc_info.refresh_rate =
-		(stream->timing.pix_clk_khz*1000)/
+		(stream->timing.pix_clk_100hz*10000)/
 		(stream->timing.h_total*stream->timing.v_total);

 	audio_output->crtc_info.color_depth =
 		stream->timing.display_color_depth;

 	audio_output->crtc_info.requested_pixel_clock =
-		pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+		pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;

 	audio_output->crtc_info.calculated_pixel_clock =
-		pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+		pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;

 /*for HDMI, audio ACR is with deep color ratio factor*/
 	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) &&
 		audio_output->crtc_info.requested_pixel_clock ==
-			stream->timing.pix_clk_khz) {
+			(stream->timing.pix_clk_100hz / 10)) {
 		if (pipe_ctx->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
 			audio_output->crtc_info.requested_pixel_clock =
 				audio_output->crtc_info.requested_pixel_clock/2;
 			audio_output->crtc_info.calculated_pixel_clock =
-				pipe_ctx->stream_res.pix_clk_params.requested_pix_clk/2;
+				pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz/20;

 		}
 	}
@@ -1385,12 +1341,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 	/*  */
 	dc->hwss.enable_stream_timing(pipe_ctx, context, dc);

-	/* TODO: move to stream encoder */
 	if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
-		if (DC_OK != bios_parser_crtc_source_select(pipe_ctx)) {
-			BREAK_TO_DEBUGGER();
-			return DC_ERROR_UNEXPECTED;
-		}
+		pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
+			pipe_ctx->stream_res.stream_enc,
+			pipe_ctx->stream_res.tg->inst);

 	pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
 		pipe_ctx->stream_res.opp,
@@ -1408,7 +1362,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 
 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;

-	pipe_ctx->stream->sink->link->psr_enabled = false;
+	pipe_ctx->stream->link->psr_enabled = false;

 	return DC_OK;
 }
@@ -1518,7 +1472,7 @@ static struct dc_link *get_link_for_edp(struct dc *dc)
 	return NULL;
 }

-static struct dc_link *get_link_for_edp_not_in_use(
+static struct dc_link *get_link_for_edp_to_turn_off(
 		struct dc *dc,
 		struct dc_state *context)
 {
@@ -1527,8 +1481,12 @@ static struct dc_link *get_link_for_edp_not_in_use(
 
 	/* check if eDP panel is supposed to be set mode, if yes, no need to disable */
 	for (i = 0; i < context->stream_count; i++) {
-		if (context->streams[i]->signal == SIGNAL_TYPE_EDP)
-			return NULL;
+		if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
+			if (context->streams[i]->dpms_off == true)
+				return context->streams[i]->sink->link;
+			else
+				return NULL;
+		}
 	}

 	/* check if there is an eDP panel not in use */
@@ -1555,7 +1513,6 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 	int i;
 	struct dc_link *edp_link_to_turnoff = NULL;
 	struct dc_link *edp_link = get_link_for_edp(dc);
-	struct dc_bios *bios = dc->ctx->dc_bios;
 	bool can_edp_fast_boot_optimize = false;
 	bool apply_edp_fast_boot_optimization = false;

@@ -1571,7 +1528,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 	}

 	if (can_edp_fast_boot_optimize)
-		edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
+		edp_link_to_turnoff = get_link_for_edp_to_turn_off(dc, context);

 	/* if OS doesn't light up eDP and eDP link is available, we want to disable
 	 * If resuming from S4/S5, the optimization should be applied.
@@ -1582,20 +1539,6 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 			if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
 				context->streams[i]->apply_edp_fast_boot_optimization = true;
 				apply_edp_fast_boot_optimization = true;
-
-				/* When after S4 and S5, vbios may post edp and previous dpms_off
-				 * doesn't make sense.
-				 * Update dpms_off state to align hw and sw state via check
-				 * vBios scratch register.
-				 */
-				if (bios->funcs->is_active_display) {
-					const struct connector_device_tag_info *device_tag = &(edp_link->device_tag);
-
-					if (bios->funcs->is_active_display(bios,
-							context->streams[i]->signal,
-							device_tag))
-						context->streams[i]->dpms_off = false;
-				}
 			}
 		}
 	}
@@ -1624,8 +1567,8 @@ static uint32_t compute_pstate_blackout_duration(
 	pstate_blackout_duration_ns = 1000 * blackout_duration.value >> 24;

 	total_dest_line_time_ns = 1000000UL *
-		stream->timing.h_total /
-		stream->timing.pix_clk_khz +
+		(stream->timing.h_total * 10) /
+		stream->timing.pix_clk_100hz +
 		pstate_blackout_duration_ns;

 	return total_dest_line_time_ns;
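The rescaling keeps the computed line time identical: multiplying h_total by 10 exactly cancels the factor of 10 between kHz and 100 Hz units. A worked example with an assumed 1080p timing (h_total = 2200, 148.5 MHz pixel clock):

	uint32_t h_total = 2200;
	uint32_t pix_clk_100hz = 1485000;		/* 148.5 MHz */
	uint64_t line_time_ns = 1000000ULL * (h_total * 10) / pix_clk_100hz;
	/* 1000000 * 22000 / 1485000 ~= 14814 ns per line, the same result
	 * the old formula gave in kHz: 1000000 * 2200 / 148500. */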
@@ -1813,18 +1756,15 @@ static bool should_enable_fbc(struct dc *dc,
 	if (i == dc->res_pool->pipe_count)
 		return false;

-	if (!pipe_ctx->stream->sink)
-		return false;
-
-	if (!pipe_ctx->stream->sink->link)
+	if (!pipe_ctx->stream->link)
 		return false;

 	/* Only supports eDP */
-	if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
+	if (pipe_ctx->stream->link->connector_signal != SIGNAL_TYPE_EDP)
 		return false;

 	/* PSR should not be enabled */
-	if (pipe_ctx->stream->sink->link->psr_enabled)
+	if (pipe_ctx->stream->link->psr_enabled)
 		return false;

 	/* Nothing to compress */
@@ -2573,7 +2513,7 @@ static void dce110_apply_ctx_for_surface(
 			pipe_ctx->plane_res.mi,
 			pipe_ctx->stream->timing.h_total,
 			pipe_ctx->stream->timing.v_total,
-			pipe_ctx->stream->timing.pix_clk_khz,
+			pipe_ctx->stream->timing.pix_clk_100hz / 10,
 			context->stream_count);

 	dce110_program_front_end_for_pipe(dc, pipe_ctx);
@@ -2622,13 +2562,35 @@ static void dce110_wait_for_mpcc_disconnect(
 	/* do nothing*/
 }

+static void program_output_csc(struct dc *dc,
+		struct pipe_ctx *pipe_ctx,
+		enum dc_color_space colorspace,
+		uint16_t *matrix,
+		int opp_id)
+{
+	int i;
+	struct out_csc_color_matrix tbl_entry;
+
+	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
+		enum dc_color_space color_space = pipe_ctx->stream->output_color_space;
+
+		for (i = 0; i < 12; i++)
+			tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+		tbl_entry.color_space = color_space;
+
+		pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(
+				pipe_ctx->plane_res.xfm, &tbl_entry);
+	}
+}
+
 void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
 	struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
 	struct mem_input *mi = pipe_ctx->plane_res.mi;
 	struct dc_cursor_mi_param param = {
-		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
@@ -2672,6 +2634,7 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
 
 static const struct hw_sequencer_funcs dce110_funcs = {
 	.program_gamut_remap = program_gamut_remap,
+	.program_output_csc = program_output_csc,
 	.init_hw = init_hw,
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index e33d11785b1f..7549adaa1542 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -84,6 +84,7 @@
 
 #ifndef mmBIOS_SCRATCH_2
 	#define mmBIOS_SCRATCH_2 0x05CB
+	#define mmBIOS_SCRATCH_3 0x05CC
 	#define mmBIOS_SCRATCH_6 0x05CF
 #endif
 
@@ -369,6 +370,7 @@ static const struct dce110_clk_src_mask cs_mask = {
 };

 static const struct bios_registers bios_regs = {
+	.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
 	.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
 };

@@ -606,7 +608,7 @@ static struct output_pixel_processor *dce110_opp_create(
 	return &opp->base;
 }

-struct aux_engine *dce110_aux_engine_create(
+struct dce_aux *dce110_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -779,8 +781,8 @@ static void get_pixel_clock_parameters(
 	 * the pixel clock normalization for hdmi up to here instead of doing it
 	 * in pll_adjust_pix_clk
 	 */
-	pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
-	pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
+	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
+	pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
 	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
 	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
 	/* TODO: un-hardcode*/
@@ -797,10 +799,10 @@ static void get_pixel_clock_parameters(
 		pixel_clk_params->color_depth = COLOR_DEPTH_888;
 	}
 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
-		pixel_clk_params->requested_pix_clk = pixel_clk_params->requested_pix_clk / 2;
+		pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2;
 	}
 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
-		pixel_clk_params->requested_pix_clk *= 2;
+		pixel_clk_params->requested_pix_clk_100hz *= 2;

 }

@@ -874,7 +876,7 @@ static bool dce110_validate_bandwidth(
 			__func__,
 			context->streams[0]->timing.h_addressable,
 			context->streams[0]->timing.v_addressable,
-			context->streams[0]->timing.pix_clk_khz);
+			context->streams[0]->timing.pix_clk_100hz / 10);

 	if (memcmp(&dc->current_state->bw.dce,
 			&context->bw.dce, sizeof(context->bw.dce))) {
@@ -1055,7 +1057,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
 		pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi,
 				stream->timing.h_total,
 				stream->timing.v_total,
-				stream->timing.pix_clk_khz,
+				stream->timing.pix_clk_100hz / 10,
 				context->stream_count);

 		color_space_to_black_color(dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 969d4e72dc94..ea3065d63372 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -76,6 +76,7 @@
 
 #ifndef mmBIOS_SCRATCH_2
 	#define mmBIOS_SCRATCH_2 0x05CB
+	#define mmBIOS_SCRATCH_3 0x05CC
 	#define mmBIOS_SCRATCH_6 0x05CF
 #endif
 
@@ -376,6 +377,7 @@ static const struct dce110_clk_src_mask cs_mask = {
 };

 static const struct bios_registers bios_regs = {
+	.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
 	.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
 };

@@ -607,7 +609,7 @@ struct output_pixel_processor *dce112_opp_create(
 	return &opp->base;
 }

-struct aux_engine *dce112_aux_engine_create(
+struct dce_aux *dce112_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -763,7 +765,7 @@ static struct clock_source *find_matching_pll(
 		const struct resource_pool *pool,
 		const struct dc_stream_state *const stream)
 {
-	switch (stream->sink->link->link_enc->transmitter) {
+	switch (stream->link->link_enc->transmitter) {
 	case TRANSMITTER_UNIPHY_A:
 		return pool->clock_sources[DCE112_CLK_SRC_PLL0];
 	case TRANSMITTER_UNIPHY_B:
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index eb0f5f9a973b..1ca30928025e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,6 +244,21 @@ static void dce120_update_dchub(
 	dh_data->dchub_info_valid = false;
 }

+/**
+ * dce121_xgmi_enabled() - Check if xGMI is enabled
+ * @hws: DCE hardware sequencer object
+ *
+ * Return true if xGMI is enabled. False otherwise.
+ */
+bool dce121_xgmi_enabled(struct dce_hwseq *hws)
+{
+	uint32_t pf_max_region;
+
+	REG_GET(MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION, &pf_max_region);
+	/* PF_MAX_REGION == 0 means xgmi is disabled */
+	return !!pf_max_region;
+}
+
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
 	/* All registers used by dce11.2 match those in dce11 in offset and
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
index 77a6b86d7606..c51afbd0b012 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
@@ -30,6 +30,7 @@
 
 struct dc;

+bool dce121_xgmi_enabled(struct dce_hwseq *hws);
 void dce120_hw_sequencer_construct(struct dc *dc);

 #endif /* __DC_HWSS_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index f12696674eb0..312a0aebf91f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -62,6 +62,8 @@
 #include "soc15_hw_ip.h"
 #include "vega10_ip_offset.h"
 #include "nbio/nbio_6_1_offset.h"
+#include "mmhub/mmhub_9_4_0_offset.h"
+#include "mmhub/mmhub_9_4_0_sh_mask.h"
 #include "reg_helper.h"

 #include "dce100/dce100_resource.h"
@@ -139,6 +141,17 @@ static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = {
 		.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 					mm ## block ## id ## _ ## reg_name

+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+	MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+	MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+		.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
+					mm ## reg_name
+
 /* macros to expand register list macro defined in HW object header file
  * end *********************/
 
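MMHUB_SR uses the same two-step indirection as the other register-list helpers in this file: MMHUB_BASE expands through MMHUB_BASE_INNER so that the segment argument is fully expanded before the ## paste. As an illustration, a use such as MMHUB_SR(MC_VM_XGMI_LFB_CNTL) (argument hypothetical here, though that register is read by dce121_xgmi_enabled) expands roughly to:

	.MC_VM_XGMI_LFB_CNTL = MMHUB_BASE(mmMC_VM_XGMI_LFB_CNTL_BASE_IDX) +
				mmMC_VM_XGMI_LFB_CNTL
	/* i.e. the MMHUB instance-0 segment base plus the register's own offset */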
@@ -378,7 +391,7 @@ struct output_pixel_processor *dce120_opp_create(
 		ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
 	return &opp->base;
 }
-struct aux_engine *dce120_aux_engine_create(
+struct dce_aux *dce120_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -429,6 +442,7 @@ struct dce_i2c_hw *dce120_i2c_hw_create(
 	return dce_i2c_hw;
 }
 static const struct bios_registers bios_regs = {
+	.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX),
 	.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
 };
 
@@ -681,6 +695,19 @@ static const struct dce_hwseq_mask hwseq_mask = {
 	HWSEQ_DCE12_MASK_SH_LIST(_MASK)
 };

+/* HWSEQ regs for VG20 */
+static const struct dce_hwseq_registers dce121_hwseq_reg = {
+		HWSEQ_VG20_REG_LIST()
+};
+
+static const struct dce_hwseq_shift dce121_hwseq_shift = {
+		HWSEQ_VG20_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask dce121_hwseq_mask = {
+		HWSEQ_VG20_MASK_SH_LIST(_MASK)
+};
+
 static struct dce_hwseq *dce120_hwseq_create(
 	struct dc_context *ctx)
 {
@@ -695,6 +722,20 @@ static struct dce_hwseq *dce120_hwseq_create(
 	return hws;
 }

+static struct dce_hwseq *dce121_hwseq_create(
+	struct dc_context *ctx)
+{
+	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+	if (hws) {
+		hws->ctx = ctx;
+		hws->regs = &dce121_hwseq_reg;
+		hws->shifts = &dce121_hwseq_shift;
+		hws->masks = &dce121_hwseq_mask;
+	}
+	return hws;
+}
+
 static const struct resource_create_funcs res_create_funcs = {
 	.read_dce_straps = read_dce_straps,
 	.create_audio = create_audio,
@@ -702,6 +743,14 @@ static const struct resource_create_funcs res_create_funcs = {
 	.create_hwseq = dce120_hwseq_create,
 };

+static const struct resource_create_funcs dce121_res_create_funcs = {
+	.read_dce_straps = read_dce_straps,
+	.create_audio = create_audio,
+	.create_stream_encoder = dce120_stream_encoder_create,
+	.create_hwseq = dce121_hwseq_create,
+};
+
+
 #define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) }
 static const struct dce_mem_input_registers mi_regs[] = {
 		mi_inst_regs(0),
@@ -911,7 +960,8 @@ static bool construct(
 	int j;
 	struct dc_context *ctx = dc->ctx;
 	struct irq_service_init_data irq_init_data;
-	bool harvest_enabled = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev);
+	static const struct resource_create_funcs *res_funcs;
+	bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev);
 	uint32_t pipe_fuses;

 	ctx->dc_bios->regs = &bios_regs;
@@ -975,7 +1025,11 @@ static bool construct(
 		}
 	}

-	pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
+	if (is_vg20)
+		pool->base.clk_mgr = dce121_clk_mgr_create(ctx);
+	else
+		pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
+
 	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
@@ -1008,14 +1062,14 @@ static bool construct(
 	if (!pool->base.irqs)
 		goto irqs_create_fail;

-	/* retrieve valid pipe fuses */
-	if (harvest_enabled)
+	/* VG20: Pipe harvesting enabled, retrieve valid pipe fuses */
+	if (is_vg20)
 		pipe_fuses = read_pipe_fuses(ctx);

 	/* index to valid pipe resource */
 	j = 0;
 	for (i = 0; i < pool->base.pipe_count; i++) {
-		if (harvest_enabled) {
+		if (is_vg20) {
 			if ((pipe_fuses & (1 << i)) != 0) {
 				dm_error("DC: skip invalid pipe %d!\n", i);
 				continue;
@@ -1093,10 +1147,24 @@ static bool construct(
 	pool->base.pipe_count = j;
 	pool->base.timing_generator_count = j;

-	if (!resource_construct(num_virtual_links, dc, &pool->base,
-			&res_create_funcs))
+	if (is_vg20)
+		res_funcs = &dce121_res_create_funcs;
+	else
+		res_funcs = &res_create_funcs;
+
+	if (!resource_construct(num_virtual_links, dc, &pool->base, res_funcs))
 		goto res_create_fail;

+	/*
+	 * This is a bit of a hack. The xGMI enabled info is used to determine
+	 * if audio and display clocks need to be adjusted with the WAFL link's
+	 * SS info. This is a responsibility of the clk_mgr. But since MMHUB is
+	 * under hwseq, and the relevant register is in MMHUB, we have to do it
+	 * here.
+	 */
+	if (is_vg20 && dce121_xgmi_enabled(dc->hwseq))
+		dce121_clock_patch_xgmi_ss_info(pool->base.clk_mgr);
+
 	/* Create hardware sequencer */
 	if (!dce120_hw_sequencer_create(dc))
 		goto controller_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index cdd1d6b7b9f2..2eca81b5cf2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -77,6 +77,7 @@
 
 #ifndef mmBIOS_SCRATCH_2
 	#define mmBIOS_SCRATCH_2 0x05CB
+	#define mmBIOS_SCRATCH_3 0x05CC
 	#define mmBIOS_SCRATCH_6 0x05CF
 #endif
 
@@ -358,6 +359,7 @@ static const struct dce110_clk_src_mask cs_mask = {
 };

 static const struct bios_registers bios_regs = {
+	.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
 	.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
 };

@@ -467,7 +469,7 @@ static struct output_pixel_processor *dce80_opp_create(
 	return &opp->base;
 }

-struct aux_engine *dce80_aux_engine_create(
+struct dce_aux *dce80_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 3ba4712a35ab..8b5ce557ee71 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -84,17 +84,17 @@ static const struct dce110_timing_generator_offsets reg_offsets[] = {
 #define DCP_REG(reg) (reg + tg110->offsets.dcp)
 #define DMIF_REG(reg) (reg + tg110->offsets.dmif)

-static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_khz)
+static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz)
 {
 	uint64_t pix_dur;
 	uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1
 					+ DCE110TG_FROM_TG(tg)->offsets.dmif;
 	uint32_t value = dm_read_reg(tg->ctx, addr);

-	if (pix_clk_khz == 0)
+	if (pix_clk_100hz == 0)
 		return;

-	pix_dur = 1000000000 / pix_clk_khz;
+	pix_dur = div_u64(10000000000ull, pix_clk_100hz);

 	set_reg_field_value(
 		value,
@@ -110,7 +110,7 @@ static void program_timing(struct timing_generator *tg,
 	bool use_vbios)
 {
 	if (!use_vbios)
-		program_pix_dur(tg, timing->pix_clk_khz);
+		program_pix_dur(tg, timing->pix_clk_100hz);

 	dce110_tg_program_timing(tg, timing, use_vbios);
 }
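The constant grows from 10^9 to 10^10 because the divisor is now ten times larger, which leaves pix_dur at its previous scale; and since 10000000000 no longer fits in 32 bits, the division moves to div_u64(), the kernel helper required for 64-bit division on 32-bit architectures. A worked example with an assumed 148.5 MHz clock:

	#include <linux/math64.h>

	uint32_t pix_clk_100hz = 1485000;	/* 148.5 MHz */
	uint64_t pix_dur = div_u64(10000000000ull, pix_clk_100hz);
	/* 10^10 / 1485000 ~= 6734, identical to the old 10^9 / 148500. */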
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
index 54abedbf1b43..afe8c42211cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -161,69 +161,17 @@ static int get_active_display_cnt(
 	return display_count;
 }

-static void notify_deep_sleep_dcfclk_to_smu(
-		struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
-{
-	int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz
-	/*
-	 * if function pointer not set up, this message is
-	 * sent as part of pplib_apply_display_requirements.
-	 * So just return.
-	 */
-	if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
-		return;
-
-	min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
-	pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
-}
-
-static void notify_hard_min_dcfclk_to_smu(
-		struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
-{
-	int min_dcf_clk_mhz; //minimum required DCF clock in mhz
-
-	/*
-	 * if function pointer not set up, this message is
-	 * sent as part of pplib_apply_display_requirements.
-	 * So just return.
-	 */
-	if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
-		return;
-
-	min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
-
-	pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
-}
-
-static void notify_hard_min_fclk_to_smu(
-		struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz)
-{
-	int min_f_clk_mhz; //minimum required F clock in mhz
-
-	/*
-	 * if function pointer not set up, this message is
-	 * sent as part of pplib_apply_display_requirements.
-	 * So just return.
-	 */
-	if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq)
-		return;
-
-	min_f_clk_mhz = min_f_clk_khz / 1000;
-
-	pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz);
-}
-
 static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 		struct dc_state *context,
 		bool safe_to_lower)
 {
 	struct dc *dc = clk_mgr->ctx->dc;
+	struct dc_debug_options *debug = &dc->debug;
 	struct dc_clocks *new_clocks = &context->bw.dcn.clk;
 	struct pp_smu_display_requirement_rv *smu_req_cur =
 			&dc->res_pool->pp_smu_req;
 	struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
 	struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-	uint32_t requested_dcf_clock_in_khz = 0;
 	bool send_request_to_increase = false;
 	bool send_request_to_lower = false;
 	int display_count;
@@ -243,9 +191,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 	 */
 	if (pp_smu->set_display_count)
 		pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
-	else
-		smu_req.display_count = display_count;

+	smu_req.display_count = display_count;
 	}

 	if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
@@ -261,12 +208,13 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 	}

 	// F Clock
+	if (debug->force_fclk_khz != 0)
+		new_clocks->fclk_khz = debug->force_fclk_khz;
+
 	if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
 		clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
 		smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;

-		notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz);
-
 		send_request_to_lower = true;
 	}

@@ -281,7 +229,7 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 	if (should_set_clock(safe_to_lower,
 			new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
 		clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
-		smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000;
+		smu_req.min_deep_sleep_dcefclk_mhz = (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000;

 		send_request_to_lower = true;
 	}
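The added `+ 999` turns the kHz-to-MHz conversion into a round-up, so the deep-sleep DCEF clock floor handed to the SMU never ends up below the computed requirement; it preserves the explicit //Round up that the deleted notify_deep_sleep_dcfclk_to_smu() helper performed. For example:

	int khz = 310250;			/* assumed requirement */
	int mhz_trunc = khz / 1000;		/* 310: would undershoot */
	int mhz_ceil  = (khz + 999) / 1000;	/* 311: safe minimum */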
@@ -291,15 +239,18 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 	 */
 	if (send_request_to_increase) {
 		/*use dcfclk to request voltage*/
-		requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
-
-		notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
-
-		if (pp_smu->set_display_requirement)
-			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
-
-		notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
-		dcn1_pplib_apply_display_requirements(dc, context);
+		if (pp_smu->set_hard_min_fclk_by_freq &&
+				pp_smu->set_hard_min_dcfclk_by_freq &&
+				pp_smu->set_min_deep_sleep_dcfclk) {
+
+			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz);
+			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz);
+			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz);
+		} else {
+			if (pp_smu->set_display_requirement)
+				pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+			dcn1_pplib_apply_display_requirements(dc, context);
+		}
 	}

 	/* dcn1 dppclk is tied to dispclk */
@@ -314,18 +265,20 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 
 	if (!send_request_to_increase && send_request_to_lower) {
 		/*use dcfclk to request voltage*/
-		requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
-
-		notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
-
-		if (pp_smu->set_display_requirement)
-			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
-
-		notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
-		dcn1_pplib_apply_display_requirements(dc, context);
+		if (pp_smu->set_hard_min_fclk_by_freq &&
+				pp_smu->set_hard_min_dcfclk_by_freq &&
+				pp_smu->set_min_deep_sleep_dcfclk) {
+
+			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz);
+			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz);
+			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz);
+		} else {
+			if (pp_smu->set_display_requirement)
+				pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+			dcn1_pplib_apply_display_requirements(dc, context);
+		}
 	}

-
 	*smu_req_cur = smu_req;
 }
 static const struct clk_mgr_funcs dcn1_funcs = {
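Both the raise and lower paths now share one shape: when the SMU exposes all three fine-grained hooks, call them directly with the MHz values already cached in smu_req; otherwise fall back to the legacy set_display_requirement/pplib path. A condensed sketch of that dispatch (the helper name is hypothetical, and in the real fallback the caller also runs dcn1_pplib_apply_display_requirements()):

	static void notify_smu(struct pp_smu_funcs_rv *pp_smu,
			struct pp_smu_display_requirement_rv *req)
	{
		if (pp_smu->set_hard_min_fclk_by_freq &&
				pp_smu->set_hard_min_dcfclk_by_freq &&
				pp_smu->set_min_deep_sleep_dcfclk) {
			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, req->hard_min_fclk_mhz);
			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, req->hard_min_dcefclk_mhz);
			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, req->min_deep_sleep_dcefclk_mhz);
		} else if (pp_smu->set_display_requirement) {
			pp_smu->set_display_requirement(&pp_smu->pp_smu, req);
		}
	}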
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index dcb3c5530236..cd1ebe57ed59 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
 	if (src_y_offset >= (int)param->viewport.height)
 		cur_en = 0;  /* not visible beyond bottom edge*/

-	if (src_y_offset < 0)
+	if (src_y_offset + (int)height <= 0)
 		cur_en = 0;  /* not visible beyond top edge*/

 	REG_UPDATE(CURSOR0_CONTROL,
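The old test hid the cursor as soon as its origin crossed the top edge, even while most of it was still on screen; the new test only hides it once the whole cursor (origin plus height) sits above the viewport. A worked example with a hypothetical 64-pixel-tall cursor:

	int height = 64;		/* assumed cursor height */
	int src_y_offset = -16;		/* top 16 rows clipped, 48 still visible */

	/* old: src_y_offset < 0           -> hidden, although partly visible  */
	/* new: src_y_offset + height <= 0 -> -16 + 64 = 48 > 0, stays visible */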
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 116977eb24e2..41f0f4c912e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -51,10 +51,6 @@
 
 #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))

-struct dcn10_input_csc_matrix {
-	enum dc_color_space color_space;
-	uint16_t regval[12];
-};

 enum dcn10_coef_filter_type_sel {
 	SCL_COEF_LUMA_VERT_FILTER = 0,
@@ -99,7 +95,7 @@ enum gamut_remap_select {
 	GAMUT_REMAP_COMB_COEFF
 };

-static const struct dcn10_input_csc_matrix dcn10_input_csc_matrix[] = {
+static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
 	{COLOR_SPACE_SRGB,
 		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
 	{COLOR_SPACE_SRGB_LIMITED,
@@ -454,7 +450,7 @@ void dpp1_program_input_csc(
 {
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
 	int i;
-	int arr_size = sizeof(dcn10_input_csc_matrix)/sizeof(struct dcn10_input_csc_matrix);
+	int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
 	const uint16_t *regval = NULL;
 	uint32_t cur_select = 0;
 	enum dcn10_input_csc_select select;
@@ -467,8 +463,8 @@ void dpp1_program_input_csc(
 
 	if (tbl_entry == NULL) {
 		for (i = 0; i < arr_size; i++)
-			if (dcn10_input_csc_matrix[i].color_space == color_space) {
-				regval = dcn10_input_csc_matrix[i].regval;
+			if (dpp_input_csc_matrix[i].color_space == color_space) {
+				regval = dpp_input_csc_matrix[i].regval;
 				break;
 			}
 
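The table contents are untouched by the rename; only the type moves to a shared dpp_input_csc_matrix so other DPP generations can reuse it. As an aside, the 0x2000 diagonal in the sRGB entry is consistent with a fixed-point format in which 1.0 is represented as 2^13 (0x2000), which would make that entry an identity matrix; that scaling is inferred from the values rather than stated by the patch:

	/* 3x4 matrix flattened row-major into 12 regvals; 0x2000 assumed = 1.0 */
	static const uint16_t identity_csc[12] = {
		0x2000, 0,      0,      0,	/* R' = 1.0 * R */
		0,      0x2000, 0,      0,	/* G' = 1.0 * G */
		0,      0,      0x2000, 0,	/* B' = 1.0 * B */
	};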
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index 4a863a5dab41..c7642e748297 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -597,11 +597,13 @@ static void dpp1_dscl_set_manual_ratio_init(
 			SCL_V_INIT_FRAC, init_frac,
 			SCL_V_INIT_INT, init_int);

-	init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5;
-	init_int = dc_fixpt_floor(data->inits.v_bot);
-	REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
-		SCL_V_INIT_FRAC_BOT, init_frac,
-		SCL_V_INIT_INT_BOT, init_int);
+	if (REG(SCL_VERT_FILTER_INIT_BOT)) {
+		init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5;
+		init_int = dc_fixpt_floor(data->inits.v_bot);
+		REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
+			SCL_V_INIT_FRAC_BOT, init_frac,
+			SCL_V_INIT_INT_BOT, init_int);
+	}

 	init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5;
 	init_int = dc_fixpt_floor(data->inits.v_c);
@@ -609,11 +611,13 @@ static void dpp1_dscl_set_manual_ratio_init(
 			SCL_V_INIT_FRAC_C, init_frac,
 			SCL_V_INIT_INT_C, init_int);

-	init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5;
-	init_int = dc_fixpt_floor(data->inits.v_c_bot);
-	REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
-		SCL_V_INIT_FRAC_BOT_C, init_frac,
-		SCL_V_INIT_INT_BOT_C, init_int);
+	if (REG(SCL_VERT_FILTER_INIT_BOT_C)) {
+		init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5;
+		init_int = dc_fixpt_floor(data->inits.v_c_bot);
+		REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
+			SCL_V_INIT_FRAC_BOT_C, init_frac,
+			SCL_V_INIT_INT_BOT_C, init_int);
+	}
 }


@@ -688,15 +692,17 @@ void dpp1_dscl_set_scaler_manual_scale(
 		return;

 	/* Black offsets */
-	if (ycbcr)
-		REG_SET_2(SCL_BLACK_OFFSET, 0,
-				SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
-				SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
-	else
-
-		REG_SET_2(SCL_BLACK_OFFSET, 0,
-				SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
-				SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
+	if (REG(SCL_BLACK_OFFSET)) {
+		if (ycbcr)
+			REG_SET_2(SCL_BLACK_OFFSET, 0,
+					SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+					SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
+		else
+
+			REG_SET_2(SCL_BLACK_OFFSET, 0,
+					SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+					SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
+	}

 	/* Manually calculate scale ratio and init values */
 	dpp1_dscl_set_manual_ratio_init(dpp, scl_data);
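The guards added here, and the watermark guards in the hubbub changes below, all rely on the same convention: REG(name) looks the register offset up in the per-variant register table, and an entry left at zero marks a register this DCE/DCN revision does not implement, so the access is skipped instead of landing at offset 0. A simplified sketch of the idea (types and the writer function are stand-ins):

	struct dpp_registers {
		uint32_t SCL_BLACK_OFFSET;	/* left 0 when the variant lacks it */
	};

	static void set_black_offset(const struct dpp_registers *regs, uint32_t value)
	{
		if (!regs->SCL_BLACK_OFFSET)	/* mirrors the REG(...) guard */
			return;
		write_register(regs->SCL_BLACK_OFFSET, value);	/* hypothetical writer */
	}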
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index c7d1e678ebf5..5a4614c371bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -29,19 +29,20 @@
 #include "reg_helper.h"

 #define CTX \
-	hubbub->ctx
+	hubbub1->base.ctx
 #define DC_LOGGER \
-	hubbub->ctx->logger
+	hubbub1->base.ctx->logger
 #define REG(reg)\
-	hubbub->regs->reg
+	hubbub1->regs->reg

 #undef FN
 #define FN(reg_name, field_name) \
-	hubbub->shifts->field_name, hubbub->masks->field_name
+	hubbub1->shifts->field_name, hubbub1->masks->field_name

 void hubbub1_wm_read_state(struct hubbub *hubbub,
 		struct dcn_hubbub_wm *wm)
 {
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 	struct dcn_hubbub_wm_set *s;

 	memset(wm, 0, sizeof(struct dcn_hubbub_wm));
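The refactor splits a generic struct hubbub from a DCN1.0-specific struct dcn10_hubbub that embeds it and owns the registers, shifts, and masks; TO_DCN10_HUBBUB then recovers the outer struct from the base pointer, presumably via the usual container_of idiom, along these lines:

	struct dcn10_hubbub {
		struct hubbub base;	/* generic part, embedded */
		const struct dcn_hubbub_registers *regs;
		/* shifts, masks, debug_test_index_pstate, watermarks ... */
	};

	#define TO_DCN10_HUBBUB(hubbub_ptr) \
		container_of(hubbub_ptr, struct dcn10_hubbub, base)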
@@ -89,12 +90,14 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
 
 void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub)
 {
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 	REG_UPDATE(DCHUBBUB_ARB_DRAM_STATE_CNTL,
 			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, 0);
 }

 bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
 {
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 	uint32_t enable = 0;

 	REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
@@ -107,6 +110,8 @@ bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
 bool hubbub1_verify_allow_pstate_change_high(
 	struct hubbub *hubbub)
 {
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+
 	/* pstate latency is ~20us so if we wait over 40us and pstate allow
 	 * still not asserted, we are probably stuck and going to hang
 	 *
@@ -193,7 +198,7 @@ bool hubbub1_verify_allow_pstate_change_high(
 	 * 31:     SOC pstate change request
 	 */

-	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate);
+	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate);

 	for (i = 0; i < pstate_wait_timeout_us; i++) {
 		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
@@ -244,6 +249,8 @@ static uint32_t convert_and_clamp(
 
 void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
 {
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+
 	REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
 			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
 }
@@ -254,7 +261,9 @@ void hubbub1_program_watermarks(
 		unsigned int refclk_mhz,
 		bool safe_to_lower)
 {
-	uint32_t force_en = hubbub->ctx->dc->debug.disable_stutter ? 1 : 0;
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+
+	uint32_t force_en = hubbub1->base.ctx->dc->debug.disable_stutter ? 1 : 0;
 	/*
 	 * Need to clamp to max of the register values (i.e. no wrap)
 	 * for dcn1, all wm registers are 21-bit wide
@@ -264,8 +273,8 @@ void hubbub1_program_watermarks(
 
 	/* Repeat for water mark set A, B, C and D. */
 	/* clock state A */
-	if (safe_to_lower || watermarks->a.urgent_ns > hubbub->watermarks.a.urgent_ns) {
-		hubbub->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
+	if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
+		hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
 				refclk_mhz, 0x1fffff);
 		REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
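convert_and_clamp() itself is outside this hunk, but its call sites imply it converts a nanosecond watermark into reference-clock cycles and saturates at the 21-bit register limit mentioned above. A sketch consistent with those call sites (the exact body may differ):

	static uint32_t convert_and_clamp(uint32_t wm_ns,
			uint32_t refclk_mhz, uint32_t clamp_value)
	{
		uint32_t ret_val = wm_ns * refclk_mhz / 1000;	/* ns -> refclk cycles */

		return ret_val > clamp_value ? clamp_value : ret_val;
		/* e.g. 3000 ns at 600 MHz -> 1800 cycles, well under 0x1fffff */
	}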
@@ -275,20 +284,22 @@ void hubbub1_program_watermarks(
 			watermarks->a.urgent_ns, prog_wm_value);
 	}

-	if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub->watermarks.a.pte_meta_urgent_ns) {
-		hubbub->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
-		prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
-				refclk_mhz, 0x1fffff);
-		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
-		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
-			"HW register value = 0x%x\n",
-			watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A)) {
+		if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
+			hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
+			prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
+					refclk_mhz, 0x1fffff);
+			REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+			DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
+					"HW register value = 0x%x\n",
+					watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+		}
 	}

 	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
 		if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
-				> hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
-			hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+				> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
+			hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
 			prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
@@ -300,8 +311,8 @@ void hubbub1_program_watermarks(
 		}

 		if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
-				> hubbub->watermarks.a.cstate_pstate.cstate_exit_ns) {
-			hubbub->watermarks.a.cstate_pstate.cstate_exit_ns =
+				> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
+			hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
 				watermarks->a.cstate_pstate.cstate_exit_ns;
 			prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.cstate_exit_ns,
@@ -314,8 +325,8 @@ void hubbub1_program_watermarks(
 	}

 	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
-			> hubbub->watermarks.a.cstate_pstate.pstate_change_ns) {
-		hubbub->watermarks.a.cstate_pstate.pstate_change_ns =
+			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
+		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
 			watermarks->a.cstate_pstate.pstate_change_ns;
 		prog_wm_value = convert_and_clamp(
 			watermarks->a.cstate_pstate.pstate_change_ns,
@@ -327,8 +338,8 @@ void hubbub1_program_watermarks(
 	}

 	/* clock state B */
-	if (safe_to_lower || watermarks->b.urgent_ns > hubbub->watermarks.b.urgent_ns) {
-		hubbub->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
+	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
+		hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
 				refclk_mhz, 0x1fffff);
 		REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
@@ -338,20 +349,22 @@ void hubbub1_program_watermarks(
 			watermarks->b.urgent_ns, prog_wm_value);
 	}

-	if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub->watermarks.b.pte_meta_urgent_ns) {
-		hubbub->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
-		prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
-				refclk_mhz, 0x1fffff);
-		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
-		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
-			"HW register value = 0x%x\n",
-			watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B)) {
+		if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
+			hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
+			prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
+					refclk_mhz, 0x1fffff);
+			REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+			DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
+					"HW register value = 0x%x\n",
+					watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+		}
 	}

 	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
 		if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
-				> hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
-			hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+				> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
+			hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
 			prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
@@ -363,8 +376,8 @@ void hubbub1_program_watermarks(
 		}

 		if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
-				> hubbub->watermarks.b.cstate_pstate.cstate_exit_ns) {
-			hubbub->watermarks.b.cstate_pstate.cstate_exit_ns =
+				> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
+			hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
 				watermarks->b.cstate_pstate.cstate_exit_ns;
 			prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.cstate_exit_ns,
@@ -377,8 +390,8 @@ void hubbub1_program_watermarks(
 	}

 	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
-			> hubbub->watermarks.b.cstate_pstate.pstate_change_ns) {
-		hubbub->watermarks.b.cstate_pstate.pstate_change_ns =
+			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
+		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
 			watermarks->b.cstate_pstate.pstate_change_ns;
 		prog_wm_value = convert_and_clamp(
 			watermarks->b.cstate_pstate.pstate_change_ns,
@@ -390,8 +403,8 @@ void hubbub1_program_watermarks(
 	}

 	/* clock state C */
-	if (safe_to_lower || watermarks->c.urgent_ns > hubbub->watermarks.c.urgent_ns) {
-		hubbub->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
+	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
+		hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
 				refclk_mhz, 0x1fffff);
 		REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
@@ -401,20 +414,22 @@ void hubbub1_program_watermarks(
 			watermarks->c.urgent_ns, prog_wm_value);
 	}

-	if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub->watermarks.c.pte_meta_urgent_ns) {
-		hubbub->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
-		prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
-				refclk_mhz, 0x1fffff);
-		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
-		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
-			"HW register value = 0x%x\n",
-			watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C)) {
+		if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
+			hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
+			prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
+					refclk_mhz, 0x1fffff);
+			REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+			DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
+					"HW register value = 0x%x\n",
+					watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+		}
 	}

 	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
 		if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
-				> hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
-			hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+				> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
+			hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
 			prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
@@ -426,8 +441,8 @@ void hubbub1_program_watermarks(
 		}
 
 		if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
-				> hubbub->watermarks.c.cstate_pstate.cstate_exit_ns) {
-			hubbub->watermarks.c.cstate_pstate.cstate_exit_ns =
+				> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
+			hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
 				watermarks->c.cstate_pstate.cstate_exit_ns;
 			prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.cstate_exit_ns,
@@ -440,8 +455,8 @@ void hubbub1_program_watermarks(
 		}
 
 		if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
-				> hubbub->watermarks.c.cstate_pstate.pstate_change_ns) {
-			hubbub->watermarks.c.cstate_pstate.pstate_change_ns =
+				> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
+			hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
 				watermarks->c.cstate_pstate.pstate_change_ns;
 			prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.pstate_change_ns,
@@ -453,8 +468,8 @@ void hubbub1_program_watermarks(
 	}
 
 	/* clock state D */
-	if (safe_to_lower || watermarks->d.urgent_ns > hubbub->watermarks.d.urgent_ns) {
-		hubbub->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
+	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
+		hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
 				refclk_mhz, 0x1fffff);
 		REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
@@ -464,20 +479,22 @@ void hubbub1_program_watermarks(
 			watermarks->d.urgent_ns, prog_wm_value);
 	}
 
-	if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub->watermarks.d.pte_meta_urgent_ns) {
-		hubbub->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
-		prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
-				refclk_mhz, 0x1fffff);
-		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
-		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
-			"HW register value = 0x%x\n",
-			watermarks->d.pte_meta_urgent_ns, prog_wm_value);
+	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)) {
+		if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
+			hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
+			prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
+					refclk_mhz, 0x1fffff);
+			REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+			DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
+				"HW register value = 0x%x\n",
+				watermarks->d.pte_meta_urgent_ns, prog_wm_value);
+		}
 	}
 
 	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
 		if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
-				> hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
-			hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+				> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
+			hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
 			prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
@@ -489,8 +506,8 @@ void hubbub1_program_watermarks(
 		}
 
 		if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
-				> hubbub->watermarks.d.cstate_pstate.cstate_exit_ns) {
-			hubbub->watermarks.d.cstate_pstate.cstate_exit_ns =
+				> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
+			hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
 				watermarks->d.cstate_pstate.cstate_exit_ns;
 			prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.cstate_exit_ns,
@@ -503,8 +520,8 @@ void hubbub1_program_watermarks(
 		}
 
 		if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
-				> hubbub->watermarks.d.cstate_pstate.pstate_change_ns) {
-			hubbub->watermarks.d.cstate_pstate.pstate_change_ns =
+				> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
+			hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
 				watermarks->d.cstate_pstate.pstate_change_ns;
 			prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.pstate_change_ns,
@@ -535,6 +552,8 @@ void hubbub1_update_dchub(
 	struct hubbub *hubbub,
 	struct dchub_init_data *dh_data)
 {
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+
 	if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
 		ASSERT(false);
 		/*should not come here*/
@@ -594,6 +613,8 @@ void hubbub1_update_dchub(
 
 void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
 {
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+
 	uint32_t watermark_change_req;
 
 	REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
@@ -610,6 +631,8 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
 
 void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
 {
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+
 	uint32_t reset_en = reset ? 1 : 0;
 
 	REG_UPDATE(DCHUBBUB_SOFT_RESET,
@@ -752,7 +775,9 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
 		const struct dc_dcc_surface_param *input,
 		struct dc_surface_dcc_cap *output)
 {
-	struct dc *dc = hubbub->ctx->dc;
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+	struct dc *dc = hubbub1->base.ctx->dc;
+
 	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
 	enum dcc_control dcc_control;
 	unsigned int bpe;
@@ -764,10 +789,10 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
 	if (dc->debug.disable_dcc == DCC_DISABLE)
 		return false;
 
-	if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe))
+	if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe))
 		return false;
 
-	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
+	if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
 			&segment_order_horz, &segment_order_vert))
 		return false;
 
@@ -837,6 +862,7 @@ static const struct hubbub_funcs hubbub1_funcs = {
 	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
 	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
 	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
+	.wm_read_state = hubbub1_wm_read_state,
 };
 
 void hubbub1_construct(struct hubbub *hubbub,
@@ -845,18 +871,20 @@ void hubbub1_construct(struct hubbub *hubbub,
 	const struct dcn_hubbub_shift *hubbub_shift,
 	const struct dcn_hubbub_mask *hubbub_mask)
 {
-	hubbub->ctx = ctx;
+	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+
+	hubbub1->base.ctx = ctx;
 
-	hubbub->funcs = &hubbub1_funcs;
+	hubbub1->base.funcs = &hubbub1_funcs;
 
-	hubbub->regs = hubbub_regs;
-	hubbub->shifts = hubbub_shift;
-	hubbub->masks = hubbub_mask;
+	hubbub1->regs = hubbub_regs;
+	hubbub1->shifts = hubbub_shift;
+	hubbub1->masks = hubbub_mask;
 
-	hubbub->debug_test_index_pstate = 0x7;
+	hubbub1->debug_test_index_pstate = 0x7;
 #if defined(CONFIG_DRM_AMD_DC_DCN1_01)
 	if (ctx->dce_version == DCN_VERSION_1_01)
-		hubbub->debug_test_index_pstate = 0xB;
+		hubbub1->debug_test_index_pstate = 0xB;
 #endif
 }
 
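Note on the refactor above: struct hubbub is split into a shared base plus a DCN1.0-specific struct dcn10_hubbub that embeds it, and TO_DCN10_HUBBUB() recovers the outer struct from a base pointer via container_of(). A minimal, self-contained sketch of that idiom (names here are hypothetical, not the DC types):

	#include <stddef.h>

	/* recover the outer struct from a pointer to one of its members */
	#define my_container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base { int id; };

	struct derived {
		struct base base;	/* must be embedded, not pointed to */
		int extra;
	};

	static int get_extra(struct base *b)
	{
		/* valid only when b really is &some_derived.base */
		struct derived *d = my_container_of(b, struct derived, base);
		return d->extra;
	}

Because regs, shifts and masks now live in the derived struct, each hubbub1_* entry point performs this downcast once at the top, exactly as the hunks above do.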
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index d0f03d152913..c681e1cc9290 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -29,6 +29,9 @@
29#include "core_types.h" 29#include "core_types.h"
30#include "dchubbub.h" 30#include "dchubbub.h"
31 31
32#define TO_DCN10_HUBBUB(hubbub)\
33 container_of(hubbub, struct dcn10_hubbub, base)
34
32#define HUBHUB_REG_LIST_DCN()\ 35#define HUBHUB_REG_LIST_DCN()\
33 SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\ 36 SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
34 SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\ 37 SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
@@ -107,6 +110,12 @@ struct dcn_hubbub_registers {
 	uint32_t DCHUBBUB_SDPIF_AGP_TOP;
 	uint32_t DCHUBBUB_CRC_CTRL;
 	uint32_t DCHUBBUB_SOFT_RESET;
+	uint32_t DCN_VM_FB_LOCATION_BASE;
+	uint32_t DCN_VM_FB_LOCATION_TOP;
+	uint32_t DCN_VM_FB_OFFSET;
+	uint32_t DCN_VM_AGP_BOT;
+	uint32_t DCN_VM_AGP_TOP;
+	uint32_t DCN_VM_AGP_BASE;
 };
 
 /* set field name */
@@ -152,7 +161,13 @@ struct dcn_hubbub_registers {
 	type SDPIF_FB_OFFSET;\
 	type SDPIF_AGP_BASE;\
 	type SDPIF_AGP_BOT;\
-	type SDPIF_AGP_TOP
+	type SDPIF_AGP_TOP;\
+	type FB_BASE;\
+	type FB_TOP;\
+	type FB_OFFSET;\
+	type AGP_BOT;\
+	type AGP_TOP;\
+	type AGP_BASE
 
 
 struct dcn_hubbub_shift {
@@ -165,22 +180,8 @@ struct dcn_hubbub_mask {
 
 struct dc;
 
-struct dcn_hubbub_wm_set {
-	uint32_t wm_set;
-	uint32_t data_urgent;
-	uint32_t pte_meta_urgent;
-	uint32_t sr_enter;
-	uint32_t sr_exit;
-	uint32_t dram_clk_chanage;
-};
-
-struct dcn_hubbub_wm {
-	struct dcn_hubbub_wm_set sets[4];
-};
-
-struct hubbub {
-	const struct hubbub_funcs *funcs;
-	struct dc_context *ctx;
+struct dcn10_hubbub {
+	struct hubbub base;
 	const struct dcn_hubbub_registers *regs;
 	const struct dcn_hubbub_shift *shifts;
 	const struct dcn_hubbub_mask *masks;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 345af015d061..0ba68d41b9c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -115,7 +115,7 @@ static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
 	REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, blank_en);
 }
 
-static void hubp1_vready_workaround(struct hubp *hubp,
+void hubp1_vready_workaround(struct hubp *hubp,
 		struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
 {
 	uint32_t value = 0;
@@ -317,7 +317,8 @@ void hubp1_program_pixel_format(
 bool hubp1_program_surface_flip_and_addr(
 	struct hubp *hubp,
 	const struct dc_plane_address *address,
-	bool flip_immediate)
+	bool flip_immediate,
+	uint8_t vmid)
 {
 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
 
@@ -1140,7 +1141,7 @@ void hubp1_cursor_set_position(
 	if (src_y_offset >= (int)param->viewport.height)
 		cur_en = 0;  /* not visible beyond bottom edge*/
 
-	if (src_y_offset < 0) //+ (int)hubp->curs_attr.height
+	if (src_y_offset + (int)hubp->curs_attr.height <= 0)
 		cur_en = 0;  /* not visible beyond top edge*/
 
 	if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
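The cursor hunk above fixes an off-by-a-cursor-height test: the old check blanked the cursor as soon as its origin moved above the viewport, even while most of it was still visible. Rough arithmetic, assuming a 64-line cursor:

	int src_y_offset = -32;		/* origin 32 lines above the top edge */
	int height = 64;		/* hubp->curs_attr.height */
	int old_hidden = (src_y_offset < 0);		/* 1: hidden too early */
	int new_hidden = (src_y_offset + height <= 0);	/* 0: 32 lines still visible */

Only when origin plus height sits at or above the top edge is the cursor truly off-screen.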
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 62d4232e7796..a6d6dfe00617 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -707,11 +707,6 @@ void hubp1_dcc_control(struct hubp *hubp,
 		bool enable,
 		bool independent_64b_blks);
 
-bool hubp1_program_surface_flip_and_addr(
-	struct hubp *hubp,
-	const struct dc_plane_address *address,
-	bool flip_immediate);
-
 bool hubp1_is_flip_pending(struct hubp *hubp);
 
 void hubp1_cursor_set_attributes(
@@ -745,5 +740,7 @@ void hubp1_clear_underflow(struct hubp *hubp);
 
 enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
 
+void hubp1_vready_workaround(struct hubp *hubp,
+		struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 91e015e14355..9cde24dbdac8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -40,7 +40,6 @@
40#include "ipp.h" 40#include "ipp.h"
41#include "mpc.h" 41#include "mpc.h"
42#include "reg_helper.h" 42#include "reg_helper.h"
43#include "custom_float.h"
44#include "dcn10_hubp.h" 43#include "dcn10_hubp.h"
45#include "dcn10_hubbub.h" 44#include "dcn10_hubbub.h"
46#include "dcn10_cm_common.h" 45#include "dcn10_cm_common.h"
@@ -92,10 +91,11 @@ static void log_mpc_crc(struct dc *dc,
 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
 {
 	struct dc_context *dc_ctx = dc->ctx;
-	struct dcn_hubbub_wm wm = {0};
+	struct dcn_hubbub_wm wm;
 	int i;
 
-	hubbub1_wm_read_state(dc->res_pool->hubbub, &wm);
+	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
+	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
 
 	DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
 			" sr_enter sr_exit dram_clk_change\n");
@@ -1202,7 +1202,8 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
 			pipe_ctx->plane_res.hubp,
 			&plane_state->address,
-			plane_state->flip_immediate);
+			plane_state->flip_immediate,
+			0);
 
 	plane_state->status.requested_address = plane_state->address;
 
@@ -2048,7 +2049,7 @@ void update_dchubp_dpp(
 		dc->res_pool->dccg->funcs->update_dpp_dto(
 				dc->res_pool->dccg,
 				dpp->inst,
-				pipe_ctx->plane_res.bw.calc.dppclk_khz);
+				pipe_ctx->plane_res.bw.dppclk_khz);
 	else
 		dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
 				dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
@@ -2125,7 +2126,8 @@ void update_dchubp_dpp(
 			plane_state->update_flags.bits.swizzle_change ||
 			plane_state->update_flags.bits.dcc_change ||
 			plane_state->update_flags.bits.bpp_change ||
-			plane_state->update_flags.bits.scaling_change) {
+			plane_state->update_flags.bits.scaling_change ||
+			plane_state->update_flags.bits.plane_size_change) {
 		hubp->funcs->hubp_program_surface_config(
 			hubp,
 			plane_state->format,
@@ -2252,13 +2254,11 @@ static void program_all_pipe_in_tree(
 
 	}
 
-	if (pipe_ctx->plane_state != NULL) {
+	if (pipe_ctx->plane_state != NULL)
 		dcn10_program_pipe(dc, pipe_ctx, context);
-	}
 
-	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) {
+	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
 		program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
-	}
 }
 
 struct pipe_ctx *find_top_pipe_for_stream(
@@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface(
 			top_pipe_to_program->plane_state->update_flags.bits.full_update)
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
+			tg = pipe_ctx->stream_res.tg;
 			/* Skip inactive pipes and ones already updated */
 			if (!pipe_ctx->stream || pipe_ctx->stream == stream
-					|| !pipe_ctx->plane_state)
+					|| !pipe_ctx->plane_state
+					|| !tg->funcs->is_tg_enabled(tg))
 				continue;
 
-			pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+			tg->funcs->lock(tg);
 
 			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
 				pipe_ctx->plane_res.hubp,
 				&pipe_ctx->dlg_regs,
 				&pipe_ctx->ttu_regs);
-		}
 
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-		if (!pipe_ctx->stream || pipe_ctx->stream == stream
-				|| !pipe_ctx->plane_state)
-			continue;
-
-		dcn10_pipe_control_lock(dc, pipe_ctx, false);
-	}
+			tg->funcs->unlock(tg);
+		}
 
 	if (num_planes == 0)
 		false_optc_underflow_wa(dc, stream, tg);
@@ -2525,7 +2518,7 @@ static void dcn10_config_stereo_parameters(
 		timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
 		timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
 		enum display_dongle_type dongle = \
-			stream->sink->link->ddc->dongle_type;
+			stream->link->ddc->dongle_type;
 		if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
 			dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
 			dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
@@ -2656,7 +2649,7 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
 	struct dc_cursor_mi_param param = {
-		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
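Both hubbub-state readers in this file (and in the debug variant below) stop calling hubbub1_wm_read_state() directly and instead dispatch through the new wm_read_state vtable entry, so a future hubbub revision can supply its own reader. A reduced sketch of that dispatch shape (types simplified and hypothetical, not the DC headers):

	#include <string.h>

	struct wm_state { unsigned int data_urgent, sr_enter, sr_exit; };

	struct hubbub;
	struct hubbub_funcs {
		void (*wm_read_state)(struct hubbub *hubbub, struct wm_state *wm);
	};
	struct hubbub { const struct hubbub_funcs *funcs; };

	static void log_watermarks(struct hubbub *hubbub)
	{
		struct wm_state wm;

		memset(&wm, 0, sizeof(wm));	/* caller zeroes it, as in the hunks */
		hubbub->funcs->wm_read_state(hubbub, &wm);
		/* ... print wm fields ... */
	}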
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index cd469014baa3..98f41d250978 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -40,7 +40,6 @@
40#include "ipp.h" 40#include "ipp.h"
41#include "mpc.h" 41#include "mpc.h"
42#include "reg_helper.h" 42#include "reg_helper.h"
43#include "custom_float.h"
44#include "dcn10_hubp.h" 43#include "dcn10_hubp.h"
45#include "dcn10_hubbub.h" 44#include "dcn10_hubbub.h"
46#include "dcn10_cm_common.h" 45#include "dcn10_cm_common.h"
@@ -72,7 +71,7 @@ static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt,
 static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned int bufSize)
 {
 	struct dc_context *dc_ctx = dc->ctx;
-	struct dcn_hubbub_wm wm = {0};
+	struct dcn_hubbub_wm wm;
 	int i;
 
 	unsigned int chars_printed = 0;
@@ -81,7 +80,8 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i
 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
 	static const unsigned int frac = 1000;
 
-	hubbub1_wm_read_state(dc->res_pool->hubbub, &wm);
+	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
+	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
 
 	chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_chanage\n");
 	remaining_buffer -= chars_printed;
@@ -419,20 +419,22 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
 	unsigned int remaining_buffer = bufSize;
 
 	chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,v_bs,v_be,v_ss,v_se,vpol,vmax,vmin,vmax_sel,vmin_sel,"
-			"h_bs,h_be,h_ss,h_se,hpol,htot,vtot,underflow\n");
+			"h_bs,h_be,h_ss,h_se,hpol,htot,vtot,underflow,pixelclk[khz]\n");
 	remaining_buffer -= chars_printed;
 	pBuf += chars_printed;
 
 	for (i = 0; i < pool->timing_generator_count; i++) {
 		struct timing_generator *tg = pool->timing_generators[i];
 		struct dcn_otg_state s = {0};
+		int pix_clk = 0;
 
 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+		pix_clk = dc->current_state->res_ctx.pipe_ctx[i].stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
 
 		//only print if OTG master is enabled
 		if (s.otg_enabled & 1) {
 			chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%d,%d,%d,%d,%d,%d,%d,%d,%d,"
-					"%d,%d,%d,%d,%d,%d,%d,%d"
+					"%d,%d,%d,%d,%d,%d,%d,%d,%d"
 					"\n",
 					tg->inst,
 					s.v_blank_start,
@@ -451,7 +453,8 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
 					s.h_sync_a_pol,
 					s.h_total,
 					s.v_total,
-					s.underflow_occurred_status);
+					s.underflow_occurred_status,
+					pix_clk);
 
 			remaining_buffer -= chars_printed;
 			pBuf += chars_printed;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 477ab9222216..771449f8984f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -440,7 +440,7 @@ static uint8_t get_frontend_source(
 	}
 }
 
-void configure_encoder(
+void enc1_configure_encoder(
 	struct dcn10_link_encoder *enc10,
 	const struct dc_link_settings *link_settings)
 {
@@ -543,12 +543,12 @@ bool dcn10_link_encoder_validate_dvi_output(
 	if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
 			connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) &&
 			signal != SIGNAL_TYPE_HDMI_TYPE_A &&
-			crtc_timing->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK)
+			crtc_timing->pix_clk_100hz > (TMDS_MAX_PIXEL_CLOCK * 10))
 		return false;
-	if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+	if (crtc_timing->pix_clk_100hz < (TMDS_MIN_PIXEL_CLOCK * 10))
 		return false;
 
-	if (crtc_timing->pix_clk_khz > max_pixel_clock)
+	if (crtc_timing->pix_clk_100hz > (max_pixel_clock * 10))
 		return false;
 
 	/* DVI supports 6/8bpp single-link and 10/16bpp dual-link */
@@ -571,7 +571,7 @@ bool dcn10_link_encoder_validate_dvi_output(
 static bool dcn10_link_encoder_validate_hdmi_output(
 	const struct dcn10_link_encoder *enc10,
 	const struct dc_crtc_timing *crtc_timing,
-	int adjusted_pix_clk_khz)
+	int adjusted_pix_clk_100hz)
 {
 	enum dc_color_depth max_deep_color =
 		enc10->base.features.max_hdmi_deep_color;
@@ -581,11 +581,11 @@ static bool dcn10_link_encoder_validate_hdmi_output(
 
 	if (crtc_timing->display_color_depth < COLOR_DEPTH_888)
 		return false;
-	if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+	if (adjusted_pix_clk_100hz < (TMDS_MIN_PIXEL_CLOCK * 10))
 		return false;
 
-	if ((adjusted_pix_clk_khz == 0) ||
-		(adjusted_pix_clk_khz > enc10->base.features.max_hdmi_pixel_clock))
+	if ((adjusted_pix_clk_100hz == 0) ||
+		(adjusted_pix_clk_100hz > (enc10->base.features.max_hdmi_pixel_clock * 10)))
 		return false;
 
 	/* DCE11 HW does not support 420 */
@@ -594,7 +594,7 @@ static bool dcn10_link_encoder_validate_hdmi_output(
 		return false;
 
 	if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
-		adjusted_pix_clk_khz >= 300000)
+		adjusted_pix_clk_100hz >= 3000000)
 		return false;
 	if (enc10->base.ctx->dc->debug.hdmi20_disable &&
 		crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
@@ -738,7 +738,7 @@ bool dcn10_link_encoder_validate_output_with_stream(
 	case SIGNAL_TYPE_DVI_DUAL_LINK:
 		is_valid = dcn10_link_encoder_validate_dvi_output(
 			enc10,
-			stream->sink->link->connector_signal,
+			stream->link->connector_signal,
 			stream->signal,
 			&stream->timing);
 		break;
@@ -746,7 +746,7 @@ bool dcn10_link_encoder_validate_output_with_stream(
 		is_valid = dcn10_link_encoder_validate_hdmi_output(
 			enc10,
 			&stream->timing,
-			stream->phy_pix_clk);
+			stream->phy_pix_clk * 10);
 		break;
 	case SIGNAL_TYPE_DISPLAY_PORT:
 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
@@ -910,7 +910,7 @@ void dcn10_link_encoder_enable_dp_output(
 	 * but it's not passed to asic_control.
 	 * We need to set number of lanes manually.
 	 */
-	configure_encoder(enc10, link_settings);
+	enc1_configure_encoder(enc10, link_settings);
 
 	cntl.action = TRANSMITTER_CONTROL_ENABLE;
 	cntl.engine_id = enc->preferred_engine;
@@ -949,7 +949,7 @@ void dcn10_link_encoder_enable_dp_mst_output(
 	 * but it's not passed to asic_control.
 	 * We need to set number of lanes manually.
 	 */
-	configure_encoder(enc10, link_settings);
+	enc1_configure_encoder(enc10, link_settings);
 
 	cntl.action = TRANSMITTER_CONTROL_ENABLE;
 	cntl.engine_id = ENGINE_ID_UNKNOWN;
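A recurring change in this file (and across the series) is the move from pix_clk_khz to pix_clk_100hz: timing now carries the pixel clock in units of 100 Hz, so kHz constants are scaled by 10 when compared and values are divided by 10 where kHz is still needed. A small sanity sketch (helper names are mine, not DC's):

	#include <stdint.h>

	/* 1 kHz == 10 units of 100 Hz */
	static inline uint32_t khz_to_100hz(uint32_t khz)   { return khz * 10; }
	static inline uint32_t hz100_to_khz(uint32_t hz100) { return hz100 / 10; }

	/* e.g. the HDMI_6GB_EN threshold: 300000 kHz becomes 3000000 (x100 Hz),
	 * matching the literal in dcn10_link_encoder_validate_hdmi_output() above. */

The finer unit presumably lets fractional-kHz pixel clocks (e.g. 59.94 Hz modes) survive without rounding.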
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 49ead12b2532..670b46e887ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -271,7 +271,7 @@ void dcn10_link_encoder_setup(
 	struct link_encoder *enc,
 	enum signal_type signal);
 
-void configure_encoder(
+void enc1_configure_encoder(
 	struct dcn10_link_encoder *enc10,
 	const struct dc_link_settings *link_settings);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 7c138615f17d..eb019d404928 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -102,14 +102,6 @@ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_c
 	patched_crtc_timing = *dc_crtc_timing;
 	optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
 
-	vesa_sync_start = patched_crtc_timing.h_addressable +
-			patched_crtc_timing.h_border_right +
-			patched_crtc_timing.h_front_porch;
-
-	asic_blank_end = patched_crtc_timing.h_total -
-			vesa_sync_start -
-			patched_crtc_timing.h_border_left;
-
 	vesa_sync_start = patched_crtc_timing.v_addressable +
 			patched_crtc_timing.v_border_bottom +
 			patched_crtc_timing.v_front_porch;
@@ -119,10 +111,8 @@ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_c
 			patched_crtc_timing.v_border_top);
 
 	vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
-	if (vertical_line_start < 0) {
-		ASSERT(0);
+	if (vertical_line_start < 0)
 		vertical_line_start = 0;
-	}
 
 	return vertical_line_start;
 }
@@ -136,14 +126,14 @@ void optc1_program_vline_interrupt(
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
 	unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000);
-	unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_khz + 99), 100);
+	unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_100hz + 999), 1000);
 	uint32_t req_delta_lines = (uint32_t) div64_u64(
 			(req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1),
 			dc_crtc_timing->h_total);
 
 	uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing);
 	uint32_t start_line = 0;
-	uint32_t endLine = 0;
+	uint32_t end_line = 0;
 
 	if (req_delta_lines != 0)
 		req_delta_lines--;
@@ -153,14 +143,17 @@ void optc1_program_vline_interrupt(
 	else
 		start_line = vsync_line - req_delta_lines;
 
-	endLine = start_line + 2;
+	end_line = start_line + 2;
 
-	if (endLine >= dc_crtc_timing->v_total)
-		endLine = 2;
+	if (start_line >= dc_crtc_timing->v_total)
+		start_line = start_line % dc_crtc_timing->v_total;
+
+	if (end_line >= dc_crtc_timing->v_total)
+		end_line = end_line % dc_crtc_timing->v_total;
 
 	REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
 			OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
-			OTG_VERTICAL_INTERRUPT0_LINE_END, endLine);
+			OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
 }
 
 /**
@@ -299,16 +292,17 @@ void optc1_program_timing(
 	}
 
 	/* Interlace */
-	if (patched_crtc_timing.flags.INTERLACE == 1) {
-		REG_UPDATE(OTG_INTERLACE_CONTROL,
-				OTG_INTERLACE_ENABLE, 1);
-		v_init = v_init / 2;
-		if ((optc->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
-			v_fp2 = v_fp2 / 2;
-	} else
-		REG_UPDATE(OTG_INTERLACE_CONTROL,
-				OTG_INTERLACE_ENABLE, 0);
-
+	if (REG(OTG_INTERLACE_CONTROL)) {
+		if (patched_crtc_timing.flags.INTERLACE == 1) {
+			REG_UPDATE(OTG_INTERLACE_CONTROL,
+					OTG_INTERLACE_ENABLE, 1);
+			v_init = v_init / 2;
+			if ((optc->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
+				v_fp2 = v_fp2 / 2;
+		} else
+			REG_UPDATE(OTG_INTERLACE_CONTROL,
+					OTG_INTERLACE_ENABLE, 0);
+	}
 
 	/* VTG enable set to 0 first VInit */
 	REG_UPDATE(CONTROL,
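In optc1_program_vline_interrupt() above, the fixed end_line = 2 fallback is replaced by wrapping both edges of the two-line window modulo v_total, which also covers start_line itself running past the frame. Worked example, assuming v_total = 1125:

	uint32_t v_total = 1125;
	uint32_t start_line = 1124;
	uint32_t end_line = start_line + 2;	/* 1126 */

	if (start_line >= v_total)
		start_line %= v_total;		/* stays 1124 */
	if (end_line >= v_total)
		end_line %= v_total;		/* wraps to 1, not a fixed 2 */

so the programmed window stays exactly two lines wide across the frame boundary.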
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 5d4772dec0ba..09d74070a49b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -70,7 +70,7 @@
 const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
 	.rob_buffer_size_kbytes = 64,
 	.det_buffer_size_kbytes = 164,
-	.dpte_buffer_size_in_pte_reqs = 42,
+	.dpte_buffer_size_in_pte_reqs_luma = 42,
 	.dpp_output_buffer_pixels = 2560,
 	.opp_output_buffer_lines = 1,
 	.pixel_chunk_size_kbytes = 8,
@@ -436,7 +436,6 @@ static const struct dcn_optc_mask tg_mask = {
 };
 
 static const struct bios_registers bios_regs = {
-	NBIO_SR(BIOS_SCRATCH_0),
 	NBIO_SR(BIOS_SCRATCH_3),
 	NBIO_SR(BIOS_SCRATCH_6)
 };
@@ -609,7 +608,7 @@ static struct output_pixel_processor *dcn10_opp_create(
 	return &opp->base;
 }
 
-struct aux_engine *dcn10_aux_engine_create(
+struct dce_aux *dcn10_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -678,18 +677,18 @@ static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
 
 static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx)
 {
-	struct hubbub *hubbub = kzalloc(sizeof(struct hubbub),
+	struct dcn10_hubbub *dcn10_hubbub = kzalloc(sizeof(struct dcn10_hubbub),
 					  GFP_KERNEL);
 
-	if (!hubbub)
+	if (!dcn10_hubbub)
 		return NULL;
 
-	hubbub1_construct(hubbub, ctx,
+	hubbub1_construct(&dcn10_hubbub->base, ctx,
 			&hubbub_reg,
 			&hubbub_shift,
 			&hubbub_mask);
 
-	return hubbub;
+	return &dcn10_hubbub->base;
 }
 
 static struct timing_generator *dcn10_timing_generator_create(
@@ -911,7 +910,7 @@ static void destruct(struct dcn10_resource_pool *pool)
 
 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
 		if (pool->base.engines[i] != NULL)
-			pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
+			dce110_engine_destroy(&pool->base.engines[i]);
 		if (pool->base.hw_i2cs[i] != NULL) {
 			kfree(pool->base.hw_i2cs[i]);
 			pool->base.hw_i2cs[i] = NULL;
@@ -974,8 +973,8 @@ static void get_pixel_clock_parameters(
 	struct pixel_clk_params *pixel_clk_params)
 {
 	const struct dc_stream_state *stream = pipe_ctx->stream;
-	pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
-	pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
+	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
+	pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
 	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
 	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
 	/* TODO: un-hardcode*/
@@ -991,9 +990,9 @@ static void get_pixel_clock_parameters(
 	pixel_clk_params->color_depth = COLOR_DEPTH_888;
 
 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
-		pixel_clk_params->requested_pix_clk /= 2;
+		pixel_clk_params->requested_pix_clk_100hz /= 2;
 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
-		pixel_clk_params->requested_pix_clk *= 2;
+		pixel_clk_params->requested_pix_clk_100hz *= 2;
 
 }
 
@@ -1131,6 +1130,56 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
 	return DC_OK;
 }
 
+static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *context)
+{
+	int i, j;
+	bool video_down_scaled = false;
+	bool video_large = false;
+	bool desktop_large = false;
+	bool dcc_disabled = false;
+
+	for (i = 0; i < context->stream_count; i++) {
+		if (context->stream_status[i].plane_count == 0)
+			continue;
+
+		if (context->stream_status[i].plane_count > 2)
+			return false;
+
+		for (j = 0; j < context->stream_status[i].plane_count; j++) {
+			struct dc_plane_state *plane =
+				context->stream_status[i].plane_states[j];
+
+
+			if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+
+				if (plane->src_rect.width > plane->dst_rect.width ||
+						plane->src_rect.height > plane->dst_rect.height)
+					video_down_scaled = true;
+
+				if (plane->src_rect.width >= 3840)
+					video_large = true;
+
+			} else {
+				if (plane->src_rect.width >= 3840)
+					desktop_large = true;
+				if (!plane->dcc.enable)
+					dcc_disabled = true;
+			}
+		}
+	}
+
+	/*
+	 * Workaround: On DCN10 there is UMC issue that causes underflow when
+	 * playing 4k video on 4k desktop with video downscaled and single channel
+	 * memory
+	 */
+	if (video_large && desktop_large && video_down_scaled && dcc_disabled &&
+			dc->dcn_soc->number_of_channels == 1)
+		return DC_FAIL_SURFACE_VALIDATE;
+
+	return DC_OK;
+}
+
 static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state)
 {
 	enum dc_status result = DC_OK;
@@ -1159,6 +1208,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
 	.validate_bandwidth = dcn_validate_bandwidth,
 	.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
 	.validate_plane = dcn10_validate_plane,
+	.validate_global = dcn10_validate_global,
 	.add_stream_to_ctx = dcn10_add_stream_to_ctx,
 	.get_default_swizzle_mode = dcn10_get_default_swizzle_mode
 };
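dcn10_validate_global() above rejects a state only when every condition of the single-channel UMC underflow case holds at once. Pulled out on its own, the predicate looks like this (a sketch; the flag names mirror the hunk's locals):

	#include <stdbool.h>

	static bool hits_umc_underflow_case(bool video_large, bool desktop_large,
					    bool video_down_scaled, bool dcc_disabled,
					    int number_of_channels)
	{
		/* all five must hold; any full-size plane with DCC on, or a
		 * second memory channel, keeps the configuration valid */
		return video_large && desktop_large && video_down_scaled &&
		       dcc_disabled && number_of_channels == 1;
	}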
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index b8b5525a389a..b08254121251 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -261,17 +261,29 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 	uint8_t dp_component_depth = 0;
 
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+	struct dc_crtc_timing hw_crtc_timing = *crtc_timing;
+
+	if (hw_crtc_timing.flags.INTERLACE) {
+		/*the input timing is in VESA spec format with Interlace flag =1*/
+		hw_crtc_timing.v_total /= 2;
+		hw_crtc_timing.v_border_top /= 2;
+		hw_crtc_timing.v_addressable /= 2;
+		hw_crtc_timing.v_border_bottom /= 2;
+		hw_crtc_timing.v_front_porch /= 2;
+		hw_crtc_timing.v_sync_width /= 2;
+	}
+
 
 	/* set pixel encoding */
-	switch (crtc_timing->pixel_encoding) {
+	switch (hw_crtc_timing.pixel_encoding) {
 	case PIXEL_ENCODING_YCBCR422:
 		dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR422;
 		break;
 	case PIXEL_ENCODING_YCBCR444:
 		dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR444;
 
-		if (crtc_timing->flags.Y_ONLY)
-			if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
+		if (hw_crtc_timing.flags.Y_ONLY)
+			if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666)
 				/* HW testing only, no use case yet.
 				 * Color depth of Y-only could be
 				 * 8, 10, 12, 16 bits
@@ -299,7 +311,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 	 * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
 	 * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
 	 */
-	if ((crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+	if ((hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
 			(output_color_space == COLOR_SPACE_2020_YCBCR) ||
 			(output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
 			(output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
@@ -308,7 +320,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 		misc1 = misc1 & ~0x40;
 
 	/* set color depth */
-	switch (crtc_timing->display_color_depth) {
+	switch (hw_crtc_timing.display_color_depth) {
 	case COLOR_DEPTH_666:
 		dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
 		break;
@@ -336,7 +348,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 
 	/* set dynamic range and YCbCr range */
 
-	switch (crtc_timing->display_color_depth) {
+	switch (hw_crtc_timing.display_color_depth) {
 	case COLOR_DEPTH_666:
 		colorimetry_bpc = 0;
 		break;
@@ -372,9 +384,9 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 		misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
 		misc1 = misc1 & ~0x80; /* bit7 = 0*/
 		dynamic_range_ycbcr = 0; /*bt601*/
-		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+		if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
 			misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
-		else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+		else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
 			misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
 		break;
 	case COLOR_SPACE_YCBCR709:
@@ -382,9 +394,9 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 		misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
 		misc1 = misc1 & ~0x80; /* bit7 = 0*/
 		dynamic_range_ycbcr = 1; /*bt709*/
-		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+		if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
 			misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
-		else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+		else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
 			misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
 		break;
 	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
@@ -414,26 +426,26 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 	 * dc_crtc_timing is vesa dmt struct. data from edid
 	 */
 	REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
-			DP_MSA_HTOTAL, crtc_timing->h_total,
-			DP_MSA_VTOTAL, crtc_timing->v_total);
+			DP_MSA_HTOTAL, hw_crtc_timing.h_total,
+			DP_MSA_VTOTAL, hw_crtc_timing.v_total);
 
 	/* calculate from vesa timing parameters
 	 * h_active_start related to leading edge of sync
 	 */
 
-	h_blank = crtc_timing->h_total - crtc_timing->h_border_left -
-			crtc_timing->h_addressable - crtc_timing->h_border_right;
+	h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left -
+			hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right;
 
-	h_back_porch = h_blank - crtc_timing->h_front_porch -
-			crtc_timing->h_sync_width;
+	h_back_porch = h_blank - hw_crtc_timing.h_front_porch -
+			hw_crtc_timing.h_sync_width;
 
 	/* start at beginning of left border */
-	h_active_start = crtc_timing->h_sync_width + h_back_porch;
+	h_active_start = hw_crtc_timing.h_sync_width + h_back_porch;
 
 
-	v_active_start = crtc_timing->v_total - crtc_timing->v_border_top -
-			crtc_timing->v_addressable - crtc_timing->v_border_bottom -
-			crtc_timing->v_front_porch;
+	v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top -
+			hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom -
+			hw_crtc_timing.v_front_porch;
 
 
 	/* start at beginning of left border */
@@ -443,20 +455,20 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 
 	REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
 			DP_MSA_HSYNCWIDTH,
-			crtc_timing->h_sync_width,
+			hw_crtc_timing.h_sync_width,
 			DP_MSA_HSYNCPOLARITY,
-			!crtc_timing->flags.HSYNC_POSITIVE_POLARITY,
+			!hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY,
 			DP_MSA_VSYNCWIDTH,
-			crtc_timing->v_sync_width,
+			hw_crtc_timing.v_sync_width,
 			DP_MSA_VSYNCPOLARITY,
-			!crtc_timing->flags.VSYNC_POSITIVE_POLARITY);
+			!hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY);
 
 	/* HWDITH include border or overscan */
 	REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
-			DP_MSA_HWIDTH, crtc_timing->h_border_left +
-			crtc_timing->h_addressable + crtc_timing->h_border_right,
-			DP_MSA_VHEIGHT, crtc_timing->v_border_top +
-			crtc_timing->v_addressable + crtc_timing->v_border_bottom);
+			DP_MSA_HWIDTH, hw_crtc_timing.h_border_left +
+			hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right,
+			DP_MSA_VHEIGHT, hw_crtc_timing.v_border_top +
+			hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom);
 }
 
 static void enc1_stream_encoder_set_stream_attribute_helper(
462static void enc1_stream_encoder_set_stream_attribute_helper( 474static void enc1_stream_encoder_set_stream_attribute_helper(
@@ -594,7 +606,7 @@ void enc1_stream_encoder_dvi_set_stream_attribute(
594 cntl.signal = is_dual_link ? 606 cntl.signal = is_dual_link ?
595 SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK; 607 SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
596 cntl.enable_dp_audio = false; 608 cntl.enable_dp_audio = false;
597 cntl.pixel_clock = crtc_timing->pix_clk_khz; 609 cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10;
598 cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR; 610 cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
599 611
600 if (enc1->base.bp->funcs->encoder_control( 612 if (enc1->base.bp->funcs->encoder_control(
@@ -1413,6 +1425,14 @@ void enc1_setup_stereo_sync(
 	REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable);
 }
 
+void enc1_dig_connect_to_otg(
+	struct stream_encoder *enc,
+	int tg_inst)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst);
+}
 
 static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
 	.dp_set_stream_attribute =
@@ -1445,6 +1465,7 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
1445 .hdmi_audio_disable = enc1_se_hdmi_audio_disable, 1465 .hdmi_audio_disable = enc1_se_hdmi_audio_disable,
1446 .setup_stereo_sync = enc1_setup_stereo_sync, 1466 .setup_stereo_sync = enc1_setup_stereo_sync,
1447 .set_avmute = enc1_stream_encoder_set_avmute, 1467 .set_avmute = enc1_stream_encoder_set_avmute,
1468 .dig_connect_to_otg = enc1_dig_connect_to_otg,
1448}; 1469};
1449 1470
1450void dcn10_stream_encoder_construct( 1471void dcn10_stream_encoder_construct(
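The new enc1_dig_connect_to_otg hook follows DC's usual ops-table pattern: each ASIC family fills a const funcs struct, and callers guard optional entries with a NULL check so families without the hook keep working. A self-contained sketch of that idiom (all names here are illustrative, not the driver's):

#include <stdio.h>
#include <stddef.h>

struct enc;

struct enc_funcs {
	void (*dig_connect_to_otg)(struct enc *e, int tg_inst); /* optional */
};

struct enc {
	const struct enc_funcs *funcs;
};

/* stands in for the REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst) above */
static void dcn10_connect(struct enc *e, int tg_inst)
{
	printf("route DIG FE -> OTG%d\n", tg_inst);
}

static const struct enc_funcs dcn10_funcs = {
	.dig_connect_to_otg = dcn10_connect,
};

int main(void)
{
	struct enc e = { &dcn10_funcs };

	if (e.funcs->dig_connect_to_otg) /* NULL on families without the hook */
		e.funcs->dig_connect_to_otg(&e, 0);
	return 0;
}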
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 67f3e4dd95c1..b7c800e10a32 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -274,7 +274,8 @@ struct dcn10_stream_enc_registers {
274 SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\ 274 SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\
275 SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\ 275 SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
276 SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\ 276 SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
277 SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh) 277 SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh),\
278 SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh)
278 279
279#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\ 280#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
280 SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) 281 SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)
@@ -426,7 +427,8 @@ struct dcn10_stream_enc_registers {
426 type DP_MSA_VHEIGHT;\ 427 type DP_MSA_VHEIGHT;\
427 type HDMI_DB_DISABLE;\ 428 type HDMI_DB_DISABLE;\
428 type DP_VID_N_MUL;\ 429 type DP_VID_N_MUL;\
429 type DP_VID_M_DOUBLE_VALUE_EN 430 type DP_VID_M_DOUBLE_VALUE_EN;\
431 type DIG_SOURCE_SELECT
430 432
431struct dcn10_stream_encoder_shift { 433struct dcn10_stream_encoder_shift {
432 SE_REG_FIELD_LIST_DCN1_0(uint8_t); 434 SE_REG_FIELD_LIST_DCN1_0(uint8_t);
@@ -523,4 +525,8 @@ void enc1_se_hdmi_audio_setup(
523void enc1_se_hdmi_audio_disable( 525void enc1_se_hdmi_audio_disable(
524 struct stream_encoder *enc); 526 struct stream_encoder *enc);
525 527
528void enc1_dig_connect_to_otg(
529 struct stream_encoder *enc,
530 int tg_inst);
531
526#endif /* __DC_STREAM_ENCODER_DCN10_H__ */ 532#endif /* __DC_STREAM_ENCODER_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index 0029a39efb1c..14bed5b1fa97 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -38,7 +38,8 @@ enum pp_smu_ver {
38	 * of interface sharing between families of ASICs.	38	 * of interface sharing between families of ASICs.
39 */ 39 */
40 PP_SMU_UNSUPPORTED, 40 PP_SMU_UNSUPPORTED,
41 PP_SMU_VER_RV 41 PP_SMU_VER_RV,
42 PP_SMU_VER_MAX
42}; 43};
43 44
44struct pp_smu { 45struct pp_smu {
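PP_SMU_VER_MAX is the common trailing-sentinel idiom: because C enums number consecutively, the sentinel equals the count of real entries and can size lookup tables or bound loops without a separate constant. A minimal illustration (the enum values are placeholders, not the driver's):

#include <stdio.h>

enum ver { VER_UNSUPPORTED, VER_RV, VER_MAX };

static const char *const ver_names[VER_MAX] = { "unsupported", "raven" };

int main(void)
{
	for (int v = 0; v < VER_MAX; v++)
		printf("%d: %s\n", v, ver_names[v]);
	return 0;
}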
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index 1af8c777b3ac..77200711abbe 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -82,9 +82,17 @@ enum dm_pp_clock_type {
82#define DC_DECODE_PP_CLOCK_TYPE(clk_type) \ 82#define DC_DECODE_PP_CLOCK_TYPE(clk_type) \
83 (clk_type) == DM_PP_CLOCK_TYPE_DISPLAY_CLK ? "Display" : \ 83 (clk_type) == DM_PP_CLOCK_TYPE_DISPLAY_CLK ? "Display" : \
84 (clk_type) == DM_PP_CLOCK_TYPE_ENGINE_CLK ? "Engine" : \ 84 (clk_type) == DM_PP_CLOCK_TYPE_ENGINE_CLK ? "Engine" : \
85 (clk_type) == DM_PP_CLOCK_TYPE_MEMORY_CLK ? "Memory" : "Invalid" 85 (clk_type) == DM_PP_CLOCK_TYPE_MEMORY_CLK ? "Memory" : \
86 86 (clk_type) == DM_PP_CLOCK_TYPE_DCFCLK ? "DCF" : \
87#define DM_PP_MAX_CLOCK_LEVELS 8 87 (clk_type) == DM_PP_CLOCK_TYPE_DCEFCLK ? "DCEF" : \
88 (clk_type) == DM_PP_CLOCK_TYPE_SOCCLK ? "SoC" : \
89 (clk_type) == DM_PP_CLOCK_TYPE_PIXELCLK ? "Pixel" : \
90 (clk_type) == DM_PP_CLOCK_TYPE_DISPLAYPHYCLK ? "Display PHY" : \
91 (clk_type) == DM_PP_CLOCK_TYPE_DPPCLK ? "DPP" : \
92 (clk_type) == DM_PP_CLOCK_TYPE_FCLK ? "F" : \
93 "Invalid"
94
95#define DM_PP_MAX_CLOCK_LEVELS 16
88 96
89struct dm_pp_clock_levels { 97struct dm_pp_clock_levels {
90 uint32_t num_levels; 98 uint32_t num_levels;
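DC_DECODE_PP_CLOCK_TYPE is a chained-ternary string decoder, so covering the new DCN clock domains is just more arms ahead of the final "Invalid" fallback; note the macro argument is re-evaluated for every arm, so callers should pass a side-effect-free expression. A self-contained sketch of the same shape (enum and names are illustrative):

#include <stdio.h>

enum clk { CLK_DISPLAY, CLK_ENGINE, CLK_MEMORY, CLK_DCF };

#define DECODE_CLK(c) \
	((c) == CLK_DISPLAY ? "Display" : \
	 (c) == CLK_ENGINE ? "Engine" : \
	 (c) == CLK_MEMORY ? "Memory" : \
	 (c) == CLK_DCF ? "DCF" : "Invalid")

int main(void)
{
	printf("%s %s\n", DECODE_CLK(CLK_DCF), DECODE_CLK((enum clk)99));
	return 0;
}

The DM_PP_MAX_CLOCK_LEVELS bump from 8 to 16 pairs with this, presumably because the new domains can expose deeper DPM tables than the original three.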
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index d97ca6528f9d..33c7d7588712 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -30,7 +30,7 @@ else ifneq ($(call cc-option, -mstack-alignment=16),)
30 cc_stack_align := -mstack-alignment=16 30 cc_stack_align := -mstack-alignment=16
31endif 31endif
32 32
33dml_ccflags := -mhard-float -msse $(cc_stack_align) 33dml_ccflags := -mhard-float -msse -msse2 $(cc_stack_align)
34 34
35CFLAGS_display_mode_lib.o := $(dml_ccflags) 35CFLAGS_display_mode_lib.o := $(dml_ccflags)
36CFLAGS_display_pipe_clocks.o := $(dml_ccflags) 36CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index bea4e61b94c7..c59e582c1f40 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -121,4 +121,30 @@ enum self_refresh_affinity {
121 dm_neither_self_refresh_nor_mclk_switch 121 dm_neither_self_refresh_nor_mclk_switch
122}; 122};
123 123
124enum dm_validation_status {
125 DML_VALIDATION_OK,
126 DML_FAIL_SCALE_RATIO_TAP,
127 DML_FAIL_SOURCE_PIXEL_FORMAT,
128 DML_FAIL_VIEWPORT_SIZE,
129 DML_FAIL_TOTAL_V_ACTIVE_BW,
130 DML_FAIL_DIO_SUPPORT,
131 DML_FAIL_NOT_ENOUGH_DSC,
132 DML_FAIL_DSC_CLK_REQUIRED,
133 DML_FAIL_URGENT_LATENCY,
134 DML_FAIL_REORDERING_BUFFER,
135 DML_FAIL_DISPCLK_DPPCLK,
136 DML_FAIL_TOTAL_AVAILABLE_PIPES,
137 DML_FAIL_NUM_OTG,
138 DML_FAIL_WRITEBACK_MODE,
139 DML_FAIL_WRITEBACK_LATENCY,
140 DML_FAIL_WRITEBACK_SCALE_RATIO_TAP,
141 DML_FAIL_CURSOR_SUPPORT,
142 DML_FAIL_PITCH_SUPPORT,
143 DML_FAIL_PTE_BUFFER_SIZE,
144 DML_FAIL_HOST_VM_IMMEDIATE_FLIP,
145 DML_FAIL_DSC_INPUT_BPC,
146 DML_FAIL_PREFETCH_SUPPORT,
147 DML_FAIL_V_RATIO_PREFETCH,
148};
149
124#endif 150#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index dddeb0d4db8f..d303b789adfe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -62,3 +62,31 @@ void dml_init_instance(struct display_mode_lib *lib, enum dml_project project)
62 } 62 }
63} 63}
64 64
65const char *dml_get_status_message(enum dm_validation_status status)
66{
67 switch (status) {
68 case DML_VALIDATION_OK: return "Validation OK";
69 case DML_FAIL_SCALE_RATIO_TAP: return "Scale ratio/tap";
70 case DML_FAIL_SOURCE_PIXEL_FORMAT: return "Source pixel format";
71 case DML_FAIL_VIEWPORT_SIZE: return "Viewport size";
72 case DML_FAIL_TOTAL_V_ACTIVE_BW: return "Total vertical active bandwidth";
73 case DML_FAIL_DIO_SUPPORT: return "DIO support";
74 case DML_FAIL_NOT_ENOUGH_DSC: return "Not enough DSC Units";
75 case DML_FAIL_DSC_CLK_REQUIRED: return "DSC clock required";
76 case DML_FAIL_URGENT_LATENCY: return "Urgent latency";
77 case DML_FAIL_REORDERING_BUFFER: return "Re-ordering buffer";
78 case DML_FAIL_DISPCLK_DPPCLK: return "Dispclk and Dppclk";
79 case DML_FAIL_TOTAL_AVAILABLE_PIPES: return "Total available pipes";
80 case DML_FAIL_NUM_OTG: return "Number of OTG";
81 case DML_FAIL_WRITEBACK_MODE: return "Writeback mode";
82 case DML_FAIL_WRITEBACK_LATENCY: return "Writeback latency";
83 case DML_FAIL_WRITEBACK_SCALE_RATIO_TAP: return "Writeback scale ratio/tap";
84 case DML_FAIL_CURSOR_SUPPORT: return "Cursor support";
85 case DML_FAIL_PITCH_SUPPORT: return "Pitch support";
86 case DML_FAIL_PTE_BUFFER_SIZE: return "PTE buffer size";
87 case DML_FAIL_DSC_INPUT_BPC: return "DSC input bpc";
88 case DML_FAIL_PREFETCH_SUPPORT: return "Prefetch support";
89 case DML_FAIL_V_RATIO_PREFETCH: return "Vertical ratio prefetch";
90 default: return "Unknown Status";
91 }
92}
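The helper gives validation failures a human-readable reason instead of a bare enum value; the likely use is debug logging when DML rejects a mode. A trimmed, self-contained sketch of that use (the enum and helper are abbreviated copies; the logging site is hypothetical):

#include <stdio.h>

enum dm_validation_status { DML_VALIDATION_OK, DML_FAIL_VIEWPORT_SIZE };

static const char *dml_get_status_message(enum dm_validation_status status)
{
	switch (status) {
	case DML_VALIDATION_OK: return "Validation OK";
	case DML_FAIL_VIEWPORT_SIZE: return "Viewport size";
	default: return "Unknown Status";
	}
}

int main(void)
{
	enum dm_validation_status status = DML_FAIL_VIEWPORT_SIZE;

	if (status != DML_VALIDATION_OK)
		fprintf(stderr, "mode validation failed: %s\n",
			dml_get_status_message(status));
	return 0;
}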
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 635206248889..a730e0209c05 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -43,4 +43,6 @@ struct display_mode_lib {
43 43
44void dml_init_instance(struct display_mode_lib *lib, enum dml_project project); 44void dml_init_instance(struct display_mode_lib *lib, enum dml_project project);
45 45
46const char *dml_get_status_message(enum dm_validation_status status);
47
46#endif 48#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 5dd04520ceca..391183e3428f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -30,22 +30,15 @@ typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
30typedef struct _vcs_dpi_ip_params_st ip_params_st; 30typedef struct _vcs_dpi_ip_params_st ip_params_st;
31typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st; 31typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st;
32typedef struct _vcs_dpi_display_output_params_st display_output_params_st; 32typedef struct _vcs_dpi_display_output_params_st display_output_params_st;
33typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st;
34typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st; 33typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st;
35typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st; 34typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st;
36typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st; 35typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st;
37typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st; 36typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st;
38typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st; 37typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st;
39typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st; 38typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st;
40typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st;
41typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st;
42typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st;
43typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st;
44typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st;
45typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st; 39typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st;
46typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st; 40typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st;
47typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st; 41typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st;
48typedef struct _vcs_dpi_display_cur_rq_dlg_params_st display_cur_rq_dlg_params_st;
49typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st; 42typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st;
50typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st; 43typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st;
51typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st; 44typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st;
@@ -55,8 +48,6 @@ typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st;
55typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st; 48typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st;
56typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st; 49typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st;
57typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st; 50typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st;
58typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st;
59typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st;
60typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st; 51typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st;
61 52
62struct _vcs_dpi_voltage_scaling_st { 53struct _vcs_dpi_voltage_scaling_st {
@@ -111,8 +102,6 @@ struct _vcs_dpi_soc_bounding_box_st {
111 double xfc_bus_transport_time_us; 102 double xfc_bus_transport_time_us;
112 double xfc_xbuf_latency_tolerance_us; 103 double xfc_xbuf_latency_tolerance_us;
113 int use_urgent_burst_bw; 104 int use_urgent_burst_bw;
114 double max_hscl_ratio;
115 double max_vscl_ratio;
116 unsigned int num_states; 105 unsigned int num_states;
117 struct _vcs_dpi_voltage_scaling_st clock_limits[8]; 106 struct _vcs_dpi_voltage_scaling_st clock_limits[8];
118}; 107};
@@ -129,7 +118,8 @@ struct _vcs_dpi_ip_params_st {
129 unsigned int odm_capable; 118 unsigned int odm_capable;
130 unsigned int rob_buffer_size_kbytes; 119 unsigned int rob_buffer_size_kbytes;
131 unsigned int det_buffer_size_kbytes; 120 unsigned int det_buffer_size_kbytes;
132 unsigned int dpte_buffer_size_in_pte_reqs; 121 unsigned int dpte_buffer_size_in_pte_reqs_luma;
122 unsigned int dpte_buffer_size_in_pte_reqs_chroma;
133 unsigned int pde_proc_buffer_size_64k_reqs; 123 unsigned int pde_proc_buffer_size_64k_reqs;
134 unsigned int dpp_output_buffer_pixels; 124 unsigned int dpp_output_buffer_pixels;
135 unsigned int opp_output_buffer_lines; 125 unsigned int opp_output_buffer_lines;
@@ -192,7 +182,6 @@ struct _vcs_dpi_display_xfc_params_st {
192struct _vcs_dpi_display_pipe_source_params_st { 182struct _vcs_dpi_display_pipe_source_params_st {
193 int source_format; 183 int source_format;
194 unsigned char dcc; 184 unsigned char dcc;
195 unsigned int dcc_override;
196 unsigned int dcc_rate; 185 unsigned int dcc_rate;
197 unsigned char dcc_use_global; 186 unsigned char dcc_use_global;
198 unsigned char vm; 187 unsigned char vm;
@@ -205,7 +194,6 @@ struct _vcs_dpi_display_pipe_source_params_st {
205 int source_scan; 194 int source_scan;
206 int sw_mode; 195 int sw_mode;
207 int macro_tile_size; 196 int macro_tile_size;
208 unsigned char is_display_sw;
209 unsigned int viewport_width; 197 unsigned int viewport_width;
210 unsigned int viewport_height; 198 unsigned int viewport_height;
211 unsigned int viewport_y_y; 199 unsigned int viewport_y_y;
@@ -252,16 +240,10 @@ struct _vcs_dpi_display_output_params_st {
252 int output_bpc; 240 int output_bpc;
253 int output_type; 241 int output_type;
254 int output_format; 242 int output_format;
255 int output_standard;
256 int dsc_slices; 243 int dsc_slices;
257 struct writeback_st wb; 244 struct writeback_st wb;
258}; 245};
259 246
260struct _vcs_dpi_display_bandwidth_st {
261 double total_bw_consumed_gbps;
262 double guaranteed_urgent_return_bw_gbps;
263};
264
265struct _vcs_dpi_scaler_ratio_depth_st { 247struct _vcs_dpi_scaler_ratio_depth_st {
266 double hscl_ratio; 248 double hscl_ratio;
267 double vscl_ratio; 249 double vscl_ratio;
@@ -300,11 +282,9 @@ struct _vcs_dpi_display_pipe_dest_params_st {
300 unsigned int vupdate_width; 282 unsigned int vupdate_width;
301 unsigned int vready_offset; 283 unsigned int vready_offset;
302 unsigned char interlaced; 284 unsigned char interlaced;
303 unsigned char underscan;
304 double pixel_rate_mhz; 285 double pixel_rate_mhz;
305 unsigned char synchronized_vblank_all_planes; 286 unsigned char synchronized_vblank_all_planes;
306 unsigned char otg_inst; 287 unsigned char otg_inst;
307 unsigned char odm_split_cnt;
308 unsigned char odm_combine; 288 unsigned char odm_combine;
309 unsigned char use_maximum_vstartup; 289 unsigned char use_maximum_vstartup;
310}; 290};
@@ -331,65 +311,6 @@ struct _vcs_dpi_display_e2e_pipe_params_st {
331 display_clocks_and_cfg_st clks_cfg; 311 display_clocks_and_cfg_st clks_cfg;
332}; 312};
333 313
334struct _vcs_dpi_dchub_buffer_sizing_st {
335 unsigned int swath_width_y;
336 unsigned int swath_height_y;
337 unsigned int swath_height_c;
338 unsigned int detail_buffer_size_y;
339};
340
341struct _vcs_dpi_watermarks_perf_st {
342 double stutter_eff_in_active_region_percent;
343 double urgent_latency_supported_us;
344 double non_urgent_latency_supported_us;
345 double dram_clock_change_margin_us;
346 double dram_access_eff_percent;
347};
348
349struct _vcs_dpi_cstate_pstate_watermarks_st {
350 double cstate_exit_us;
351 double cstate_enter_plus_exit_us;
352 double pstate_change_us;
353};
354
355struct _vcs_dpi_wm_calc_pipe_params_st {
356 unsigned int num_dpp;
357 int voltage;
358 int output_type;
359 double dcfclk_mhz;
360 double socclk_mhz;
361 double dppclk_mhz;
362 double pixclk_mhz;
363 unsigned char interlace_en;
364 unsigned char pte_enable;
365 unsigned char dcc_enable;
366 double dcc_rate;
367 double bytes_per_pixel_c;
368 double bytes_per_pixel_y;
369 unsigned int swath_width_y;
370 unsigned int swath_height_y;
371 unsigned int swath_height_c;
372 unsigned int det_buffer_size_y;
373 double h_ratio;
374 double v_ratio;
375 unsigned int h_taps;
376 unsigned int h_total;
377 unsigned int v_total;
378 unsigned int v_active;
379 unsigned int e2e_index;
380 double display_pipe_line_delivery_time;
381 double read_bw;
382 unsigned int lines_in_det_y;
383 unsigned int lines_in_det_y_rounded_down_to_swath;
384 double full_det_buffering_time;
385 double dcfclk_deepsleep_mhz_per_plane;
386};
387
388struct _vcs_dpi_vratio_pre_st {
389 double vratio_pre_l;
390 double vratio_pre_c;
391};
392
393struct _vcs_dpi_display_data_rq_misc_params_st { 314struct _vcs_dpi_display_data_rq_misc_params_st {
394 unsigned int full_swath_bytes; 315 unsigned int full_swath_bytes;
395 unsigned int stored_swath_bytes; 316 unsigned int stored_swath_bytes;
@@ -423,16 +344,9 @@ struct _vcs_dpi_display_data_rq_dlg_params_st {
423 unsigned int meta_bytes_per_row_ub; 344 unsigned int meta_bytes_per_row_ub;
424}; 345};
425 346
426struct _vcs_dpi_display_cur_rq_dlg_params_st {
427 unsigned char enable;
428 unsigned int swath_height;
429 unsigned int req_per_line;
430};
431
432struct _vcs_dpi_display_rq_dlg_params_st { 347struct _vcs_dpi_display_rq_dlg_params_st {
433 display_data_rq_dlg_params_st rq_l; 348 display_data_rq_dlg_params_st rq_l;
434 display_data_rq_dlg_params_st rq_c; 349 display_data_rq_dlg_params_st rq_c;
435 display_cur_rq_dlg_params_st rq_cur0;
436}; 350};
437 351
438struct _vcs_dpi_display_rq_sizing_params_st { 352struct _vcs_dpi_display_rq_sizing_params_st {
@@ -498,6 +412,10 @@ struct _vcs_dpi_display_dlg_regs_st {
498 unsigned int xfc_reg_remote_surface_flip_latency; 412 unsigned int xfc_reg_remote_surface_flip_latency;
499 unsigned int xfc_reg_prefetch_margin; 413 unsigned int xfc_reg_prefetch_margin;
500 unsigned int dst_y_delta_drq_limit; 414 unsigned int dst_y_delta_drq_limit;
415 unsigned int refcyc_per_vm_group_vblank;
416 unsigned int refcyc_per_vm_group_flip;
417 unsigned int refcyc_per_vm_req_vblank;
418 unsigned int refcyc_per_vm_req_flip;
501}; 419};
502 420
503struct _vcs_dpi_display_ttu_regs_st { 421struct _vcs_dpi_display_ttu_regs_st {
@@ -556,19 +474,6 @@ struct _vcs_dpi_display_dlg_sys_params_st {
556 unsigned int total_flip_bytes; 474 unsigned int total_flip_bytes;
557}; 475};
558 476
559struct _vcs_dpi_display_dlg_prefetch_param_st {
560 double prefetch_bw;
561 unsigned int flip_bytes;
562};
563
564struct _vcs_dpi_display_pipe_clock_st {
565 double dcfclk_mhz;
566 double dispclk_mhz;
567 double socclk_mhz;
568 double dscclk_mhz[6];
569 double dppclk_mhz[6];
570};
571
572struct _vcs_dpi_display_arb_params_st { 477struct _vcs_dpi_display_arb_params_st {
573 int max_req_outstanding; 478 int max_req_outstanding;
574 int min_req_outstanding; 479 int min_req_outstanding;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index c2037daa8e66..ad8571f5a142 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -459,7 +459,7 @@ static void dml1_rq_dlg_get_row_heights(
459 /* dpte */ 459 /* dpte */
460 /* ------ */ 460 /* ------ */
461 log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes); 461 log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
462 dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs; 462 dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
463 463
464 log2_vmpg_height = 0; 464 log2_vmpg_height = 0;
465 log2_vmpg_width = 0; 465 log2_vmpg_width = 0;
@@ -776,7 +776,7 @@ static void get_surf_rq_param(
776 /* dpte */ 776 /* dpte */
777 /* ------ */ 777 /* ------ */
778 log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes); 778 log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
779 dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs; 779 dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
780 780
781 log2_vmpg_height = 0; 781 log2_vmpg_height = 0;
782 log2_vmpg_width = 0; 782 log2_vmpg_width = 0;
@@ -881,7 +881,7 @@ static void get_surf_rq_param(
881 /* the dpte_group_bytes is reduced for the specific case of vertical 881 /* the dpte_group_bytes is reduced for the specific case of vertical
882 * access of a tile surface that has dpte request of 8x1 ptes. 882 * access of a tile surface that has dpte request of 8x1 ptes.
883 */ 883 */
884	 if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) /* reduced: in this case a page fault would occur within a group */	884	 if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) /* reduced: in this case a page fault would occur within a group */
885 rq_sizing_param->dpte_group_bytes = 512; 885 rq_sizing_param->dpte_group_bytes = 512;
886 else 886 else
887 /*full size */ 887 /*full size */
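The &-to-&& change above matters beyond style: bitwise AND on flag-like operands silently breaks once a "true" flag holds a value other than 1, while logical AND tests each operand for non-zero (and short-circuits). A small demonstration of the difference:

#include <stdio.h>

int main(void)
{
	unsigned char surf_linear = 0, surf_vert = 2; /* flag stored as "any non-zero" */
	int log2_dpte_req_height_ptes = 0;

	/* bitwise: 1 & 1 & 2 == 0, so the condition is wrongly false */
	printf("&  -> %d\n",
	       !surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert);
	/* logical: every operand is tested for non-zero, condition is true */
	printf("&& -> %d\n",
	       !surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert);
	return 0;
}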
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
deleted file mode 100644
index 352885cb4d07..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+++ /dev/null
@@ -1,99 +0,0 @@
1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
23# Makefile for the 'i2c' sub-component of DAL.
24# It provides the control and status of HW i2c engine of the adapter.
25
26I2CAUX = aux_engine.o engine_base.o i2caux.o i2c_engine.o \
27 i2c_generic_hw_engine.o i2c_hw_engine.o i2c_sw_engine.o
28
29AMD_DAL_I2CAUX = $(addprefix $(AMDDALPATH)/dc/i2caux/,$(I2CAUX))
30
31AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX)
32
33###############################################################################
34# DCE 8x family
35###############################################################################
36I2CAUX_DCE80 = i2caux_dce80.o i2c_hw_engine_dce80.o \
37 i2c_sw_engine_dce80.o
38
39AMD_DAL_I2CAUX_DCE80 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce80/,$(I2CAUX_DCE80))
40
41AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE80)
42
43###############################################################################
44# DCE 100 family
45###############################################################################
46I2CAUX_DCE100 = i2caux_dce100.o
47
48AMD_DAL_I2CAUX_DCE100 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce100/,$(I2CAUX_DCE100))
49
50AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE100)
51
52###############################################################################
53# DCE 110 family
54###############################################################################
55I2CAUX_DCE110 = i2caux_dce110.o i2c_sw_engine_dce110.o i2c_hw_engine_dce110.o \
56 aux_engine_dce110.o
57
58AMD_DAL_I2CAUX_DCE110 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce110/,$(I2CAUX_DCE110))
59
60AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE110)
61
62###############################################################################
63# DCE 112 family
64###############################################################################
65I2CAUX_DCE112 = i2caux_dce112.o
66
67AMD_DAL_I2CAUX_DCE112 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce112/,$(I2CAUX_DCE112))
68
69AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
70
71###############################################################################
72# DCN 1.0 family
73###############################################################################
74ifdef CONFIG_DRM_AMD_DC_DCN1_0
75I2CAUX_DCN1 = i2caux_dcn10.o
76
77AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1))
78
79AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCN1)
80endif
81
82###############################################################################
83# DCE 120 family
84###############################################################################
85I2CAUX_DCE120 = i2caux_dce120.o
86
87AMD_DAL_I2CAUX_DCE120 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce120/,$(I2CAUX_DCE120))
88
89AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE120)
90
91###############################################################################
92# Diagnostics on FPGA
93###############################################################################
94I2CAUX_DIAG = i2caux_diag.o
95
96AMD_DAL_I2CAUX_DIAG = $(addprefix $(AMDDALPATH)/dc/i2caux/diagnostics/,$(I2CAUX_DIAG))
97
98AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DIAG)
99
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
deleted file mode 100644
index 8cbf38b2470d..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+++ /dev/null
@@ -1,606 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dm_event_log.h"
28
29/*
30 * Pre-requisites: headers required by header of this unit
31 */
32#include "include/i2caux_interface.h"
33#include "engine.h"
34
35/*
36 * Header of this unit
37 */
38
39#include "aux_engine.h"
40
41/*
42 * Post-requisites: headers required by this unit
43 */
44
45#include "include/link_service_types.h"
46
47/*
48 * This unit
49 */
50
51enum {
52 AUX_INVALID_REPLY_RETRY_COUNTER = 1,
53 AUX_TIMED_OUT_RETRY_COUNTER = 2,
54 AUX_DEFER_RETRY_COUNTER = 6
55};
56
57#define FROM_ENGINE(ptr) \
58 container_of((ptr), struct aux_engine, base)
59#define DC_LOGGER \
60 engine->base.ctx->logger
61
62enum i2caux_engine_type dal_aux_engine_get_engine_type(
63 const struct engine *engine)
64{
65 return I2CAUX_ENGINE_TYPE_AUX;
66}
67
68bool dal_aux_engine_acquire(
69 struct engine *engine,
70 struct ddc *ddc)
71{
72 struct aux_engine *aux_engine = FROM_ENGINE(engine);
73
74 enum gpio_result result;
75 if (aux_engine->funcs->is_engine_available) {
76 /*check whether SW could use the engine*/
77 if (!aux_engine->funcs->is_engine_available(aux_engine)) {
78 return false;
79 }
80 }
81
82 result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
83 GPIO_DDC_CONFIG_TYPE_MODE_AUX);
84
85 if (result != GPIO_RESULT_OK)
86 return false;
87
88 if (!aux_engine->funcs->acquire_engine(aux_engine)) {
89 dal_ddc_close(ddc);
90 return false;
91 }
92
93 engine->ddc = ddc;
94
95 return true;
96}
97
98struct read_command_context {
99 uint8_t *buffer;
100 uint32_t current_read_length;
101 uint32_t offset;
102 enum i2caux_transaction_status status;
103
104 struct aux_request_transaction_data request;
105 struct aux_reply_transaction_data reply;
106
107 uint8_t returned_byte;
108
109 uint32_t timed_out_retry_aux;
110 uint32_t invalid_reply_retry_aux;
111 uint32_t defer_retry_aux;
112 uint32_t defer_retry_i2c;
113 uint32_t invalid_reply_retry_aux_on_ack;
114
115 bool transaction_complete;
116 bool operation_succeeded;
117};
118
119static void process_read_reply(
120 struct aux_engine *engine,
121 struct read_command_context *ctx)
122{
123 engine->funcs->process_channel_reply(engine, &ctx->reply);
124
125 switch (ctx->reply.status) {
126 case AUX_TRANSACTION_REPLY_AUX_ACK:
127 ctx->defer_retry_aux = 0;
128 if (ctx->returned_byte > ctx->current_read_length) {
129 ctx->status =
130 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
131 ctx->operation_succeeded = false;
132 } else if (ctx->returned_byte < ctx->current_read_length) {
133 ctx->current_read_length -= ctx->returned_byte;
134
135 ctx->offset += ctx->returned_byte;
136
137 ++ctx->invalid_reply_retry_aux_on_ack;
138
139 if (ctx->invalid_reply_retry_aux_on_ack >
140 AUX_INVALID_REPLY_RETRY_COUNTER) {
141 ctx->status =
142 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
143 ctx->operation_succeeded = false;
144 }
145 } else {
146 ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
147 ctx->transaction_complete = true;
148 ctx->operation_succeeded = true;
149 }
150 break;
151 case AUX_TRANSACTION_REPLY_AUX_NACK:
152 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
153 ctx->operation_succeeded = false;
154 break;
155 case AUX_TRANSACTION_REPLY_AUX_DEFER:
156 ++ctx->defer_retry_aux;
157
158 if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
159 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
160 ctx->operation_succeeded = false;
161 }
162 break;
163 case AUX_TRANSACTION_REPLY_I2C_DEFER:
164 ctx->defer_retry_aux = 0;
165
166 ++ctx->defer_retry_i2c;
167
168 if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
169 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
170 ctx->operation_succeeded = false;
171 }
172 break;
173 case AUX_TRANSACTION_REPLY_HPD_DISCON:
174 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
175 ctx->operation_succeeded = false;
176 break;
177 default:
178 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
179 ctx->operation_succeeded = false;
180 }
181}
182
183static void process_read_request(
184 struct aux_engine *engine,
185 struct read_command_context *ctx)
186{
187 enum aux_channel_operation_result operation_result;
188
189 engine->funcs->submit_channel_request(engine, &ctx->request);
190
191 operation_result = engine->funcs->get_channel_status(
192 engine, &ctx->returned_byte);
193
194 switch (operation_result) {
195 case AUX_CHANNEL_OPERATION_SUCCEEDED:
196 if (ctx->returned_byte > ctx->current_read_length) {
197 ctx->status =
198 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
199 ctx->operation_succeeded = false;
200 } else {
201 ctx->timed_out_retry_aux = 0;
202 ctx->invalid_reply_retry_aux = 0;
203
204 ctx->reply.length = ctx->returned_byte;
205 ctx->reply.data = ctx->buffer;
206
207 process_read_reply(engine, ctx);
208 }
209 break;
210 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
211 ++ctx->invalid_reply_retry_aux;
212
213 if (ctx->invalid_reply_retry_aux >
214 AUX_INVALID_REPLY_RETRY_COUNTER) {
215 ctx->status =
216 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
217 ctx->operation_succeeded = false;
218 } else
219 udelay(400);
220 break;
221 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
222 ++ctx->timed_out_retry_aux;
223
224 if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
225 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
226 ctx->operation_succeeded = false;
227 } else {
228 /* DP 1.2a, table 2-58:
229 * "S3: AUX Request CMD PENDING:
230 * retry 3 times, with 400usec wait on each"
231 * The HW timeout is set to 550usec,
232 * so we should not wait here */
233 }
234 break;
235 case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
236 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
237 ctx->operation_succeeded = false;
238 break;
239 default:
240 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
241 ctx->operation_succeeded = false;
242 }
243}
244
245static bool read_command(
246 struct aux_engine *engine,
247 struct i2caux_transaction_request *request,
248 bool middle_of_transaction)
249{
250 struct read_command_context ctx;
251
252 ctx.buffer = request->payload.data;
253 ctx.current_read_length = request->payload.length;
254 ctx.offset = 0;
255 ctx.timed_out_retry_aux = 0;
256 ctx.invalid_reply_retry_aux = 0;
257 ctx.defer_retry_aux = 0;
258 ctx.defer_retry_i2c = 0;
259 ctx.invalid_reply_retry_aux_on_ack = 0;
260 ctx.transaction_complete = false;
261 ctx.operation_succeeded = true;
262
263 if (request->payload.address_space ==
264 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
265 ctx.request.type = AUX_TRANSACTION_TYPE_DP;
266 ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
267 ctx.request.address = request->payload.address;
268 } else if (request->payload.address_space ==
269 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
270 ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
271 ctx.request.action = middle_of_transaction ?
272 I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
273 I2CAUX_TRANSACTION_ACTION_I2C_READ;
274 ctx.request.address = request->payload.address >> 1;
275 } else {
276		/* in DAL2, there was no return in such a case */
277 BREAK_TO_DEBUGGER();
278 return false;
279 }
280
281 ctx.request.delay = 0;
282
283 do {
284 memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
285
286 ctx.request.data = ctx.buffer + ctx.offset;
287 ctx.request.length = ctx.current_read_length;
288
289 process_read_request(engine, &ctx);
290
291 request->status = ctx.status;
292
293 if (ctx.operation_succeeded && !ctx.transaction_complete)
294 if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
295 msleep(engine->delay);
296 } while (ctx.operation_succeeded && !ctx.transaction_complete);
297
298 if (request->payload.address_space ==
299 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
300 DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
301 request->payload.address,
302 request->payload.data[0],
303 ctx.operation_succeeded);
304 }
305
306 return ctx.operation_succeeded;
307}
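Although this whole engine is being removed (superseded by the consolidated dce aux code), the loop above shows the partial-read protocol: on an ACK that returns fewer bytes than requested, process_read_reply advances ctx.offset and shrinks ctx.current_read_length, so the next iteration re-issues the request at the resume point. A standalone sketch of that resume loop (read_some stands in for one HW AUX transaction):

#include <stdio.h>
#include <string.h>

/* pretend the sink never returns more than 4 bytes per transaction */
static unsigned int read_some(unsigned char *dst, unsigned int want)
{
	unsigned int got = want > 4 ? 4 : want;

	memset(dst, 0xAB, got);
	return got;
}

int main(void)
{
	unsigned char buf[10];
	unsigned int offset = 0, remaining = sizeof(buf);

	while (remaining) {
		unsigned int got = read_some(buf + offset, remaining);

		offset += got;     /* resume point for the next request */
		remaining -= got;
	}
	printf("read %u bytes\n", offset);
	return 0;
}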
308
309struct write_command_context {
310 bool mot;
311
312 uint8_t *buffer;
313 uint32_t current_write_length;
314 enum i2caux_transaction_status status;
315
316 struct aux_request_transaction_data request;
317 struct aux_reply_transaction_data reply;
318
319 uint8_t returned_byte;
320
321 uint32_t timed_out_retry_aux;
322 uint32_t invalid_reply_retry_aux;
323 uint32_t defer_retry_aux;
324 uint32_t defer_retry_i2c;
325 uint32_t max_defer_retry;
326 uint32_t ack_m_retry;
327
328 uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
329
330 bool transaction_complete;
331 bool operation_succeeded;
332};
333
334static void process_write_reply(
335 struct aux_engine *engine,
336 struct write_command_context *ctx)
337{
338 engine->funcs->process_channel_reply(engine, &ctx->reply);
339
340 switch (ctx->reply.status) {
341 case AUX_TRANSACTION_REPLY_AUX_ACK:
342 ctx->operation_succeeded = true;
343
344 if (ctx->returned_byte) {
345 ctx->request.action = ctx->mot ?
346 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
347 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
348
349 ctx->current_write_length = 0;
350
351 ++ctx->ack_m_retry;
352
353 if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
354 ctx->status =
355 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
356 ctx->operation_succeeded = false;
357 } else
358 udelay(300);
359 } else {
360 ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
361 ctx->defer_retry_aux = 0;
362 ctx->ack_m_retry = 0;
363 ctx->transaction_complete = true;
364 }
365 break;
366 case AUX_TRANSACTION_REPLY_AUX_NACK:
367 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
368 ctx->operation_succeeded = false;
369 break;
370 case AUX_TRANSACTION_REPLY_AUX_DEFER:
371 ++ctx->defer_retry_aux;
372
373 if (ctx->defer_retry_aux > ctx->max_defer_retry) {
374 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
375 ctx->operation_succeeded = false;
376 }
377 break;
378 case AUX_TRANSACTION_REPLY_I2C_DEFER:
379 ctx->defer_retry_aux = 0;
380 ctx->current_write_length = 0;
381
382 ctx->request.action = ctx->mot ?
383 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
384 I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
385
386 ++ctx->defer_retry_i2c;
387
388 if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
389 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
390 ctx->operation_succeeded = false;
391 }
392 break;
393 case AUX_TRANSACTION_REPLY_HPD_DISCON:
394 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
395 ctx->operation_succeeded = false;
396 break;
397 default:
398 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
399 ctx->operation_succeeded = false;
400 }
401}
402
403static void process_write_request(
404 struct aux_engine *engine,
405 struct write_command_context *ctx)
406{
407 enum aux_channel_operation_result operation_result;
408
409 engine->funcs->submit_channel_request(engine, &ctx->request);
410
411 operation_result = engine->funcs->get_channel_status(
412 engine, &ctx->returned_byte);
413
414 switch (operation_result) {
415 case AUX_CHANNEL_OPERATION_SUCCEEDED:
416 ctx->timed_out_retry_aux = 0;
417 ctx->invalid_reply_retry_aux = 0;
418
419 ctx->reply.length = ctx->returned_byte;
420 ctx->reply.data = ctx->reply_data;
421
422 process_write_reply(engine, ctx);
423 break;
424 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
425 ++ctx->invalid_reply_retry_aux;
426
427 if (ctx->invalid_reply_retry_aux >
428 AUX_INVALID_REPLY_RETRY_COUNTER) {
429 ctx->status =
430 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
431 ctx->operation_succeeded = false;
432 } else
433 udelay(400);
434 break;
435 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
436 ++ctx->timed_out_retry_aux;
437
438 if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
439 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
440 ctx->operation_succeeded = false;
441 } else {
442 /* DP 1.2a, table 2-58:
443 * "S3: AUX Request CMD PENDING:
444 * retry 3 times, with 400usec wait on each"
445 * The HW timeout is set to 550usec,
446 * so we should not wait here */
447 }
448 break;
449 case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
450 ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
451 ctx->operation_succeeded = false;
452 break;
453 default:
454 ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
455 ctx->operation_succeeded = false;
456 }
457}
458
459static bool write_command(
460 struct aux_engine *engine,
461 struct i2caux_transaction_request *request,
462 bool middle_of_transaction)
463{
464 struct write_command_context ctx;
465
466 ctx.mot = middle_of_transaction;
467 ctx.buffer = request->payload.data;
468 ctx.current_write_length = request->payload.length;
469 ctx.timed_out_retry_aux = 0;
470 ctx.invalid_reply_retry_aux = 0;
471 ctx.defer_retry_aux = 0;
472 ctx.defer_retry_i2c = 0;
473 ctx.ack_m_retry = 0;
474 ctx.transaction_complete = false;
475 ctx.operation_succeeded = true;
476
477 if (request->payload.address_space ==
478 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
479 ctx.request.type = AUX_TRANSACTION_TYPE_DP;
480 ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
481 ctx.request.address = request->payload.address;
482 } else if (request->payload.address_space ==
483 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
484 ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
485 ctx.request.action = middle_of_transaction ?
486 I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
487 I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
488 ctx.request.address = request->payload.address >> 1;
489 } else {
490		/* in DAL2, there was no return in such a case */
491 BREAK_TO_DEBUGGER();
492 return false;
493 }
494
495 ctx.request.delay = 0;
496
497 ctx.max_defer_retry =
498 (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
499 engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
500
501 do {
502 ctx.request.data = ctx.buffer;
503 ctx.request.length = ctx.current_write_length;
504
505 process_write_request(engine, &ctx);
506
507 request->status = ctx.status;
508
509 if (ctx.operation_succeeded && !ctx.transaction_complete)
510 if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
511 msleep(engine->delay);
512 } while (ctx.operation_succeeded && !ctx.transaction_complete);
513
514 if (request->payload.address_space ==
515 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
516 DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
517 request->payload.address,
518 request->payload.data[0],
519 ctx.operation_succeeded);
520 }
521
522 return ctx.operation_succeeded;
523}
524
525static bool end_of_transaction_command(
526 struct aux_engine *engine,
527 struct i2caux_transaction_request *request)
528{
529 struct i2caux_transaction_request dummy_request;
530 uint8_t dummy_data;
531
532 /* [tcheng] We only need to send the stop (read with MOT = 0)
533	 * for I2C-over-AUX, not native AUX */
534
535 if (request->payload.address_space !=
536 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
537 return false;
538
539 dummy_request.operation = request->operation;
540 dummy_request.payload.address_space = request->payload.address_space;
541 dummy_request.payload.address = request->payload.address;
542
543 /*
544 * Add a dummy byte due to some receiver quirk
545 * where one byte is sent along with MOT = 0.
546 * Ideally this should be 0.
547 */
548
549 dummy_request.payload.length = 0;
550 dummy_request.payload.data = &dummy_data;
551
552 if (request->operation == I2CAUX_TRANSACTION_READ)
553 return read_command(engine, &dummy_request, false);
554 else
555 return write_command(engine, &dummy_request, false);
556
557	/* according to Syed, DoDummyMOT is not needed now */
558}
559
560bool dal_aux_engine_submit_request(
561 struct engine *engine,
562 struct i2caux_transaction_request *request,
563 bool middle_of_transaction)
564{
565 struct aux_engine *aux_engine = FROM_ENGINE(engine);
566
567 bool result;
568 bool mot_used = true;
569
570 switch (request->operation) {
571 case I2CAUX_TRANSACTION_READ:
572 result = read_command(aux_engine, request, mot_used);
573 break;
574 case I2CAUX_TRANSACTION_WRITE:
575 result = write_command(aux_engine, request, mot_used);
576 break;
577 default:
578 result = false;
579 }
580
581 /* [tcheng]
582	 * need to send a stop for the last transaction to free up the AUX;
583	 * if the above command fails, this is treated as the last transaction */
584
585 if (!middle_of_transaction || !result)
586 end_of_transaction_command(aux_engine, request);
587
588 /* mask AUX interrupt */
589
590 return result;
591}
592
593void dal_aux_engine_construct(
594 struct aux_engine *engine,
595 struct dc_context *ctx)
596{
597 dal_i2caux_construct_engine(&engine->base, ctx);
598 engine->delay = 0;
599 engine->max_defer_write_retry = 0;
600}
601
602void dal_aux_engine_destruct(
603 struct aux_engine *engine)
604{
605 dal_i2caux_destruct_engine(&engine->base);
606}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
deleted file mode 100644
index c33a2898d967..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_AUX_ENGINE_H__
27#define __DAL_AUX_ENGINE_H__
28
29#include "dc_ddc_types.h"
30
31struct aux_engine;
32
33struct aux_engine_funcs {
34 void (*destroy)(
35 struct aux_engine **ptr);
36 bool (*acquire_engine)(
37 struct aux_engine *engine);
38 void (*configure)(
39 struct aux_engine *engine,
40 union aux_config cfg);
41 void (*submit_channel_request)(
42 struct aux_engine *engine,
43 struct aux_request_transaction_data *request);
44 void (*process_channel_reply)(
45 struct aux_engine *engine,
46 struct aux_reply_transaction_data *reply);
47 int (*read_channel_reply)(
48 struct aux_engine *engine,
49 uint32_t size,
50 uint8_t *buffer,
51 uint8_t *reply_result,
52 uint32_t *sw_status);
53 enum aux_channel_operation_result (*get_channel_status)(
54 struct aux_engine *engine,
55 uint8_t *returned_bytes);
56 bool (*is_engine_available) (
57 struct aux_engine *engine);
58};
59
60struct aux_engine {
61 struct engine base;
62 const struct aux_engine_funcs *funcs;
63 /* following values are expressed in milliseconds */
64 uint32_t delay;
65 uint32_t max_defer_write_retry;
66
67 bool acquire_reset;
68};
69
70void dal_aux_engine_construct(
71 struct aux_engine *engine,
72 struct dc_context *ctx);
73
74void dal_aux_engine_destruct(
75 struct aux_engine *engine);
76bool dal_aux_engine_submit_request(
77 struct engine *ptr,
78 struct i2caux_transaction_request *request,
79 bool middle_of_transaction);
80bool dal_aux_engine_acquire(
81 struct engine *ptr,
82 struct ddc *ddc);
83enum i2caux_engine_type dal_aux_engine_get_engine_type(
84 const struct engine *engine);
85
86#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
deleted file mode 100644
index 8b704ab0471c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/i2caux_interface.h"
29#include "../i2caux.h"
30#include "../engine.h"
31#include "../i2c_engine.h"
32#include "../i2c_sw_engine.h"
33#include "../i2c_hw_engine.h"
34
35#include "../dce110/aux_engine_dce110.h"
36#include "../dce110/i2c_hw_engine_dce110.h"
37#include "../dce110/i2caux_dce110.h"
38
39#include "dce/dce_10_0_d.h"
40#include "dce/dce_10_0_sh_mask.h"
41
42/* set register offset */
43#define SR(reg_name)\
44 .reg_name = mm ## reg_name
45
46/* set register offset with instance */
47#define SRI(reg_name, block, id)\
48 .reg_name = mm ## block ## id ## _ ## reg_name
49
50#define aux_regs(id)\
51[id] = {\
52 AUX_COMMON_REG_LIST(id), \
53 .AUX_RESET_MASK = 0 \
54}
55
56#define hw_engine_regs(id)\
57{\
58 I2C_HW_ENGINE_COMMON_REG_LIST(id) \
59}
60
61static const struct dce110_aux_registers dce100_aux_regs[] = {
62 aux_regs(0),
63 aux_regs(1),
64 aux_regs(2),
65 aux_regs(3),
66 aux_regs(4),
67 aux_regs(5),
68};
69
70static const struct dce110_i2c_hw_engine_registers dce100_hw_engine_regs[] = {
71 hw_engine_regs(1),
72 hw_engine_regs(2),
73 hw_engine_regs(3),
74 hw_engine_regs(4),
75 hw_engine_regs(5),
76 hw_engine_regs(6)
77};
78
79static const struct dce110_i2c_hw_engine_shift i2c_shift = {
80 I2C_COMMON_MASK_SH_LIST_DCE100(__SHIFT)
81};
82
83static const struct dce110_i2c_hw_engine_mask i2c_mask = {
84 I2C_COMMON_MASK_SH_LIST_DCE100(_MASK)
85};
86
87struct i2caux *dal_i2caux_dce100_create(
88 struct dc_context *ctx)
89{
90 struct i2caux_dce110 *i2caux_dce110 =
91 kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
92
93 if (!i2caux_dce110) {
94 ASSERT_CRITICAL(false);
95 return NULL;
96 }
97
98 dal_i2caux_dce110_construct(i2caux_dce110,
99 ctx,
100 ARRAY_SIZE(dce100_aux_regs),
101 dce100_aux_regs,
102 dce100_hw_engine_regs,
103 &i2c_shift,
104 &i2c_mask);
105 return &i2caux_dce110->base;
106}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h
deleted file mode 100644
index 2b508d3e0ef4..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_DCE100_H__
27#define __DAL_I2C_AUX_DCE100_H__
28
29struct i2caux *dal_i2caux_dce100_create(
30 struct dc_context *ctx);
31
32#endif /* __DAL_I2C_AUX_DCE100_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
deleted file mode 100644
index 59c3ed43d609..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+++ /dev/null
@@ -1,505 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dm_event_log.h"
28
29/*
30 * Pre-requisites: headers required by header of this unit
31 */
32#include "include/i2caux_interface.h"
33#include "../engine.h"
34#include "../aux_engine.h"
35
36/*
37 * Header of this unit
38 */
39
40#include "aux_engine_dce110.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45#include "dce/dce_11_0_sh_mask.h"
46
47#define CTX \
48 aux110->base.base.ctx
49#define REG(reg_name)\
50 (aux110->regs->reg_name)
51#include "reg_helper.h"
52
53/*
54 * This unit
55 */
56
57/*
58 * @brief
59 * Cast 'struct aux_engine *'
60 * to 'struct aux_engine_dce110 *'
61 */
62#define FROM_AUX_ENGINE(ptr) \
63 container_of((ptr), struct aux_engine_dce110, base)
64
65/*
66 * @brief
67 * Cast 'struct engine *'
68 * to 'struct aux_engine_dce110 *'
69 */
70#define FROM_ENGINE(ptr) \
71 FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
72
73static void release_engine(
74 struct engine *engine)
75{
76 struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
77
78 REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
79}
80
81static void destruct(
82 struct aux_engine_dce110 *engine);
83
84static void destroy(
85 struct aux_engine **aux_engine)
86{
87 struct aux_engine_dce110 *engine = FROM_AUX_ENGINE(*aux_engine);
88
89 destruct(engine);
90
91 kfree(engine);
92
93 *aux_engine = NULL;
94}
95
96#define SW_CAN_ACCESS_AUX 1
97#define DMCU_CAN_ACCESS_AUX 2
98
99static bool is_engine_available(
100 struct aux_engine *engine)
101{
102 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
103
104 uint32_t value = REG_READ(AUX_ARB_CONTROL);
105 uint32_t field = get_reg_field_value(
106 value,
107 AUX_ARB_CONTROL,
108 AUX_REG_RW_CNTL_STATUS);
109
110 return (field != DMCU_CAN_ACCESS_AUX);
111}
112static bool acquire_engine(
113 struct aux_engine *engine)
114{
115 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
116
117 uint32_t value = REG_READ(AUX_ARB_CONTROL);
118 uint32_t field = get_reg_field_value(
119 value,
120 AUX_ARB_CONTROL,
121 AUX_REG_RW_CNTL_STATUS);
122 if (field == DMCU_CAN_ACCESS_AUX)
123 return false;
124 /* enable AUX before requesting SW access to AUX */
125 value = REG_READ(AUX_CONTROL);
126 field = get_reg_field_value(value,
127 AUX_CONTROL,
128 AUX_EN);
129
130 if (field == 0) {
131 set_reg_field_value(
132 value,
133 1,
134 AUX_CONTROL,
135 AUX_EN);
136
137 if (REG(AUX_RESET_MASK)) {
138 /* reset the DP_AUX block as part of the enable sequence */
139 set_reg_field_value(
140 value,
141 1,
142 AUX_CONTROL,
143 AUX_RESET);
144 }
145
146 REG_WRITE(AUX_CONTROL, value);
147
148 if (REG(AUX_RESET_MASK)) {
149 /* poll HW to make sure reset is done */
150
151 REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
152 1, 11);
153
154 set_reg_field_value(
155 value,
156 0,
157 AUX_CONTROL,
158 AUX_RESET);
159
160 REG_WRITE(AUX_CONTROL, value);
161
162 REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
163 1, 11);
164 }
165 } /*if (field)*/
166
167 /* request SW to access AUX */
168 REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
169
170 value = REG_READ(AUX_ARB_CONTROL);
171 field = get_reg_field_value(
172 value,
173 AUX_ARB_CONTROL,
174 AUX_REG_RW_CNTL_STATUS);
175
176 return (field == SW_CAN_ACCESS_AUX);
177}
178
179#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
180 ((command) | ((0xF0000 & (address)) >> 16))
181
182#define COMPOSE_AUX_SW_DATA_8_15(address) \
183 ((0xFF00 & (address)) >> 8)
184
185#define COMPOSE_AUX_SW_DATA_0_7(address) \
186 (0xFF & (address))
187
188static void submit_channel_request(
189 struct aux_engine *engine,
190 struct aux_request_transaction_data *request)
191{
192 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
193 uint32_t value;
194 uint32_t length;
195
196 bool is_write =
197 ((request->type == AUX_TRANSACTION_TYPE_DP) &&
198 (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
199 ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
200 ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
201 (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
202 if (REG(AUXN_IMPCAL)) {
203 /* clear_aux_error */
204 REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
205 1,
206 0);
207
208 REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
209 1,
210 0);
211
212 /* force_default_calibrate */
213 REG_UPDATE_1BY1_2(AUXN_IMPCAL,
214 AUXN_IMPCAL_ENABLE, 1,
215 AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
216
217 /* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP only toggles OVERRIDE? */
218
219 REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
220 1,
221 0);
222 }
223 /* set the delay and the number of bytes to write */
224
225 /* The length includes
226 * the 4-bit header and the 20-bit address
227 * (that is, 3 bytes).
228 * If the requested length is non-zero,
229 * an additional byte specifying the length is required. */
230
231 length = request->length ? 4 : 3;
232 if (is_write)
233 length += request->length;
234
235 REG_UPDATE_2(AUX_SW_CONTROL,
236 AUX_SW_START_DELAY, request->delay,
237 AUX_SW_WR_BYTES, length);
238
239 /* program action and address and payload data (if 'is_write') */
240 value = REG_UPDATE_4(AUX_SW_DATA,
241 AUX_SW_INDEX, 0,
242 AUX_SW_DATA_RW, 0,
243 AUX_SW_AUTOINCREMENT_DISABLE, 1,
244 AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
245
246 value = REG_SET_2(AUX_SW_DATA, value,
247 AUX_SW_AUTOINCREMENT_DISABLE, 0,
248 AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
249
250 value = REG_SET(AUX_SW_DATA, value,
251 AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
252
253 if (request->length) {
254 value = REG_SET(AUX_SW_DATA, value,
255 AUX_SW_DATA, request->length - 1);
256 }
257
258 if (is_write) {
259 /* Load the HW buffer with the data to be sent.
260 * This is relevant only for write operations.
261 * For reads, the received data will be
262 * processed in process_channel_reply(). */
263 uint32_t i = 0;
264
265 while (i < request->length) {
266 value = REG_SET(AUX_SW_DATA, value,
267 AUX_SW_DATA, request->data[i]);
268
269 ++i;
270 }
271 }
272
273 REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
274 REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
275 10, aux110->timeout_period/10);
276 REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
277 EVENT_LOG_AUX_REQ(engine->base.ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_NATIVE,
278 request->action, request->address, request->length, request->data);
279}
280
281static int read_channel_reply(struct aux_engine *engine, uint32_t size,
282 uint8_t *buffer, uint8_t *reply_result,
283 uint32_t *sw_status)
284{
285 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
286 uint32_t bytes_replied;
287 uint32_t reply_result_32;
288
289 *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
290 &bytes_replied);
291
292 /* In case HPD is LOW, exit AUX transaction */
293 if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
294 return -1;
295
296 /* Need at least the status byte */
297 if (!bytes_replied)
298 return -1;
299
300 REG_UPDATE_1BY1_3(AUX_SW_DATA,
301 AUX_SW_INDEX, 0,
302 AUX_SW_AUTOINCREMENT_DISABLE, 1,
303 AUX_SW_DATA_RW, 1);
304
305 REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
306 reply_result_32 = reply_result_32 >> 4;
307 *reply_result = (uint8_t)reply_result_32;
308
309 if (reply_result_32 == 0) { /* ACK */
310 uint32_t i = 0;
311
312 /* First byte was already used to get the command status */
313 --bytes_replied;
314
315 /* Do not overflow buffer */
316 if (bytes_replied > size)
317 return -1;
318
319 while (i < bytes_replied) {
320 uint32_t aux_sw_data_val;
321
322 REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
323 buffer[i] = aux_sw_data_val;
324 ++i;
325 }
326
327 return i;
328 }
329
330 return 0;
331}
332
333static void process_channel_reply(
334 struct aux_engine *engine,
335 struct aux_reply_transaction_data *reply)
336{
337 int bytes_replied;
338 uint8_t reply_result;
339 uint32_t sw_status;
340
341 bytes_replied = read_channel_reply(engine, reply->length, reply->data,
342 &reply_result, &sw_status);
343 EVENT_LOG_AUX_REP(engine->base.ddc->pin_data->en,
344 EVENT_LOG_AUX_ORIGIN_NATIVE, reply_result,
345 bytes_replied, reply->data);
346
347 /* in case HPD is LOW, exit AUX transaction */
348 if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
349 reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
350 return;
351 }
352
353 if (bytes_replied < 0) {
354 /* Need to handle an error case...
355 * Ideally, the upper-layer function does not call this function
356 * when the reply contained 0 bytes, because in that case an error
357 * was surely asserted and should already have been handled;
358 * for the hot-plug case this can still happen.
359 */
360 if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
361 reply->status = AUX_TRANSACTION_REPLY_INVALID;
362 ASSERT_CRITICAL(false);
363 return;
364 }
365 } else {
366
367 switch (reply_result) {
368 case 0: /* ACK */
369 reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
370 break;
371 case 1: /* NACK */
372 reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
373 break;
374 case 2: /* DEFER */
375 reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
376 break;
377 case 4: /* AUX ACK / I2C NACK */
378 reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
379 break;
380 case 8: /* AUX ACK / I2C DEFER */
381 reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
382 break;
383 default:
384 reply->status = AUX_TRANSACTION_REPLY_INVALID;
385 }
386 }
387}
388
389static enum aux_channel_operation_result get_channel_status(
390 struct aux_engine *engine,
391 uint8_t *returned_bytes)
392{
393 struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
394
395 uint32_t value;
396
397 if (returned_bytes == NULL) {
398 /* caller passed a NULL pointer */
399 ASSERT_CRITICAL(false);
400 return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
401 }
402 *returned_bytes = 0;
403
404 /* poll to make sure that SW_DONE is asserted */
405 value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
406 10, aux110->timeout_period/10);
407
408 /* in case HPD is LOW, exit AUX transaction */
409 if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
410 return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
411
412 /* Note that the following bits are set in 'status.bits'
413 * during CTS 4.2.1.2 (FW 3.3.1):
414 * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
415 * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
416 *
417 * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
418 * HW debugging bit and should be ignored. */
419 if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
420 if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
421 (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
422 return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
423
424 else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
425 (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
426 (value &
427 AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
428 (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
429 return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
430
431 *returned_bytes = get_reg_field_value(value,
432 AUX_SW_STATUS,
433 AUX_SW_REPLY_BYTE_COUNT);
434
435 if (*returned_bytes == 0)
436 return
437 AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
438 else {
439 *returned_bytes -= 1;
440 return AUX_CHANNEL_OPERATION_SUCCEEDED;
441 }
442 } else {
443 /* time_elapsed >= aux_engine->timeout_period;
444 * AUX_SW_STATUS__AUX_SW_HPD_DISCON is not set at this point
445 */
446 ASSERT_CRITICAL(false);
447 return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
448 }
449}
450
451static const struct aux_engine_funcs aux_engine_funcs = {
452 .destroy = destroy,
453 .acquire_engine = acquire_engine,
454 .submit_channel_request = submit_channel_request,
455 .process_channel_reply = process_channel_reply,
456 .read_channel_reply = read_channel_reply,
457 .get_channel_status = get_channel_status,
458 .is_engine_available = is_engine_available,
459};
460
461static const struct engine_funcs engine_funcs = {
462 .release_engine = release_engine,
463 .submit_request = dal_aux_engine_submit_request,
464 .get_engine_type = dal_aux_engine_get_engine_type,
465 .acquire = dal_aux_engine_acquire,
466};
467
468static void construct(
469 struct aux_engine_dce110 *engine,
470 const struct aux_engine_dce110_init_data *aux_init_data)
471{
472 dal_aux_engine_construct(&engine->base, aux_init_data->ctx);
473 engine->base.base.funcs = &engine_funcs;
474 engine->base.funcs = &aux_engine_funcs;
475
476 engine->timeout_period = aux_init_data->timeout_period;
477 engine->regs = aux_init_data->regs;
478}
479
480static void destruct(
481 struct aux_engine_dce110 *engine)
482{
483 dal_aux_engine_destruct(&engine->base);
484}
485
486struct aux_engine *dal_aux_engine_dce110_create(
487 const struct aux_engine_dce110_init_data *aux_init_data)
488{
489 struct aux_engine_dce110 *engine;
490
491 if (!aux_init_data) {
492 ASSERT_CRITICAL(false);
493 return NULL;
494 }
495
496 engine = kzalloc(sizeof(*engine), GFP_KERNEL);
497
498 if (!engine) {
499 ASSERT_CRITICAL(false);
500 return NULL;
501 }
502
503 construct(engine, aux_init_data);
504 return &engine->base;
505}
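For reference, the decode performed by process_channel_reply() above is compact enough to restate on its own: the top nibble of the first returned byte selects the transaction status. A minimal, self-contained sketch (decode_aux_reply is an illustrative helper name, not part of the driver):

#include <stdint.h>
#include <stdio.h>

static const char *decode_aux_reply(uint8_t first_byte)
{
	switch (first_byte >> 4) {
	case 0: return "AUX_ACK";
	case 1: return "AUX_NACK";
	case 2: return "AUX_DEFER";
	case 4: return "I2C_NACK";  /* AUX ACK / I2C NACK */
	case 8: return "I2C_DEFER"; /* AUX ACK / I2C DEFER */
	default: return "INVALID";
	}
}

int main(void)
{
	printf("%s\n", decode_aux_reply(0x20)); /* prints AUX_DEFER */
	return 0;
}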
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h
deleted file mode 100644
index 85ee82162590..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_AUX_ENGINE_DCE110_H__
27#define __DAL_AUX_ENGINE_DCE110_H__
28
29#include "../aux_engine.h"
30
31#define AUX_COMMON_REG_LIST(id)\
32 SRI(AUX_CONTROL, DP_AUX, id), \
33 SRI(AUX_ARB_CONTROL, DP_AUX, id), \
34 SRI(AUX_SW_DATA, DP_AUX, id), \
35 SRI(AUX_SW_CONTROL, DP_AUX, id), \
36 SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
37 SRI(AUX_SW_STATUS, DP_AUX, id), \
38 SR(AUXN_IMPCAL), \
39 SR(AUXP_IMPCAL)
40
41struct dce110_aux_registers {
42 uint32_t AUX_CONTROL;
43 uint32_t AUX_ARB_CONTROL;
44 uint32_t AUX_SW_DATA;
45 uint32_t AUX_SW_CONTROL;
46 uint32_t AUX_INTERRUPT_CONTROL;
47 uint32_t AUX_SW_STATUS;
48 uint32_t AUXN_IMPCAL;
49 uint32_t AUXP_IMPCAL;
50
51 uint32_t AUX_RESET_MASK;
52};
53
54struct aux_engine_dce110 {
55 struct aux_engine base;
56 const struct dce110_aux_registers *regs;
57 struct {
58 uint32_t aux_control;
59 uint32_t aux_arb_control;
60 uint32_t aux_sw_data;
61 uint32_t aux_sw_control;
62 uint32_t aux_interrupt_control;
63 uint32_t aux_sw_status;
64 } addr;
65 uint32_t timeout_period;
66};
67
68struct aux_engine_dce110_init_data {
69 uint32_t engine_id;
70 uint32_t timeout_period;
71 struct dc_context *ctx;
72 const struct dce110_aux_registers *regs;
73};
74
75struct aux_engine *dal_aux_engine_dce110_create(
76 const struct aux_engine_dce110_init_data *aux_init_data);
77
78#endif
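The AUX_COMMON_REG_LIST()/aux_regs() machinery above fills struct dce110_aux_registers through the SR()/SRI() designated-initializer macros defined in i2caux_dce110.c further down. A rough sketch of what one expansion produces; the mm* offsets here are placeholders, not the real DCE11 values:

/* hypothetical offsets, for illustration only */
#define mmDP_AUX0_AUX_CONTROL   0x1880
#define mmDP_AUX0_AUX_SW_STATUS 0x188a

/* SRI(AUX_CONTROL, DP_AUX, 0) expands to
 *	.AUX_CONTROL = mmDP_AUX0_AUX_CONTROL
 * so aux_regs(0) is equivalent to writing out: */
static const struct dce110_aux_registers aux0_regs_sketch = {
	.AUX_CONTROL   = mmDP_AUX0_AUX_CONTROL,
	.AUX_SW_STATUS = mmDP_AUX0_AUX_SW_STATUS,
	/* ...remaining fields filled the same way... */
};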
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
deleted file mode 100644
index 9cbe1a7a6bcb..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+++ /dev/null
@@ -1,574 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "include/logger_interface.h"
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31
32#include "include/i2caux_interface.h"
33#include "../engine.h"
34#include "../i2c_engine.h"
35#include "../i2c_hw_engine.h"
36#include "../i2c_generic_hw_engine.h"
37/*
38 * Header of this unit
39 */
40
41#include "i2c_hw_engine_dce110.h"
42
43/*
44 * Post-requisites: headers required by this unit
45 */
46#include "reg_helper.h"
47
48/*
49 * This unit
50 */
51#define DC_LOGGER \
52 hw_engine->base.base.base.ctx->logger
53
54enum dc_i2c_status {
55 DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
56 DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
57 DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
58};
59
60enum dc_i2c_arbitration {
61 DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
62 DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
63};
64
65
66
67/*
68 * @brief
69 * Cast pointer to 'struct i2c_hw_engine *'
70 * to pointer 'struct i2c_hw_engine_dce110 *'
71 */
72#define FROM_I2C_HW_ENGINE(ptr) \
73 container_of((ptr), struct i2c_hw_engine_dce110, base)
74/*
75 * @brief
76 * Cast pointer to 'struct i2c_engine *'
77 * to pointer to 'struct i2c_hw_engine_dce110 *'
78 */
79#define FROM_I2C_ENGINE(ptr) \
80 FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
81
82/*
83 * @brief
84 * Cast pointer to 'struct engine *'
85 * to 'pointer to struct i2c_hw_engine_dce110 *'
86 */
87#define FROM_ENGINE(ptr) \
88 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
89
90#define CTX \
91 hw_engine->base.base.base.ctx
92
93#define REG(reg_name)\
94 (hw_engine->regs->reg_name)
95
96#undef FN
97#define FN(reg_name, field_name) \
98 hw_engine->i2c_shift->field_name, hw_engine->i2c_mask->field_name
99
100#include "reg_helper.h"
101
102static void disable_i2c_hw_engine(
103 struct i2c_hw_engine_dce110 *hw_engine)
104{
105 REG_UPDATE_N(SETUP, 1, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 0);
106}
107
108static void release_engine(
109 struct engine *engine)
110{
111 struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
112
113 struct i2c_engine *base = NULL;
114 bool safe_to_reset;
115
116 base = &hw_engine->base.base;
117
118 /* Restore original HW engine speed */
119
120 base->funcs->set_speed(base, hw_engine->base.original_speed);
121
122 /* Release I2C */
123 REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1);
124
125 /* Reset HW engine */
126 {
127 uint32_t i2c_sw_status = 0;
128 REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
129 /* if used by SW, safe to reset */
130 safe_to_reset = (i2c_sw_status == 1);
131 }
132
133 if (safe_to_reset)
134 REG_UPDATE_2(
135 DC_I2C_CONTROL,
136 DC_I2C_SOFT_RESET, 1,
137 DC_I2C_SW_STATUS_RESET, 1);
138 else
139 REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1);
140
141 /* HW I2c engine - clock gating feature */
142 if (!hw_engine->engine_keep_power_up_count)
143 disable_i2c_hw_engine(hw_engine);
144}
145
146static bool setup_engine(
147 struct i2c_engine *i2c_engine)
148{
149 struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
150 uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
151 uint32_t reset_length = 0;
152
153 if (hw_engine->base.base.setup_limit != 0)
154 i2c_setup_limit = hw_engine->base.base.setup_limit;
155
156 /* Program pin select */
157 REG_UPDATE_6(
158 DC_I2C_CONTROL,
159 DC_I2C_GO, 0,
160 DC_I2C_SOFT_RESET, 0,
161 DC_I2C_SEND_RESET, 0,
162 DC_I2C_SW_STATUS_RESET, 1,
163 DC_I2C_TRANSACTION_COUNT, 0,
164 DC_I2C_DDC_SELECT, hw_engine->engine_id);
165
166 /* Program time limit */
167 if (hw_engine->base.base.send_reset_length == 0) {
168 /*pre-dcn*/
169 REG_UPDATE_N(
170 SETUP, 2,
171 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
172 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
173 } else {
174 reset_length = hw_engine->base.base.send_reset_length;
175 }
176 /* Program HW priority:
177 * set to High - interrupt software I2C at any time,
178 * enable restart of SW I2C that was interrupted by HW,
179 * disable queuing of SW requests while I2C is in use by HW */
180 REG_UPDATE_2(
181 DC_I2C_ARBITRATION,
182 DC_I2C_NO_QUEUED_SW_GO, 0,
183 DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
184
185 return true;
186}
187
188static uint32_t get_speed(
189 const struct i2c_engine *i2c_engine)
190{
191 const struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
192 uint32_t pre_scale = 0;
193
194 REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
195
196 /* [anaumov] it seems following is unnecessary */
197 /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
198 return pre_scale ?
199 hw_engine->reference_frequency / pre_scale :
200 hw_engine->base.default_speed;
201}
202
203static void set_speed(
204 struct i2c_engine *i2c_engine,
205 uint32_t speed)
206{
207 struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
208
209 if (speed) {
210 if (hw_engine->i2c_mask->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
211 REG_UPDATE_N(
212 SPEED, 3,
213 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed,
214 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
215 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
216 else
217 REG_UPDATE_N(
218 SPEED, 2,
219 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed,
220 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
221 }
222}
223
224static inline void reset_hw_engine(struct engine *engine)
225{
226 struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
227
228 REG_UPDATE_2(
229 DC_I2C_CONTROL,
230 DC_I2C_SOFT_RESET, 1,
231 DC_I2C_SW_STATUS_RESET, 1);
232}
233
234static bool is_hw_busy(struct engine *engine)
235{
236 struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
237 uint32_t i2c_sw_status = 0;
238
239 REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
240 if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
241 return false;
242
243 reset_hw_engine(engine);
244
245 REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
246 return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
247}
248
249
250#define STOP_TRANS_PREDICAT \
251 ((hw_engine->transaction_count == 3) || \
252 (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) || \
253 (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ))
254
255#define SET_I2C_TRANSACTION(id) \
256 do { \
257 REG_UPDATE_N(DC_I2C_TRANSACTION##id, 5, \
258 FN(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0), 1, \
259 FN(DC_I2C_TRANSACTION0, DC_I2C_START0), 1, \
260 FN(DC_I2C_TRANSACTION0, DC_I2C_STOP0), STOP_TRANS_PREDICAT ? 1:0, \
261 FN(DC_I2C_TRANSACTION0, DC_I2C_RW0), (0 != (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)), \
262 FN(DC_I2C_TRANSACTION0, DC_I2C_COUNT0), length); \
263 if (STOP_TRANS_PREDICAT) \
264 last_transaction = true; \
265 } while (false)
266
267
268static bool process_transaction(
269 struct i2c_hw_engine_dce110 *hw_engine,
270 struct i2c_request_transaction_data *request)
271{
272 uint32_t length = request->length;
273 uint8_t *buffer = request->data;
274 uint32_t value = 0;
275
276 bool last_transaction = false;
277
278 struct dc_context *ctx = NULL;
279
280 ctx = hw_engine->base.base.base.ctx;
281
282
283
284 switch (hw_engine->transaction_count) {
285 case 0:
286 SET_I2C_TRANSACTION(0);
287 break;
288 case 1:
289 SET_I2C_TRANSACTION(1);
290 break;
291 case 2:
292 SET_I2C_TRANSACTION(2);
293 break;
294 case 3:
295 SET_I2C_TRANSACTION(3);
296 break;
297 default:
298 /* TODO Warning ? */
299 break;
300 }
301
302
303 /* Write the I2C address and I2C data
304 * into the hardware circular buffer, one byte per entry.
305 * As an example, the 7-bit I2C slave address for a CRT monitor
306 * for reading DDC/EDID information is 0b1010001.
307 * For an I2C send operation, the LSB must be programmed to 0;
308 * for an I2C receive operation, the LSB must be programmed to 1. */
309 if (hw_engine->transaction_count == 0) {
310 value = REG_SET_4(DC_I2C_DATA, 0,
311 DC_I2C_DATA_RW, false,
312 DC_I2C_DATA, request->address,
313 DC_I2C_INDEX, 0,
314 DC_I2C_INDEX_WRITE, 1);
315 hw_engine->buffer_used_write = 0;
316 } else
317 value = REG_SET_2(DC_I2C_DATA, 0,
318 DC_I2C_DATA_RW, false,
319 DC_I2C_DATA, request->address);
320
321 hw_engine->buffer_used_write++;
322
323 if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
324 while (length) {
325 REG_SET_2(DC_I2C_DATA, value,
326 DC_I2C_INDEX_WRITE, 0,
327 DC_I2C_DATA, *buffer++);
328 hw_engine->buffer_used_write++;
329 --length;
330 }
331 }
332
333 ++hw_engine->transaction_count;
334 hw_engine->buffer_used_bytes += length + 1;
335
336 return last_transaction;
337}
338
339static void execute_transaction(
340 struct i2c_hw_engine_dce110 *hw_engine)
341{
342 REG_UPDATE_N(SETUP, 5,
343 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0,
344 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0,
345 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0,
346 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0,
347 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0);
348
349
350 REG_UPDATE_5(DC_I2C_CONTROL,
351 DC_I2C_SOFT_RESET, 0,
352 DC_I2C_SW_STATUS_RESET, 0,
353 DC_I2C_SEND_RESET, 0,
354 DC_I2C_GO, 0,
355 DC_I2C_TRANSACTION_COUNT, hw_engine->transaction_count - 1);
356
357 /* start I2C transfer */
358 REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1);
359
360 /* all transactions were executed and HW buffer became empty
361 * (even though it actually happens when status becomes DONE) */
362 hw_engine->transaction_count = 0;
363 hw_engine->buffer_used_bytes = 0;
364}
365
366static void submit_channel_request(
367 struct i2c_engine *engine,
368 struct i2c_request_transaction_data *request)
369{
370 request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
371
372 if (!process_transaction(FROM_I2C_ENGINE(engine), request))
373 return;
374
375 if (is_hw_busy(&engine->base)) {
376 request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
377 return;
378 }
379
380 execute_transaction(FROM_I2C_ENGINE(engine));
381}
382
383static void process_channel_reply(
384 struct i2c_engine *engine,
385 struct i2c_reply_transaction_data *reply)
386{
387 uint32_t length = reply->length;
388 uint8_t *buffer = reply->data;
389
390 struct i2c_hw_engine_dce110 *hw_engine =
391 FROM_I2C_ENGINE(engine);
392
393
394 REG_SET_3(DC_I2C_DATA, 0,
395 DC_I2C_INDEX, hw_engine->buffer_used_write,
396 DC_I2C_DATA_RW, 1,
397 DC_I2C_INDEX_WRITE, 1);
398
399 while (length) {
400 /* after reading the status,
401 * if the I2C operation executed successfully
402 * (i.e. DC_I2C_STATUS_DONE = 1), then the I2C controller
403 * should read the data bytes from the I2C circular data buffer */
404
405 uint32_t i2c_data;
406
407 REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data);
408 *buffer++ = i2c_data;
409
410 --length;
411 }
412}
413
414static enum i2c_channel_operation_result get_channel_status(
415 struct i2c_engine *i2c_engine,
416 uint8_t *returned_bytes)
417{
418 uint32_t i2c_sw_status = 0;
419 struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
420 uint32_t value =
421 REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
422
423 if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
424 return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
425 else if (value & hw_engine->i2c_mask->DC_I2C_SW_STOPPED_ON_NACK)
426 return I2C_CHANNEL_OPERATION_NO_RESPONSE;
427 else if (value & hw_engine->i2c_mask->DC_I2C_SW_TIMEOUT)
428 return I2C_CHANNEL_OPERATION_TIMEOUT;
429 else if (value & hw_engine->i2c_mask->DC_I2C_SW_ABORTED)
430 return I2C_CHANNEL_OPERATION_FAILED;
431 else if (value & hw_engine->i2c_mask->DC_I2C_SW_DONE)
432 return I2C_CHANNEL_OPERATION_SUCCEEDED;
433
434 /*
435 * this is the case when HW is used for the communication;
436 * I2C_SW_STATUS can then be zero
437 */
438 return I2C_CHANNEL_OPERATION_SUCCEEDED;
439}
440
441static uint32_t get_hw_buffer_available_size(
442 const struct i2c_hw_engine *engine)
443{
444 return I2C_HW_BUFFER_SIZE -
445 FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes;
446}
447
448static uint32_t get_transaction_timeout(
449 const struct i2c_hw_engine *engine,
450 uint32_t length)
451{
452 uint32_t speed = engine->base.funcs->get_speed(&engine->base);
453
454 uint32_t period_timeout;
455 uint32_t num_of_clock_stretches;
456
457 if (!speed)
458 return 0;
459
460 period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
461
462 num_of_clock_stretches = 1 + (length << 3) + 1;
463 num_of_clock_stretches +=
464 (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) +
465 (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1);
466
467 return period_timeout * num_of_clock_stretches;
468}
469
470static void destroy(
471 struct i2c_engine **i2c_engine)
472{
473 struct i2c_hw_engine_dce110 *engine_dce110 =
474 FROM_I2C_ENGINE(*i2c_engine);
475
476 dal_i2c_hw_engine_destruct(&engine_dce110->base);
477
478 kfree(engine_dce110);
479
480 *i2c_engine = NULL;
481}
482
483static const struct i2c_engine_funcs i2c_engine_funcs = {
484 .destroy = destroy,
485 .get_speed = get_speed,
486 .set_speed = set_speed,
487 .setup_engine = setup_engine,
488 .submit_channel_request = submit_channel_request,
489 .process_channel_reply = process_channel_reply,
490 .get_channel_status = get_channel_status,
491 .acquire_engine = dal_i2c_hw_engine_acquire_engine,
492};
493
494static const struct engine_funcs engine_funcs = {
495 .release_engine = release_engine,
496 .get_engine_type = dal_i2c_hw_engine_get_engine_type,
497 .acquire = dal_i2c_engine_acquire,
498 .submit_request = dal_i2c_hw_engine_submit_request,
499};
500
501static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = {
502 .get_hw_buffer_available_size = get_hw_buffer_available_size,
503 .get_transaction_timeout = get_transaction_timeout,
504 .wait_on_operation_result = dal_i2c_hw_engine_wait_on_operation_result,
505};
506
507static void construct(
508 struct i2c_hw_engine_dce110 *hw_engine,
509 const struct i2c_hw_engine_dce110_create_arg *arg)
510{
511 uint32_t xtal_ref_div = 0;
512
513 dal_i2c_hw_engine_construct(&hw_engine->base, arg->ctx);
514
515 hw_engine->base.base.base.funcs = &engine_funcs;
516 hw_engine->base.base.funcs = &i2c_engine_funcs;
517 hw_engine->base.funcs = &i2c_hw_engine_funcs;
518 hw_engine->base.default_speed = arg->default_speed;
519
520 hw_engine->regs = arg->regs;
521 hw_engine->i2c_shift = arg->i2c_shift;
522 hw_engine->i2c_mask = arg->i2c_mask;
523
524 hw_engine->engine_id = arg->engine_id;
525
526 hw_engine->buffer_used_bytes = 0;
527 hw_engine->transaction_count = 0;
528 hw_engine->engine_keep_power_up_count = 1;
529
530
531 REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
532
533 if (xtal_ref_div == 0) {
534 DC_LOG_WARNING("Invalid base timer divider [%s]\n",
535 __func__);
536 xtal_ref_div = 2;
537 }
538
539 /* Calculate the reference clock by dividing the original frequency by
540 * XTAL_REF_DIV.
541 * At the upper level, uint32_t reference_frequency =
542 * dal_i2caux_get_reference_clock(as) >> 1,
543 * which is already divided by 2. So we need to multiply by 2 to get the
544 * original reference clock from ppll_info.
545 */
546 hw_engine->reference_frequency =
547 (arg->reference_frequency * 2) / xtal_ref_div;
548}
549
550struct i2c_engine *dal_i2c_hw_engine_dce110_create(
551 const struct i2c_hw_engine_dce110_create_arg *arg)
552{
553 struct i2c_hw_engine_dce110 *engine_dce10;
554
555 if (!arg) {
556 ASSERT_CRITICAL(false);
557 return NULL;
558 }
559 if (!arg->reference_frequency) {
560 ASSERT_CRITICAL(false);
561 return NULL;
562 }
563
564 engine_dce10 = kzalloc(sizeof(struct i2c_hw_engine_dce110),
565 GFP_KERNEL);
566
567 if (!engine_dce10) {
568 ASSERT_CRITICAL(false);
569 return NULL;
570 }
571
572 construct(engine_dce10, arg);
573 return &engine_dce10->base.base;
574}
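To make the arithmetic in get_transaction_timeout() above concrete, here is a worked example with illustrative inputs. It assumes TRANSACTION_TIMEOUT_IN_I2C_CLOCKS equals 32 (the constant is defined outside this file) and that speed is in kHz:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t speed = 100;           /* 100 kHz bus */
	uint32_t length = 16;           /* bytes in this transaction */
	uint32_t buffer_used_bytes = 2; /* bytes already queued in the HW buffer */
	uint32_t transaction_count = 1; /* transactions pending before GO */

	/* assuming TRANSACTION_TIMEOUT_IN_I2C_CLOCKS == 32 */
	uint32_t period_timeout = (1000 * 32) / speed;   /* 320 */

	uint32_t stretches = 1 + (length << 3) + 1       /* start + data bits + stop */
		+ (buffer_used_bytes << 3)
		+ (transaction_count << 1);              /* 148 total */

	printf("timeout = %u\n", period_timeout * stretches); /* 47360 */
	return 0;
}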
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
deleted file mode 100644
index fea2946906ed..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
+++ /dev/null
@@ -1,218 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_HW_ENGINE_DCE110_H__
27#define __DAL_I2C_HW_ENGINE_DCE110_H__
28
29#define I2C_HW_ENGINE_COMMON_REG_LIST(id)\
30 SRI(SETUP, DC_I2C_DDC, id),\
31 SRI(SPEED, DC_I2C_DDC, id),\
32 SR(DC_I2C_ARBITRATION),\
33 SR(DC_I2C_CONTROL),\
34 SR(DC_I2C_SW_STATUS),\
35 SR(DC_I2C_TRANSACTION0),\
36 SR(DC_I2C_TRANSACTION1),\
37 SR(DC_I2C_TRANSACTION2),\
38 SR(DC_I2C_TRANSACTION3),\
39 SR(DC_I2C_DATA),\
40 SR(MICROSECOND_TIME_BASE_DIV)
41
42#define I2C_SF(reg_name, field_name, post_fix)\
43 .field_name = reg_name ## __ ## field_name ## post_fix
44
45#define I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
46 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
47 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT, mask_sh),\
48 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN, mask_sh),\
49 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN, mask_sh),\
50 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL, mask_sh),\
51 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY, mask_sh),\
52 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY, mask_sh),\
53 I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, mask_sh),\
54 I2C_SF(DC_I2C_ARBITRATION, DC_I2C_NO_QUEUED_SW_GO, mask_sh),\
55 I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_PRIORITY, mask_sh),\
56 I2C_SF(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, mask_sh),\
57 I2C_SF(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, mask_sh),\
58 I2C_SF(DC_I2C_CONTROL, DC_I2C_GO, mask_sh),\
59 I2C_SF(DC_I2C_CONTROL, DC_I2C_SEND_RESET, mask_sh),\
60 I2C_SF(DC_I2C_CONTROL, DC_I2C_TRANSACTION_COUNT, mask_sh),\
61 I2C_SF(DC_I2C_CONTROL, DC_I2C_DDC_SELECT, mask_sh),\
62 I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE, mask_sh),\
63 I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD, mask_sh),\
64 I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STOPPED_ON_NACK, mask_sh),\
65 I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_TIMEOUT, mask_sh),\
66 I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_ABORTED, mask_sh),\
67 I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_DONE, mask_sh),\
68 I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, mask_sh),\
69 I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0, mask_sh),\
70 I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_START0, mask_sh),\
71 I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_RW0, mask_sh),\
72 I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP0, mask_sh),\
73 I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_COUNT0, mask_sh),\
74 I2C_SF(DC_I2C_DATA, DC_I2C_DATA_RW, mask_sh),\
75 I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\
76 I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
77 I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
78 I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh)
79
80#define I2C_COMMON_MASK_SH_LIST_DCE100(mask_sh)\
81 I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
82
83#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
84 I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
85 I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL, mask_sh)
86
87struct dce110_i2c_hw_engine_shift {
88 uint8_t DC_I2C_DDC1_ENABLE;
89 uint8_t DC_I2C_DDC1_TIME_LIMIT;
90 uint8_t DC_I2C_DDC1_DATA_DRIVE_EN;
91 uint8_t DC_I2C_DDC1_CLK_DRIVE_EN;
92 uint8_t DC_I2C_DDC1_DATA_DRIVE_SEL;
93 uint8_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
94 uint8_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
95 uint8_t DC_I2C_SW_DONE_USING_I2C_REG;
96 uint8_t DC_I2C_NO_QUEUED_SW_GO;
97 uint8_t DC_I2C_SW_PRIORITY;
98 uint8_t DC_I2C_SOFT_RESET;
99 uint8_t DC_I2C_SW_STATUS_RESET;
100 uint8_t DC_I2C_GO;
101 uint8_t DC_I2C_SEND_RESET;
102 uint8_t DC_I2C_TRANSACTION_COUNT;
103 uint8_t DC_I2C_DDC_SELECT;
104 uint8_t DC_I2C_DDC1_PRESCALE;
105 uint8_t DC_I2C_DDC1_THRESHOLD;
106 uint8_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
107 uint8_t DC_I2C_SW_STOPPED_ON_NACK;
108 uint8_t DC_I2C_SW_TIMEOUT;
109 uint8_t DC_I2C_SW_ABORTED;
110 uint8_t DC_I2C_SW_DONE;
111 uint8_t DC_I2C_SW_STATUS;
112 uint8_t DC_I2C_STOP_ON_NACK0;
113 uint8_t DC_I2C_START0;
114 uint8_t DC_I2C_RW0;
115 uint8_t DC_I2C_STOP0;
116 uint8_t DC_I2C_COUNT0;
117 uint8_t DC_I2C_DATA_RW;
118 uint8_t DC_I2C_DATA;
119 uint8_t DC_I2C_INDEX;
120 uint8_t DC_I2C_INDEX_WRITE;
121 uint8_t XTAL_REF_DIV;
122};
123
124struct dce110_i2c_hw_engine_mask {
125 uint32_t DC_I2C_DDC1_ENABLE;
126 uint32_t DC_I2C_DDC1_TIME_LIMIT;
127 uint32_t DC_I2C_DDC1_DATA_DRIVE_EN;
128 uint32_t DC_I2C_DDC1_CLK_DRIVE_EN;
129 uint32_t DC_I2C_DDC1_DATA_DRIVE_SEL;
130 uint32_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
131 uint32_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
132 uint32_t DC_I2C_SW_DONE_USING_I2C_REG;
133 uint32_t DC_I2C_NO_QUEUED_SW_GO;
134 uint32_t DC_I2C_SW_PRIORITY;
135 uint32_t DC_I2C_SOFT_RESET;
136 uint32_t DC_I2C_SW_STATUS_RESET;
137 uint32_t DC_I2C_GO;
138 uint32_t DC_I2C_SEND_RESET;
139 uint32_t DC_I2C_TRANSACTION_COUNT;
140 uint32_t DC_I2C_DDC_SELECT;
141 uint32_t DC_I2C_DDC1_PRESCALE;
142 uint32_t DC_I2C_DDC1_THRESHOLD;
143 uint32_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
144 uint32_t DC_I2C_SW_STOPPED_ON_NACK;
145 uint32_t DC_I2C_SW_TIMEOUT;
146 uint32_t DC_I2C_SW_ABORTED;
147 uint32_t DC_I2C_SW_DONE;
148 uint32_t DC_I2C_SW_STATUS;
149 uint32_t DC_I2C_STOP_ON_NACK0;
150 uint32_t DC_I2C_START0;
151 uint32_t DC_I2C_RW0;
152 uint32_t DC_I2C_STOP0;
153 uint32_t DC_I2C_COUNT0;
154 uint32_t DC_I2C_DATA_RW;
155 uint32_t DC_I2C_DATA;
156 uint32_t DC_I2C_INDEX;
157 uint32_t DC_I2C_INDEX_WRITE;
158 uint32_t XTAL_REF_DIV;
159};
160
161struct dce110_i2c_hw_engine_registers {
162 uint32_t SETUP;
163 uint32_t SPEED;
164 uint32_t DC_I2C_ARBITRATION;
165 uint32_t DC_I2C_CONTROL;
166 uint32_t DC_I2C_SW_STATUS;
167 uint32_t DC_I2C_TRANSACTION0;
168 uint32_t DC_I2C_TRANSACTION1;
169 uint32_t DC_I2C_TRANSACTION2;
170 uint32_t DC_I2C_TRANSACTION3;
171 uint32_t DC_I2C_DATA;
172 uint32_t MICROSECOND_TIME_BASE_DIV;
173};
174
175struct i2c_hw_engine_dce110 {
176 struct i2c_hw_engine base;
177 const struct dce110_i2c_hw_engine_registers *regs;
178 const struct dce110_i2c_hw_engine_shift *i2c_shift;
179 const struct dce110_i2c_hw_engine_mask *i2c_mask;
180 struct {
181 uint32_t DC_I2C_DDCX_SETUP;
182 uint32_t DC_I2C_DDCX_SPEED;
183 } addr;
184 uint32_t engine_id;
185 /* expressed in kilohertz */
186 uint32_t reference_frequency;
187 /* number of bytes currently used in HW buffer */
188 uint32_t buffer_used_bytes;
189 /* number of bytes used for write transaction in HW buffer
190 * - this will be used as the index to read from*/
191 uint32_t buffer_used_write;
192 /* number of pending transactions (before GO) */
193 uint32_t transaction_count;
194 uint32_t engine_keep_power_up_count;
195 uint32_t i2_setup_time_limit;
196};
197
198struct i2c_hw_engine_dce110_create_arg {
199 uint32_t engine_id;
200 uint32_t reference_frequency;
201 uint32_t default_speed;
202 struct dc_context *ctx;
203 const struct dce110_i2c_hw_engine_registers *regs;
204 const struct dce110_i2c_hw_engine_shift *i2c_shift;
205 const struct dce110_i2c_hw_engine_mask *i2c_mask;
206};
207
208struct i2c_engine *dal_i2c_hw_engine_dce110_create(
209 const struct i2c_hw_engine_dce110_create_arg *arg);
210
211enum {
212 I2C_SETUP_TIME_LIMIT_DCE = 255,
213 I2C_SETUP_TIME_LIMIT_DCN = 3,
214 I2C_HW_BUFFER_SIZE = 538,
215 I2C_SEND_RESET_LENGTH_9 = 9,
216 I2C_SEND_RESET_LENGTH_10 = 10,
217};
218#endif
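The SPEED programming in set_speed()/get_speed() of i2c_hw_engine_dce110.c reduces to one integer relation: prescale = reference_frequency / speed on write, speed = reference_frequency / prescale on read, both in kHz. A round-trip sketch with an example reference clock (exact here because 100 divides 48000 evenly):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t reference_frequency = 48000; /* kHz, example value */
	uint32_t requested_speed = 100;       /* kHz */

	uint32_t prescale = reference_frequency / requested_speed; /* 480 */
	uint32_t readback = reference_frequency / prescale;        /* 100 */

	assert(readback == requested_speed);
	return 0;
}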
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c
deleted file mode 100644
index 3aa7f791e523..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c
+++ /dev/null
@@ -1,160 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../engine.h"
33#include "../i2c_engine.h"
34#include "../i2c_sw_engine.h"
35
36/*
37 * Header of this unit
38 */
39
40#include "i2c_sw_engine_dce110.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45
46/*
47 * This unit
48 */
49
50/*
51 * @brief
52 * Cast 'struct i2c_sw_engine *'
53 * to 'struct i2c_sw_engine_dce110 *'
54 */
55#define FROM_I2C_SW_ENGINE(ptr) \
56 container_of((ptr), struct i2c_sw_engine_dce110, base)
57/*
58 * @brief
59 * Cast 'struct i2c_engine *'
60 * to 'struct i2c_sw_engine_dce110 *'
61 */
62#define FROM_I2C_ENGINE(ptr) \
63 FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base))
64
65/*
66 * @brief
67 * Cast 'struct engine *'
68 * to 'struct i2c_sw_engine_dce110 *'
69 */
70#define FROM_ENGINE(ptr) \
71 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
72
73static void release_engine(
74 struct engine *engine)
75{
76}
77
78static void destruct(
79 struct i2c_sw_engine_dce110 *engine)
80{
81 dal_i2c_sw_engine_destruct(&engine->base);
82}
83
84static void destroy(
85 struct i2c_engine **engine)
86{
87 struct i2c_sw_engine_dce110 *sw_engine = FROM_I2C_ENGINE(*engine);
88
89 destruct(sw_engine);
90
91 kfree(sw_engine);
92
93 *engine = NULL;
94}
95
96static bool acquire_engine(
97 struct i2c_engine *engine,
98 struct ddc *ddc_handle)
99{
100 return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle);
101}
102
103static const struct i2c_engine_funcs i2c_engine_funcs = {
104 .acquire_engine = acquire_engine,
105 .destroy = destroy,
106 .get_speed = dal_i2c_sw_engine_get_speed,
107 .set_speed = dal_i2c_sw_engine_set_speed,
108 .setup_engine = dal_i2c_engine_setup_i2c_engine,
109 .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
110 .process_channel_reply = dal_i2c_engine_process_channel_reply,
111 .get_channel_status = dal_i2c_sw_engine_get_channel_status,
112};
113
114static const struct engine_funcs engine_funcs = {
115 .release_engine = release_engine,
116 .get_engine_type = dal_i2c_sw_engine_get_engine_type,
117 .acquire = dal_i2c_engine_acquire,
118 .submit_request = dal_i2c_sw_engine_submit_request,
119};
120
121static void construct(
122 struct i2c_sw_engine_dce110 *engine_dce110,
123 const struct i2c_sw_engine_dce110_create_arg *arg_dce110)
124{
125 struct i2c_sw_engine_create_arg arg_base;
126
127 arg_base.ctx = arg_dce110->ctx;
128 arg_base.default_speed = arg_dce110->default_speed;
129
130 dal_i2c_sw_engine_construct(&engine_dce110->base, &arg_base);
131
132 /* struct engine: assign struct engine_funcs */
133 engine_dce110->base.base.base.funcs = &engine_funcs;
134 /* struct i2c_engine: assign struct i2c_engine_funcs */
135 engine_dce110->base.base.funcs = &i2c_engine_funcs;
136 engine_dce110->base.default_speed = arg_dce110->default_speed;
137 engine_dce110->engine_id = arg_dce110->engine_id;
138}
139
140struct i2c_engine *dal_i2c_sw_engine_dce110_create(
141 const struct i2c_sw_engine_dce110_create_arg *arg)
142{
143 struct i2c_sw_engine_dce110 *engine_dce110;
144
145 if (!arg) {
146 ASSERT_CRITICAL(false);
147 return NULL;
148 }
149
150 engine_dce110 = kzalloc(sizeof(struct i2c_sw_engine_dce110),
151 GFP_KERNEL);
152
153 if (!engine_dce110) {
154 ASSERT_CRITICAL(false);
155 return NULL;
156 }
157
158 construct(engine_dce110, arg);
159 return &engine_dce110->base.base;
160}
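The FROM_* macros above, like their counterparts in the aux and hw engine files, all rest on the same container_of pattern: recover the enclosing struct from a pointer to an embedded base member. A self-contained sketch (simplified container_of, illustrative struct names):

#include <stddef.h>
#include <assert.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };
struct derived { int extra; struct base base; };

int main(void)
{
	struct derived d = { .extra = 42 };
	struct base *b = &d.base;

	/* downcast from the embedded base back to the derived struct */
	assert(container_of(b, struct derived, base) == &d);
	return 0;
}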
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
deleted file mode 100644
index 1d748ac1d6d6..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
+++ /dev/null
@@ -1,329 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../i2caux.h"
33#include "../engine.h"
34#include "../i2c_engine.h"
35#include "../i2c_sw_engine.h"
36#include "../i2c_hw_engine.h"
37
38/*
39 * Header of this unit
40 */
41#include "i2caux_dce110.h"
42
43#include "i2c_sw_engine_dce110.h"
44#include "i2c_hw_engine_dce110.h"
45#include "aux_engine_dce110.h"
46#include "../../dc.h"
47#include "dc_types.h"
48
49
50/*
51 * Post-requisites: headers required by this unit
52 */
53
54/*
55 * This unit
56 */
57/*cast pointer to struct i2caux TO pointer to struct i2caux_dce110*/
58#define FROM_I2C_AUX(ptr) \
59 container_of((ptr), struct i2caux_dce110, base)
60
61static void destruct(
62 struct i2caux_dce110 *i2caux_dce110)
63{
64 dal_i2caux_destruct(&i2caux_dce110->base);
65}
66
67static void destroy(
68 struct i2caux **i2c_engine)
69{
70 struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(*i2c_engine);
71
72 destruct(i2caux_dce110);
73
74 kfree(i2caux_dce110);
75
76 *i2c_engine = NULL;
77}
78
79static struct i2c_engine *acquire_i2c_hw_engine(
80 struct i2caux *i2caux,
81 struct ddc *ddc)
82{
83 struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux);
84
85 struct i2c_engine *engine = NULL;
86 /* The generic HW engine is not used for EDID reads.
87 * It may be needed for an external i2c device, like a thermal chip;
88 * TODO: will be implemented when needed.
89 * Check the dce80 bool non_generic for the generic hw engine.
90 */
91
92 if (!ddc)
93 return NULL;
94
95 if (ddc->hw_info.hw_supported) {
96 enum gpio_ddc_line line = dal_ddc_get_line(ddc);
97
98 if (line < GPIO_DDC_LINE_COUNT)
99 engine = i2caux->i2c_hw_engines[line];
100 }
101
102 if (!engine)
103 return NULL;
104
105 if (!i2caux_dce110->i2c_hw_buffer_in_use &&
106 engine->base.funcs->acquire(&engine->base, ddc)) {
107 i2caux_dce110->i2c_hw_buffer_in_use = true;
108 return engine;
109 }
110
111 return NULL;
112}
113
114static void release_engine(
115 struct i2caux *i2caux,
116 struct engine *engine)
117{
118 struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux);
119
120 if (engine->funcs->get_engine_type(engine) ==
121 I2CAUX_ENGINE_TYPE_I2C_DDC_HW)
122 i2caux_dce110->i2c_hw_buffer_in_use = false;
123
124 dal_i2caux_release_engine(i2caux, engine);
125}
126
127static const enum gpio_ddc_line hw_ddc_lines[] = {
128 GPIO_DDC_LINE_DDC1,
129 GPIO_DDC_LINE_DDC2,
130 GPIO_DDC_LINE_DDC3,
131 GPIO_DDC_LINE_DDC4,
132 GPIO_DDC_LINE_DDC5,
133 GPIO_DDC_LINE_DDC6,
134};
135
136static const enum gpio_ddc_line hw_aux_lines[] = {
137 GPIO_DDC_LINE_DDC1,
138 GPIO_DDC_LINE_DDC2,
139 GPIO_DDC_LINE_DDC3,
140 GPIO_DDC_LINE_DDC4,
141 GPIO_DDC_LINE_DDC5,
142 GPIO_DDC_LINE_DDC6,
143};
144
145/* function table */
146static const struct i2caux_funcs i2caux_funcs = {
147 .destroy = destroy,
148 .acquire_i2c_hw_engine = acquire_i2c_hw_engine,
149 .release_engine = release_engine,
150 .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine,
151 .acquire_aux_engine = dal_i2caux_acquire_aux_engine,
152};
153
154#include "dce/dce_11_0_d.h"
155#include "dce/dce_11_0_sh_mask.h"
156
157/* set register offset */
158#define SR(reg_name)\
159 .reg_name = mm ## reg_name
160
161/* set register offset with instance */
162#define SRI(reg_name, block, id)\
163 .reg_name = mm ## block ## id ## _ ## reg_name
164
165#define aux_regs(id)\
166[id] = {\
167 AUX_COMMON_REG_LIST(id), \
168 .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \
169}
170
171#define hw_engine_regs(id)\
172{\
173 I2C_HW_ENGINE_COMMON_REG_LIST(id) \
174}
175
176static const struct dce110_aux_registers dce110_aux_regs[] = {
177 aux_regs(0),
178 aux_regs(1),
179 aux_regs(2),
180 aux_regs(3),
181 aux_regs(4),
182 aux_regs(5)
183};
184
185static const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[] = {
186 hw_engine_regs(1),
187 hw_engine_regs(2),
188 hw_engine_regs(3),
189 hw_engine_regs(4),
190 hw_engine_regs(5),
191 hw_engine_regs(6)
192};
193
194static const struct dce110_i2c_hw_engine_shift i2c_shift = {
195 I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
196};
197
198static const struct dce110_i2c_hw_engine_mask i2c_mask = {
199 I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
200};
201
202void dal_i2caux_dce110_construct(
203 struct i2caux_dce110 *i2caux_dce110,
204 struct dc_context *ctx,
205 unsigned int num_i2caux_inst,
206 const struct dce110_aux_registers aux_regs[],
207 const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
208 const struct dce110_i2c_hw_engine_shift *i2c_shift,
209 const struct dce110_i2c_hw_engine_mask *i2c_mask)
210{
211 uint32_t i = 0;
212 uint32_t reference_frequency = 0;
213 bool use_i2c_sw_engine = false;
214 struct i2caux *base = NULL;
215 /*TODO: For CZ bring-up, if dal_i2caux_get_reference_clock
216 * does not return 48KHz, we need to hard-code 48KHz.
217 * An incorrect BIOS setting causes this.
218 * For production, we always get the value from the BIOS.*/
219 reference_frequency =
220 dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1;
221
222 base = &i2caux_dce110->base;
223
224 dal_i2caux_construct(base, ctx);
225
226 i2caux_dce110->base.funcs = &i2caux_funcs;
227 i2caux_dce110->i2c_hw_buffer_in_use = false;
228 /* Create I2C engines (DDC lines per connector) for the
229 * different I2C/AUX usage cases: DDC, generic GPIO, AUX.
230 */
231 do {
232 enum gpio_ddc_line line_id = hw_ddc_lines[i];
233
234 struct i2c_hw_engine_dce110_create_arg hw_arg_dce110;
235
236 if (use_i2c_sw_engine) {
237 struct i2c_sw_engine_dce110_create_arg sw_arg;
238
239 sw_arg.engine_id = i;
240 sw_arg.default_speed = base->default_i2c_sw_speed;
241 sw_arg.ctx = ctx;
242 base->i2c_sw_engines[line_id] =
243 dal_i2c_sw_engine_dce110_create(&sw_arg);
244 }
245
246 hw_arg_dce110.engine_id = i;
247 hw_arg_dce110.reference_frequency = reference_frequency;
248 hw_arg_dce110.default_speed = base->default_i2c_hw_speed;
249 hw_arg_dce110.ctx = ctx;
250 hw_arg_dce110.regs = &i2c_hw_engine_regs[i];
251 hw_arg_dce110.i2c_shift = i2c_shift;
252 hw_arg_dce110.i2c_mask = i2c_mask;
253
254 base->i2c_hw_engines[line_id] =
255 dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
256 if (base->i2c_hw_engines[line_id] != NULL) {
257 switch (ctx->dce_version) {
258 case DCN_VERSION_1_0:
259 base->i2c_hw_engines[line_id]->setup_limit =
260 I2C_SETUP_TIME_LIMIT_DCN;
261 base->i2c_hw_engines[line_id]->send_reset_length = 0;
262 break;
263 default:
264 base->i2c_hw_engines[line_id]->setup_limit =
265 I2C_SETUP_TIME_LIMIT_DCE;
266 base->i2c_hw_engines[line_id]->send_reset_length = 0;
267 break;
268 }
269 }
270 ++i;
271 } while (i < num_i2caux_inst);
272
273 /* Create AUX engines for all lines which have assisted HW AUX;
274 * 'i' (loop counter) is used as the DDC/AUX engine_id */
275
276 i = 0;
277
278 do {
279 enum gpio_ddc_line line_id = hw_aux_lines[i];
280
281 struct aux_engine_dce110_init_data aux_init_data;
282
283 aux_init_data.engine_id = i;
284 aux_init_data.timeout_period = base->aux_timeout_period;
285 aux_init_data.ctx = ctx;
286 aux_init_data.regs = &aux_regs[i];
287
288 base->aux_engines[line_id] =
289 dal_aux_engine_dce110_create(&aux_init_data);
290
291 ++i;
292 } while (i < num_i2caux_inst);
293
294 /*TODO Generic I2C SW and HW*/
295}
296
297/*
298 * dal_i2caux_dce110_create
299 *
300 * @brief
301 * public interface to allocate memory for DCE11 I2CAUX
302 *
303 * @param
304 * struct adapter_service *as - [in]
305 * struct dc_context *ctx - [in]
306 *
307 * @return
308 * pointer to the base struct of DCE11 I2CAUX
309 */
310struct i2caux *dal_i2caux_dce110_create(
311 struct dc_context *ctx)
312{
313 struct i2caux_dce110 *i2caux_dce110 =
314 kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
315
316 if (!i2caux_dce110) {
317 ASSERT_CRITICAL(false);
318 return NULL;
319 }
320
321 dal_i2caux_dce110_construct(i2caux_dce110,
322 ctx,
323 ARRAY_SIZE(dce110_aux_regs),
324 dce110_aux_regs,
325 i2c_hw_engine_regs,
326 &i2c_shift,
327 &i2c_mask);
328 return &i2caux_dce110->base;
329}
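The reference_frequency passed to each HW engine above is the BIOS reference clock halved (the >> 1 in dal_i2caux_dce110_construct()); construct() in i2c_hw_engine_dce110.c then multiplies by 2 to undo that halving before dividing by XTAL_REF_DIV. A numeric sketch with example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bios_reference_clock = 48000; /* kHz, example BIOS value */
	uint32_t xtal_ref_div = 2;             /* from MICROSECOND_TIME_BASE_DIV */

	uint32_t arg_reference_frequency = bios_reference_clock >> 1;  /* 24000 */
	uint32_t engine_reference_frequency =
		(arg_reference_frequency * 2) / xtal_ref_div;          /* 24000 */

	printf("%u kHz\n", engine_reference_frequency);
	return 0;
}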
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
deleted file mode 100644
index d3d8cc58666a..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_DCE110_H__
27#define __DAL_I2C_AUX_DCE110_H__
28
29#include "../i2caux.h"
30
31struct i2caux_dce110 {
32 struct i2caux base;
33 /* indicate the I2C HW circular buffer is in use */
34 bool i2c_hw_buffer_in_use;
35};
36
37struct dce110_aux_registers;
38struct dce110_i2c_hw_engine_registers;
39struct dce110_i2c_hw_engine_shift;
40struct dce110_i2c_hw_engine_mask;
41
42struct i2caux *dal_i2caux_dce110_create(
43 struct dc_context *ctx);
44
45void dal_i2caux_dce110_construct(
46 struct i2caux_dce110 *i2caux_dce110,
47 struct dc_context *ctx,
48 unsigned int num_i2caux_inst,
49 const struct dce110_aux_registers *aux_regs,
50 const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
51 const struct dce110_i2c_hw_engine_shift *i2c_shift,
52 const struct dce110_i2c_hw_engine_mask *i2c_mask);
53
54#endif /* __DAL_I2C_AUX_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
deleted file mode 100644
index a9db04738724..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
+++ /dev/null
@@ -1,129 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/i2caux_interface.h"
29#include "../i2caux.h"
30#include "../engine.h"
31#include "../i2c_engine.h"
32#include "../i2c_sw_engine.h"
33#include "../i2c_hw_engine.h"
34
35#include "../dce110/i2caux_dce110.h"
36#include "i2caux_dce112.h"
37
38#include "../dce110/aux_engine_dce110.h"
39
40#include "../dce110/i2c_hw_engine_dce110.h"
41
42#include "dce/dce_11_2_d.h"
43#include "dce/dce_11_2_sh_mask.h"
44
45/* set register offset */
46#define SR(reg_name)\
47 .reg_name = mm ## reg_name
48
49/* set register offset with instance */
50#define SRI(reg_name, block, id)\
51 .reg_name = mm ## block ## id ## _ ## reg_name
52
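/*
 * Editorial note (illustrative expansion, not in the original source):
 * token pasting turns SRI(SETUP, DC_I2C_DDC, 1) into
 *
 *	.SETUP = mmDC_I2C_DDC1_SETUP
 *
 * which is how register-list initializers such as aux_regs() and
 * hw_engine_regs() below can be stamped out once per engine id.
 */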
53#define aux_regs(id)\
54[id] = {\
55 AUX_COMMON_REG_LIST(id), \
56 .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \
57}
58
59#define hw_engine_regs(id)\
60{\
61 I2C_HW_ENGINE_COMMON_REG_LIST(id) \
62}
63
64static const struct dce110_aux_registers dce112_aux_regs[] = {
65 aux_regs(0),
66 aux_regs(1),
67 aux_regs(2),
68 aux_regs(3),
69 aux_regs(4),
70 aux_regs(5),
71};
72
73static const struct dce110_i2c_hw_engine_registers dce112_hw_engine_regs[] = {
74 hw_engine_regs(1),
75 hw_engine_regs(2),
76 hw_engine_regs(3),
77 hw_engine_regs(4),
78 hw_engine_regs(5),
79 hw_engine_regs(6)
80};
81
82static const struct dce110_i2c_hw_engine_shift i2c_shift = {
83 I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
84};
85
86static const struct dce110_i2c_hw_engine_mask i2c_mask = {
87 I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
88};
89
90static void construct(
91 struct i2caux_dce110 *i2caux_dce110,
92 struct dc_context *ctx)
93{
94 dal_i2caux_dce110_construct(i2caux_dce110,
95 ctx,
96 ARRAY_SIZE(dce112_aux_regs),
97 dce112_aux_regs,
98 dce112_hw_engine_regs,
99 &i2c_shift,
100 &i2c_mask);
101}
102
103/*
104 * dal_i2caux_dce112_create
105 *
106 * @brief
107 * public interface to allocate memory for DCE112 I2CAUX
108 *
109 * @param
110 *	struct dc_context *ctx - [in]
111 *
112 * @return
113 * pointer to the base struct of DCE112 I2CAUX
115 */
116struct i2caux *dal_i2caux_dce112_create(
117 struct dc_context *ctx)
118{
119 struct i2caux_dce110 *i2caux_dce110 =
120 kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
121
122 if (!i2caux_dce110) {
123 ASSERT_CRITICAL(false);
124 return NULL;
125 }
126
127 construct(i2caux_dce110, ctx);
128 return &i2caux_dce110->base;
129}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h
deleted file mode 100644
index 8d35453c25b6..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_DCE112_H__
27#define __DAL_I2C_AUX_DCE112_H__
28
29struct i2caux *dal_i2caux_dce112_create(
30 struct dc_context *ctx);
31
32#endif /* __DAL_I2C_AUX_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
deleted file mode 100644
index 6a4f344c1db4..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/i2caux_interface.h"
29#include "../i2caux.h"
30#include "../engine.h"
31#include "../i2c_engine.h"
32#include "../i2c_sw_engine.h"
33#include "../i2c_hw_engine.h"
34
35#include "../dce110/i2c_hw_engine_dce110.h"
36#include "../dce110/aux_engine_dce110.h"
37#include "../dce110/i2caux_dce110.h"
38
39#include "dce/dce_12_0_offset.h"
40#include "dce/dce_12_0_sh_mask.h"
41#include "soc15_hw_ip.h"
42#include "vega10_ip_offset.h"
43
44/* begin *********************
45 * macros to expand the register list macros defined in the HW object header file */
46
47#define BASE_INNER(seg) \
48 DCE_BASE__INST0_SEG ## seg
49
50/* expand the base address at compile time */
51#define BASE(seg) \
52 BASE_INNER(seg)
53
54#define SR(reg_name)\
55 .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
56 mm ## reg_name
57
58#define SRI(reg_name, block, id)\
59 .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
60 mm ## block ## id ## _ ## reg_name
61/* macros to expand the register list macros defined in the HW object header file
62 * end *********************/
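/*
 * Editorial note (illustrative expansion, not in the original source):
 * on soc15-style register headers each mm<REG> offset comes with an
 * mm<REG>_BASE_IDX segment index, so, assuming a segment index of 2,
 *
 *	SR(DC_I2C_CONTROL)
 *
 * expands to
 *
 *	.DC_I2C_CONTROL = DCE_BASE__INST0_SEG2 + mmDC_I2C_CONTROL
 *
 * with the base segment folded in at compile time via BASE()/BASE_INNER().
 */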
63
64#define aux_regs(id)\
65[id] = {\
66 AUX_COMMON_REG_LIST(id), \
67 .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK \
68}
69
70static const struct dce110_aux_registers dce120_aux_regs[] = {
71 aux_regs(0),
72 aux_regs(1),
73 aux_regs(2),
74 aux_regs(3),
75 aux_regs(4),
76 aux_regs(5),
77};
78
79#define hw_engine_regs(id)\
80{\
81 I2C_HW_ENGINE_COMMON_REG_LIST(id) \
82}
83
84static const struct dce110_i2c_hw_engine_registers dce120_hw_engine_regs[] = {
85 hw_engine_regs(1),
86 hw_engine_regs(2),
87 hw_engine_regs(3),
88 hw_engine_regs(4),
89 hw_engine_regs(5),
90 hw_engine_regs(6)
91};
92
93static const struct dce110_i2c_hw_engine_shift i2c_shift = {
94 I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
95};
96
97static const struct dce110_i2c_hw_engine_mask i2c_mask = {
98 I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
99};
100
101struct i2caux *dal_i2caux_dce120_create(
102 struct dc_context *ctx)
103{
104 struct i2caux_dce110 *i2caux_dce110 =
105 kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
106
107 if (!i2caux_dce110) {
108 ASSERT_CRITICAL(false);
109 return NULL;
110 }
111
112 dal_i2caux_dce110_construct(i2caux_dce110,
113 ctx,
114 ARRAY_SIZE(dce120_aux_regs),
115 dce120_aux_regs,
116 dce120_hw_engine_regs,
117 &i2c_shift,
118 &i2c_mask);
119 return &i2caux_dce110->base;
120}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c
deleted file mode 100644
index fd0832dd2c75..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c
+++ /dev/null
@@ -1,875 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../engine.h"
33#include "../i2c_engine.h"
34#include "../i2c_hw_engine.h"
35#include "../i2c_generic_hw_engine.h"
36/*
37 * Header of this unit
38 */
39
40#include "i2c_hw_engine_dce80.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45
46#include "dce/dce_8_0_d.h"
47#include "dce/dce_8_0_sh_mask.h"
48/*
49 * This unit
50 */
51
52enum dc_i2c_status {
53 DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
54 DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
55 DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
56};
57
58enum dc_i2c_arbitration {
59 DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
60 DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
61};
62
63enum {
64 /* No timeout in HW
65 * (timeout implemented in SW by querying status) */
66 I2C_SETUP_TIME_LIMIT = 255,
67 I2C_HW_BUFFER_SIZE = 144
68};
69
70/*
71 * @brief
72 * Cast 'struct i2c_hw_engine *'
73 * to 'struct i2c_hw_engine_dce80 *'
74 */
75#define FROM_I2C_HW_ENGINE(ptr) \
76 container_of((ptr), struct i2c_hw_engine_dce80, base)
77
78/*
79 * @brief
80 * Cast pointer to 'struct i2c_engine *'
81 * to pointer to 'struct i2c_hw_engine_dce80 *'
82 */
83#define FROM_I2C_ENGINE(ptr) \
84 FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
85
86/*
87 * @brief
88 * Cast pointer to 'struct engine *'
89 * to 'pointer to struct i2c_hw_engine_dce80 *'
90 */
91#define FROM_ENGINE(ptr) \
92 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
93
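/*
 * Editorial note (illustrative expansion, not in the original source):
 * the object layering is i2c_hw_engine_dce80 -> i2c_hw_engine ->
 * i2c_engine -> engine, each embedded as a 'base' member, so
 * FROM_ENGINE(ptr) is equivalent to peeling the layers back one
 * container_of() at a time:
 *
 *	struct i2c_engine *ie =
 *		container_of(ptr, struct i2c_engine, base);
 *	struct i2c_hw_engine *hw =
 *		container_of(ie, struct i2c_hw_engine, base);
 *	struct i2c_hw_engine_dce80 *dce80 =
 *		container_of(hw, struct i2c_hw_engine_dce80, base);
 */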
94static void disable_i2c_hw_engine(
95 struct i2c_hw_engine_dce80 *engine)
96{
97 const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
98 uint32_t value = 0;
99
100 struct dc_context *ctx = NULL;
101
102 ctx = engine->base.base.base.ctx;
103
104 value = dm_read_reg(ctx, addr);
105
106 set_reg_field_value(
107 value,
108 0,
109 DC_I2C_DDC1_SETUP,
110 DC_I2C_DDC1_ENABLE);
111
112 dm_write_reg(ctx, addr, value);
113}
114
115static void release_engine(
116 struct engine *engine)
117{
118 struct i2c_hw_engine_dce80 *hw_engine = FROM_ENGINE(engine);
119
120 struct i2c_engine *base = NULL;
121 bool safe_to_reset;
122 uint32_t value = 0;
123
124 base = &hw_engine->base.base;
125
126 /* Restore original HW engine speed */
127
128 base->funcs->set_speed(base, hw_engine->base.original_speed);
129
130 /* Release I2C */
131 {
132 value = dm_read_reg(engine->ctx, mmDC_I2C_ARBITRATION);
133
134 set_reg_field_value(
135 value,
136 1,
137 DC_I2C_ARBITRATION,
138 DC_I2C_SW_DONE_USING_I2C_REG);
139
140 dm_write_reg(engine->ctx, mmDC_I2C_ARBITRATION, value);
141 }
142
143 /* Reset HW engine */
144 {
145 uint32_t i2c_sw_status = 0;
146
147 value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
148
149 i2c_sw_status = get_reg_field_value(
150 value,
151 DC_I2C_SW_STATUS,
152 DC_I2C_SW_STATUS);
153 /* if used by SW, safe to reset */
154 safe_to_reset = (i2c_sw_status == 1);
155 }
156 {
157 value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL);
158
159 if (safe_to_reset)
160 set_reg_field_value(
161 value,
162 1,
163 DC_I2C_CONTROL,
164 DC_I2C_SOFT_RESET);
165
166 set_reg_field_value(
167 value,
168 1,
169 DC_I2C_CONTROL,
170 DC_I2C_SW_STATUS_RESET);
171
172 dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value);
173 }
174
175 /* HW I2c engine - clock gating feature */
176 if (!hw_engine->engine_keep_power_up_count)
177 disable_i2c_hw_engine(hw_engine);
178}
179
180static void destruct(
181 struct i2c_hw_engine_dce80 *engine)
182{
183 dal_i2c_hw_engine_destruct(&engine->base);
184}
185
186static void destroy(
187 struct i2c_engine **i2c_engine)
188{
189 struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(*i2c_engine);
190
191 destruct(engine);
192
193 kfree(engine);
194
195 *i2c_engine = NULL;
196}
197
198static bool setup_engine(
199 struct i2c_engine *i2c_engine)
200{
201 uint32_t value = 0;
202 struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
203
204 /* Program pin select */
205 {
206 const uint32_t addr = mmDC_I2C_CONTROL;
207
208 value = dm_read_reg(i2c_engine->base.ctx, addr);
209
210 set_reg_field_value(
211 value,
212 0,
213 DC_I2C_CONTROL,
214 DC_I2C_GO);
215
216 set_reg_field_value(
217 value,
218 0,
219 DC_I2C_CONTROL,
220 DC_I2C_SOFT_RESET);
221
222 set_reg_field_value(
223 value,
224 0,
225 DC_I2C_CONTROL,
226 DC_I2C_SEND_RESET);
227
228 set_reg_field_value(
229 value,
230 0,
231 DC_I2C_CONTROL,
232 DC_I2C_SW_STATUS_RESET);
233
234 set_reg_field_value(
235 value,
236 0,
237 DC_I2C_CONTROL,
238 DC_I2C_TRANSACTION_COUNT);
239
240 set_reg_field_value(
241 value,
242 engine->engine_id,
243 DC_I2C_CONTROL,
244 DC_I2C_DDC_SELECT);
245
246 dm_write_reg(i2c_engine->base.ctx, addr, value);
247 }
248
249 /* Program time limit */
250 {
251 const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
252
253 value = dm_read_reg(i2c_engine->base.ctx, addr);
254
255 set_reg_field_value(
256 value,
257 I2C_SETUP_TIME_LIMIT,
258 DC_I2C_DDC1_SETUP,
259 DC_I2C_DDC1_TIME_LIMIT);
260
261 set_reg_field_value(
262 value,
263 1,
264 DC_I2C_DDC1_SETUP,
265 DC_I2C_DDC1_ENABLE);
266
267 dm_write_reg(i2c_engine->base.ctx, addr, value);
268 }
269
270	/* Program HW arbitration:
271	 * clear DC_I2C_NO_QUEUED_SW_GO so SW requests can still be queued
272	 * while the engine is in use by HW, and leave DC_I2C_SW_PRIORITY
273	 * at NORMAL so an in-progress SW I2C transfer is not interrupted */
274 {
275 value = dm_read_reg(i2c_engine->base.ctx,
276 mmDC_I2C_ARBITRATION);
277
278 set_reg_field_value(
279 value,
280 0,
281 DC_I2C_ARBITRATION,
282 DC_I2C_NO_QUEUED_SW_GO);
283
284 set_reg_field_value(
285 value,
286 DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
287 DC_I2C_ARBITRATION,
288 DC_I2C_SW_PRIORITY);
289
290 dm_write_reg(i2c_engine->base.ctx,
291 mmDC_I2C_ARBITRATION, value);
292 }
293
294 return true;
295}
296
297static uint32_t get_speed(
298 const struct i2c_engine *i2c_engine)
299{
300 const struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
301
302 const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED;
303
304 uint32_t pre_scale = 0;
305
306 uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr);
307
308 pre_scale = get_reg_field_value(
309 value,
310 DC_I2C_DDC1_SPEED,
311 DC_I2C_DDC1_PRESCALE);
312
313	/* [anaumov] it seems the following assert is unnecessary */
314 /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
315
316 return pre_scale ?
317 engine->reference_frequency / pre_scale :
318 engine->base.default_speed;
319}
320
321static void set_speed(
322 struct i2c_engine *i2c_engine,
323 uint32_t speed)
324{
325 struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
326
327 if (speed) {
328 const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED;
329
330 uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr);
331
332 set_reg_field_value(
333 value,
334 engine->reference_frequency / speed,
335 DC_I2C_DDC1_SPEED,
336 DC_I2C_DDC1_PRESCALE);
337
338 set_reg_field_value(
339 value,
340 2,
341 DC_I2C_DDC1_SPEED,
342 DC_I2C_DDC1_THRESHOLD);
343
344 dm_write_reg(i2c_engine->base.ctx, addr, value);
345 }
346}
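/*
 * Editorial worked example (assumed numbers): with the DCE8 reference
 * frequency of 13500 kHz, set_speed(100) programs a prescale of
 * 13500 / 100 = 135, and get_speed() then reports 13500 / 135 = 100 kHz;
 * the two functions are inverses of each other up to integer truncation.
 */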
347
348static inline void reset_hw_engine(struct engine *engine)
349{
350 uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL);
351
352 set_reg_field_value(
353 value,
354 1,
355 DC_I2C_CONTROL,
356 DC_I2C_SOFT_RESET);
357
358 set_reg_field_value(
359 value,
360 1,
361 DC_I2C_CONTROL,
362 DC_I2C_SW_STATUS_RESET);
363
364 dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value);
365}
366
367static bool is_hw_busy(struct engine *engine)
368{
369 uint32_t i2c_sw_status = 0;
370
371 uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
372
373 i2c_sw_status = get_reg_field_value(
374 value,
375 DC_I2C_SW_STATUS,
376 DC_I2C_SW_STATUS);
377
378 if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
379 return false;
380
381 reset_hw_engine(engine);
382
383 value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
384
385 i2c_sw_status = get_reg_field_value(
386 value,
387 DC_I2C_SW_STATUS,
388 DC_I2C_SW_STATUS);
389
390 return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
391}
392
393/*
394 * @brief
395 * DC_I2C_TRANSACTION0..3 MM register addresses
396 */
397static const uint32_t transaction_addr[] = {
398 mmDC_I2C_TRANSACTION0,
399 mmDC_I2C_TRANSACTION1,
400 mmDC_I2C_TRANSACTION2,
401 mmDC_I2C_TRANSACTION3
402};
403
404static bool process_transaction(
405 struct i2c_hw_engine_dce80 *engine,
406 struct i2c_request_transaction_data *request)
407{
408 uint32_t length = request->length;
409 uint8_t *buffer = request->data;
410
411 bool last_transaction = false;
412 uint32_t value = 0;
413
414 struct dc_context *ctx = NULL;
415
416 ctx = engine->base.base.base.ctx;
417
418 {
419 const uint32_t addr =
420 transaction_addr[engine->transaction_count];
421
422 value = dm_read_reg(ctx, addr);
423
424 set_reg_field_value(
425 value,
426 1,
427 DC_I2C_TRANSACTION0,
428 DC_I2C_STOP_ON_NACK0);
429
430 set_reg_field_value(
431 value,
432 1,
433 DC_I2C_TRANSACTION0,
434 DC_I2C_START0);
435
436 if ((engine->transaction_count == 3) ||
437 (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
438 (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
439
440 set_reg_field_value(
441 value,
442 1,
443 DC_I2C_TRANSACTION0,
444 DC_I2C_STOP0);
445
446 last_transaction = true;
447 } else
448 set_reg_field_value(
449 value,
450 0,
451 DC_I2C_TRANSACTION0,
452 DC_I2C_STOP0);
453
454 set_reg_field_value(
455 value,
456 (0 != (request->action &
457 I2CAUX_TRANSACTION_ACTION_I2C_READ)),
458 DC_I2C_TRANSACTION0,
459 DC_I2C_RW0);
460
461 set_reg_field_value(
462 value,
463 length,
464 DC_I2C_TRANSACTION0,
465 DC_I2C_COUNT0);
466
467 dm_write_reg(ctx, addr, value);
468 }
469
470	/* Write the I2C address and I2C data into the hardware
471	 * circular buffer, one byte per entry. As an example, DDC/EDID
472	 * information is read from the 7-bit I2C slave address 0b1010000
473	 * (0x50); with the direction bit appended the byte is 0b10100001.
474	 * For an I2C send operation, the LSB must be programmed to 0;
475	 * for an I2C receive operation, the LSB must be programmed to 1. */
476
477 {
478 value = 0;
479
480 set_reg_field_value(
481 value,
482 false,
483 DC_I2C_DATA,
484 DC_I2C_DATA_RW);
485
486 set_reg_field_value(
487 value,
488 request->address,
489 DC_I2C_DATA,
490 DC_I2C_DATA);
491
492 if (engine->transaction_count == 0) {
493 set_reg_field_value(
494 value,
495 0,
496 DC_I2C_DATA,
497 DC_I2C_INDEX);
498
499 /*enable index write*/
500 set_reg_field_value(
501 value,
502 1,
503 DC_I2C_DATA,
504 DC_I2C_INDEX_WRITE);
505 }
506
507 dm_write_reg(ctx, mmDC_I2C_DATA, value);
508
509 if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
510
511 set_reg_field_value(
512 value,
513 0,
514 DC_I2C_DATA,
515 DC_I2C_INDEX_WRITE);
516
517 while (length) {
518
519 set_reg_field_value(
520 value,
521 *buffer++,
522 DC_I2C_DATA,
523 DC_I2C_DATA);
524
525 dm_write_reg(ctx, mmDC_I2C_DATA, value);
526 --length;
527 }
528 }
529 }
530
531 ++engine->transaction_count;
532 engine->buffer_used_bytes += length + 1;
533
534 return last_transaction;
535}
536
537static void execute_transaction(
538 struct i2c_hw_engine_dce80 *engine)
539{
540 uint32_t value = 0;
541 struct dc_context *ctx = NULL;
542
543 ctx = engine->base.base.base.ctx;
544
545 {
546 const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
547
548 value = dm_read_reg(ctx, addr);
549
550 set_reg_field_value(
551 value,
552 0,
553 DC_I2C_DDC1_SETUP,
554 DC_I2C_DDC1_DATA_DRIVE_EN);
555
556 set_reg_field_value(
557 value,
558 0,
559 DC_I2C_DDC1_SETUP,
560 DC_I2C_DDC1_CLK_DRIVE_EN);
561
562 set_reg_field_value(
563 value,
564 0,
565 DC_I2C_DDC1_SETUP,
566 DC_I2C_DDC1_DATA_DRIVE_SEL);
567
568 set_reg_field_value(
569 value,
570 0,
571 DC_I2C_DDC1_SETUP,
572 DC_I2C_DDC1_INTRA_TRANSACTION_DELAY);
573
574 set_reg_field_value(
575 value,
576 0,
577 DC_I2C_DDC1_SETUP,
578 DC_I2C_DDC1_INTRA_BYTE_DELAY);
579
580 dm_write_reg(ctx, addr, value);
581 }
582
583 {
584 const uint32_t addr = mmDC_I2C_CONTROL;
585
586 value = dm_read_reg(ctx, addr);
587
588 set_reg_field_value(
589 value,
590 0,
591 DC_I2C_CONTROL,
592 DC_I2C_SOFT_RESET);
593
594 set_reg_field_value(
595 value,
596 0,
597 DC_I2C_CONTROL,
598 DC_I2C_SW_STATUS_RESET);
599
600 set_reg_field_value(
601 value,
602 0,
603 DC_I2C_CONTROL,
604 DC_I2C_SEND_RESET);
605
606 set_reg_field_value(
607 value,
608 0,
609 DC_I2C_CONTROL,
610 DC_I2C_GO);
611
612 set_reg_field_value(
613 value,
614 engine->transaction_count - 1,
615 DC_I2C_CONTROL,
616 DC_I2C_TRANSACTION_COUNT);
617
618 dm_write_reg(ctx, addr, value);
619 }
620
621 /* start I2C transfer */
622 {
623 const uint32_t addr = mmDC_I2C_CONTROL;
624
625 value = dm_read_reg(ctx, addr);
626
627 set_reg_field_value(
628 value,
629 1,
630 DC_I2C_CONTROL,
631 DC_I2C_GO);
632
633 dm_write_reg(ctx, addr, value);
634 }
635
636	/* all transactions have been submitted and the HW buffer is treated
637	 * as empty (strictly, that only happens once status reads DONE) */
638 engine->transaction_count = 0;
639 engine->buffer_used_bytes = 0;
640}
641
642static void submit_channel_request(
643 struct i2c_engine *engine,
644 struct i2c_request_transaction_data *request)
645{
646 request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
647
648 if (!process_transaction(FROM_I2C_ENGINE(engine), request))
649 return;
650
651 if (is_hw_busy(&engine->base)) {
652 request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
653 return;
654 }
655
656 execute_transaction(FROM_I2C_ENGINE(engine));
657}
658
659static void process_channel_reply(
660 struct i2c_engine *engine,
661 struct i2c_reply_transaction_data *reply)
662{
663 uint32_t length = reply->length;
664 uint8_t *buffer = reply->data;
665
666 uint32_t value = 0;
667
668 /*set index*/
669 set_reg_field_value(
670 value,
671 length - 1,
672 DC_I2C_DATA,
673 DC_I2C_INDEX);
674
675 set_reg_field_value(
676 value,
677 1,
678 DC_I2C_DATA,
679 DC_I2C_DATA_RW);
680
681 set_reg_field_value(
682 value,
683 1,
684 DC_I2C_DATA,
685 DC_I2C_INDEX_WRITE);
686
687 dm_write_reg(engine->base.ctx, mmDC_I2C_DATA, value);
688
689 while (length) {
690 /* after reading the status,
691 * if the I2C operation executed successfully
692 * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
693 * should read data bytes from I2C circular data buffer */
694
695 value = dm_read_reg(engine->base.ctx, mmDC_I2C_DATA);
696
697 *buffer++ = get_reg_field_value(
698 value,
699 DC_I2C_DATA,
700 DC_I2C_DATA);
701
702 --length;
703 }
704}
705
706static enum i2c_channel_operation_result get_channel_status(
707 struct i2c_engine *engine,
708 uint8_t *returned_bytes)
709{
710 uint32_t i2c_sw_status = 0;
711 uint32_t value = dm_read_reg(engine->base.ctx, mmDC_I2C_SW_STATUS);
712
713 i2c_sw_status = get_reg_field_value(
714 value,
715 DC_I2C_SW_STATUS,
716 DC_I2C_SW_STATUS);
717
718 if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
719 return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
720 else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK)
721 return I2C_CHANNEL_OPERATION_NO_RESPONSE;
722 else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK)
723 return I2C_CHANNEL_OPERATION_TIMEOUT;
724 else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK)
725 return I2C_CHANNEL_OPERATION_FAILED;
726 else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK)
727 return I2C_CHANNEL_OPERATION_SUCCEEDED;
728
729	/*
730	 * when the HW engine was used for the communication, I2C_SW_STATUS
731	 * can legitimately read zero, so report success
732	 */
733 return I2C_CHANNEL_OPERATION_SUCCEEDED;
734}
735
736static uint32_t get_hw_buffer_available_size(
737 const struct i2c_hw_engine *engine)
738{
739 return I2C_HW_BUFFER_SIZE -
740 FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes;
741}
742
743static uint32_t get_transaction_timeout(
744 const struct i2c_hw_engine *engine,
745 uint32_t length)
746{
747 uint32_t speed = engine->base.funcs->get_speed(&engine->base);
748
749 uint32_t period_timeout;
750 uint32_t num_of_clock_stretches;
751
752 if (!speed)
753 return 0;
754
755 period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
756
757 num_of_clock_stretches = 1 + (length << 3) + 1;
758 num_of_clock_stretches +=
759 (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) +
760 (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1);
761
762 return period_timeout * num_of_clock_stretches;
763}
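/*
 * Editorial worked example (assumed numbers): at 100 kHz with an empty
 * buffer and no pending transactions, a 16-byte request gives
 *
 *	num_of_clock_stretches = 1 + (16 << 3) + 1 = 130
 *
 * i.e. roughly start + 8 clocks per byte + stop, and the returned
 * timeout is period_timeout * 130, with period_timeout derived from
 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS at the current speed.
 */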
764
765/*
766 * @brief
767 * DC_I2C_DDC1_SETUP MM register offsets
768 *
769 * @note
770 * The indices of this offset array are DDC engine IDs
771 */
772static const int32_t ddc_setup_offset[] = {
773
774 mmDC_I2C_DDC1_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 1 */
775 mmDC_I2C_DDC2_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 2 */
776 mmDC_I2C_DDC3_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 3 */
777 mmDC_I2C_DDC4_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 4 */
778 mmDC_I2C_DDC5_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 5 */
779 mmDC_I2C_DDC6_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 6 */
780 mmDC_I2C_DDCVGA_SETUP - mmDC_I2C_DDC1_SETUP /* DDC Engine 7 */
781};
782
783/*
784 * @brief
785 * DC_I2C_DDC1_SPEED MM register offsets
786 *
787 * @note
788 * The indices of this offset array are DDC engine IDs
789 */
790static const int32_t ddc_speed_offset[] = {
791 mmDC_I2C_DDC1_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 1 */
792 mmDC_I2C_DDC2_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 2 */
793 mmDC_I2C_DDC3_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 3 */
794 mmDC_I2C_DDC4_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 4 */
795 mmDC_I2C_DDC5_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 5 */
796 mmDC_I2C_DDC6_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 6 */
797 mmDC_I2C_DDCVGA_SPEED - mmDC_I2C_DDC1_SPEED /* DDC Engine 7 */
798};
799
800static const struct i2c_engine_funcs i2c_engine_funcs = {
801 .destroy = destroy,
802 .get_speed = get_speed,
803 .set_speed = set_speed,
804 .setup_engine = setup_engine,
805 .submit_channel_request = submit_channel_request,
806 .process_channel_reply = process_channel_reply,
807 .get_channel_status = get_channel_status,
808 .acquire_engine = dal_i2c_hw_engine_acquire_engine,
809};
810
811static const struct engine_funcs engine_funcs = {
812 .release_engine = release_engine,
813 .get_engine_type = dal_i2c_hw_engine_get_engine_type,
814 .acquire = dal_i2c_engine_acquire,
815 .submit_request = dal_i2c_hw_engine_submit_request,
816};
817
818static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = {
819 .get_hw_buffer_available_size =
820 get_hw_buffer_available_size,
821 .get_transaction_timeout =
822 get_transaction_timeout,
823 .wait_on_operation_result =
824 dal_i2c_hw_engine_wait_on_operation_result,
825};
826
827static void construct(
828 struct i2c_hw_engine_dce80 *engine,
829 const struct i2c_hw_engine_dce80_create_arg *arg)
830{
831 dal_i2c_hw_engine_construct(&engine->base, arg->ctx);
832
833 engine->base.base.base.funcs = &engine_funcs;
834 engine->base.base.funcs = &i2c_engine_funcs;
835 engine->base.funcs = &i2c_hw_engine_funcs;
836 engine->base.default_speed = arg->default_speed;
837 engine->addr.DC_I2C_DDCX_SETUP =
838 mmDC_I2C_DDC1_SETUP + ddc_setup_offset[arg->engine_id];
839 engine->addr.DC_I2C_DDCX_SPEED =
840 mmDC_I2C_DDC1_SPEED + ddc_speed_offset[arg->engine_id];
841
842 engine->engine_id = arg->engine_id;
843 engine->reference_frequency = arg->reference_frequency;
844 engine->buffer_used_bytes = 0;
845 engine->transaction_count = 0;
846 engine->engine_keep_power_up_count = 1;
847}
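/*
 * Editorial note (derived from the offset tables above): construct()
 * rebases the DDC1 register pair onto the requested engine, e.g. for
 * arg->engine_id == 2:
 *
 *	DC_I2C_DDCX_SETUP = mmDC_I2C_DDC1_SETUP +
 *		(mmDC_I2C_DDC3_SETUP - mmDC_I2C_DDC1_SETUP)
 *			  = mmDC_I2C_DDC3_SETUP
 */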
848
849struct i2c_engine *dal_i2c_hw_engine_dce80_create(
850 const struct i2c_hw_engine_dce80_create_arg *arg)
851{
852 struct i2c_hw_engine_dce80 *engine;
853
854 if (!arg) {
855 BREAK_TO_DEBUGGER();
856 return NULL;
857 }
858
859	if ((arg->engine_id >= ARRAY_SIZE(ddc_setup_offset)) ||
860	    (arg->engine_id >= ARRAY_SIZE(ddc_speed_offset)) ||
861 !arg->reference_frequency) {
862 BREAK_TO_DEBUGGER();
863 return NULL;
864 }
865
866 engine = kzalloc(sizeof(struct i2c_hw_engine_dce80), GFP_KERNEL);
867
868 if (!engine) {
869 BREAK_TO_DEBUGGER();
870 return NULL;
871 }
872
873 construct(engine, arg);
874 return &engine->base.base;
875}
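/*
 * Editorial usage sketch (values are assumptions; compare the hw_arg
 * setup in i2caux_dce80.c): reference_frequency is in kHz, so 13500
 * corresponds to the XTALIN/2 = 13.5 MHz reference used on this family:
 *
 *	struct i2c_hw_engine_dce80_create_arg arg = {
 *		.engine_id = 0,
 *		.reference_frequency = 13500,
 *		.default_speed = 100,
 *		.ctx = ctx,
 *	};
 *	struct i2c_engine *engine = dal_i2c_hw_engine_dce80_create(&arg);
 */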
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h
deleted file mode 100644
index 5c6116fb5479..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_HW_ENGINE_DCE80_H__
27#define __DAL_I2C_HW_ENGINE_DCE80_H__
28
29struct i2c_hw_engine_dce80 {
30 struct i2c_hw_engine base;
31 struct {
32 uint32_t DC_I2C_DDCX_SETUP;
33 uint32_t DC_I2C_DDCX_SPEED;
34 } addr;
35 uint32_t engine_id;
36 /* expressed in kilohertz */
37 uint32_t reference_frequency;
38 /* number of bytes currently used in HW buffer */
39 uint32_t buffer_used_bytes;
40 /* number of pending transactions (before GO) */
41 uint32_t transaction_count;
42 uint32_t engine_keep_power_up_count;
43};
44
45struct i2c_hw_engine_dce80_create_arg {
46 uint32_t engine_id;
47 uint32_t reference_frequency;
48 uint32_t default_speed;
49 struct dc_context *ctx;
50};
51
52struct i2c_engine *dal_i2c_hw_engine_dce80_create(
53 const struct i2c_hw_engine_dce80_create_arg *arg);
54#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c
deleted file mode 100644
index 4853ee26096a..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../engine.h"
33#include "../i2c_engine.h"
34#include "../i2c_sw_engine.h"
35
36/*
37 * Header of this unit
38 */
39
40#include "i2c_sw_engine_dce80.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45
46#include "dce/dce_8_0_d.h"
47#include "dce/dce_8_0_sh_mask.h"
48
49/*
50 * This unit
51 */
52
53static const uint32_t ddc_hw_status_addr[] = {
54 mmDC_I2C_DDC1_HW_STATUS,
55 mmDC_I2C_DDC2_HW_STATUS,
56 mmDC_I2C_DDC3_HW_STATUS,
57 mmDC_I2C_DDC4_HW_STATUS,
58 mmDC_I2C_DDC5_HW_STATUS,
59 mmDC_I2C_DDC6_HW_STATUS,
60 mmDC_I2C_DDCVGA_HW_STATUS
61};
62
63/*
64 * @brief
65 * Cast 'struct i2c_sw_engine *'
66 * to 'struct i2c_sw_engine_dce80 *'
67 */
68#define FROM_I2C_SW_ENGINE(ptr) \
69 container_of((ptr), struct i2c_sw_engine_dce80, base)
70
71/*
72 * @brief
73 * Cast 'struct i2c_engine *'
74 * to 'struct i2c_sw_engine_dce80 *'
75 */
76#define FROM_I2C_ENGINE(ptr) \
77 FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base))
78
79/*
80 * @brief
81 * Cast 'struct engine *'
82 * to 'struct i2c_sw_engine_dce80 *'
83 */
84#define FROM_ENGINE(ptr) \
85 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
86
87static void release_engine(
88 struct engine *engine)
89{
90
91}
92
93static void destruct(
94 struct i2c_sw_engine_dce80 *engine)
95{
96 dal_i2c_sw_engine_destruct(&engine->base);
97}
98
99static void destroy(
100 struct i2c_engine **engine)
101{
102 struct i2c_sw_engine_dce80 *sw_engine = FROM_I2C_ENGINE(*engine);
103
104 destruct(sw_engine);
105
106 kfree(sw_engine);
107
108 *engine = NULL;
109}
110
111static bool acquire_engine(
112 struct i2c_engine *engine,
113 struct ddc *ddc_handle)
114{
115 return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle);
116}
117
118static const struct i2c_engine_funcs i2c_engine_funcs = {
119 .acquire_engine = acquire_engine,
120 .destroy = destroy,
121 .get_speed = dal_i2c_sw_engine_get_speed,
122 .set_speed = dal_i2c_sw_engine_set_speed,
123 .setup_engine = dal_i2c_engine_setup_i2c_engine,
124 .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
125 .process_channel_reply = dal_i2c_engine_process_channel_reply,
126 .get_channel_status = dal_i2c_sw_engine_get_channel_status,
127};
128
129static const struct engine_funcs engine_funcs = {
130 .release_engine = release_engine,
131 .get_engine_type = dal_i2c_sw_engine_get_engine_type,
132 .acquire = dal_i2c_engine_acquire,
133 .submit_request = dal_i2c_sw_engine_submit_request,
134};
135
136static void construct(
137 struct i2c_sw_engine_dce80 *engine,
138 const struct i2c_sw_engine_dce80_create_arg *arg)
139{
140 struct i2c_sw_engine_create_arg arg_base;
141
142 arg_base.ctx = arg->ctx;
143 arg_base.default_speed = arg->default_speed;
144
145 dal_i2c_sw_engine_construct(&engine->base, &arg_base);
146
147 engine->base.base.base.funcs = &engine_funcs;
148 engine->base.base.funcs = &i2c_engine_funcs;
149 engine->base.default_speed = arg->default_speed;
150 engine->engine_id = arg->engine_id;
151}
152
153struct i2c_engine *dal_i2c_sw_engine_dce80_create(
154 const struct i2c_sw_engine_dce80_create_arg *arg)
155{
156 struct i2c_sw_engine_dce80 *engine;
157
158 if (!arg) {
159 BREAK_TO_DEBUGGER();
160 return NULL;
161 }
162
163 engine = kzalloc(sizeof(struct i2c_sw_engine_dce80), GFP_KERNEL);
164
165 if (!engine) {
166 BREAK_TO_DEBUGGER();
167 return NULL;
168 }
169
170 construct(engine, arg);
171 return &engine->base.base;
172}
173
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c
deleted file mode 100644
index ed48596dd2a5..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c
+++ /dev/null
@@ -1,284 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../i2caux.h"
33
34/*
35 * Header of this unit
36 */
37
38#include "i2caux_dce80.h"
39
40/*
41 * Post-requisites: headers required by this unit
42 */
43
44#include "../engine.h"
45#include "../i2c_engine.h"
46#include "../i2c_sw_engine.h"
47#include "i2c_sw_engine_dce80.h"
48#include "../i2c_hw_engine.h"
49#include "i2c_hw_engine_dce80.h"
50#include "../i2c_generic_hw_engine.h"
51#include "../aux_engine.h"
52
53
54#include "../dce110/aux_engine_dce110.h"
55#include "../dce110/i2caux_dce110.h"
56
57#include "dce/dce_8_0_d.h"
58#include "dce/dce_8_0_sh_mask.h"
59
60
61/* set register offset */
62#define SR(reg_name)\
63 .reg_name = mm ## reg_name
64
65/* set register offset with instance */
66#define SRI(reg_name, block, id)\
67 .reg_name = mm ## block ## id ## _ ## reg_name
68
69#define aux_regs(id)\
70[id] = {\
71 AUX_COMMON_REG_LIST(id), \
72 .AUX_RESET_MASK = 0 \
73}
74
75static const struct dce110_aux_registers dce80_aux_regs[] = {
76 aux_regs(0),
77 aux_regs(1),
78 aux_regs(2),
79 aux_regs(3),
80 aux_regs(4),
81 aux_regs(5)
82};
83
84/*
85 * This unit
86 */
87
88#define FROM_I2C_AUX(ptr) \
89 container_of((ptr), struct i2caux_dce80, base)
90
91static void destruct(
92 struct i2caux_dce80 *i2caux_dce80)
93{
94 dal_i2caux_destruct(&i2caux_dce80->base);
95}
96
97static void destroy(
98 struct i2caux **i2c_engine)
99{
100 struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(*i2c_engine);
101
102 destruct(i2caux_dce80);
103
104 kfree(i2caux_dce80);
105
106 *i2c_engine = NULL;
107}
108
109static struct i2c_engine *acquire_i2c_hw_engine(
110 struct i2caux *i2caux,
111 struct ddc *ddc)
112{
113 struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(i2caux);
114
115 struct i2c_engine *engine = NULL;
116 bool non_generic;
117
118 if (!ddc)
119 return NULL;
120
121 if (ddc->hw_info.hw_supported) {
122 enum gpio_ddc_line line = dal_ddc_get_line(ddc);
123
124 if (line < GPIO_DDC_LINE_COUNT) {
125 non_generic = true;
126 engine = i2caux->i2c_hw_engines[line];
127 }
128 }
129
130 if (!engine) {
131 non_generic = false;
132 engine = i2caux->i2c_generic_hw_engine;
133 }
134
135 if (!engine)
136 return NULL;
137
138 if (non_generic) {
139 if (!i2caux_dce80->i2c_hw_buffer_in_use &&
140 engine->base.funcs->acquire(&engine->base, ddc)) {
141 i2caux_dce80->i2c_hw_buffer_in_use = true;
142 return engine;
143 }
144 } else {
145 if (engine->base.funcs->acquire(&engine->base, ddc))
146 return engine;
147 }
148
149 return NULL;
150}
151
152static void release_engine(
153 struct i2caux *i2caux,
154 struct engine *engine)
155{
156 if (engine->funcs->get_engine_type(engine) ==
157 I2CAUX_ENGINE_TYPE_I2C_DDC_HW)
158 FROM_I2C_AUX(i2caux)->i2c_hw_buffer_in_use = false;
159
160 dal_i2caux_release_engine(i2caux, engine);
161}
162
163static const enum gpio_ddc_line hw_ddc_lines[] = {
164 GPIO_DDC_LINE_DDC1,
165 GPIO_DDC_LINE_DDC2,
166 GPIO_DDC_LINE_DDC3,
167 GPIO_DDC_LINE_DDC4,
168 GPIO_DDC_LINE_DDC5,
169 GPIO_DDC_LINE_DDC6,
170 GPIO_DDC_LINE_DDC_VGA
171};
172
173static const enum gpio_ddc_line hw_aux_lines[] = {
174 GPIO_DDC_LINE_DDC1,
175 GPIO_DDC_LINE_DDC2,
176 GPIO_DDC_LINE_DDC3,
177 GPIO_DDC_LINE_DDC4,
178 GPIO_DDC_LINE_DDC5,
179 GPIO_DDC_LINE_DDC6
180};
181
182static const struct i2caux_funcs i2caux_funcs = {
183 .destroy = destroy,
184 .acquire_i2c_hw_engine = acquire_i2c_hw_engine,
185 .release_engine = release_engine,
186 .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine,
187 .acquire_aux_engine = dal_i2caux_acquire_aux_engine,
188};
189
190static void construct(
191 struct i2caux_dce80 *i2caux_dce80,
192 struct dc_context *ctx)
193{
194	/* The entire family has the I2C engine reference clock frequency
195	 * changed from XTALIN (27 MHz) to XTALIN/2 (13.5 MHz) */
196
197 struct i2caux *base = &i2caux_dce80->base;
198
199 uint32_t reference_frequency =
200 dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1;
201
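	/*
	 * Editorial example (assumed BIOS value): a 27000 kHz XTALIN
	 * reported by dal_i2caux_get_reference_clock() yields
	 * 27000 >> 1 = 13500 kHz here, matching the XTALIN/2 note above.
	 */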
202 /*bool use_i2c_sw_engine = dal_adapter_service_is_feature_supported(as,
203 FEATURE_RESTORE_USAGE_I2C_SW_ENGINE);*/
204
205	/* Use SW I2C for DCE8 for now, since we have a bug with HW I2C */
206 bool use_i2c_sw_engine = true;
207
208 uint32_t i;
209
210 dal_i2caux_construct(base, ctx);
211
212 i2caux_dce80->base.funcs = &i2caux_funcs;
213 i2caux_dce80->i2c_hw_buffer_in_use = false;
214
215	/* Create I2C HW engines (HW + SW pairs)
216	 * for all lines which have assisted HW DDC;
217	 * the loop counter 'i' is used as the DDC/AUX engine_id */
218
219 i = 0;
220
221 do {
222 enum gpio_ddc_line line_id = hw_ddc_lines[i];
223
224 struct i2c_hw_engine_dce80_create_arg hw_arg;
225
226 if (use_i2c_sw_engine) {
227 struct i2c_sw_engine_dce80_create_arg sw_arg;
228
229 sw_arg.engine_id = i;
230 sw_arg.default_speed = base->default_i2c_sw_speed;
231 sw_arg.ctx = ctx;
232 base->i2c_sw_engines[line_id] =
233 dal_i2c_sw_engine_dce80_create(&sw_arg);
234 }
235
236 hw_arg.engine_id = i;
237 hw_arg.reference_frequency = reference_frequency;
238 hw_arg.default_speed = base->default_i2c_hw_speed;
239 hw_arg.ctx = ctx;
240
241 base->i2c_hw_engines[line_id] =
242 dal_i2c_hw_engine_dce80_create(&hw_arg);
243
244 ++i;
245 } while (i < ARRAY_SIZE(hw_ddc_lines));
246
247	/* Create AUX engines for all lines which have assisted HW AUX;
248	 * the loop counter 'i' is used as the DDC/AUX engine_id */
249
250 i = 0;
251
252 do {
253 enum gpio_ddc_line line_id = hw_aux_lines[i];
254
255 struct aux_engine_dce110_init_data arg;
256
257 arg.engine_id = i;
258 arg.timeout_period = base->aux_timeout_period;
259 arg.ctx = ctx;
260 arg.regs = &dce80_aux_regs[i];
261
262 base->aux_engines[line_id] =
263 dal_aux_engine_dce110_create(&arg);
264
265 ++i;
266 } while (i < ARRAY_SIZE(hw_aux_lines));
267
268 /* TODO Generic I2C SW and HW */
269}
270
271struct i2caux *dal_i2caux_dce80_create(
272 struct dc_context *ctx)
273{
274 struct i2caux_dce80 *i2caux_dce80 =
275 kzalloc(sizeof(struct i2caux_dce80), GFP_KERNEL);
276
277 if (!i2caux_dce80) {
278 BREAK_TO_DEBUGGER();
279 return NULL;
280 }
281
282 construct(i2caux_dce80, ctx);
283 return &i2caux_dce80->base;
284}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
deleted file mode 100644
index a59c1f50c1e8..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28#include "include/i2caux_interface.h"
29#include "../i2caux.h"
30#include "../engine.h"
31#include "../i2c_engine.h"
32#include "../i2c_sw_engine.h"
33#include "../i2c_hw_engine.h"
34
35#include "../dce110/aux_engine_dce110.h"
36#include "../dce110/i2c_hw_engine_dce110.h"
37#include "../dce110/i2caux_dce110.h"
38
39#include "dcn/dcn_1_0_offset.h"
40#include "dcn/dcn_1_0_sh_mask.h"
41#include "soc15_hw_ip.h"
42#include "vega10_ip_offset.h"
43
44/* begin *********************
45 * macros to expand the register list macros defined in the HW object header file */
46
47#define BASE_INNER(seg) \
48 DCE_BASE__INST0_SEG ## seg
49
50/* expand the base address at compile time */
51#define BASE(seg) \
52 BASE_INNER(seg)
53
54#define SR(reg_name)\
55 .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
56 mm ## reg_name
57
58#define SRI(reg_name, block, id)\
59 .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
60 mm ## block ## id ## _ ## reg_name
61/* macros to expand the register list macros defined in the HW object header file
62 * end *********************/
63
64#define aux_regs(id)\
65[id] = {\
66 AUX_COMMON_REG_LIST(id), \
67 .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK \
68}
69
70#define hw_engine_regs(id)\
71{\
72 I2C_HW_ENGINE_COMMON_REG_LIST(id) \
73}
74
75static const struct dce110_aux_registers dcn10_aux_regs[] = {
76 aux_regs(0),
77 aux_regs(1),
78 aux_regs(2),
79 aux_regs(3),
80 aux_regs(4),
81 aux_regs(5),
82};
83
84static const struct dce110_i2c_hw_engine_registers dcn10_hw_engine_regs[] = {
85 hw_engine_regs(1),
86 hw_engine_regs(2),
87 hw_engine_regs(3),
88 hw_engine_regs(4),
89 hw_engine_regs(5),
90 hw_engine_regs(6)
91};
92
93static const struct dce110_i2c_hw_engine_shift i2c_shift = {
94 I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
95};
96
97static const struct dce110_i2c_hw_engine_mask i2c_mask = {
98 I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
99};
100
101struct i2caux *dal_i2caux_dcn10_create(
102 struct dc_context *ctx)
103{
104 struct i2caux_dce110 *i2caux_dce110 =
105 kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
106
107 if (!i2caux_dce110) {
108 ASSERT_CRITICAL(false);
109 return NULL;
110 }
111
112 dal_i2caux_dce110_construct(i2caux_dce110,
113 ctx,
114 ARRAY_SIZE(dcn10_aux_regs),
115 dcn10_aux_regs,
116 dcn10_hw_engine_regs,
117 &i2c_shift,
118 &i2c_mask);
119 return &i2caux_dce110->base;
120}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h
deleted file mode 100644
index aeb4a86463d4..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_DCN10_H__
27#define __DAL_I2C_AUX_DCN10_H__
28
29struct i2caux *dal_i2caux_dcn10_create(
30 struct dc_context *ctx);
31
32#endif /* __DAL_I2C_AUX_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c
deleted file mode 100644
index e6408f644086..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c
+++ /dev/null
@@ -1,97 +0,0 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "../i2caux.h"
33#include "../engine.h"
34#include "../i2c_engine.h"
35#include "../i2c_sw_engine.h"
36#include "../i2c_hw_engine.h"
37
38/*
39 * Header of this unit
40 */
41#include "i2caux_diag.h"
42
43/*
44 * Post-requisites: headers required by this unit
45 */
46
47/*
48 * This unit
49 */
50
51static void destruct(
52 struct i2caux *i2caux)
53{
54 dal_i2caux_destruct(i2caux);
55}
56
57static void destroy(
58 struct i2caux **i2c_engine)
59{
60 destruct(*i2c_engine);
61
62 kfree(*i2c_engine);
63
64 *i2c_engine = NULL;
65}
66
67/* function table */
68static const struct i2caux_funcs i2caux_funcs = {
69 .destroy = destroy,
70 .acquire_i2c_hw_engine = NULL,
71 .release_engine = NULL,
72 .acquire_i2c_sw_engine = NULL,
73 .acquire_aux_engine = NULL,
74};
75
76static void construct(
77 struct i2caux *i2caux,
78 struct dc_context *ctx)
79{
80 dal_i2caux_construct(i2caux, ctx);
81 i2caux->funcs = &i2caux_funcs;
82}
83
84struct i2caux *dal_i2caux_diag_fpga_create(
85 struct dc_context *ctx)
86{
87 struct i2caux *i2caux = kzalloc(sizeof(struct i2caux),
88 GFP_KERNEL);
89
90 if (!i2caux) {
91 ASSERT_CRITICAL(false);
92 return NULL;
93 }
94
95 construct(i2caux, ctx);
96 return i2caux;
97}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h
deleted file mode 100644
index a83eeb748283..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_DIAG_FPGA_H__
27#define __DAL_I2C_AUX_DIAG_FPGA_H__
28
29struct i2caux *dal_i2caux_diag_fpga_create(
30 struct dc_context *ctx);
31
32#endif /* __DAL_I2C_AUX_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
deleted file mode 100644
index b16fb1ff687d..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+++ /dev/null
@@ -1,111 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_ENGINE_H__
27#define __DAL_ENGINE_H__
28
29#include "dc_ddc_types.h"
30
31enum i2caux_transaction_operation {
32 I2CAUX_TRANSACTION_READ,
33 I2CAUX_TRANSACTION_WRITE
34};
35
36enum i2caux_transaction_address_space {
37 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
38 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
39};
40
41struct i2caux_transaction_payload {
42 enum i2caux_transaction_address_space address_space;
43 uint32_t address;
44 uint32_t length;
45 uint8_t *data;
46};
47
48enum i2caux_transaction_status {
49 I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
50 I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
51 I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
52 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
53 I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
54 I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
55 I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
56 I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
57 I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
58 I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
59 I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
60};
61
62struct i2caux_transaction_request {
63 enum i2caux_transaction_operation operation;
64 struct i2caux_transaction_payload payload;
65 enum i2caux_transaction_status status;
66};
67
68enum i2caux_engine_type {
69 I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
70 I2CAUX_ENGINE_TYPE_AUX,
71 I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
72 I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
73 I2CAUX_ENGINE_TYPE_I2C_SW
74};
75
76enum i2c_default_speed {
77 I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
78 I2CAUX_DEFAULT_I2C_SW_SPEED = 50
79};
80
81struct engine;
82
83struct engine_funcs {
84 enum i2caux_engine_type (*get_engine_type)(
85 const struct engine *engine);
86 bool (*acquire)(
87 struct engine *engine,
88 struct ddc *ddc);
89 bool (*submit_request)(
90 struct engine *engine,
91 struct i2caux_transaction_request *request,
92 bool middle_of_transaction);
93 void (*release_engine)(
94 struct engine *engine);
95};
96
97struct engine {
98 const struct engine_funcs *funcs;
99 uint32_t inst;
100 struct ddc *ddc;
101 struct dc_context *ctx;
102};
103
104void dal_i2caux_construct_engine(
105 struct engine *engine,
106 struct dc_context *ctx);
107
108void dal_i2caux_destruct_engine(
109 struct engine *engine);
110
111#endif
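For illustration only, a minimal sketch of filling out the request types deleted above; the 0x50 slave address is a made-up example, and the address encoding (7-bit address shifted left, R/W flag in bit 0) mirrors dal_i2caux_submit_i2c_command() in i2caux.c below:

	uint8_t edid_offset = 0;
	struct i2caux_transaction_request request = {
		.operation = I2CAUX_TRANSACTION_WRITE,
		.payload = {
			.address_space = I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C,
			.address = (0x50 << 1) | 0, /* hypothetical slave, write */
			.length = sizeof(edid_offset),
			.data = &edid_offset,
		},
		.status = I2CAUX_TRANSACTION_STATUS_UNKNOWN,
	};
	/* engine->funcs->submit_request(engine, &request, false); */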
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c
deleted file mode 100644
index 70e20bd47ce4..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "engine.h"
33
34/*
35 * Header of this unit
36 */
37
38#include "i2c_engine.h"
39
40/*
41 * Post-requisites: headers required by this unit
42 */
43
44/*
45 * This unit
46 */
47
48#define FROM_ENGINE(ptr) \
49 container_of((ptr), struct i2c_engine, base)
50
51bool dal_i2c_engine_acquire(
52 struct engine *engine,
53 struct ddc *ddc_handle)
54{
55 struct i2c_engine *i2c_engine = FROM_ENGINE(engine);
56
57 uint32_t counter = 0;
58 bool result;
59
60 do {
61 result = i2c_engine->funcs->acquire_engine(
62 i2c_engine, ddc_handle);
63
64 if (result)
65 break;
66
67 /* i2c_engine is held busy by VBIOS; let's wait and retry */
68
69 udelay(10);
70
71 ++counter;
72 } while (counter < 2);
73
74 if (result) {
75 if (!i2c_engine->funcs->setup_engine(i2c_engine)) {
76 engine->funcs->release_engine(engine);
77 result = false;
78 }
79 }
80
81 return result;
82}
83
84bool dal_i2c_engine_setup_i2c_engine(
85 struct i2c_engine *engine)
86{
87 /* Derivative classes do not have to override this */
88
89 return true;
90}
91
92void dal_i2c_engine_submit_channel_request(
93 struct i2c_engine *engine,
94 struct i2c_request_transaction_data *request)
95{
96
97}
98
99void dal_i2c_engine_process_channel_reply(
100 struct i2c_engine *engine,
101 struct i2c_reply_transaction_data *reply)
102{
103
104}
105
106void dal_i2c_engine_construct(
107 struct i2c_engine *engine,
108 struct dc_context *ctx)
109{
110 dal_i2caux_construct_engine(&engine->base, ctx);
111 engine->timeout_delay = 0;
112}
113
114void dal_i2c_engine_destruct(
115 struct i2c_engine *engine)
116{
117 dal_i2caux_destruct_engine(&engine->base);
118}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
deleted file mode 100644
index ded6ea34b714..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_ENGINE_H__
27#define __DAL_I2C_ENGINE_H__
28
29enum i2c_channel_operation_result {
30 I2C_CHANNEL_OPERATION_SUCCEEDED,
31 I2C_CHANNEL_OPERATION_FAILED,
32 I2C_CHANNEL_OPERATION_NOT_GRANTED,
33 I2C_CHANNEL_OPERATION_IS_BUSY,
34 I2C_CHANNEL_OPERATION_NO_HANDLE_PROVIDED,
35 I2C_CHANNEL_OPERATION_CHANNEL_IN_USE,
36 I2C_CHANNEL_OPERATION_CHANNEL_CLIENT_MAX_ALLOWED,
37 I2C_CHANNEL_OPERATION_ENGINE_BUSY,
38 I2C_CHANNEL_OPERATION_TIMEOUT,
39 I2C_CHANNEL_OPERATION_NO_RESPONSE,
40 I2C_CHANNEL_OPERATION_HW_REQUEST_I2C_BUS,
41 I2C_CHANNEL_OPERATION_WRONG_PARAMETER,
42 I2C_CHANNEL_OPERATION_OUT_NB_OF_RETRIES,
43 I2C_CHANNEL_OPERATION_NOT_STARTED
44};
45
46struct i2c_request_transaction_data {
47 enum i2caux_transaction_action action;
48 enum i2c_channel_operation_result status;
49 uint8_t address;
50 uint32_t length;
51 uint8_t *data;
52};
53
54struct i2c_reply_transaction_data {
55 uint32_t length;
56 uint8_t *data;
57};
58
59struct i2c_engine;
60
61struct i2c_engine_funcs {
62 void (*destroy)(
63 struct i2c_engine **ptr);
64 uint32_t (*get_speed)(
65 const struct i2c_engine *engine);
66 void (*set_speed)(
67 struct i2c_engine *engine,
68 uint32_t speed);
69 bool (*acquire_engine)(
70 struct i2c_engine *engine,
71 struct ddc *ddc);
72 bool (*setup_engine)(
73 struct i2c_engine *engine);
74 void (*submit_channel_request)(
75 struct i2c_engine *engine,
76 struct i2c_request_transaction_data *request);
77 void (*process_channel_reply)(
78 struct i2c_engine *engine,
79 struct i2c_reply_transaction_data *reply);
80 enum i2c_channel_operation_result (*get_channel_status)(
81 struct i2c_engine *engine,
82 uint8_t *returned_bytes);
83};
84
85struct i2c_engine {
86 struct engine base;
87 const struct i2c_engine_funcs *funcs;
88 uint32_t timeout_delay;
89 uint32_t setup_limit;
90 uint32_t send_reset_length;
91};
92
93void dal_i2c_engine_construct(
94 struct i2c_engine *engine,
95 struct dc_context *ctx);
96
97void dal_i2c_engine_destruct(
98 struct i2c_engine *engine);
99
100bool dal_i2c_engine_setup_i2c_engine(
101 struct i2c_engine *engine);
102
103void dal_i2c_engine_submit_channel_request(
104 struct i2c_engine *engine,
105 struct i2c_request_transaction_data *request);
106
107void dal_i2c_engine_process_channel_reply(
108 struct i2c_engine *engine,
109 struct i2c_reply_transaction_data *reply);
110
111bool dal_i2c_engine_acquire(
112 struct engine *ptr,
113 struct ddc *ddc_handle);
114
115#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c
deleted file mode 100644
index 5a4295e0fae5..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c
+++ /dev/null
@@ -1,284 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "engine.h"
33#include "i2c_engine.h"
34#include "i2c_hw_engine.h"
35
36/*
37 * Header of this unit
38 */
39
40#include "i2c_generic_hw_engine.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45
46/*
47 * This unit
48 */
49
50/*
51 * @brief
52 * Cast 'struct i2c_hw_engine *'
53 * to 'struct i2c_generic_hw_engine *'
54 */
55#define FROM_I2C_HW_ENGINE(ptr) \
56 container_of((ptr), struct i2c_generic_hw_engine, base)
57
58/*
59 * @brief
60 * Cast 'struct i2c_engine *'
61 * to 'struct i2c_generic_hw_engine *'
62 */
63#define FROM_I2C_ENGINE(ptr) \
64 FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
65
66/*
67 * @brief
68 * Cast 'struct engine *'
69 * to 'struct i2c_generic_hw_engine *'
70 */
71#define FROM_ENGINE(ptr) \
72 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
73
74enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type(
75 const struct engine *engine)
76{
77 return I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW;
78}
79
80/*
81 * @brief
82 * Single transaction handling.
83 * Since a transaction may be bigger than the HW buffer size,
84 * it divides the transaction into sub-transactions
85 * and uses batch transaction feature of the engine.
86 */
87bool dal_i2c_generic_hw_engine_submit_request(
88 struct engine *engine,
89 struct i2caux_transaction_request *i2caux_request,
90 bool middle_of_transaction)
91{
92 struct i2c_generic_hw_engine *hw_engine = FROM_ENGINE(engine);
93
94 struct i2c_hw_engine *base = &hw_engine->base;
95
96 uint32_t max_payload_size =
97 base->funcs->get_hw_buffer_available_size(base);
98
99 bool initial_stop_bit = !middle_of_transaction;
100
101 struct i2c_generic_transaction_attributes attributes;
102
103 enum i2c_channel_operation_result operation_result =
104 I2C_CHANNEL_OPERATION_FAILED;
105
106 bool result = false;
107
108 /* setup transaction initial properties */
109
110 uint8_t address = i2caux_request->payload.address;
111 uint8_t *current_payload = i2caux_request->payload.data;
112 uint32_t remaining_payload_size = i2caux_request->payload.length;
113
114 bool first_iteration = true;
115
116 if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
117 attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
118 else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
119 attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
120 else {
121 i2caux_request->status =
122 I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
123 return false;
124 }
125
126 /* Do batch transaction.
127 * Divide read/write data into payloads which fit HW buffer size.
128 * 1. Single transaction:
129 * start_bit = 1, stop_bit depends on session state, ack_on_read = 0;
130 * 2. Start of batch transaction:
131 * start_bit = 1, stop_bit = 0, ack_on_read = 1;
132 * 3. Middle of batch transaction:
133 * start_bit = 0, stop_bit = 0, ack_on_read = 1;
134 * 4. End of batch transaction:
135 * start_bit = 0, stop_bit depends on session state, ack_on_read = 0.
136 * Session stop bit is set if 'middle_of_transaction' = 0. */
137
138 while (remaining_payload_size) {
139 uint32_t current_transaction_size;
140 uint32_t current_payload_size;
141
142 bool last_iteration;
143 bool stop_bit;
144
145 /* Calculate current transaction size and payload size.
146 * Transaction size = total number of bytes in transaction,
147 * including slave's address;
148 * Payload size = number of data bytes in transaction. */
149
150 if (first_iteration) {
151 /* In the first sub-transaction we send the slave's address,
152 * so we need to reserve one byte for it */
153 current_transaction_size =
154 (remaining_payload_size > max_payload_size - 1) ?
155 max_payload_size :
156 remaining_payload_size + 1;
157
158 current_payload_size = current_transaction_size - 1;
159 } else {
160 /* Second and subsequent sub-transactions have the
161 * entire buffer reserved for data */
162 current_transaction_size =
163 (remaining_payload_size > max_payload_size) ?
164 max_payload_size :
165 remaining_payload_size;
166
167 current_payload_size = current_transaction_size;
168 }
169
170 last_iteration =
171 (remaining_payload_size == current_payload_size);
172
173 stop_bit = last_iteration ? initial_stop_bit : false;
174
175 /* write slave device address */
176
177 if (first_iteration)
178 hw_engine->funcs->write_address(hw_engine, address);
179
180 /* write current portion of data, if requested */
181
182 if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
183 hw_engine->funcs->write_data(
184 hw_engine,
185 current_payload,
186 current_payload_size);
187
188 /* execute transaction */
189
190 attributes.start_bit = first_iteration;
191 attributes.stop_bit = stop_bit;
192 attributes.last_read = last_iteration;
193 attributes.transaction_size = current_transaction_size;
194
195 hw_engine->funcs->execute_transaction(hw_engine, &attributes);
196
197 /* wait until transaction is processed; if it fails - quit */
198
199 operation_result = base->funcs->wait_on_operation_result(
200 base,
201 base->funcs->get_transaction_timeout(
202 base, current_transaction_size),
203 I2C_CHANNEL_OPERATION_ENGINE_BUSY);
204
205 if (operation_result != I2C_CHANNEL_OPERATION_SUCCEEDED)
206 break;
207
208 /* read current portion of data, if requested */
209
210 /* the read offset should be 1 for the first sub-transaction,
211 * and 0 for any subsequent one */
212
213 if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
214 hw_engine->funcs->read_data(hw_engine, current_payload,
215 current_payload_size, first_iteration ? 1 : 0);
216
217 /* update loop variables */
218
219 first_iteration = false;
220 current_payload += current_payload_size;
221 remaining_payload_size -= current_payload_size;
222 }
223
224 /* update transaction status */
225
226 switch (operation_result) {
227 case I2C_CHANNEL_OPERATION_SUCCEEDED:
228 i2caux_request->status =
229 I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
230 result = true;
231 break;
232 case I2C_CHANNEL_OPERATION_NO_RESPONSE:
233 i2caux_request->status =
234 I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
235 break;
236 case I2C_CHANNEL_OPERATION_TIMEOUT:
237 i2caux_request->status =
238 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
239 break;
240 case I2C_CHANNEL_OPERATION_FAILED:
241 i2caux_request->status =
242 I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
243 break;
244 default:
245 i2caux_request->status =
246 I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
247 }
248
249 return result;
250}
251
252/*
253 * @brief
254 * Returns the number of microseconds to wait before the operation is considered timed out
255 */
256uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout(
257 const struct i2c_hw_engine *engine,
258 uint32_t length)
259{
260 const struct i2c_engine *base = &engine->base;
261
262 uint32_t speed = base->funcs->get_speed(base);
263
264 if (!speed)
265 return 0;
266
267 /* total timeout = period_timeout * (start + data bits count + stop) */
268
269 return ((1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed) *
270 (1 + (length << 3) + 1);
271}
272
273void dal_i2c_generic_hw_engine_construct(
274 struct i2c_generic_hw_engine *engine,
275 struct dc_context *ctx)
276{
277 dal_i2c_hw_engine_construct(&engine->base, ctx);
278}
279
280void dal_i2c_generic_hw_engine_destruct(
281 struct i2c_generic_hw_engine *engine)
282{
283 dal_i2c_hw_engine_destruct(&engine->base);
284}
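To make the sub-transaction split above concrete, here is a sketch under an assumed 16-byte HW buffer (the real size comes from get_hw_buffer_available_size(); the 20-byte payload is a made-up example):

	/* Sketch only: how the loop above splits a 20-byte write when
	 * max_payload_size is (hypothetically) 16 bytes. */
	uint32_t remaining = 20, max = 16;
	bool first = true;

	while (remaining) {
		/* the first pass reserves one buffer byte for the address */
		uint32_t tx = first ?
			((remaining > max - 1) ? max : remaining + 1) :
			((remaining > max) ? max : remaining);
		uint32_t data_bytes = first ? tx - 1 : tx;

		/* pass 1: tx = 16 (address + 15 data bytes), start_bit = 1
		 * pass 2: tx = 5 (data only), stop_bit = !middle_of_transaction */
		remaining -= data_bytes;
		first = false;
	}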
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h
deleted file mode 100644
index 1da0397b04a2..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_GENERIC_HW_ENGINE_H__
27#define __DAL_I2C_GENERIC_HW_ENGINE_H__
28
29struct i2c_generic_transaction_attributes {
30 enum i2caux_transaction_action action;
31 uint32_t transaction_size;
32 bool start_bit;
33 bool stop_bit;
34 bool last_read;
35};
36
37struct i2c_generic_hw_engine;
38
39struct i2c_generic_hw_engine_funcs {
40 void (*write_address)(
41 struct i2c_generic_hw_engine *engine,
42 uint8_t address);
43 void (*write_data)(
44 struct i2c_generic_hw_engine *engine,
45 const uint8_t *buffer,
46 uint32_t length);
47 void (*read_data)(
48 struct i2c_generic_hw_engine *engine,
49 uint8_t *buffer,
50 uint32_t length,
51 uint32_t offset);
52 void (*execute_transaction)(
53 struct i2c_generic_hw_engine *engine,
54 struct i2c_generic_transaction_attributes *attributes);
55};
56
57struct i2c_generic_hw_engine {
58 struct i2c_hw_engine base;
59 const struct i2c_generic_hw_engine_funcs *funcs;
60};
61
62void dal_i2c_generic_hw_engine_construct(
63 struct i2c_generic_hw_engine *engine,
64 struct dc_context *ctx);
65
66void dal_i2c_generic_hw_engine_destruct(
67 struct i2c_generic_hw_engine *engine);
68enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type(
69 const struct engine *engine);
70bool dal_i2c_generic_hw_engine_submit_request(
71 struct engine *ptr,
72 struct i2caux_transaction_request *i2caux_request,
73 bool middle_of_transaction);
74uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout(
75 const struct i2c_hw_engine *engine,
76 uint32_t length);
77#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
deleted file mode 100644
index 141898533e8e..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
+++ /dev/null
@@ -1,251 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27#include "dm_event_log.h"
28
29/*
30 * Pre-requisites: headers required by header of this unit
31 */
32#include "include/i2caux_interface.h"
33#include "engine.h"
34#include "i2c_engine.h"
35
36/*
37 * Header of this unit
38 */
39
40#include "i2c_hw_engine.h"
41
42/*
43 * Post-requisites: headers required by this unit
44 */
45
46/*
47 * This unit
48 */
49
50/*
51 * @brief
52 * Cast 'struct i2c_engine *'
53 * to 'struct i2c_hw_engine *'
54 */
55#define FROM_I2C_ENGINE(ptr) \
56 container_of((ptr), struct i2c_hw_engine, base)
57
58/*
59 * @brief
60 * Cast 'struct engine *'
61 * to 'struct i2c_hw_engine *'
62 */
63#define FROM_ENGINE(ptr) \
64 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
65
66enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type(
67 const struct engine *engine)
68{
69 return I2CAUX_ENGINE_TYPE_I2C_DDC_HW;
70}
71
72bool dal_i2c_hw_engine_submit_request(
73 struct engine *engine,
74 struct i2caux_transaction_request *i2caux_request,
75 bool middle_of_transaction)
76{
77 struct i2c_hw_engine *hw_engine = FROM_ENGINE(engine);
78
79 struct i2c_request_transaction_data request;
80
81 uint32_t transaction_timeout;
82
83 enum i2c_channel_operation_result operation_result;
84
85 bool result = false;
86
87 /* We require the following:
88 * the transaction length must not exceed
89 * the number of free bytes in the HW buffer (minus one for the address) */
90
91 if (i2caux_request->payload.length >=
92 hw_engine->funcs->get_hw_buffer_available_size(hw_engine)) {
93 i2caux_request->status =
94 I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW;
95 return false;
96 }
97
98 if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
99 request.action = middle_of_transaction ?
100 I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
101 I2CAUX_TRANSACTION_ACTION_I2C_READ;
102 else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
103 request.action = middle_of_transaction ?
104 I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
105 I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
106 else {
107 i2caux_request->status =
108 I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
109 /* [anaumov] in DAL2, there was no "return false" */
110 return false;
111 }
112
113 request.address = (uint8_t)i2caux_request->payload.address;
114 request.length = i2caux_request->payload.length;
115 request.data = i2caux_request->payload.data;
116
117 /* obtain timeout value before submitting request */
118
119 transaction_timeout = hw_engine->funcs->get_transaction_timeout(
120 hw_engine, i2caux_request->payload.length + 1);
121
122 hw_engine->base.funcs->submit_channel_request(
123 &hw_engine->base, &request);
124 /* EVENT_LOG_AUX_REQ(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_I2C, */
125 /* request.action, request.address, request.length, request.data); */
126
127 if ((request.status == I2C_CHANNEL_OPERATION_FAILED) ||
128 (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) {
129 i2caux_request->status =
130 I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
131 return false;
132 }
133
134 /* wait until the transaction is processed */
135
136 operation_result = hw_engine->funcs->wait_on_operation_result(
137 hw_engine,
138 transaction_timeout,
139 I2C_CHANNEL_OPERATION_ENGINE_BUSY);
140
141 /* update transaction status */
142
143 switch (operation_result) {
144 case I2C_CHANNEL_OPERATION_SUCCEEDED:
145 i2caux_request->status =
146 I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
147 result = true;
148 break;
149 case I2C_CHANNEL_OPERATION_NO_RESPONSE:
150 i2caux_request->status =
151 I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
152 break;
153 case I2C_CHANNEL_OPERATION_TIMEOUT:
154 i2caux_request->status =
155 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
156 break;
157 case I2C_CHANNEL_OPERATION_FAILED:
158 i2caux_request->status =
159 I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
160 break;
161 default:
162 i2caux_request->status =
163 I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
164 }
165
166 if (result && (i2caux_request->operation == I2CAUX_TRANSACTION_READ)) {
167 struct i2c_reply_transaction_data reply;
168
169 reply.data = i2caux_request->payload.data;
170 reply.length = i2caux_request->payload.length;
171
172 hw_engine->base.funcs->
173 process_channel_reply(&hw_engine->base, &reply);
174 /* EVENT_LOG_AUX_REP(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_I2C, */
175 /* AUX_TRANSACTION_REPLY_I2C_ACK, reply.length, reply.data); */
176 }
177
178
179
180 return result;
181}
182
183bool dal_i2c_hw_engine_acquire_engine(
184 struct i2c_engine *engine,
185 struct ddc *ddc)
186{
187 enum gpio_result result;
188 uint32_t current_speed;
189
190 result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
191 GPIO_DDC_CONFIG_TYPE_MODE_I2C);
192
193 if (result != GPIO_RESULT_OK)
194 return false;
195
196 engine->base.ddc = ddc;
197
198 current_speed = engine->funcs->get_speed(engine);
199
200 if (current_speed)
201 FROM_I2C_ENGINE(engine)->original_speed = current_speed;
202
203 return true;
204}
205/*
206 * @brief
207 * Polls the current engine status in a loop until the status
208 * no longer matches 'expected_result' (the busy status), or timeout occurs.
209 * Timeout is given in microseconds;
210 * the status is queried roughly once per microsecond.
211 */
212enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result(
213 struct i2c_hw_engine *engine,
214 uint32_t timeout,
215 enum i2c_channel_operation_result expected_result)
216{
217 enum i2c_channel_operation_result result;
218 uint32_t i = 0;
219
220 if (!timeout)
221 return I2C_CHANNEL_OPERATION_SUCCEEDED;
222
223 do {
224 result = engine->base.funcs->get_channel_status(
225 &engine->base, NULL);
226
227 if (result != expected_result)
228 break;
229
230 udelay(1);
231
232 ++i;
233 } while (i < timeout);
234
235 return result;
236}
237
238void dal_i2c_hw_engine_construct(
239 struct i2c_hw_engine *engine,
240 struct dc_context *ctx)
241{
242 dal_i2c_engine_construct(&engine->base, ctx);
243 engine->original_speed = I2CAUX_DEFAULT_I2C_HW_SPEED;
244 engine->default_speed = I2CAUX_DEFAULT_I2C_HW_SPEED;
245}
246
247void dal_i2c_hw_engine_destruct(
248 struct i2c_hw_engine *engine)
249{
250 dal_i2c_engine_destruct(&engine->base);
251}
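The polling loop above is bounded by get_transaction_timeout(). Using the generic-HW formula from i2c_generic_hw_engine.c and the default 50 kHz speed, a worked example:

	/* Assumes the default 50 kHz speed and a 2-byte transaction
	 * (address + 1 data byte):
	 *
	 *   per-bit budget = 1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS / speed
	 *                  = 1000 * 32 / 50 = 640 us
	 *   total = 640 * (1 start + 2 * 8 data bits + 1 stop) = 11520 us
	 */
	uint32_t timeout_us = ((1000 * 32) / 50) * (1 + (2 << 3) + 1);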
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h
deleted file mode 100644
index 8936a994804a..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_HW_ENGINE_H__
27#define __DAL_I2C_HW_ENGINE_H__
28
29enum {
30 TRANSACTION_TIMEOUT_IN_I2C_CLOCKS = 32
31};
32
33struct i2c_hw_engine;
34
35struct i2c_hw_engine_funcs {
36 uint32_t (*get_hw_buffer_available_size)(
37 const struct i2c_hw_engine *engine);
38 enum i2c_channel_operation_result (*wait_on_operation_result)(
39 struct i2c_hw_engine *engine,
40 uint32_t timeout,
41 enum i2c_channel_operation_result expected_result);
42 uint32_t (*get_transaction_timeout)(
43 const struct i2c_hw_engine *engine,
44 uint32_t length);
45};
46
47struct i2c_hw_engine {
48 struct i2c_engine base;
49 const struct i2c_hw_engine_funcs *funcs;
50
51 /* Values below are in kilohertz */
52 uint32_t original_speed;
53 uint32_t default_speed;
54};
55
56void dal_i2c_hw_engine_construct(
57 struct i2c_hw_engine *engine,
58 struct dc_context *ctx);
59
60void dal_i2c_hw_engine_destruct(
61 struct i2c_hw_engine *engine);
62
63enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result(
64 struct i2c_hw_engine *engine,
65 uint32_t timeout,
66 enum i2c_channel_operation_result expected_result);
67
68bool dal_i2c_hw_engine_acquire_engine(
69 struct i2c_engine *engine,
70 struct ddc *ddc);
71
72bool dal_i2c_hw_engine_submit_request(
73 struct engine *ptr,
74 struct i2caux_transaction_request *i2caux_request,
75 bool middle_of_transaction);
76
77enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type(
78 const struct engine *engine);
79
80#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c
deleted file mode 100644
index 8e19bb629394..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c
+++ /dev/null
@@ -1,601 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "engine.h"
33#include "i2c_engine.h"
34
35/*
36 * Header of this unit
37 */
38
39#include "i2c_sw_engine.h"
40
41/*
42 * Post-requisites: headers required by this unit
43 */
44
45/*
46 * This unit
47 */
48
49#define SCL false
50#define SDA true
51
52static inline bool read_bit_from_ddc(
53 struct ddc *ddc,
54 bool data_nor_clock)
55{
56 uint32_t value = 0;
57
58 if (data_nor_clock)
59 dal_gpio_get_value(ddc->pin_data, &value);
60 else
61 dal_gpio_get_value(ddc->pin_clock, &value);
62
63 return (value != 0);
64}
65
66static inline void write_bit_to_ddc(
67 struct ddc *ddc,
68 bool data_nor_clock,
69 bool bit)
70{
71 uint32_t value = bit ? 1 : 0;
72
73 if (data_nor_clock)
74 dal_gpio_set_value(ddc->pin_data, value);
75 else
76 dal_gpio_set_value(ddc->pin_clock, value);
77}
78
79static bool wait_for_scl_high(
80 struct dc_context *ctx,
81 struct ddc *ddc,
82 uint16_t clock_delay_div_4)
83{
84 uint32_t scl_retry = 0;
85 uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4;
86
87 udelay(clock_delay_div_4);
88
89 /* 3 milliseconds delay
90 * to wake up some displays from "low power" state.
91 */
92
93 do {
94 if (read_bit_from_ddc(ddc, SCL))
95 return true;
96
97 udelay(clock_delay_div_4);
98
99 ++scl_retry;
100 } while (scl_retry <= scl_retry_max);
101
102 return false;
103}
104
105static bool start_sync(
106 struct dc_context *ctx,
107 struct ddc *ddc_handle,
108 uint16_t clock_delay_div_4)
109{
110 uint32_t retry = 0;
111
112 /* The I2C start condition is:
113 * SDA going from high to low while SCL is high. */
114
115 write_bit_to_ddc(ddc_handle, SCL, true);
116
117 udelay(clock_delay_div_4);
118
119 do {
120 write_bit_to_ddc(ddc_handle, SDA, true);
121
122 if (!read_bit_from_ddc(ddc_handle, SDA)) {
123 ++retry;
124 continue;
125 }
126
127 udelay(clock_delay_div_4);
128
129 write_bit_to_ddc(ddc_handle, SCL, true);
130
131 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
132 break;
133
134 write_bit_to_ddc(ddc_handle, SDA, false);
135
136 udelay(clock_delay_div_4);
137
138 write_bit_to_ddc(ddc_handle, SCL, false);
139
140 udelay(clock_delay_div_4);
141
142 return true;
143 } while (retry <= I2C_SW_RETRIES);
144
145 return false;
146}
147
148static bool stop_sync(
149 struct dc_context *ctx,
150 struct ddc *ddc_handle,
151 uint16_t clock_delay_div_4)
152{
153 uint32_t retry = 0;
154
155 /* The I2C stop condition is:
156 * SDA going from low to high while SCL is high. */
157
158 write_bit_to_ddc(ddc_handle, SCL, false);
159
160 udelay(clock_delay_div_4);
161
162 write_bit_to_ddc(ddc_handle, SDA, false);
163
164 udelay(clock_delay_div_4);
165
166 write_bit_to_ddc(ddc_handle, SCL, true);
167
168 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
169 return false;
170
171 write_bit_to_ddc(ddc_handle, SDA, true);
172
173 do {
174 udelay(clock_delay_div_4);
175
176 if (read_bit_from_ddc(ddc_handle, SDA))
177 return true;
178
179 ++retry;
180 } while (retry <= 2);
181
182 return false;
183}
184
185static bool write_byte(
186 struct dc_context *ctx,
187 struct ddc *ddc_handle,
188 uint16_t clock_delay_div_4,
189 uint8_t byte)
190{
191 int32_t shift = 7;
192 bool ack;
193
194 /* bits are transmitted serially, starting from MSB */
195
196 do {
197 udelay(clock_delay_div_4);
198
199 write_bit_to_ddc(ddc_handle, SDA, (byte >> shift) & 1);
200
201 udelay(clock_delay_div_4);
202
203 write_bit_to_ddc(ddc_handle, SCL, true);
204
205 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
206 return false;
207
208 write_bit_to_ddc(ddc_handle, SCL, false);
209
210 --shift;
211 } while (shift >= 0);
212
213 /* The display sends ACK by preventing the SDA from going high
214 * after the SCL pulse we use to send our last data bit.
215 * If the SDA goes high after that bit, it's a NACK */
216
217 udelay(clock_delay_div_4);
218
219 write_bit_to_ddc(ddc_handle, SDA, true);
220
221 udelay(clock_delay_div_4);
222
223 write_bit_to_ddc(ddc_handle, SCL, true);
224
225 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
226 return false;
227
228 /* read ACK bit */
229
230 ack = !read_bit_from_ddc(ddc_handle, SDA);
231
232 udelay(clock_delay_div_4 << 1);
233
234 write_bit_to_ddc(ddc_handle, SCL, false);
235
236 udelay(clock_delay_div_4 << 1);
237
238 return ack;
239}
240
241static bool read_byte(
242 struct dc_context *ctx,
243 struct ddc *ddc_handle,
244 uint16_t clock_delay_div_4,
245 uint8_t *byte,
246 bool more)
247{
248 int32_t shift = 7;
249
250 uint8_t data = 0;
251
252 /* The data bits are read from MSB to LSB;
253 * bit is read while SCL is high */
254
255 do {
256 write_bit_to_ddc(ddc_handle, SCL, true);
257
258 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
259 return false;
260
261 if (read_bit_from_ddc(ddc_handle, SDA))
262 data |= (1 << shift);
263
264 write_bit_to_ddc(ddc_handle, SCL, false);
265
266 udelay(clock_delay_div_4 << 1);
267
268 --shift;
269 } while (shift >= 0);
270
271 /* only whole bytes are read */
272
273 *byte = data;
274
275 udelay(clock_delay_div_4);
276
277 /* send the acknowledge bit:
278 * SDA low means ACK, SDA high means NACK */
279
280 write_bit_to_ddc(ddc_handle, SDA, !more);
281
282 udelay(clock_delay_div_4);
283
284 write_bit_to_ddc(ddc_handle, SCL, true);
285
286 if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
287 return false;
288
289 write_bit_to_ddc(ddc_handle, SCL, false);
290
291 udelay(clock_delay_div_4);
292
293 write_bit_to_ddc(ddc_handle, SDA, true);
294
295 udelay(clock_delay_div_4);
296
297 return true;
298}
299
300static bool i2c_write(
301 struct dc_context *ctx,
302 struct ddc *ddc_handle,
303 uint16_t clock_delay_div_4,
304 uint8_t address,
305 uint32_t length,
306 const uint8_t *data)
307{
308 uint32_t i = 0;
309
310 if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address))
311 return false;
312
313 while (i < length) {
314 if (!write_byte(ctx, ddc_handle, clock_delay_div_4, data[i]))
315 return false;
316 ++i;
317 }
318
319 return true;
320}
321
322static bool i2c_read(
323 struct dc_context *ctx,
324 struct ddc *ddc_handle,
325 uint16_t clock_delay_div_4,
326 uint8_t address,
327 uint32_t length,
328 uint8_t *data)
329{
330 uint32_t i = 0;
331
332 if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address))
333 return false;
334
335 while (i < length) {
336 if (!read_byte(ctx, ddc_handle, clock_delay_div_4, data + i,
337 i < length - 1))
338 return false;
339 ++i;
340 }
341
342 return true;
343}
344
345/*
346 * @brief
347 * Cast 'struct i2c_engine *'
348 * to 'struct i2c_sw_engine *'
349 */
350#define FROM_I2C_ENGINE(ptr) \
351 container_of((ptr), struct i2c_sw_engine, base)
352
353/*
354 * @brief
355 * Cast 'struct engine *'
356 * to 'struct i2c_sw_engine *'
357 */
358#define FROM_ENGINE(ptr) \
359 FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
360
361enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type(
362 const struct engine *engine)
363{
364 return I2CAUX_ENGINE_TYPE_I2C_SW;
365}
366
367bool dal_i2c_sw_engine_submit_request(
368 struct engine *engine,
369 struct i2caux_transaction_request *i2caux_request,
370 bool middle_of_transaction)
371{
372 struct i2c_sw_engine *sw_engine = FROM_ENGINE(engine);
373
374 struct i2c_engine *base = &sw_engine->base;
375
376 struct i2c_request_transaction_data request;
377 bool operation_succeeded = false;
378
379 if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
380 request.action = middle_of_transaction ?
381 I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
382 I2CAUX_TRANSACTION_ACTION_I2C_READ;
383 else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
384 request.action = middle_of_transaction ?
385 I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
386 I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
387 else {
388 i2caux_request->status =
389 I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
390 /* in DAL2, there was no "return false" */
391 return false;
392 }
393
394 request.address = (uint8_t)i2caux_request->payload.address;
395 request.length = i2caux_request->payload.length;
396 request.data = i2caux_request->payload.data;
397
398 base->funcs->submit_channel_request(base, &request);
399
400 if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) ||
401 (request.status == I2C_CHANNEL_OPERATION_FAILED))
402 i2caux_request->status =
403 I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
404 else {
405 enum i2c_channel_operation_result operation_result;
406
407 do {
408 operation_result =
409 base->funcs->get_channel_status(base, NULL);
410
411 switch (operation_result) {
412 case I2C_CHANNEL_OPERATION_SUCCEEDED:
413 i2caux_request->status =
414 I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
415 operation_succeeded = true;
416 break;
417 case I2C_CHANNEL_OPERATION_NO_RESPONSE:
418 i2caux_request->status =
419 I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
420 break;
421 case I2C_CHANNEL_OPERATION_TIMEOUT:
422 i2caux_request->status =
423 I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
424 break;
425 case I2C_CHANNEL_OPERATION_FAILED:
426 i2caux_request->status =
427 I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
428 break;
429 default:
430 i2caux_request->status =
431 I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
432 break;
433 }
434 } while (operation_result == I2C_CHANNEL_OPERATION_ENGINE_BUSY);
435 }
436
437 return operation_succeeded;
438}
439
440uint32_t dal_i2c_sw_engine_get_speed(
441 const struct i2c_engine *engine)
442{
443 return FROM_I2C_ENGINE(engine)->speed;
444}
445
446void dal_i2c_sw_engine_set_speed(
447 struct i2c_engine *engine,
448 uint32_t speed)
449{
450 struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine);
451
452 ASSERT(speed);
453
454 sw_engine->speed = speed ? speed : I2CAUX_DEFAULT_I2C_SW_SPEED;
455
456 sw_engine->clock_delay = 1000 / sw_engine->speed;
457
458 if (sw_engine->clock_delay < 12)
459 sw_engine->clock_delay = 12;
460}
461
462bool dal_i2caux_i2c_sw_engine_acquire_engine(
463 struct i2c_engine *engine,
464 struct ddc *ddc)
465{
466 enum gpio_result result;
467
468 result = dal_ddc_open(ddc, GPIO_MODE_FAST_OUTPUT,
469 GPIO_DDC_CONFIG_TYPE_MODE_I2C);
470
471 if (result != GPIO_RESULT_OK)
472 return false;
473
474 engine->base.ddc = ddc;
475
476 return true;
477}
478
479void dal_i2c_sw_engine_submit_channel_request(
480 struct i2c_engine *engine,
481 struct i2c_request_transaction_data *req)
482{
483 struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine);
484
485 struct ddc *ddc = engine->base.ddc;
486 uint16_t clock_delay_div_4 = sw_engine->clock_delay >> 2;
487
488 /* send sync (start / repeated start) */
489
490 bool result = start_sync(engine->base.ctx, ddc, clock_delay_div_4);
491
492 /* process payload */
493
494 if (result) {
495 switch (req->action) {
496 case I2CAUX_TRANSACTION_ACTION_I2C_WRITE:
497 case I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT:
498 result = i2c_write(engine->base.ctx, ddc, clock_delay_div_4,
499 req->address, req->length, req->data);
500 break;
501 case I2CAUX_TRANSACTION_ACTION_I2C_READ:
502 case I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT:
503 result = i2c_read(engine->base.ctx, ddc, clock_delay_div_4,
504 req->address, req->length, req->data);
505 break;
506 default:
507 result = false;
508 break;
509 }
510 }
511
512 /* send stop if not 'mot' or operation failed */
513
514 if (!result ||
515 (req->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
516 (req->action == I2CAUX_TRANSACTION_ACTION_I2C_READ))
517 if (!stop_sync(engine->base.ctx, ddc, clock_delay_div_4))
518 result = false;
519
520 req->status = result ?
521 I2C_CHANNEL_OPERATION_SUCCEEDED :
522 I2C_CHANNEL_OPERATION_FAILED;
523}
524
525enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status(
526 struct i2c_engine *engine,
527 uint8_t *returned_bytes)
528{
529 /* No arbitration with VBIOS is performed since DCE 6.0 */
530 return I2C_CHANNEL_OPERATION_SUCCEEDED;
531}
532
533void dal_i2c_sw_engine_destruct(
534 struct i2c_sw_engine *engine)
535{
536 dal_i2c_engine_destruct(&engine->base);
537}
538
539static void destroy(
540 struct i2c_engine **ptr)
541{
542 dal_i2c_sw_engine_destruct(FROM_I2C_ENGINE(*ptr));
543
544 kfree(*ptr);
545 *ptr = NULL;
546}
547
548static const struct i2c_engine_funcs i2c_engine_funcs = {
549 .acquire_engine = dal_i2caux_i2c_sw_engine_acquire_engine,
550 .destroy = destroy,
551 .get_speed = dal_i2c_sw_engine_get_speed,
552 .set_speed = dal_i2c_sw_engine_set_speed,
553 .setup_engine = dal_i2c_engine_setup_i2c_engine,
554 .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
555 .process_channel_reply = dal_i2c_engine_process_channel_reply,
556 .get_channel_status = dal_i2c_sw_engine_get_channel_status,
557};
558
559static void release_engine(
560 struct engine *engine)
561{
562
563}
564
565static const struct engine_funcs engine_funcs = {
566 .release_engine = release_engine,
567 .get_engine_type = dal_i2c_sw_engine_get_engine_type,
568 .acquire = dal_i2c_engine_acquire,
569 .submit_request = dal_i2c_sw_engine_submit_request,
570};
571
572void dal_i2c_sw_engine_construct(
573 struct i2c_sw_engine *engine,
574 const struct i2c_sw_engine_create_arg *arg)
575{
576 dal_i2c_engine_construct(&engine->base, arg->ctx);
577 dal_i2c_sw_engine_set_speed(&engine->base, arg->default_speed);
578 engine->base.funcs = &i2c_engine_funcs;
579 engine->base.base.funcs = &engine_funcs;
580}
581
582struct i2c_engine *dal_i2c_sw_engine_create(
583 const struct i2c_sw_engine_create_arg *arg)
584{
585 struct i2c_sw_engine *engine;
586
587 if (!arg) {
588 BREAK_TO_DEBUGGER();
589 return NULL;
590 }
591
592 engine = kzalloc(sizeof(struct i2c_sw_engine), GFP_KERNEL);
593
594 if (!engine) {
595 BREAK_TO_DEBUGGER();
596 return NULL;
597 }
598
599 dal_i2c_sw_engine_construct(engine, arg);
600 return &engine->base;
601}
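For the bit-banging delays above, dal_i2c_sw_engine_set_speed() derives clock_delay as 1000 / speed with a floor of 12; assuming the unit is microseconds per SCL period (the code does not state it explicitly), the arithmetic works out as:

	uint32_t speed_khz = 100; /* example value */
	/* 50 kHz  -> clock_delay = 20 -> clock_delay_div_4 = 5 us
	 * 100 kHz -> clock_delay = 10, clamped to 12 -> div_4 = 3 us */
	uint32_t clock_delay = (1000 / speed_khz < 12) ? 12 : 1000 / speed_khz;
	uint16_t quarter_period_us = clock_delay >> 2;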
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h
deleted file mode 100644
index 546f15b0d3f1..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_SW_ENGINE_H__
27#define __DAL_I2C_SW_ENGINE_H__
28
29enum {
30 I2C_SW_RETRIES = 10,
31 I2C_SW_SCL_READ_RETRIES = 128,
32 /* following value is in microseconds */
33 I2C_SW_TIMEOUT_DELAY = 3000
34};
35
36struct i2c_sw_engine;
37
38struct i2c_sw_engine {
39 struct i2c_engine base;
40 uint32_t clock_delay;
41 /* Values below are in kHz */
42 uint32_t speed;
43 uint32_t default_speed;
44};
45
46struct i2c_sw_engine_create_arg {
47 uint32_t default_speed;
48 struct dc_context *ctx;
49};
50
51void dal_i2c_sw_engine_construct(
52 struct i2c_sw_engine *engine,
53 const struct i2c_sw_engine_create_arg *arg);
54
55bool dal_i2caux_i2c_sw_engine_acquire_engine(
56 struct i2c_engine *engine,
57 struct ddc *ddc_handle);
58
59void dal_i2c_sw_engine_destruct(
60 struct i2c_sw_engine *engine);
61
62struct i2c_engine *dal_i2c_sw_engine_create(
63 const struct i2c_sw_engine_create_arg *arg);
64enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type(
65 const struct engine *engine);
66bool dal_i2c_sw_engine_submit_request(
67 struct engine *ptr,
68 struct i2caux_transaction_request *i2caux_request,
69 bool middle_of_transaction);
70uint32_t dal_i2c_sw_engine_get_speed(
71 const struct i2c_engine *engine);
72void dal_i2c_sw_engine_set_speed(
73 struct i2c_engine *ptr,
74 uint32_t speed);
75void dal_i2c_sw_engine_submit_channel_request(
76 struct i2c_engine *ptr,
77 struct i2c_request_transaction_data *req);
78enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status(
79 struct i2c_engine *engine,
80 uint8_t *returned_bytes);
81#endif
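A minimal usage sketch for the create/destroy pair declared above (my_ctx is a placeholder dc_context pointer):

	struct i2c_sw_engine_create_arg arg = {
		.default_speed = I2CAUX_DEFAULT_I2C_SW_SPEED, /* 50 kHz */
		.ctx = my_ctx, /* placeholder */
	};
	struct i2c_engine *engine = dal_i2c_sw_engine_create(&arg);

	if (engine) {
		/* ... submit requests via engine->base.funcs ... */
		engine->funcs->destroy(&engine); /* frees and NULLs the pointer */
	}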
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
deleted file mode 100644
index 1ad6e49102ff..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ /dev/null
@@ -1,491 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services.h"
27
28/*
29 * Pre-requisites: headers required by header of this unit
30 */
31#include "include/i2caux_interface.h"
32#include "dc_bios_types.h"
33
34/*
35 * Header of this unit
36 */
37
38#include "i2caux.h"
39
40/*
41 * Post-requisites: headers required by this unit
42 */
43
44#include "engine.h"
45#include "i2c_engine.h"
46#include "aux_engine.h"
47
48/*
49 * This unit
50 */
51
52#include "dce80/i2caux_dce80.h"
53
54#include "dce100/i2caux_dce100.h"
55
56#include "dce110/i2caux_dce110.h"
57
58#include "dce112/i2caux_dce112.h"
59
60#include "dce120/i2caux_dce120.h"
61
62#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
63#include "dcn10/i2caux_dcn10.h"
64#endif
65
66#include "diagnostics/i2caux_diag.h"
67
68/*
69 * @brief
70 * Plain API, available publicly
71 */
72
73struct i2caux *dal_i2caux_create(
74 struct dc_context *ctx)
75{
76 if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
77 return dal_i2caux_diag_fpga_create(ctx);
78 }
79
80 switch (ctx->dce_version) {
81 case DCE_VERSION_8_0:
82 case DCE_VERSION_8_1:
83 case DCE_VERSION_8_3:
84 return dal_i2caux_dce80_create(ctx);
85 case DCE_VERSION_11_2:
86 case DCE_VERSION_11_22:
87 return dal_i2caux_dce112_create(ctx);
88 case DCE_VERSION_11_0:
89 return dal_i2caux_dce110_create(ctx);
90 case DCE_VERSION_10_0:
91 return dal_i2caux_dce100_create(ctx);
92 case DCE_VERSION_12_0:
93 case DCE_VERSION_12_1:
94 return dal_i2caux_dce120_create(ctx);
95#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
96 case DCN_VERSION_1_0:
97 return dal_i2caux_dcn10_create(ctx);
98#endif
99
100#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
101 case DCN_VERSION_1_01:
102 return dal_i2caux_dcn10_create(ctx);
103#endif
104 default:
105 BREAK_TO_DEBUGGER();
106 return NULL;
107 }
108}
109
110bool dal_i2caux_submit_i2c_command(
111 struct i2caux *i2caux,
112 struct ddc *ddc,
113 struct i2c_command *cmd)
114{
115 struct i2c_engine *engine;
116 uint8_t index_of_payload = 0;
117 bool result;
118
119 if (!ddc) {
120 BREAK_TO_DEBUGGER();
121 return false;
122 }
123
124 if (!cmd) {
125 BREAK_TO_DEBUGGER();
126 return false;
127 }
128
129 /*
130 * The default is SW; however, a feature flag in the adapter
131 * service determines whether the SW i2c_engine is available.
132 * If SW i2c is not available, we fall back to HW. Currently this
133 * flag is set so that the SW i2c engine is created only for dce80
134 * and for no other dce.
135 */
136 switch (cmd->engine) {
137 case I2C_COMMAND_ENGINE_DEFAULT:
138 case I2C_COMMAND_ENGINE_SW:
139 /* try to acquire SW engine first,
140 * acquire HW engine if SW engine not available */
141 engine = i2caux->funcs->acquire_i2c_sw_engine(i2caux, ddc);
142
143 if (!engine)
144 engine = i2caux->funcs->acquire_i2c_hw_engine(
145 i2caux, ddc);
146 break;
147 case I2C_COMMAND_ENGINE_HW:
148 default:
149 /* try to acquire HW engine first,
150 * acquire SW engine if HW engine not available */
151 engine = i2caux->funcs->acquire_i2c_hw_engine(i2caux, ddc);
152
153 if (!engine)
154 engine = i2caux->funcs->acquire_i2c_sw_engine(
155 i2caux, ddc);
156 }
157
158 if (!engine)
159 return false;
160
161 engine->funcs->set_speed(engine, cmd->speed);
162
163 result = true;
164
165 while (index_of_payload < cmd->number_of_payloads) {
166 bool mot = (index_of_payload != cmd->number_of_payloads - 1);
167
168 struct i2c_payload *payload = cmd->payloads + index_of_payload;
169
170 struct i2caux_transaction_request request = { 0 };
171
172 request.operation = payload->write ?
173 I2CAUX_TRANSACTION_WRITE :
174 I2CAUX_TRANSACTION_READ;
175
176 request.payload.address_space =
177 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
178 request.payload.address = (payload->address << 1) |
179 !payload->write;
180 request.payload.length = payload->length;
181 request.payload.data = payload->data;
182
183 if (!engine->base.funcs->submit_request(
184 &engine->base, &request, mot)) {
185 result = false;
186 break;
187 }
188
189 ++index_of_payload;
190 }
191
192 i2caux->funcs->release_engine(i2caux, &engine->base);
193
194 return result;
195}
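	/* Illustration (not part of the original file): every payload except
	 * the last is submitted with middle-of-transaction (mot) set, so the
	 * engine issues a repeated start instead of a stop between payloads.
	 * A typical register read is two payloads:
	 *   payload 0 (write, mot = true)  -> start, addr|W, register offset
	 *   payload 1 (read,  mot = false) -> repeated start, addr|R, data, stop
	 */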
196
197bool dal_i2caux_submit_aux_command(
198 struct i2caux *i2caux,
199 struct ddc *ddc,
200 struct aux_command *cmd)
201{
202 struct aux_engine *engine;
203 uint8_t index_of_payload = 0;
204 bool result;
205 bool mot;
206
207 if (!ddc) {
208 BREAK_TO_DEBUGGER();
209 return false;
210 }
211
212 if (!cmd) {
213 BREAK_TO_DEBUGGER();
214 return false;
215 }
216
217 engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc);
218
219 if (!engine)
220 return false;
221
222 engine->delay = cmd->defer_delay;
223 engine->max_defer_write_retry = cmd->max_defer_write_retry;
224
225 result = true;
226
227 while (index_of_payload < cmd->number_of_payloads) {
228 struct aux_payload *payload = cmd->payloads + index_of_payload;
229 struct i2caux_transaction_request request = { 0 };
230
231 if (cmd->mot == I2C_MOT_UNDEF)
232 mot = (index_of_payload != cmd->number_of_payloads - 1);
233 else
234 mot = (cmd->mot == I2C_MOT_TRUE);
235
236 request.operation = payload->write ?
237 I2CAUX_TRANSACTION_WRITE :
238 I2CAUX_TRANSACTION_READ;
239
240 if (payload->i2c_over_aux) {
241 request.payload.address_space =
242 I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
243
244 request.payload.address = (payload->address << 1) |
245 !payload->write;
246 } else {
247 request.payload.address_space =
248 I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD;
249
250 request.payload.address = payload->address;
251 }
252
253 request.payload.length = payload->length;
254 request.payload.data = payload->data;
255
256 if (!engine->base.funcs->submit_request(
257 &engine->base, &request, mot)) {
258 result = false;
259 break;
260 }
261
262 ++index_of_payload;
263 }
264
265 i2caux->funcs->release_engine(i2caux, &engine->base);
266
267 return result;
268}
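The address packing above follows the standard I2C wire format: the 7-bit target address occupies bits 7:1 and the R/W flag bit 0 (0 = write, 1 = read), which is exactly what (payload->address << 1) | !payload->write produces. MOT (Middle-Of-Transaction) stays set for every payload except the last, so a multi-payload command is presented to the sink as one transaction, unless cmd->mot forces it either way. Illustrative values:

/* addr7 = 0x50:
 *   write: (0x50 << 1) | 0 = 0xA0
 *   read:  (0x50 << 1) | 1 = 0xA1
 */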
269
270static bool get_hw_supported_ddc_line(
271 struct ddc *ddc,
272 enum gpio_ddc_line *line)
273{
274 enum gpio_ddc_line line_found;
275
276 *line = GPIO_DDC_LINE_UNKNOWN;
277
278 if (!ddc) {
279 BREAK_TO_DEBUGGER();
280 return false;
281 }
282
283 if (!ddc->hw_info.hw_supported)
284 return false;
285
286 line_found = dal_ddc_get_line(ddc);
287
288 if (line_found >= GPIO_DDC_LINE_COUNT)
289 return false;
290
291 *line = line_found;
292
293 return true;
294}
295
296void dal_i2caux_configure_aux(
297 struct i2caux *i2caux,
298 struct ddc *ddc,
299 union aux_config cfg)
300{
301 struct aux_engine *engine =
302 i2caux->funcs->acquire_aux_engine(i2caux, ddc);
303
304 if (!engine)
305 return;
306
307 engine->funcs->configure(engine, cfg);
308
309 i2caux->funcs->release_engine(i2caux, &engine->base);
310}
311
312void dal_i2caux_destroy(
313 struct i2caux **i2caux)
314{
315 if (!i2caux || !*i2caux) {
316 BREAK_TO_DEBUGGER();
317 return;
318 }
319
320 (*i2caux)->funcs->destroy(i2caux);
321
322 *i2caux = NULL;
323}
324
325/*
326 * @brief
327 * A utility function used by 'struct i2caux' and its descendants
328 */
329
330uint32_t dal_i2caux_get_reference_clock(
331 struct dc_bios *bios)
332{
333 struct dc_firmware_info info = { { 0 } };
334
335 if (bios->funcs->get_firmware_info(bios, &info) != BP_RESULT_OK)
336 return 0;
337
338 return info.pll_info.crystal_frequency;
339}
340
341/*
342 * @brief
343 * i2caux
344 */
345
346enum {
347 /* following are expressed in KHz */
348 DEFAULT_I2C_SW_SPEED = 50,
349 DEFAULT_I2C_HW_SPEED = 50,
350
351 DEFAULT_I2C_SW_SPEED_100KHZ = 100,
352 DEFAULT_I2C_HW_SPEED_100KHZ = 100,
353
354 /* This is the timeout as defined in DP 1.2a,
355 * 2.3.4 "Detailed uPacket TX AUX CH State Description". */
356 AUX_TIMEOUT_PERIOD = 400,
357
358 /* Ideally, the SW timeout should be just above 550usec
359 * which is programmed in HW.
360 * But the SW timeout of 600usec is not reliable,
361 * because on some systems, delay_in_microseconds()
362 * returns faster than it should.
363 * EPR #379763: by trial-and-error on different systems,
364 * 700usec is the minimum reliable SW timeout for polling
365 * the AUX_SW_STATUS.AUX_SW_DONE bit.
366	 * This timeout expires *only* under AUX Error or AUX
367	 * Timeout conditions - not during normal operation.
368	 * During normal operation, the AUX_SW_STATUS.AUX_SW_DONE bit
369	 * is set within ~240usec at most. That means increasing
370	 * this timeout will not affect normal operation, and we'll
371	 * time out after
372	 * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
373 * This timeout is especially important for
374 * resume from S3 and CTS. */
375 SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
376};
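Putting the two constants together gives the value programmed into aux_timeout_period by dal_i2caux_construct() below:

/* 4 * 400 usec = 1600 usec of SW polling budget - roughly 2.9x the
 * 550 usec HW timeout, and comfortably above the ~700 usec empirical
 * minimum from EPR #379763.
 */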
377
378struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine(
379 struct i2caux *i2caux,
380 struct ddc *ddc)
381{
382 enum gpio_ddc_line line;
383 struct i2c_engine *engine = NULL;
384
385 if (get_hw_supported_ddc_line(ddc, &line))
386 engine = i2caux->i2c_sw_engines[line];
387
388 if (!engine)
389 engine = i2caux->i2c_generic_sw_engine;
390
391 if (!engine)
392 return NULL;
393
394 if (!engine->base.funcs->acquire(&engine->base, ddc))
395 return NULL;
396
397 return engine;
398}
399
400struct aux_engine *dal_i2caux_acquire_aux_engine(
401 struct i2caux *i2caux,
402 struct ddc *ddc)
403{
404 enum gpio_ddc_line line;
405 struct aux_engine *engine;
406
407 if (!get_hw_supported_ddc_line(ddc, &line))
408 return NULL;
409
410 engine = i2caux->aux_engines[line];
411
412 if (!engine)
413 return NULL;
414
415 if (!engine->base.funcs->acquire(&engine->base, ddc))
416 return NULL;
417
418 return engine;
419}
420
421void dal_i2caux_release_engine(
422 struct i2caux *i2caux,
423 struct engine *engine)
424{
425 engine->funcs->release_engine(engine);
426
427 dal_ddc_close(engine->ddc);
428
429 engine->ddc = NULL;
430}
431
432void dal_i2caux_construct(
433 struct i2caux *i2caux,
434 struct dc_context *ctx)
435{
436 uint32_t i = 0;
437
438 i2caux->ctx = ctx;
439 do {
440 i2caux->i2c_sw_engines[i] = NULL;
441 i2caux->i2c_hw_engines[i] = NULL;
442 i2caux->aux_engines[i] = NULL;
443
444 ++i;
445 } while (i < GPIO_DDC_LINE_COUNT);
446
447 i2caux->i2c_generic_sw_engine = NULL;
448 i2caux->i2c_generic_hw_engine = NULL;
449
450 i2caux->aux_timeout_period =
451 SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD;
452
453 if (ctx->dce_version >= DCE_VERSION_11_2) {
454 i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED_100KHZ;
455 i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED_100KHZ;
456 } else {
457 i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED;
458 i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED;
459 }
460}
461
462void dal_i2caux_destruct(
463 struct i2caux *i2caux)
464{
465 uint32_t i = 0;
466
467 if (i2caux->i2c_generic_hw_engine)
468 i2caux->i2c_generic_hw_engine->funcs->destroy(
469 &i2caux->i2c_generic_hw_engine);
470
471 if (i2caux->i2c_generic_sw_engine)
472 i2caux->i2c_generic_sw_engine->funcs->destroy(
473 &i2caux->i2c_generic_sw_engine);
474
475 do {
476 if (i2caux->aux_engines[i])
477 i2caux->aux_engines[i]->funcs->destroy(
478 &i2caux->aux_engines[i]);
479
480 if (i2caux->i2c_hw_engines[i])
481 i2caux->i2c_hw_engines[i]->funcs->destroy(
482 &i2caux->i2c_hw_engines[i]);
483
484 if (i2caux->i2c_sw_engines[i])
485 i2caux->i2c_sw_engines[i]->funcs->destroy(
486 &i2caux->i2c_sw_engines[i]);
487
488 ++i;
489 } while (i < GPIO_DDC_LINE_COUNT);
490}
491
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h
deleted file mode 100644
index 64f51bb06915..000000000000
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h
+++ /dev/null
@@ -1,122 +0,0 @@
1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __DAL_I2C_AUX_H__
27#define __DAL_I2C_AUX_H__
28
29uint32_t dal_i2caux_get_reference_clock(
30 struct dc_bios *bios);
31
32struct i2caux;
33
34struct engine;
35
36struct i2caux_funcs {
37 void (*destroy)(struct i2caux **ptr);
38 struct i2c_engine * (*acquire_i2c_sw_engine)(
39 struct i2caux *i2caux,
40 struct ddc *ddc);
41 struct i2c_engine * (*acquire_i2c_hw_engine)(
42 struct i2caux *i2caux,
43 struct ddc *ddc);
44 struct aux_engine * (*acquire_aux_engine)(
45 struct i2caux *i2caux,
46 struct ddc *ddc);
47 void (*release_engine)(
48 struct i2caux *i2caux,
49 struct engine *engine);
50};
51
52struct i2c_engine;
53struct aux_engine;
54
55struct i2caux {
56 struct dc_context *ctx;
57 const struct i2caux_funcs *funcs;
58	/* On an ASIC we have a certain number of lines with a HW DDC
59	 * engine (4, 6, or maybe more in the future).
60	 * For every such line, we create a separate HW DDC engine
61	 * (since these engines exist in HW) and a separate SW DDC
62	 * engine (to allow concurrent use of several lines).
63	 * AUX engines are handled in a similar way. */
64
65 /* I2C SW engines, per DDC line.
66 * Only lines with HW DDC support will be initialized */
67 struct i2c_engine *i2c_sw_engines[GPIO_DDC_LINE_COUNT];
68
69 /* I2C HW engines, per DDC line.
70 * Only lines with HW DDC support will be initialized */
71 struct i2c_engine *i2c_hw_engines[GPIO_DDC_LINE_COUNT];
72
73 /* AUX engines, per DDC line.
74 * Only lines with HW AUX support will be initialized */
75 struct aux_engine *aux_engines[GPIO_DDC_LINE_COUNT];
76
77	/* For all other lines, we can use a
78	 * single instance of the generic I2C HW engine
79	 * (since there is a single instance of it in HW)
80	 * or a single instance of the generic I2C SW engine.
81	 * AUX is not supported on those lines. */
82
83 /* General-purpose I2C SW engine.
84 * Can be assigned dynamically to any line per transaction */
85 struct i2c_engine *i2c_generic_sw_engine;
86
87 /* General-purpose I2C generic HW engine.
88 * Can be assigned dynamically to almost any line per transaction */
89 struct i2c_engine *i2c_generic_hw_engine;
90
91 /* [anaumov] in DAL2, there is a Mutex */
92
93 uint32_t aux_timeout_period;
94
95 /* expressed in KHz */
96 uint32_t default_i2c_sw_speed;
97 uint32_t default_i2c_hw_speed;
98};
99
100void dal_i2caux_construct(
101 struct i2caux *i2caux,
102 struct dc_context *ctx);
103
104void dal_i2caux_release_engine(
105 struct i2caux *i2caux,
106 struct engine *engine);
107
108void dal_i2caux_destruct(
109 struct i2caux *i2caux);
110
111void dal_i2caux_destroy(
112 struct i2caux **ptr);
113
114struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine(
115 struct i2caux *i2caux,
116 struct ddc *ddc);
117
118struct aux_engine *dal_i2caux_acquire_aux_engine(
119 struct i2caux *i2caux,
120 struct ddc *ddc);
121
122#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
index 47ef90495376..43d1fbd8ace5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
@@ -78,7 +78,7 @@ struct csdp_ref_clk_ds_params {
78}; 78};
79 79
80struct pixel_clk_params { 80struct pixel_clk_params {
81 uint32_t requested_pix_clk; /* in KHz */ 81 uint32_t requested_pix_clk_100hz;
82/*> Requested Pixel Clock 82/*> Requested Pixel Clock
83 * (based on Video Timing standard used for requested mode)*/ 83 * (based on Video Timing standard used for requested mode)*/
84 uint32_t requested_sym_clk; /* in KHz */ 84 uint32_t requested_sym_clk; /* in KHz */
@@ -104,9 +104,9 @@ struct pixel_clk_params {
104 * with actually calculated Clock and reference Crystal frequency 104 * with actually calculated Clock and reference Crystal frequency
105 */ 105 */
106struct pll_settings { 106struct pll_settings {
107 uint32_t actual_pix_clk; 107 uint32_t actual_pix_clk_100hz;
108 uint32_t adjusted_pix_clk; 108 uint32_t adjusted_pix_clk_100hz;
109 uint32_t calculated_pix_clk; 109 uint32_t calculated_pix_clk_100hz;
110 uint32_t vco_freq; 110 uint32_t vco_freq;
111 uint32_t reference_freq; 111 uint32_t reference_freq;
112 uint32_t reference_divider; 112 uint32_t reference_divider;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index 94fc31080fda..2e61a22ef4b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -30,7 +30,7 @@ enum dc_status {
30 DC_OK = 1, 30 DC_OK = 1,
31 31
32 DC_NO_CONTROLLER_RESOURCE = 2, 32 DC_NO_CONTROLLER_RESOURCE = 2,
33 DC_NO_STREAM_ENG_RESOURCE = 3, 33 DC_NO_STREAM_ENC_RESOURCE = 3,
34 DC_NO_CLOCK_SOURCE_RESOURCE = 4, 34 DC_NO_CLOCK_SOURCE_RESOURCE = 4,
35 DC_FAIL_CONTROLLER_VALIDATE = 5, 35 DC_FAIL_CONTROLLER_VALIDATE = 5,
36 DC_FAIL_ENC_VALIDATE = 6, 36 DC_FAIL_ENC_VALIDATE = 6,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index b168a5e9dd9d..986ed1728644 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -146,7 +146,7 @@ struct resource_pool {
146 struct mpc *mpc; 146 struct mpc *mpc;
147 struct pp_smu_funcs_rv *pp_smu; 147 struct pp_smu_funcs_rv *pp_smu;
148 struct pp_smu_display_requirement_rv pp_smu_req; 148 struct pp_smu_display_requirement_rv pp_smu_req;
149 struct aux_engine *engines[MAX_PIPES]; 149 struct dce_aux *engines[MAX_PIPES];
150 struct dce_i2c_hw *hw_i2cs[MAX_PIPES]; 150 struct dce_i2c_hw *hw_i2cs[MAX_PIPES];
151 struct dce_i2c_sw *sw_i2cs[MAX_PIPES]; 151 struct dce_i2c_sw *sw_i2cs[MAX_PIPES];
152 bool i2c_hw_buffer_in_use; 152 bool i2c_hw_buffer_in_use;
@@ -180,13 +180,8 @@ struct resource_pool {
180 const struct resource_caps *res_cap; 180 const struct resource_caps *res_cap;
181}; 181};
182 182
183struct dcn_fe_clocks {
184 int dppclk_khz;
185};
186
187struct dcn_fe_bandwidth { 183struct dcn_fe_bandwidth {
188 struct dcn_fe_clocks calc; 184 int dppclk_khz;
189 struct dcn_fe_clocks cur;
190}; 185};
191 186
192struct stream_resource { 187struct stream_resource {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 538b83303b86..16fd4dc6c4dd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -64,13 +64,6 @@ void dal_ddc_i2c_payloads_add(
64 uint8_t *data, 64 uint8_t *data,
65 bool write); 65 bool write);
66 66
67void dal_ddc_aux_payloads_add(
68 struct aux_payloads *payloads,
69 uint32_t address,
70 uint32_t len,
71 uint8_t *data,
72 bool write);
73
74struct ddc_service_init_data { 67struct ddc_service_init_data {
75 struct graphics_object_id id; 68 struct graphics_object_id id;
76 struct dc_context *ctx; 69 struct dc_context *ctx;
@@ -103,12 +96,10 @@ bool dal_ddc_service_query_ddc_data(
103 uint32_t read_size); 96 uint32_t read_size);
104 97
105int dc_link_aux_transfer(struct ddc_service *ddc, 98int dc_link_aux_transfer(struct ddc_service *ddc,
106 unsigned int address, 99 struct aux_payload *payload);
107 uint8_t *reply, 100
108 void *buffer, 101bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
109 unsigned int size, 102 struct aux_payload *payload);
110 enum aux_transaction_type type,
111 enum i2caux_transaction_action action);
112 103
113void dal_ddc_service_write_scdc_data( 104void dal_ddc_service_write_scdc_data(
114 struct ddc_service *ddc_service, 105 struct ddc_service *ddc_service,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 02f757dd70d4..9d2d8e51306c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -39,6 +39,18 @@ enum segment_order {
39 segment_order__non_contiguous, 39 segment_order__non_contiguous,
40}; 40};
41 41
42struct dcn_hubbub_wm_set {
43 uint32_t wm_set;
44 uint32_t data_urgent;
45 uint32_t pte_meta_urgent;
46 uint32_t sr_enter;
47 uint32_t sr_exit;
48 uint32_t dram_clk_chanage;
49};
50
51struct dcn_hubbub_wm {
52 struct dcn_hubbub_wm_set sets[4];
53};
42 54
43struct hubbub_funcs { 55struct hubbub_funcs {
44 void (*update_dchub)( 56 void (*update_dchub)(
@@ -58,7 +70,14 @@ struct hubbub_funcs {
58 bool (*dcc_support_pixel_format)( 70 bool (*dcc_support_pixel_format)(
59 enum surface_pixel_format format, 71 enum surface_pixel_format format,
60 unsigned int *bytes_per_element); 72 unsigned int *bytes_per_element);
73
74 void (*wm_read_state)(struct hubbub *hubbub,
75 struct dcn_hubbub_wm *wm);
61}; 76};
62 77
78struct hubbub {
79 const struct hubbub_funcs *funcs;
80 struct dc_context *ctx;
81};
63 82
64#endif 83#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index cb85eaa9857f..cbaa43853611 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -27,16 +27,22 @@
27 27
28#include "dm_services_types.h" 28#include "dm_services_types.h"
29 29
30/* If HW itself ever powered down it will be 0.
31 * fwDmcuInit will write to 1.
32 * Driver will only call MCP init if current state is 1,
33 * and the MCP command will transition this to 2.
34 */
30enum dmcu_state { 35enum dmcu_state {
31 DMCU_NOT_INITIALIZED = 0, 36 DMCU_UNLOADED = 0,
32 DMCU_RUNNING = 1 37 DMCU_LOADED_UNINITIALIZED = 1,
38 DMCU_RUNNING = 2,
33}; 39};
34 40
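A sketch of the gate the comment describes (hypothetical helper; the real check lives in the DMCU implementation, not in this header):

static bool dmcu_needs_mcp_init(enum dmcu_state state)
{
	/* Only a loaded-but-uninitialized DMCU accepts MCP init;
	 * the MCP command itself transitions it to DMCU_RUNNING. */
	return state == DMCU_LOADED_UNINITIALIZED;
}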
35struct dmcu_version { 41struct dmcu_version {
36 unsigned int date;
37 unsigned int month;
38 unsigned int year;
39 unsigned int interface_version; 42 unsigned int interface_version;
43 unsigned int abm_version;
44 unsigned int psr_version;
45 unsigned int build_version;
40}; 46};
41 47
42struct dmcu { 48struct dmcu {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index e894e649ce5a..fb7967b39edb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -39,6 +39,11 @@ struct dpp {
39 39
40}; 40};
41 41
42struct dpp_input_csc_matrix {
43 enum dc_color_space color_space;
44 uint16_t regval[12];
45};
46
42struct dpp_grph_csc_adjustment { 47struct dpp_grph_csc_adjustment {
43 struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE]; 48 struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
44 enum graphics_gamut_adjust_type gamut_adjust_type; 49 enum graphics_gamut_adjust_type gamut_adjust_type;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 04c6989aac58..1cd07e94ee63 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -78,7 +78,8 @@ struct hubp_funcs {
78 bool (*hubp_program_surface_flip_and_addr)( 78 bool (*hubp_program_surface_flip_and_addr)(
79 struct hubp *hubp, 79 struct hubp *hubp,
80 const struct dc_plane_address *address, 80 const struct dc_plane_address *address,
81 bool flip_immediate); 81 bool flip_immediate,
82 uint8_t vmid);
82 83
83 void (*hubp_program_pte_vm)( 84 void (*hubp_program_pte_vm)(
84 struct hubp *hubp, 85 struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 06df02ddff6a..da89c2edb07c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -31,7 +31,7 @@
31#include "dml/display_mode_structs.h" 31#include "dml/display_mode_structs.h"
32 32
33struct dchub_init_data; 33struct dchub_init_data;
34struct cstate_pstate_watermarks_st1 { 34struct cstate_pstate_watermarks_st {
35 uint32_t cstate_exit_ns; 35 uint32_t cstate_exit_ns;
36 uint32_t cstate_enter_plus_exit_ns; 36 uint32_t cstate_enter_plus_exit_ns;
37 uint32_t pstate_change_ns; 37 uint32_t pstate_change_ns;
@@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st1 {
40struct dcn_watermarks { 40struct dcn_watermarks {
41 uint32_t pte_meta_urgent_ns; 41 uint32_t pte_meta_urgent_ns;
42 uint32_t urgent_ns; 42 uint32_t urgent_ns;
43 struct cstate_pstate_watermarks_st1 cstate_pstate; 43 struct cstate_pstate_watermarks_st cstate_pstate;
44}; 44};
45 45
46struct dcn_watermark_set { 46struct dcn_watermark_set {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 53a9b64df11a..4051493557bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -161,6 +161,10 @@ struct stream_encoder_funcs {
161 void (*set_avmute)( 161 void (*set_avmute)(
162 struct stream_encoder *enc, bool enable); 162 struct stream_encoder *enc, bool enable);
163 163
164 void (*dig_connect_to_otg)(
165 struct stream_encoder *enc,
166 int tg_inst);
167
164}; 168};
165 169
166#endif /* STREAM_ENCODER_H_ */ 170#endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c b/drivers/gpu/drm/amd/display/dc/inc/hw/vmid.h
index 5d155d36d353..037beb0a2a27 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/vmid.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -23,30 +23,27 @@
23 * 23 *
24 */ 24 */
25 25
26#include "dm_services.h" 26#ifndef DAL_DC_INC_HW_VMID_H_
27#define DAL_DC_INC_HW_VMID_H_
27 28
28/* 29#include "core_types.h"
29 * Pre-requisites: headers required by header of this unit 30#include "dchubbub.h"
30 */
31#include "include/i2caux_interface.h"
32
33/*
34 * Header of this unit
35 */
36
37#include "engine.h"
38 31
39void dal_i2caux_construct_engine( 32struct dcn_vmid_registers {
40 struct engine *engine, 33 uint32_t CNTL;
41 struct dc_context *ctx) 34 uint32_t PAGE_TABLE_BASE_ADDR_HI32;
42{ 35 uint32_t PAGE_TABLE_BASE_ADDR_LO32;
43 engine->ddc = NULL; 36 uint32_t PAGE_TABLE_START_ADDR_HI32;
44 engine->ctx = ctx; 37 uint32_t PAGE_TABLE_START_ADDR_LO32;
45} 38 uint32_t PAGE_TABLE_END_ADDR_HI32;
39 uint32_t PAGE_TABLE_END_ADDR_LO32;
40};
46 41
47void dal_i2caux_destruct_engine( 42struct dcn_vmid_page_table_config {
48 struct engine *engine) 43 uint64_t page_table_start_addr;
49{ 44 uint64_t page_table_end_addr;
50 /* nothing to do */ 45 enum dcn_hubbub_page_table_depth depth;
51} 46 enum dcn_hubbub_page_table_block_size block_size;
47};
52 48
49#endif /* DAL_DC_INC_HW_VMID_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/inc/vm_helper.h
index c48c61f540a8..a202206e22a3 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/vm_helper.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -23,21 +23,33 @@
23 * 23 *
24 */ 24 */
25 25
26#ifndef __DAL_I2C_SW_ENGINE_DCE110_H__ 26#ifndef DC_INC_VM_HELPER_H_
27#define __DAL_I2C_SW_ENGINE_DCE110_H__ 27#define DC_INC_VM_HELPER_H_
28 28
29struct i2c_sw_engine_dce110 { 29#include "dc_types.h"
30 struct i2c_sw_engine base; 30
31 uint32_t engine_id; 31#define MAX_VMID 16
32#define MAX_HUBP 6
33
34struct vmid_usage {
35 uint16_t vmid_usage[2];
32}; 36};
33 37
34struct i2c_sw_engine_dce110_create_arg { 38struct vm_helper {
35 uint32_t engine_id; 39 unsigned int num_vmid;
36 uint32_t default_speed; 40 unsigned int num_hubp;
37 struct dc_context *ctx; 41 unsigned int num_vmids_available;
42 uint64_t *ptb_assigned_to_vmid;
43 struct vmid_usage *hubp_vmid_usage;
38}; 44};
39 45
40struct i2c_engine *dal_i2c_sw_engine_dce110_create( 46uint8_t get_vmid_for_ptb(
41 const struct i2c_sw_engine_dce110_create_arg *arg); 47 struct vm_helper *vm_helper,
48 int64_t ptb,
49 uint8_t pipe_idx);
50
51struct vm_helper init_vm_helper(
52 unsigned int num_vmid,
53 unsigned int num_hubp);
42 54
43#endif 55#endif /* DC_INC_VM_HELPER_H_ */
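A hedged usage sketch of the new helper (illustrative values; the real call sites land in the DCN code):

/* Illustrative only - not a call site from this patch. */
struct vm_helper helper = init_vm_helper(MAX_VMID, MAX_HUBP);
int64_t ptb = 0x100000;		/* example page table base */
uint8_t vmid = get_vmid_for_ptb(&helper, ptb, 0 /* pipe_idx */);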
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 7fd78a696800..01bf01a34a08 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -211,8 +211,8 @@ struct bp_pixel_clock_parameters {
211 /* signal_type -> Encoder Mode - needed by VBIOS Exec table */ 211 /* signal_type -> Encoder Mode - needed by VBIOS Exec table */
212 enum signal_type signal_type; 212 enum signal_type signal_type;
213 /* Adjusted Pixel Clock (after VBIOS exec table) 213 /* Adjusted Pixel Clock (after VBIOS exec table)
214 * that becomes Target Pixel Clock (KHz) */ 214 * that becomes Target Pixel Clock (100 Hz units) */
215 uint32_t target_pixel_clock; 215 uint32_t target_pixel_clock_100hz;
216 /* Calculated Reference divider of Display PLL */ 216 /* Calculated Reference divider of Display PLL */
217 uint32_t reference_divider; 217 uint32_t reference_divider;
218 /* Calculated Feedback divider of Display PLL */ 218 /* Calculated Feedback divider of Display PLL */
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
index 13a3c82d118f..bb012cb1a9f5 100644
--- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h
+++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
@@ -40,9 +40,19 @@ struct aux_payload {
40 /* set following flag to write data, 40 /* set following flag to write data,
41 * reset it to read data */ 41 * reset it to read data */
42 bool write; 42 bool write;
43 bool mot;
43 uint32_t address; 44 uint32_t address;
44 uint8_t length; 45 uint8_t length;
45 uint8_t *data; 46 uint8_t *data;
47 /*
48 * used to return the reply type of the transaction
49 * ignored if NULL
50 */
51 uint8_t *reply;
52 /* expressed in milliseconds
53 * zero means "use default value"
54 */
55 uint32_t defer_delay;
46}; 56};
47 57
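With the new fields, a native-AUX DPCD read through the reworked dc_link_aux_transfer() (declared in dc_link_ddc.h earlier in this patch) looks roughly like this; the DPCD address and buffer are illustrative:

/* Sketch only - assumes a valid struct ddc_service *ddc. */
uint8_t dpcd_rev = 0;
uint8_t reply_type = 0;
struct aux_payload payload = {
	.i2c_over_aux = false,	/* native AUX -> DPCD address space */
	.write = false,
	.mot = false,
	.address = 0x00000,	/* DPCD_REV, illustrative */
	.length = 1,
	.data = &dpcd_rev,
	.reply = &reply_type,	/* AUX reply type returned here */
	.defer_delay = 0,	/* 0 = use default */
};
int ret = dc_link_aux_transfer(ddc, &payload);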
48struct aux_command { 58struct aux_command {
@@ -66,27 +76,4 @@ union aux_config {
66 uint32_t raw; 76 uint32_t raw;
67}; 77};
68 78
69struct i2caux;
70
71struct i2caux *dal_i2caux_create(
72 struct dc_context *ctx);
73
74bool dal_i2caux_submit_i2c_command(
75 struct i2caux *i2caux,
76 struct ddc *ddc,
77 struct i2c_command *cmd);
78
79bool dal_i2caux_submit_aux_command(
80 struct i2caux *i2caux,
81 struct ddc *ddc,
82 struct aux_command *cmd);
83
84void dal_i2caux_configure_aux(
85 struct i2caux *i2caux,
86 struct ddc *ddc,
87 union aux_config cfg);
88
89void dal_i2caux_destroy(
90 struct i2caux **ptr);
91
92#endif 79#endif
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 479b77c2e89e..eefb85928298 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -823,7 +823,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
823 bool is_clipped = false; 823 bool is_clipped = false;
824 struct fixed31_32 sdr_white_level; 824 struct fixed31_32 sdr_white_level;
825 825
826 if (fs_params == NULL || fs_params->max_content == 0 || 826 if (fs_params->max_content == 0 ||
827 fs_params->max_display == 0) 827 fs_params->max_display == 0)
828 return false; 828 return false;
829 829
@@ -1508,7 +1508,7 @@ static bool map_regamma_hw_to_x_user(
1508 struct hw_x_point *coords = coords_x; 1508 struct hw_x_point *coords = coords_x;
1509 const struct pwl_float_data_ex *regamma = rgb_regamma; 1509 const struct pwl_float_data_ex *regamma = rgb_regamma;
1510 1510
1511 if (mapUserRamp) { 1511 if (ramp && mapUserRamp) {
1512 copy_rgb_regamma_to_coordinates_x(coords, 1512 copy_rgb_regamma_to_coordinates_x(coords,
1513 hw_points_num, 1513 hw_points_num,
1514 rgb_regamma); 1514 rgb_regamma);
@@ -1545,7 +1545,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
1545 1545
1546 struct pwl_float_data *rgb_user = NULL; 1546 struct pwl_float_data *rgb_user = NULL;
1547 struct pwl_float_data_ex *rgb_regamma = NULL; 1547 struct pwl_float_data_ex *rgb_regamma = NULL;
1548 struct gamma_pixel *axix_x = NULL; 1548 struct gamma_pixel *axis_x = NULL;
1549 struct pixel_gamma_point *coeff = NULL; 1549 struct pixel_gamma_point *coeff = NULL;
1550 enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; 1550 enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
1551 bool ret = false; 1551 bool ret = false;
@@ -1555,47 +1555,54 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
1555 1555
1556 /* we can use hardcoded curve for plain SRGB TF */ 1556 /* we can use hardcoded curve for plain SRGB TF */
1557 if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && 1557 if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
1558 output_tf->tf == TRANSFER_FUNCTION_SRGB && 1558 output_tf->tf == TRANSFER_FUNCTION_SRGB) {
1559 (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256))) 1559 if (ramp == NULL)
1560 return true; 1560 return true;
1561 if (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256))
1562 return true;
1563 }
1561 1564
1562 output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; 1565 output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
1563 1566
1564 rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, 1567 if (ramp && (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
1568 rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
1565 sizeof(*rgb_user), 1569 sizeof(*rgb_user),
1566 GFP_KERNEL); 1570 GFP_KERNEL);
1567 if (!rgb_user) 1571 if (!rgb_user)
1568 goto rgb_user_alloc_fail; 1572 goto rgb_user_alloc_fail;
1573
1574 axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x),
1575 GFP_KERNEL);
1576 if (!axis_x)
1577 goto axis_x_alloc_fail;
1578
1579 dividers.divider1 = dc_fixpt_from_fraction(3, 2);
1580 dividers.divider2 = dc_fixpt_from_int(2);
1581 dividers.divider3 = dc_fixpt_from_fraction(5, 2);
1582
1583 build_evenly_distributed_points(
1584 axis_x,
1585 ramp->num_entries,
1586 dividers);
1587
1588 if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
1589 scale_gamma(rgb_user, ramp, dividers);
1590 else if (ramp->type == GAMMA_RGB_FLOAT_1024)
1591 scale_gamma_dx(rgb_user, ramp, dividers);
1592 }
1593
1569 rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, 1594 rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
1570 sizeof(*rgb_regamma), 1595 sizeof(*rgb_regamma),
1571 GFP_KERNEL); 1596 GFP_KERNEL);
1572 if (!rgb_regamma) 1597 if (!rgb_regamma)
1573 goto rgb_regamma_alloc_fail; 1598 goto rgb_regamma_alloc_fail;
1574 axix_x = kvcalloc(ramp->num_entries + 3, sizeof(*axix_x), 1599
1575 GFP_KERNEL);
1576 if (!axix_x)
1577 goto axix_x_alloc_fail;
1578 coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), 1600 coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
1579 GFP_KERNEL); 1601 GFP_KERNEL);
1580 if (!coeff) 1602 if (!coeff)
1581 goto coeff_alloc_fail; 1603 goto coeff_alloc_fail;
1582 1604
1583 dividers.divider1 = dc_fixpt_from_fraction(3, 2);
1584 dividers.divider2 = dc_fixpt_from_int(2);
1585 dividers.divider3 = dc_fixpt_from_fraction(5, 2);
1586
1587 tf = output_tf->tf; 1605 tf = output_tf->tf;
1588
1589 build_evenly_distributed_points(
1590 axix_x,
1591 ramp->num_entries,
1592 dividers);
1593
1594 if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
1595 scale_gamma(rgb_user, ramp, dividers);
1596 else if (ramp->type == GAMMA_RGB_FLOAT_1024)
1597 scale_gamma_dx(rgb_user, ramp, dividers);
1598
1599 if (tf == TRANSFER_FUNCTION_PQ) { 1606 if (tf == TRANSFER_FUNCTION_PQ) {
1600 tf_pts->end_exponent = 7; 1607 tf_pts->end_exponent = 7;
1601 tf_pts->x_point_at_y1_red = 125; 1608 tf_pts->x_point_at_y1_red = 125;
@@ -1623,22 +1630,22 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
1623 coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? true:false); 1630 coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? true:false);
1624 } 1631 }
1625 map_regamma_hw_to_x_user(ramp, coeff, rgb_user, 1632 map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
1626 coordinates_x, axix_x, rgb_regamma, 1633 coordinates_x, axis_x, rgb_regamma,
1627 MAX_HW_POINTS, tf_pts, 1634 MAX_HW_POINTS, tf_pts,
1628 (mapUserRamp || ramp->type != GAMMA_RGB_256) && 1635 (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
1629 ramp->type != GAMMA_CS_TFM_1D); 1636 (ramp && ramp->type != GAMMA_CS_TFM_1D));
1630 1637
1631 if (ramp->type == GAMMA_CS_TFM_1D) 1638 if (ramp && ramp->type == GAMMA_CS_TFM_1D)
1632 apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); 1639 apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
1633 1640
1634 ret = true; 1641 ret = true;
1635 1642
1636 kvfree(coeff); 1643 kvfree(coeff);
1637coeff_alloc_fail: 1644coeff_alloc_fail:
1638 kvfree(axix_x);
1639axix_x_alloc_fail:
1640 kvfree(rgb_regamma); 1645 kvfree(rgb_regamma);
1641rgb_regamma_alloc_fail: 1646rgb_regamma_alloc_fail:
1647 kvfree(axis_x);
1648axis_x_alloc_fail:
1642 kvfree(rgb_user); 1649 kvfree(rgb_user);
1643rgb_user_alloc_fail: 1650rgb_user_alloc_fail:
1644 return ret; 1651 return ret;
@@ -1772,8 +1779,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
1772 /* we can use hardcoded curve for plain SRGB TF */ 1779 /* we can use hardcoded curve for plain SRGB TF */
1773 if (input_tf->type == TF_TYPE_PREDEFINED && 1780 if (input_tf->type == TF_TYPE_PREDEFINED &&
1774 input_tf->tf == TRANSFER_FUNCTION_SRGB && 1781 input_tf->tf == TRANSFER_FUNCTION_SRGB &&
1775 (!mapUserRamp && 1782 !mapUserRamp)
1776 (ramp->type == GAMMA_RGB_256 || ramp->num_entries == 0)))
1777 return true; 1783 return true;
1778 1784
1779 input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; 1785 input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 1544ed3f1747..94a84bc57c7a 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -108,8 +108,8 @@ static unsigned int calc_duration_in_us_from_v_total(
108{ 108{
109 unsigned int duration_in_us = 109 unsigned int duration_in_us =
110 (unsigned int)(div64_u64(((unsigned long long)(v_total) 110 (unsigned int)(div64_u64(((unsigned long long)(v_total)
111 * 1000) * stream->timing.h_total, 111 * 10000) * stream->timing.h_total,
112 stream->timing.pix_clk_khz)); 112 stream->timing.pix_clk_100hz));
113 113
114 return duration_in_us; 114 return duration_in_us;
115} 115}
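The multiplier changes from 1000 to 10000 because pix_clk moved from KHz to 100 Hz units. A quick derivation and spot check (1080p60 CEA timing used as an illustrative example):

/*
 * duration_us = v_total * h_total * 10^6 / pixel_rate_hz
 *             = v_total * h_total * 10^6 / (pix_clk_100hz * 100)
 *             = v_total * h_total * 10000 / pix_clk_100hz
 *
 * e.g. v_total = 1125, h_total = 2200, pix_clk_100hz = 1485000
 * (148.5 MHz): 1125 * 2200 * 10000 / 1485000 = 16666 us, i.e. 60 Hz.
 */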
@@ -126,7 +126,7 @@ static unsigned int calc_v_total_from_refresh(
126 refresh_in_uhz))); 126 refresh_in_uhz)));
127 127
128 v_total = div64_u64(div64_u64(((unsigned long long)( 128 v_total = div64_u64(div64_u64(((unsigned long long)(
129 frame_duration_in_ns) * stream->timing.pix_clk_khz), 129 frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
130 stream->timing.h_total), 1000000); 130 stream->timing.h_total), 1000000);
131 131
132 /* v_total cannot be less than nominal */ 132 /* v_total cannot be less than nominal */
@@ -152,7 +152,7 @@ static unsigned int calc_v_total_from_duration(
152 duration_in_us = vrr->max_duration_in_us; 152 duration_in_us = vrr->max_duration_in_us;
153 153
154 v_total = div64_u64(div64_u64(((unsigned long long)( 154 v_total = div64_u64(div64_u64(((unsigned long long)(
155 duration_in_us) * stream->timing.pix_clk_khz), 155 duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
156 stream->timing.h_total), 1000); 156 stream->timing.h_total), 1000);
157 157
158 /* v_total cannot be less than nominal */ 158 /* v_total cannot be less than nominal */
@@ -227,7 +227,7 @@ static void update_v_total_for_static_ramp(
227 } 227 }
228 228
229 v_total = div64_u64(div64_u64(((unsigned long long)( 229 v_total = div64_u64(div64_u64(((unsigned long long)(
230 current_duration_in_us) * stream->timing.pix_clk_khz), 230 current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
231 stream->timing.h_total), 1000); 231 stream->timing.h_total), 1000);
232 232
233 in_out_vrr->adjust.v_total_min = v_total; 233 in_out_vrr->adjust.v_total_min = v_total;
@@ -461,6 +461,26 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
461 return false; 461 return false;
462} 462}
463 463
464static void build_vrr_infopacket_header_vtem(enum signal_type signal,
465 struct dc_info_packet *infopacket)
466{
467 // HEADER
468
469	// HB0, HB1, HB2 indicate PacketType VTEMPacket
470 infopacket->hb0 = 0x7F;
471 infopacket->hb1 = 0xC0;
472 infopacket->hb2 = 0x00;
473 /* HB3 Bit Fields
474 * Reserved :1 = 0
475 * Sync :1 = 0
476 * VFR :1 = 1
477 * Ds_Type :2 = 0
478 * End :1 = 0
479 * New :1 = 0
480 */
481 infopacket->hb3 = 0x20;
482}
483
464static void build_vrr_infopacket_header_v1(enum signal_type signal, 484static void build_vrr_infopacket_header_v1(enum signal_type signal,
465 struct dc_info_packet *infopacket, 485 struct dc_info_packet *infopacket,
466 unsigned int *payload_size) 486 unsigned int *payload_size)
@@ -559,6 +579,54 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal,
559 } 579 }
560} 580}
561 581
582static void build_vrr_vtem_infopacket_data(const struct dc_stream_state *stream,
583 const struct mod_vrr_params *vrr,
584 struct dc_info_packet *infopacket)
585{
586 /* dc_info_packet to VtemPacket Translation of Bit-fields,
587 * SB[6]
588 * unsigned char VRR_EN :1
589 * unsigned char M_CONST :1
590 * unsigned char Reserved2 :2
591 * unsigned char FVA_Factor_M1 :4
592 * SB[7]
593 * unsigned char Base_Vfront :8
594 * SB[8]
595 * unsigned char Base_Refresh_Rate_98 :2
596 * unsigned char RB :1
597 * unsigned char Reserved3 :5
598 * SB[9]
599 * unsigned char Base_RefreshRate_07 :8
600 */
601 unsigned int fieldRateInHz;
602
603 if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
604 vrr->state == VRR_STATE_ACTIVE_FIXED){
605 infopacket->sb[6] |= 0x80; //VRR_EN Bit = 1
606 } else {
607 infopacket->sb[6] &= 0x7F; //VRR_EN Bit = 0
608 }
609
610 if (!stream->timing.vic) {
611 infopacket->sb[7] = stream->timing.v_front_porch;
612
613 /* TODO: In dal2, we check mode flags for a reduced blanking timing.
614 * Need a way to relay that information to this function.
615 * if("ReducedBlanking")
616 * {
617 * infopacket->sb[8] |= 0x20; //Set 3rd bit to 1
618 * }
619 */
620 fieldRateInHz = (stream->timing.pix_clk_100hz * 100)/
621 (stream->timing.h_total * stream->timing.v_total);
622
623 infopacket->sb[8] |= ((fieldRateInHz & 0x300) >> 2);
624 infopacket->sb[9] |= fieldRateInHz & 0xFF;
625
626 }
627 infopacket->valid = true;
628}
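Spot-checking the base refresh rate packing with the same illustrative 1080p60 timing:

/*
 * fieldRateInHz = (pix_clk_100hz * 100) / (h_total * v_total)
 *               = 148500000 / (2200 * 1125) = 60
 *
 * sb[8] |= (60 & 0x300) >> 2;	-> 0x00 (Base_Refresh_Rate_98)
 * sb[9] |= 60 & 0xFF;		-> 0x3C (Base_RefreshRate_07)
 */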
629
562static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr, 630static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
563 struct dc_info_packet *infopacket) 631 struct dc_info_packet *infopacket)
564{ 632{
@@ -672,6 +740,19 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
672 infopacket->valid = true; 740 infopacket->valid = true;
673} 741}
674 742
743static void build_vrr_infopacket_vtem(const struct dc_stream_state *stream,
744 const struct mod_vrr_params *vrr,
745 struct dc_info_packet *infopacket)
746{
747 //VTEM info packet for HdmiVrr
748
749 //VTEM Packet is structured differently
750 build_vrr_infopacket_header_vtem(stream->signal, infopacket);
751 build_vrr_vtem_infopacket_data(stream, vrr, infopacket);
752
753 infopacket->valid = true;
754}
755
675void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, 756void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
676 const struct dc_stream_state *stream, 757 const struct dc_stream_state *stream,
677 const struct mod_vrr_params *vrr, 758 const struct mod_vrr_params *vrr,
@@ -679,18 +760,21 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
679 const enum color_transfer_func *app_tf, 760 const enum color_transfer_func *app_tf,
680 struct dc_info_packet *infopacket) 761 struct dc_info_packet *infopacket)
681{ 762{
682 /* SPD info packet for FreeSync */ 763 /* SPD info packet for FreeSync
683 764 * VTEM info packet for HdmiVRR
684 /* Check if Freesync is supported. Return if false. If true, 765 * Check if Freesync is supported. Return if false. If true,
685 * set the corresponding bit in the info packet 766 * set the corresponding bit in the info packet
686 */ 767 */
687 if (!vrr->supported || !vrr->send_vsif) 768 if (!vrr->supported || (!vrr->send_info_frame && packet_type != PACKET_TYPE_VTEM))
688 return; 769 return;
689 770
690 switch (packet_type) { 771 switch (packet_type) {
691 case PACKET_TYPE_FS2: 772 case PACKET_TYPE_FS2:
692 build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket); 773 build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
693 break; 774 break;
775 case PACKET_TYPE_VTEM:
776 build_vrr_infopacket_vtem(stream, vrr, infopacket);
777 break;
694 case PACKET_TYPE_VRR: 778 case PACKET_TYPE_VRR:
695 case PACKET_TYPE_FS1: 779 case PACKET_TYPE_FS1:
696 default: 780 default:
@@ -739,7 +823,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
739 return; 823 return;
740 824
741 in_out_vrr->state = in_config->state; 825 in_out_vrr->state = in_config->state;
742 in_out_vrr->send_vsif = in_config->vsif_supported; 826 in_out_vrr->send_info_frame = in_config->vsif_supported;
743 827
744 if (in_config->state == VRR_STATE_UNSUPPORTED) { 828 if (in_config->state == VRR_STATE_UNSUPPORTED) {
745 in_out_vrr->state = VRR_STATE_UNSUPPORTED; 829 in_out_vrr->state = VRR_STATE_UNSUPPORTED;
@@ -972,7 +1056,7 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
972 unsigned long long nominal_field_rate_in_uhz = 0; 1056 unsigned long long nominal_field_rate_in_uhz = 0;
973 1057
974 /* Calculate nominal field rate for stream */ 1058 /* Calculate nominal field rate for stream */
975 nominal_field_rate_in_uhz = stream->timing.pix_clk_khz; 1059 nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10;
976 nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; 1060 nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
977 nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, 1061 nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
978 stream->timing.h_total); 1062 stream->timing.h_total);
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 949a8b62aa98..4222e403b151 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -104,7 +104,7 @@ struct mod_vrr_params_fixed_refresh {
104 104
105struct mod_vrr_params { 105struct mod_vrr_params {
106 bool supported; 106 bool supported;
107 bool send_vsif; 107 bool send_info_frame;
108 enum mod_vrr_state state; 108 enum mod_vrr_state state;
109 109
110 uint32_t min_refresh_in_uhz; 110 uint32_t min_refresh_in_uhz;
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index 1bd02c0ac30c..b711e7e6c204 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -41,7 +41,8 @@ enum color_transfer_func {
41enum vrr_packet_type { 41enum vrr_packet_type {
42 PACKET_TYPE_VRR, 42 PACKET_TYPE_VRR,
43 PACKET_TYPE_FS1, 43 PACKET_TYPE_FS1,
44 PACKET_TYPE_FS2 44 PACKET_TYPE_FS2,
45 PACKET_TYPE_VTEM
45}; 46};
46 47
47 48
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 00f63b7dd32f..baab6c4ae191 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -41,6 +41,17 @@ static const unsigned char min_reduction_table[13] = {
41static const unsigned char max_reduction_table[13] = { 41static const unsigned char max_reduction_table[13] = {
420xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32}; 420xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32};
43 43
44/* ABM 2.2 Min Reduction effectively disabled (100% for all configs)*/
45static const unsigned char min_reduction_table_v_2_2[13] = {
460xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
47
48/* Possible ABM 2.2 Max Reduction configs from least aggressive to most aggressive
49 * 0 1 2 3 4 5 6 7 8 9 10 11 12
50 * 96.1 89.8 85.1 80.3 69.4 64.7 54.9 45.1 30.2 25.1 19.6 12.5 12.5 %
51 */
52static const unsigned char max_reduction_table_v_2_2[13] = {
530xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0x8c, 0x73, 0x4d, 0x40, 0x32, 0x20, 0x20};
54
44/* Predefined ABM configuration sets. We may have different configuration sets 55/* Predefined ABM configuration sets. We may have different configuration sets
45 * in order to satisfy different power/quality requirements. 56 * in order to satisfy different power/quality requirements.
46 */ 57 */
@@ -56,7 +67,10 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
56#define NUM_AGGR_LEVEL 4 67#define NUM_AGGR_LEVEL 4
57#define NUM_POWER_FN_SEGS 8 68#define NUM_POWER_FN_SEGS 8
58#define NUM_BL_CURVE_SEGS 16 69#define NUM_BL_CURVE_SEGS 16
70#define IRAM_RESERVE_AREA_START 0xF0 // reserved area 0xF0~0xFF is written by DMCU only
71#define IRAM_SIZE 256
59 72
73#pragma pack(push, 1)
60/* NOTE: iRAM is 256B in size */ 74/* NOTE: iRAM is 256B in size */
61struct iram_table_v_2 { 75struct iram_table_v_2 {
62 /* flags */ 76 /* flags */
@@ -85,11 +99,10 @@ struct iram_table_v_2 {
85 99
86 /* For reading PSR State directly from IRAM */ 100 /* For reading PSR State directly from IRAM */
87 uint8_t psr_state; /* 0xf0 */ 101 uint8_t psr_state; /* 0xf0 */
88 uint8_t dmcu_interface_version; /* 0xf1 */ 102 uint8_t dmcu_mcp_interface_version; /* 0xf1 */
89 uint8_t dmcu_date_version_year_b0; /* 0xf2 */ 103 uint8_t dmcu_abm_feature_version; /* 0xf2 */
90 uint8_t dmcu_date_version_year_b1; /* 0xf3 */ 104 uint8_t dmcu_psr_feature_version; /* 0xf3 */
91 uint8_t dmcu_date_version_month; /* 0xf4 */ 105 uint16_t dmcu_version; /* 0xf4 */
92 uint8_t dmcu_date_version_day; /* 0xf5 */
93 uint8_t dmcu_state; /* 0xf6 */ 106 uint8_t dmcu_state; /* 0xf6 */
94 107
95 uint16_t blRampReduction; /* 0xf7 */ 108 uint16_t blRampReduction; /* 0xf7 */
@@ -101,6 +114,50 @@ struct iram_table_v_2 {
101 uint8_t dummy9; /* 0xff */ 114 uint8_t dummy9; /* 0xff */
102}; 115};
103 116
117struct iram_table_v_2_2 {
118 /* flags */
119 uint16_t flags; /* 0x00 U16 */
120
121 /* parameters for ABM2.2 algorithm */
122 uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
123 uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
124 uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
125 uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
126 uint8_t hybridFactor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
127 uint8_t contrastFactor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
128 uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
129 uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
130 uint8_t pad[29]; /* 0x63 U0.8 */
131
132 /* parameters for crgb conversion */
133 uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
134 uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
135 uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
136
137 /* parameters for custom curve */
138 /* thresholds for brightness --> backlight */
139 uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
140 /* offsets for brightness --> backlight */
141 uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
142
143 /* For reading PSR State directly from IRAM */
144 uint8_t psr_state; /* 0xf0 */
145 uint8_t dmcu_mcp_interface_version; /* 0xf1 */
146 uint8_t dmcu_abm_feature_version; /* 0xf2 */
147 uint8_t dmcu_psr_feature_version; /* 0xf3 */
148 uint16_t dmcu_version; /* 0xf4 */
149 uint8_t dmcu_state; /* 0xf6 */
150
151 uint16_t blRampReduction; /* 0xf7 */
152 uint16_t blRampStart; /* 0xf9 */
153 uint8_t dummy5; /* 0xfb */
154 uint8_t dummy6; /* 0xfc */
155 uint8_t dummy7; /* 0xfd */
156 uint8_t dummy8; /* 0xfe */
157 uint8_t dummy9; /* 0xff */
158};
159#pragma pack(pop)
160
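Since every field above is pinned to a fixed IRAM offset, a compile-time size check is a natural companion (a sketch, not in the patch; assumes C11 _Static_assert is available):

_Static_assert(sizeof(struct iram_table_v_2_2) == IRAM_SIZE,
	       "iram_table_v_2_2 must exactly cover the 256-byte IRAM");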
104static uint16_t backlight_8_to_16(unsigned int backlight_8bit) 161static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
105{ 162{
106 return (uint16_t)(backlight_8bit * 0x101); 163 return (uint16_t)(backlight_8bit * 0x101);
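Multiplying by 0x101 replicates the byte into both halves of the 16-bit result, so full scale maps to full scale:

/* 0x00 * 0x101 = 0x0000, 0x80 * 0x101 = 0x8080, 0xFF * 0x101 = 0xFFFF */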
@@ -143,11 +200,367 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
143 } 200 }
144} 201}
145 202
203static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters params,
204 struct iram_table_v_2_2 *table)
205{
206 unsigned int i;
207 unsigned int num_entries = NUM_BL_CURVE_SEGS;
208 unsigned int query_input_8bit;
209 unsigned int query_output_8bit;
210 unsigned int lut_index;
211
212 table->backlight_thresholds[0] = 0;
213 table->backlight_offsets[0] = params.backlight_lut_array[0];
214 table->backlight_thresholds[num_entries-1] = 0xFFFF;
215 table->backlight_offsets[num_entries-1] =
216 params.backlight_lut_array[params.backlight_lut_array_size - 1];
217
218	/* Set up all brightness levels between 0% and 100% exclusive.
219	 * Fills the brightness-to-backlight transform table. The backlight
220	 * custom curve describes the transform from brightness to backlight.
221	 * It is defined as a set of thresholds and a set of offsets which,
222	 * together, imply a piecewise-linear approximation of the custom
223	 * curve over 16 uniformly spanned segments. Each threshold/offset
224	 * is represented by a 16-bit entry in U4.10 format.
225	 */
226 for (i = 1; i+1 < num_entries; i++) {
227 query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
228
229 lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
230 ASSERT(lut_index < params.backlight_lut_array_size);
231 query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
232
233 table->backlight_thresholds[i] =
234 backlight_8_to_16(query_input_8bit);
235 table->backlight_offsets[i] =
236 backlight_8_to_16(query_output_8bit);
237 }
238}
239
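For the interior entries the index math samples the LUT evenly; with NUM_BL_CURVE_SEGS = 16 and a 256-entry LUT (illustrative size):

/* i = 1..14:
 *   query_input_8bit = DIV_ROUNDUP(i * 256, 16) = 16 * i
 *   lut_index        = 255 * i / 15             = 17 * i
 * so entry i keys input 16*i to LUT[17*i], both widened to
 * 16 bits via backlight_8_to_16().
 */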
240void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params)
241{
242 unsigned int set = params.set;
243
244 ram_table->flags = 0x0;
245 ram_table->deviation_gain = 0xb3;
246
247 ram_table->blRampReduction =
248 cpu_to_be16(params.backlight_ramping_reduction);
249 ram_table->blRampStart =
250 cpu_to_be16(params.backlight_ramping_start);
251
252 ram_table->min_reduction[0][0] = min_reduction_table[abm_config[set][0]];
253 ram_table->min_reduction[1][0] = min_reduction_table[abm_config[set][0]];
254 ram_table->min_reduction[2][0] = min_reduction_table[abm_config[set][0]];
255 ram_table->min_reduction[3][0] = min_reduction_table[abm_config[set][0]];
256 ram_table->min_reduction[4][0] = min_reduction_table[abm_config[set][0]];
257 ram_table->max_reduction[0][0] = max_reduction_table[abm_config[set][0]];
258 ram_table->max_reduction[1][0] = max_reduction_table[abm_config[set][0]];
259 ram_table->max_reduction[2][0] = max_reduction_table[abm_config[set][0]];
260 ram_table->max_reduction[3][0] = max_reduction_table[abm_config[set][0]];
261 ram_table->max_reduction[4][0] = max_reduction_table[abm_config[set][0]];
262
263 ram_table->min_reduction[0][1] = min_reduction_table[abm_config[set][1]];
264 ram_table->min_reduction[1][1] = min_reduction_table[abm_config[set][1]];
265 ram_table->min_reduction[2][1] = min_reduction_table[abm_config[set][1]];
266 ram_table->min_reduction[3][1] = min_reduction_table[abm_config[set][1]];
267 ram_table->min_reduction[4][1] = min_reduction_table[abm_config[set][1]];
268 ram_table->max_reduction[0][1] = max_reduction_table[abm_config[set][1]];
269 ram_table->max_reduction[1][1] = max_reduction_table[abm_config[set][1]];
270 ram_table->max_reduction[2][1] = max_reduction_table[abm_config[set][1]];
271 ram_table->max_reduction[3][1] = max_reduction_table[abm_config[set][1]];
272 ram_table->max_reduction[4][1] = max_reduction_table[abm_config[set][1]];
273
274 ram_table->min_reduction[0][2] = min_reduction_table[abm_config[set][2]];
275 ram_table->min_reduction[1][2] = min_reduction_table[abm_config[set][2]];
276 ram_table->min_reduction[2][2] = min_reduction_table[abm_config[set][2]];
277 ram_table->min_reduction[3][2] = min_reduction_table[abm_config[set][2]];
278 ram_table->min_reduction[4][2] = min_reduction_table[abm_config[set][2]];
279 ram_table->max_reduction[0][2] = max_reduction_table[abm_config[set][2]];
280 ram_table->max_reduction[1][2] = max_reduction_table[abm_config[set][2]];
281 ram_table->max_reduction[2][2] = max_reduction_table[abm_config[set][2]];
282 ram_table->max_reduction[3][2] = max_reduction_table[abm_config[set][2]];
283 ram_table->max_reduction[4][2] = max_reduction_table[abm_config[set][2]];
284
285 ram_table->min_reduction[0][3] = min_reduction_table[abm_config[set][3]];
286 ram_table->min_reduction[1][3] = min_reduction_table[abm_config[set][3]];
287 ram_table->min_reduction[2][3] = min_reduction_table[abm_config[set][3]];
288 ram_table->min_reduction[3][3] = min_reduction_table[abm_config[set][3]];
289 ram_table->min_reduction[4][3] = min_reduction_table[abm_config[set][3]];
290 ram_table->max_reduction[0][3] = max_reduction_table[abm_config[set][3]];
291 ram_table->max_reduction[1][3] = max_reduction_table[abm_config[set][3]];
292 ram_table->max_reduction[2][3] = max_reduction_table[abm_config[set][3]];
293 ram_table->max_reduction[3][3] = max_reduction_table[abm_config[set][3]];
294 ram_table->max_reduction[4][3] = max_reduction_table[abm_config[set][3]];
295
296 ram_table->bright_pos_gain[0][0] = 0x20;
297 ram_table->bright_pos_gain[0][1] = 0x20;
298 ram_table->bright_pos_gain[0][2] = 0x20;
299 ram_table->bright_pos_gain[0][3] = 0x20;
300 ram_table->bright_pos_gain[1][0] = 0x20;
301 ram_table->bright_pos_gain[1][1] = 0x20;
302 ram_table->bright_pos_gain[1][2] = 0x20;
303 ram_table->bright_pos_gain[1][3] = 0x20;
304 ram_table->bright_pos_gain[2][0] = 0x20;
305 ram_table->bright_pos_gain[2][1] = 0x20;
306 ram_table->bright_pos_gain[2][2] = 0x20;
307 ram_table->bright_pos_gain[2][3] = 0x20;
308 ram_table->bright_pos_gain[3][0] = 0x20;
309 ram_table->bright_pos_gain[3][1] = 0x20;
310 ram_table->bright_pos_gain[3][2] = 0x20;
311 ram_table->bright_pos_gain[3][3] = 0x20;
312 ram_table->bright_pos_gain[4][0] = 0x20;
313 ram_table->bright_pos_gain[4][1] = 0x20;
314 ram_table->bright_pos_gain[4][2] = 0x20;
315 ram_table->bright_pos_gain[4][3] = 0x20;
316 ram_table->bright_neg_gain[0][1] = 0x00;
317 ram_table->bright_neg_gain[0][2] = 0x00;
318 ram_table->bright_neg_gain[0][3] = 0x00;
319 ram_table->bright_neg_gain[1][0] = 0x00;
320 ram_table->bright_neg_gain[1][1] = 0x00;
321 ram_table->bright_neg_gain[1][2] = 0x00;
322 ram_table->bright_neg_gain[1][3] = 0x00;
323 ram_table->bright_neg_gain[2][0] = 0x00;
324 ram_table->bright_neg_gain[2][1] = 0x00;
325 ram_table->bright_neg_gain[2][2] = 0x00;
326 ram_table->bright_neg_gain[2][3] = 0x00;
327 ram_table->bright_neg_gain[3][0] = 0x00;
328 ram_table->bright_neg_gain[3][1] = 0x00;
329 ram_table->bright_neg_gain[3][2] = 0x00;
330 ram_table->bright_neg_gain[3][3] = 0x00;
331 ram_table->bright_neg_gain[4][0] = 0x00;
332 ram_table->bright_neg_gain[4][1] = 0x00;
333 ram_table->bright_neg_gain[4][2] = 0x00;
334 ram_table->bright_neg_gain[4][3] = 0x00;
335 ram_table->dark_pos_gain[0][0] = 0x00;
336 ram_table->dark_pos_gain[0][1] = 0x00;
337 ram_table->dark_pos_gain[0][2] = 0x00;
338 ram_table->dark_pos_gain[0][3] = 0x00;
339 ram_table->dark_pos_gain[1][0] = 0x00;
340 ram_table->dark_pos_gain[1][1] = 0x00;
341 ram_table->dark_pos_gain[1][2] = 0x00;
342 ram_table->dark_pos_gain[1][3] = 0x00;
343 ram_table->dark_pos_gain[2][0] = 0x00;
344 ram_table->dark_pos_gain[2][1] = 0x00;
345 ram_table->dark_pos_gain[2][2] = 0x00;
346 ram_table->dark_pos_gain[2][3] = 0x00;
347 ram_table->dark_pos_gain[3][0] = 0x00;
348 ram_table->dark_pos_gain[3][1] = 0x00;
349 ram_table->dark_pos_gain[3][2] = 0x00;
350 ram_table->dark_pos_gain[3][3] = 0x00;
351 ram_table->dark_pos_gain[4][0] = 0x00;
352 ram_table->dark_pos_gain[4][1] = 0x00;
353 ram_table->dark_pos_gain[4][2] = 0x00;
354 ram_table->dark_pos_gain[4][3] = 0x00;
355 ram_table->dark_neg_gain[0][0] = 0x00;
356 ram_table->dark_neg_gain[0][1] = 0x00;
357 ram_table->dark_neg_gain[0][2] = 0x00;
358 ram_table->dark_neg_gain[0][3] = 0x00;
359 ram_table->dark_neg_gain[1][0] = 0x00;
360 ram_table->dark_neg_gain[1][1] = 0x00;
361 ram_table->dark_neg_gain[1][2] = 0x00;
362 ram_table->dark_neg_gain[1][3] = 0x00;
363 ram_table->dark_neg_gain[2][0] = 0x00;
364 ram_table->dark_neg_gain[2][1] = 0x00;
365 ram_table->dark_neg_gain[2][2] = 0x00;
366 ram_table->dark_neg_gain[2][3] = 0x00;
367 ram_table->dark_neg_gain[3][0] = 0x00;
368 ram_table->dark_neg_gain[3][1] = 0x00;
369 ram_table->dark_neg_gain[3][2] = 0x00;
370 ram_table->dark_neg_gain[3][3] = 0x00;
371 ram_table->dark_neg_gain[4][0] = 0x00;
372 ram_table->dark_neg_gain[4][1] = 0x00;
373 ram_table->dark_neg_gain[4][2] = 0x00;
374 ram_table->dark_neg_gain[4][3] = 0x00;
375
376 ram_table->iir_curve[0] = 0x65;
377 ram_table->iir_curve[1] = 0x65;
378 ram_table->iir_curve[2] = 0x65;
379 ram_table->iir_curve[3] = 0x65;
380 ram_table->iir_curve[4] = 0x65;
381
382 //Gamma 2.4
383 ram_table->crgb_thresh[0] = cpu_to_be16(0x13b6);
384 ram_table->crgb_thresh[1] = cpu_to_be16(0x1648);
385 ram_table->crgb_thresh[2] = cpu_to_be16(0x18e3);
386 ram_table->crgb_thresh[3] = cpu_to_be16(0x1b41);
387 ram_table->crgb_thresh[4] = cpu_to_be16(0x1d46);
388 ram_table->crgb_thresh[5] = cpu_to_be16(0x1f21);
389 ram_table->crgb_thresh[6] = cpu_to_be16(0x2167);
390 ram_table->crgb_thresh[7] = cpu_to_be16(0x2384);
391 ram_table->crgb_offset[0] = cpu_to_be16(0x2999);
392 ram_table->crgb_offset[1] = cpu_to_be16(0x3999);
393 ram_table->crgb_offset[2] = cpu_to_be16(0x4666);
394 ram_table->crgb_offset[3] = cpu_to_be16(0x5999);
395 ram_table->crgb_offset[4] = cpu_to_be16(0x6333);
396 ram_table->crgb_offset[5] = cpu_to_be16(0x7800);
397 ram_table->crgb_offset[6] = cpu_to_be16(0x8c00);
398 ram_table->crgb_offset[7] = cpu_to_be16(0xa000);
399 ram_table->crgb_slope[0] = cpu_to_be16(0x3147);
400 ram_table->crgb_slope[1] = cpu_to_be16(0x2978);
401 ram_table->crgb_slope[2] = cpu_to_be16(0x23a2);
402 ram_table->crgb_slope[3] = cpu_to_be16(0x1f55);
403 ram_table->crgb_slope[4] = cpu_to_be16(0x1c63);
404 ram_table->crgb_slope[5] = cpu_to_be16(0x1a0f);
405 ram_table->crgb_slope[6] = cpu_to_be16(0x178d);
406 ram_table->crgb_slope[7] = cpu_to_be16(0x15ab);
407
408 fill_backlight_transform_table(
409 params, ram_table);
410}
411
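[Editor's note] The unrolled min/max reduction assignments above all follow one pattern: every ambient-light level (first index) within a given ABM level (second index) receives the value selected by abm_config[set]. A minimal loop-based sketch of the same fill, assuming the 5x4 array dimensions of iram_table_v_2:

	unsigned int i, j;

	for (i = 0; i < 5; i++) {          /* ambient light level */
		for (j = 0; j < 4; j++) {  /* ABM configuration level */
			ram_table->min_reduction[i][j] =
				min_reduction_table[abm_config[set][j]];
			ram_table->max_reduction[i][j] =
				max_reduction_table[abm_config[set][j]];
		}
	}
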
412void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
413{
414 unsigned int set = params.set;
415
416 ram_table->flags = 0x0;
417
418 ram_table->deviation_gain[0] = 0xb3;
419 ram_table->deviation_gain[1] = 0xb3;
420 ram_table->deviation_gain[2] = 0xb3;
421 ram_table->deviation_gain[3] = 0xb3;
422
423 ram_table->blRampReduction =
424 cpu_to_be16(params.backlight_ramping_reduction);
425 ram_table->blRampStart =
426 cpu_to_be16(params.backlight_ramping_start);
427
428 ram_table->min_reduction[0][0] = min_reduction_table_v_2_2[abm_config[set][0]];
429 ram_table->min_reduction[1][0] = min_reduction_table_v_2_2[abm_config[set][0]];
430 ram_table->min_reduction[2][0] = min_reduction_table_v_2_2[abm_config[set][0]];
431 ram_table->min_reduction[3][0] = min_reduction_table_v_2_2[abm_config[set][0]];
432 ram_table->min_reduction[4][0] = min_reduction_table_v_2_2[abm_config[set][0]];
433 ram_table->max_reduction[0][0] = max_reduction_table_v_2_2[abm_config[set][0]];
434 ram_table->max_reduction[1][0] = max_reduction_table_v_2_2[abm_config[set][0]];
435 ram_table->max_reduction[2][0] = max_reduction_table_v_2_2[abm_config[set][0]];
436 ram_table->max_reduction[3][0] = max_reduction_table_v_2_2[abm_config[set][0]];
437 ram_table->max_reduction[4][0] = max_reduction_table_v_2_2[abm_config[set][0]];
438
439 ram_table->min_reduction[0][1] = min_reduction_table_v_2_2[abm_config[set][1]];
440 ram_table->min_reduction[1][1] = min_reduction_table_v_2_2[abm_config[set][1]];
441 ram_table->min_reduction[2][1] = min_reduction_table_v_2_2[abm_config[set][1]];
442 ram_table->min_reduction[3][1] = min_reduction_table_v_2_2[abm_config[set][1]];
443 ram_table->min_reduction[4][1] = min_reduction_table_v_2_2[abm_config[set][1]];
444 ram_table->max_reduction[0][1] = max_reduction_table_v_2_2[abm_config[set][1]];
445 ram_table->max_reduction[1][1] = max_reduction_table_v_2_2[abm_config[set][1]];
446 ram_table->max_reduction[2][1] = max_reduction_table_v_2_2[abm_config[set][1]];
447 ram_table->max_reduction[3][1] = max_reduction_table_v_2_2[abm_config[set][1]];
448 ram_table->max_reduction[4][1] = max_reduction_table_v_2_2[abm_config[set][1]];
449
450 ram_table->min_reduction[0][2] = min_reduction_table_v_2_2[abm_config[set][2]];
451 ram_table->min_reduction[1][2] = min_reduction_table_v_2_2[abm_config[set][2]];
452 ram_table->min_reduction[2][2] = min_reduction_table_v_2_2[abm_config[set][2]];
453 ram_table->min_reduction[3][2] = min_reduction_table_v_2_2[abm_config[set][2]];
454 ram_table->min_reduction[4][2] = min_reduction_table_v_2_2[abm_config[set][2]];
455 ram_table->max_reduction[0][2] = max_reduction_table_v_2_2[abm_config[set][2]];
456 ram_table->max_reduction[1][2] = max_reduction_table_v_2_2[abm_config[set][2]];
457 ram_table->max_reduction[2][2] = max_reduction_table_v_2_2[abm_config[set][2]];
458 ram_table->max_reduction[3][2] = max_reduction_table_v_2_2[abm_config[set][2]];
459 ram_table->max_reduction[4][2] = max_reduction_table_v_2_2[abm_config[set][2]];
460
461 ram_table->min_reduction[0][3] = min_reduction_table_v_2_2[abm_config[set][3]];
462 ram_table->min_reduction[1][3] = min_reduction_table_v_2_2[abm_config[set][3]];
463 ram_table->min_reduction[2][3] = min_reduction_table_v_2_2[abm_config[set][3]];
464 ram_table->min_reduction[3][3] = min_reduction_table_v_2_2[abm_config[set][3]];
465 ram_table->min_reduction[4][3] = min_reduction_table_v_2_2[abm_config[set][3]];
466 ram_table->max_reduction[0][3] = max_reduction_table_v_2_2[abm_config[set][3]];
467 ram_table->max_reduction[1][3] = max_reduction_table_v_2_2[abm_config[set][3]];
468 ram_table->max_reduction[2][3] = max_reduction_table_v_2_2[abm_config[set][3]];
469 ram_table->max_reduction[3][3] = max_reduction_table_v_2_2[abm_config[set][3]];
470 ram_table->max_reduction[4][3] = max_reduction_table_v_2_2[abm_config[set][3]];
471
472 ram_table->bright_pos_gain[0][0] = 0x20;
473 ram_table->bright_pos_gain[0][1] = 0x20;
474 ram_table->bright_pos_gain[0][2] = 0x20;
475 ram_table->bright_pos_gain[0][3] = 0x20;
476 ram_table->bright_pos_gain[1][0] = 0x20;
477 ram_table->bright_pos_gain[1][1] = 0x20;
478 ram_table->bright_pos_gain[1][2] = 0x20;
479 ram_table->bright_pos_gain[1][3] = 0x20;
480 ram_table->bright_pos_gain[2][0] = 0x20;
481 ram_table->bright_pos_gain[2][1] = 0x20;
482 ram_table->bright_pos_gain[2][2] = 0x20;
483 ram_table->bright_pos_gain[2][3] = 0x20;
484 ram_table->bright_pos_gain[3][0] = 0x20;
485 ram_table->bright_pos_gain[3][1] = 0x20;
486 ram_table->bright_pos_gain[3][2] = 0x20;
487 ram_table->bright_pos_gain[3][3] = 0x20;
488 ram_table->bright_pos_gain[4][0] = 0x20;
489 ram_table->bright_pos_gain[4][1] = 0x20;
490 ram_table->bright_pos_gain[4][2] = 0x20;
491 ram_table->bright_pos_gain[4][3] = 0x20;
492
493 ram_table->dark_pos_gain[0][0] = 0x00;
494 ram_table->dark_pos_gain[0][1] = 0x00;
495 ram_table->dark_pos_gain[0][2] = 0x00;
496 ram_table->dark_pos_gain[0][3] = 0x00;
497 ram_table->dark_pos_gain[1][0] = 0x00;
498 ram_table->dark_pos_gain[1][1] = 0x00;
499 ram_table->dark_pos_gain[1][2] = 0x00;
500 ram_table->dark_pos_gain[1][3] = 0x00;
501 ram_table->dark_pos_gain[2][0] = 0x00;
502 ram_table->dark_pos_gain[2][1] = 0x00;
503 ram_table->dark_pos_gain[2][2] = 0x00;
504 ram_table->dark_pos_gain[2][3] = 0x00;
505 ram_table->dark_pos_gain[3][0] = 0x00;
506 ram_table->dark_pos_gain[3][1] = 0x00;
507 ram_table->dark_pos_gain[3][2] = 0x00;
508 ram_table->dark_pos_gain[3][3] = 0x00;
509 ram_table->dark_pos_gain[4][0] = 0x00;
510 ram_table->dark_pos_gain[4][1] = 0x00;
511 ram_table->dark_pos_gain[4][2] = 0x00;
512 ram_table->dark_pos_gain[4][3] = 0x00;
513
514 ram_table->hybridFactor[0] = 0xff;
515 ram_table->hybridFactor[1] = 0xff;
516 ram_table->hybridFactor[2] = 0xff;
517 ram_table->hybridFactor[3] = 0xc0;
518
519 ram_table->contrastFactor[0] = 0x99;
520 ram_table->contrastFactor[1] = 0x99;
521 ram_table->contrastFactor[2] = 0x99;
522 ram_table->contrastFactor[3] = 0x80;
523
524 ram_table->iir_curve[0] = 0x65;
525 ram_table->iir_curve[1] = 0x65;
526 ram_table->iir_curve[2] = 0x65;
527 ram_table->iir_curve[3] = 0x65;
528 ram_table->iir_curve[4] = 0x65;
529
530 //Gamma 2.2
531 ram_table->crgb_thresh[0] = cpu_to_be16(0x127c);
532 ram_table->crgb_thresh[1] = cpu_to_be16(0x151b);
533 ram_table->crgb_thresh[2] = cpu_to_be16(0x17d5);
534 ram_table->crgb_thresh[3] = cpu_to_be16(0x1a56);
535 ram_table->crgb_thresh[4] = cpu_to_be16(0x1c83);
536 ram_table->crgb_thresh[5] = cpu_to_be16(0x1e72);
537 ram_table->crgb_thresh[6] = cpu_to_be16(0x20f0);
538 ram_table->crgb_thresh[7] = cpu_to_be16(0x232b);
539 ram_table->crgb_offset[0] = cpu_to_be16(0x2999);
540 ram_table->crgb_offset[1] = cpu_to_be16(0x3999);
541 ram_table->crgb_offset[2] = cpu_to_be16(0x4666);
542 ram_table->crgb_offset[3] = cpu_to_be16(0x5999);
543 ram_table->crgb_offset[4] = cpu_to_be16(0x6333);
544 ram_table->crgb_offset[5] = cpu_to_be16(0x7800);
545 ram_table->crgb_offset[6] = cpu_to_be16(0x8c00);
546 ram_table->crgb_offset[7] = cpu_to_be16(0xa000);
547 ram_table->crgb_slope[0] = cpu_to_be16(0x3609);
548 ram_table->crgb_slope[1] = cpu_to_be16(0x2dfa);
549 ram_table->crgb_slope[2] = cpu_to_be16(0x27ea);
550 ram_table->crgb_slope[3] = cpu_to_be16(0x235d);
551 ram_table->crgb_slope[4] = cpu_to_be16(0x2042);
552 ram_table->crgb_slope[5] = cpu_to_be16(0x1dc3);
553 ram_table->crgb_slope[6] = cpu_to_be16(0x1b1a);
554 ram_table->crgb_slope[7] = cpu_to_be16(0x1910);
555
556 fill_backlight_transform_table_v_2_2(
557 params, ram_table);
558}
559
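[Editor's note] In both fill functions, every 16-bit field (blRampReduction, blRampStart, crgb_thresh, crgb_offset, crgb_slope) is wrapped in cpu_to_be16(): the DMCU microcontroller reads the IRAM image big-endian regardless of host byte order. A one-line illustration, assuming a little-endian host such as x86:

	__be16 v = cpu_to_be16(0x13b6);	/* stored in memory as 0x13, 0xb6;   */
					/* a plain u16 store would be 0xb6, 0x13 */
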
146bool dmcu_load_iram(struct dmcu *dmcu, 560bool dmcu_load_iram(struct dmcu *dmcu,
147 struct dmcu_iram_parameters params) 561 struct dmcu_iram_parameters params)
148{ 562{
149 struct iram_table_v_2 ram_table; 563 unsigned char ram_table[IRAM_SIZE];
150 unsigned int set = params.set;
151 564
152 if (dmcu == NULL) 565 if (dmcu == NULL)
153 return false; 566 return false;
@@ -157,170 +570,12 @@ bool dmcu_load_iram(struct dmcu *dmcu,
157 570
158 memset(&ram_table, 0, sizeof(ram_table)); 571 memset(&ram_table, 0, sizeof(ram_table));
159 572
160 ram_table.flags = 0x0; 573 if (dmcu->dmcu_version.abm_version == 0x22) {
161 ram_table.deviation_gain = 0xb3; 574 fill_iram_v_2_2((struct iram_table_v_2_2 *)ram_table, params);
162 575 } else {
163 ram_table.blRampReduction = 576 fill_iram_v_2((struct iram_table_v_2 *)ram_table, params);
164 cpu_to_be16(params.backlight_ramping_reduction); 577 }
165 ram_table.blRampStart =
166 cpu_to_be16(params.backlight_ramping_start);
167
168 ram_table.min_reduction[0][0] = min_reduction_table[abm_config[set][0]];
169 ram_table.min_reduction[1][0] = min_reduction_table[abm_config[set][0]];
170 ram_table.min_reduction[2][0] = min_reduction_table[abm_config[set][0]];
171 ram_table.min_reduction[3][0] = min_reduction_table[abm_config[set][0]];
172 ram_table.min_reduction[4][0] = min_reduction_table[abm_config[set][0]];
173 ram_table.max_reduction[0][0] = max_reduction_table[abm_config[set][0]];
174 ram_table.max_reduction[1][0] = max_reduction_table[abm_config[set][0]];
175 ram_table.max_reduction[2][0] = max_reduction_table[abm_config[set][0]];
176 ram_table.max_reduction[3][0] = max_reduction_table[abm_config[set][0]];
177 ram_table.max_reduction[4][0] = max_reduction_table[abm_config[set][0]];
178
179 ram_table.min_reduction[0][1] = min_reduction_table[abm_config[set][1]];
180 ram_table.min_reduction[1][1] = min_reduction_table[abm_config[set][1]];
181 ram_table.min_reduction[2][1] = min_reduction_table[abm_config[set][1]];
182 ram_table.min_reduction[3][1] = min_reduction_table[abm_config[set][1]];
183 ram_table.min_reduction[4][1] = min_reduction_table[abm_config[set][1]];
184 ram_table.max_reduction[0][1] = max_reduction_table[abm_config[set][1]];
185 ram_table.max_reduction[1][1] = max_reduction_table[abm_config[set][1]];
186 ram_table.max_reduction[2][1] = max_reduction_table[abm_config[set][1]];
187 ram_table.max_reduction[3][1] = max_reduction_table[abm_config[set][1]];
188 ram_table.max_reduction[4][1] = max_reduction_table[abm_config[set][1]];
189
190 ram_table.min_reduction[0][2] = min_reduction_table[abm_config[set][2]];
191 ram_table.min_reduction[1][2] = min_reduction_table[abm_config[set][2]];
192 ram_table.min_reduction[2][2] = min_reduction_table[abm_config[set][2]];
193 ram_table.min_reduction[3][2] = min_reduction_table[abm_config[set][2]];
194 ram_table.min_reduction[4][2] = min_reduction_table[abm_config[set][2]];
195 ram_table.max_reduction[0][2] = max_reduction_table[abm_config[set][2]];
196 ram_table.max_reduction[1][2] = max_reduction_table[abm_config[set][2]];
197 ram_table.max_reduction[2][2] = max_reduction_table[abm_config[set][2]];
198 ram_table.max_reduction[3][2] = max_reduction_table[abm_config[set][2]];
199 ram_table.max_reduction[4][2] = max_reduction_table[abm_config[set][2]];
200
201 ram_table.min_reduction[0][3] = min_reduction_table[abm_config[set][3]];
202 ram_table.min_reduction[1][3] = min_reduction_table[abm_config[set][3]];
203 ram_table.min_reduction[2][3] = min_reduction_table[abm_config[set][3]];
204 ram_table.min_reduction[3][3] = min_reduction_table[abm_config[set][3]];
205 ram_table.min_reduction[4][3] = min_reduction_table[abm_config[set][3]];
206 ram_table.max_reduction[0][3] = max_reduction_table[abm_config[set][3]];
207 ram_table.max_reduction[1][3] = max_reduction_table[abm_config[set][3]];
208 ram_table.max_reduction[2][3] = max_reduction_table[abm_config[set][3]];
209 ram_table.max_reduction[3][3] = max_reduction_table[abm_config[set][3]];
210 ram_table.max_reduction[4][3] = max_reduction_table[abm_config[set][3]];
211
212 ram_table.bright_pos_gain[0][0] = 0x20;
213 ram_table.bright_pos_gain[0][1] = 0x20;
214 ram_table.bright_pos_gain[0][2] = 0x20;
215 ram_table.bright_pos_gain[0][3] = 0x20;
216 ram_table.bright_pos_gain[1][0] = 0x20;
217 ram_table.bright_pos_gain[1][1] = 0x20;
218 ram_table.bright_pos_gain[1][2] = 0x20;
219 ram_table.bright_pos_gain[1][3] = 0x20;
220 ram_table.bright_pos_gain[2][0] = 0x20;
221 ram_table.bright_pos_gain[2][1] = 0x20;
222 ram_table.bright_pos_gain[2][2] = 0x20;
223 ram_table.bright_pos_gain[2][3] = 0x20;
224 ram_table.bright_pos_gain[3][0] = 0x20;
225 ram_table.bright_pos_gain[3][1] = 0x20;
226 ram_table.bright_pos_gain[3][2] = 0x20;
227 ram_table.bright_pos_gain[3][3] = 0x20;
228 ram_table.bright_pos_gain[4][0] = 0x20;
229 ram_table.bright_pos_gain[4][1] = 0x20;
230 ram_table.bright_pos_gain[4][2] = 0x20;
231 ram_table.bright_pos_gain[4][3] = 0x20;
232 ram_table.bright_neg_gain[0][1] = 0x00;
233 ram_table.bright_neg_gain[0][2] = 0x00;
234 ram_table.bright_neg_gain[0][3] = 0x00;
235 ram_table.bright_neg_gain[1][0] = 0x00;
236 ram_table.bright_neg_gain[1][1] = 0x00;
237 ram_table.bright_neg_gain[1][2] = 0x00;
238 ram_table.bright_neg_gain[1][3] = 0x00;
239 ram_table.bright_neg_gain[2][0] = 0x00;
240 ram_table.bright_neg_gain[2][1] = 0x00;
241 ram_table.bright_neg_gain[2][2] = 0x00;
242 ram_table.bright_neg_gain[2][3] = 0x00;
243 ram_table.bright_neg_gain[3][0] = 0x00;
244 ram_table.bright_neg_gain[3][1] = 0x00;
245 ram_table.bright_neg_gain[3][2] = 0x00;
246 ram_table.bright_neg_gain[3][3] = 0x00;
247 ram_table.bright_neg_gain[4][0] = 0x00;
248 ram_table.bright_neg_gain[4][1] = 0x00;
249 ram_table.bright_neg_gain[4][2] = 0x00;
250 ram_table.bright_neg_gain[4][3] = 0x00;
251 ram_table.dark_pos_gain[0][0] = 0x00;
252 ram_table.dark_pos_gain[0][1] = 0x00;
253 ram_table.dark_pos_gain[0][2] = 0x00;
254 ram_table.dark_pos_gain[0][3] = 0x00;
255 ram_table.dark_pos_gain[1][0] = 0x00;
256 ram_table.dark_pos_gain[1][1] = 0x00;
257 ram_table.dark_pos_gain[1][2] = 0x00;
258 ram_table.dark_pos_gain[1][3] = 0x00;
259 ram_table.dark_pos_gain[2][0] = 0x00;
260 ram_table.dark_pos_gain[2][1] = 0x00;
261 ram_table.dark_pos_gain[2][2] = 0x00;
262 ram_table.dark_pos_gain[2][3] = 0x00;
263 ram_table.dark_pos_gain[3][0] = 0x00;
264 ram_table.dark_pos_gain[3][1] = 0x00;
265 ram_table.dark_pos_gain[3][2] = 0x00;
266 ram_table.dark_pos_gain[3][3] = 0x00;
267 ram_table.dark_pos_gain[4][0] = 0x00;
268 ram_table.dark_pos_gain[4][1] = 0x00;
269 ram_table.dark_pos_gain[4][2] = 0x00;
270 ram_table.dark_pos_gain[4][3] = 0x00;
271 ram_table.dark_neg_gain[0][0] = 0x00;
272 ram_table.dark_neg_gain[0][1] = 0x00;
273 ram_table.dark_neg_gain[0][2] = 0x00;
274 ram_table.dark_neg_gain[0][3] = 0x00;
275 ram_table.dark_neg_gain[1][0] = 0x00;
276 ram_table.dark_neg_gain[1][1] = 0x00;
277 ram_table.dark_neg_gain[1][2] = 0x00;
278 ram_table.dark_neg_gain[1][3] = 0x00;
279 ram_table.dark_neg_gain[2][0] = 0x00;
280 ram_table.dark_neg_gain[2][1] = 0x00;
281 ram_table.dark_neg_gain[2][2] = 0x00;
282 ram_table.dark_neg_gain[2][3] = 0x00;
283 ram_table.dark_neg_gain[3][0] = 0x00;
284 ram_table.dark_neg_gain[3][1] = 0x00;
285 ram_table.dark_neg_gain[3][2] = 0x00;
286 ram_table.dark_neg_gain[3][3] = 0x00;
287 ram_table.dark_neg_gain[4][0] = 0x00;
288 ram_table.dark_neg_gain[4][1] = 0x00;
289 ram_table.dark_neg_gain[4][2] = 0x00;
290 ram_table.dark_neg_gain[4][3] = 0x00;
291 ram_table.iir_curve[0] = 0x65;
292 ram_table.iir_curve[1] = 0x65;
293 ram_table.iir_curve[2] = 0x65;
294 ram_table.iir_curve[3] = 0x65;
295 ram_table.iir_curve[4] = 0x65;
296 ram_table.crgb_thresh[0] = cpu_to_be16(0x13b6);
297 ram_table.crgb_thresh[1] = cpu_to_be16(0x1648);
298 ram_table.crgb_thresh[2] = cpu_to_be16(0x18e3);
299 ram_table.crgb_thresh[3] = cpu_to_be16(0x1b41);
300 ram_table.crgb_thresh[4] = cpu_to_be16(0x1d46);
301 ram_table.crgb_thresh[5] = cpu_to_be16(0x1f21);
302 ram_table.crgb_thresh[6] = cpu_to_be16(0x2167);
303 ram_table.crgb_thresh[7] = cpu_to_be16(0x2384);
304 ram_table.crgb_offset[0] = cpu_to_be16(0x2999);
305 ram_table.crgb_offset[1] = cpu_to_be16(0x3999);
306 ram_table.crgb_offset[2] = cpu_to_be16(0x4666);
307 ram_table.crgb_offset[3] = cpu_to_be16(0x5999);
308 ram_table.crgb_offset[4] = cpu_to_be16(0x6333);
309 ram_table.crgb_offset[5] = cpu_to_be16(0x7800);
310 ram_table.crgb_offset[6] = cpu_to_be16(0x8c00);
311 ram_table.crgb_offset[7] = cpu_to_be16(0xa000);
312 ram_table.crgb_slope[0] = cpu_to_be16(0x3147);
313 ram_table.crgb_slope[1] = cpu_to_be16(0x2978);
314 ram_table.crgb_slope[2] = cpu_to_be16(0x23a2);
315 ram_table.crgb_slope[3] = cpu_to_be16(0x1f55);
316 ram_table.crgb_slope[4] = cpu_to_be16(0x1c63);
317 ram_table.crgb_slope[5] = cpu_to_be16(0x1a0f);
318 ram_table.crgb_slope[6] = cpu_to_be16(0x178d);
319 ram_table.crgb_slope[7] = cpu_to_be16(0x15ab);
320
321 fill_backlight_transform_table(
322 params, &ram_table);
323 578
324 return dmcu->funcs->load_iram( 579 return dmcu->funcs->load_iram(
325 dmcu, 0, (char *)(&ram_table), sizeof(ram_table)); 580 dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START);
326} 581}
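[Editor's note] The rewritten dmcu_load_iram() now stages the image in a raw IRAM_SIZE byte buffer and dispatches on dmcu_version.abm_version, so one entry point serves both table layouts, and only the region below IRAM_RESERVE_AREA_START is uploaded. A sketch of a caller, assuming a dmcu already initialized elsewhere in DC:

	struct dmcu_iram_parameters params = {
		.set = 0,	/* index into abm_config */
		.backlight_ramping_reduction = 0,
		.backlight_ramping_start = 0,
	};

	if (!dmcu_load_iram(dmcu, params))
		DRM_ERROR("DMCU IRAM load failed\n");
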
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h
index 13d4de645190..d8e0dd192fdd 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h
@@ -2247,6 +2247,8 @@
2247 2247
2248// addressBlock: nbio_nbif_rcc_strap_BIFDEC1[13440..14975] 2248// addressBlock: nbio_nbif_rcc_strap_BIFDEC1[13440..14975]
2249// base address: 0x3480 2249// base address: 0x3480
2250#define mmRCC_BIF_STRAP0 0x0000
2251#define mmRCC_BIF_STRAP0_BASE_IDX 2
2250#define mmRCC_DEV0_EPF0_STRAP0 0x000f 2252#define mmRCC_DEV0_EPF0_STRAP0 0x000f
2251#define mmRCC_DEV0_EPF0_STRAP0_BASE_IDX 2 2253#define mmRCC_DEV0_EPF0_STRAP0_BASE_IDX 2
2252 2254
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h
index a02b67943372..29af5167cd00 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h
@@ -16838,6 +16838,10 @@
16838 16838
16839 16839
16840// addressBlock: nbio_nbif_rcc_strap_BIFDEC1[13440..14975] 16840// addressBlock: nbio_nbif_rcc_strap_BIFDEC1[13440..14975]
16841//RCC_BIF_STRAP0
16842#define RCC_BIF_STRAP0__STRAP_PX_CAPABLE__SHIFT 0x7
16843#define RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L
16844
16841//RCC_DEV0_EPF0_STRAP0 16845//RCC_DEV0_EPF0_STRAP0
16842#define RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0 16846#define RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
16843#define RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10 16847#define RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h
new file mode 100644
index 000000000000..8c75669eb500
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright (C) 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */
21
22#ifndef _nbio_6_1_SMN_HEADER
23#define _nbio_6_1_SMN_HEADER
24
25
26#define smnCPM_CONTROL 0x11180460
27#define smnPCIE_CNTL2 0x11180070
28#define smnPCIE_CONFIG_CNTL 0x11180044
29#define smnPCIE_CI_CNTL 0x11180080
30
31
32#define smnPCIE_PERF_COUNT_CNTL 0x11180200
33#define smnPCIE_PERF_CNTL_TXCLK 0x11180204
34#define smnPCIE_PERF_COUNT0_TXCLK 0x11180208
35#define smnPCIE_PERF_COUNT1_TXCLK 0x1118020c
36#define smnPCIE_PERF_CNTL_MST_R_CLK 0x11180210
37#define smnPCIE_PERF_COUNT0_MST_R_CLK 0x11180214
38#define smnPCIE_PERF_COUNT1_MST_R_CLK 0x11180218
39#define smnPCIE_PERF_CNTL_MST_C_CLK 0x1118021c
40#define smnPCIE_PERF_COUNT0_MST_C_CLK 0x11180220
41#define smnPCIE_PERF_COUNT1_MST_C_CLK 0x11180224
42#define smnPCIE_PERF_CNTL_SLV_R_CLK 0x11180228
43#define smnPCIE_PERF_COUNT0_SLV_R_CLK 0x1118022c
44#define smnPCIE_PERF_COUNT1_SLV_R_CLK 0x11180230
45#define smnPCIE_PERF_CNTL_SLV_S_C_CLK 0x11180234
46#define smnPCIE_PERF_COUNT0_SLV_S_C_CLK 0x11180238
47#define smnPCIE_PERF_COUNT1_SLV_S_C_CLK 0x1118023c
48#define smnPCIE_PERF_CNTL_SLV_NS_C_CLK 0x11180240
49#define smnPCIE_PERF_COUNT0_SLV_NS_C_CLK 0x11180244
50#define smnPCIE_PERF_COUNT1_SLV_NS_C_CLK 0x11180248
51#define smnPCIE_PERF_CNTL_EVENT0_PORT_SEL 0x1118024c
52#define smnPCIE_PERF_CNTL_EVENT1_PORT_SEL 0x11180250
53#define smnPCIE_PERF_CNTL_TXCLK2 0x11180254
54#define smnPCIE_PERF_COUNT0_TXCLK2 0x11180258
55#define smnPCIE_PERF_COUNT1_TXCLK2 0x1118025c
56
57#endif // _nbio_6_1_SMN_HEADER
58
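[Editor's note] These SMN perf-counter registers back the new PCIe usage sysfs interface mentioned in the merge summary. A condensed sketch of one sampling cycle, assuming amdgpu's RREG32_PCIE()/WREG32_PCIE() SMN accessors and the counter-control bit encoding used by the nbio get_pcie_usage implementations (bit 0 start, bit 1 load shadow, bit 2 global reset):

	uint32_t lo0, lo1;

	/* start counting with a global counter reset (bits 0 and 2) */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
	msleep(1000);
	/* stop and latch the shadow counters (bit 1) */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	lo0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK);	/* selected event 0 */
	lo1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK);	/* selected event 1 */
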
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h
new file mode 100644
index 000000000000..5563f0715896
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (C) 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */
21
22#ifndef _nbio_7_0_SMN_HEADER
23#define _nbio_7_0_SMN_HEADER
24
25
26#define smnCPM_CONTROL 0x11180460
27#define smnPCIE_CNTL2 0x11180070
28
29#define smnPCIE_PERF_COUNT_CNTL 0x11180200
30#define smnPCIE_PERF_CNTL_TXCLK 0x11180204
31#define smnPCIE_PERF_COUNT0_TXCLK 0x11180208
32#define smnPCIE_PERF_COUNT1_TXCLK 0x1118020c
33#define smnPCIE_PERF_CNTL_MST_R_CLK 0x11180210
34#define smnPCIE_PERF_COUNT0_MST_R_CLK 0x11180214
35#define smnPCIE_PERF_COUNT1_MST_R_CLK 0x11180218
36#define smnPCIE_PERF_CNTL_MST_C_CLK 0x1118021c
37#define smnPCIE_PERF_COUNT0_MST_C_CLK 0x11180220
38#define smnPCIE_PERF_COUNT1_MST_C_CLK 0x11180224
39#define smnPCIE_PERF_CNTL_SLV_R_CLK 0x11180228
40#define smnPCIE_PERF_COUNT0_SLV_R_CLK 0x1118022c
41#define smnPCIE_PERF_COUNT1_SLV_R_CLK 0x11180230
42#define smnPCIE_PERF_CNTL_SLV_S_C_CLK 0x11180234
43#define smnPCIE_PERF_COUNT0_SLV_S_C_CLK 0x11180238
44#define smnPCIE_PERF_COUNT1_SLV_S_C_CLK 0x1118023c
45#define smnPCIE_PERF_CNTL_SLV_NS_C_CLK 0x11180240
46#define smnPCIE_PERF_COUNT0_SLV_NS_C_CLK 0x11180244
47#define smnPCIE_PERF_COUNT1_SLV_NS_C_CLK 0x11180248
48#define smnPCIE_PERF_CNTL_EVENT0_PORT_SEL 0x1118024c
49#define smnPCIE_PERF_CNTL_EVENT1_PORT_SEL 0x11180250
50#define smnPCIE_PERF_CNTL_TXCLK2 0x11180254
51#define smnPCIE_PERF_COUNT0_TXCLK2 0x11180258
52#define smnPCIE_PERF_COUNT1_TXCLK2 0x1118025c
53
54#endif // _nbio_7_0_SMN_HEADER
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
new file mode 100644
index 000000000000..c1457d880c4d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */
21
22#ifndef _nbio_7_4_0_SMN_HEADER
23#define _nbio_7_4_0_SMN_HEADER
24
25
26#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
27#define smnCPM_CONTROL 0x11180460
28#define smnPCIE_CNTL2 0x11180070
29#define smnPCIE_CI_CNTL 0x11180080
30
31#define smnPCIE_PERF_COUNT_CNTL 0x11180200
32#define smnPCIE_PERF_CNTL_TXCLK1 0x11180204
33#define smnPCIE_PERF_COUNT0_TXCLK1 0x11180208
34#define smnPCIE_PERF_COUNT1_TXCLK1 0x1118020c
35#define smnPCIE_PERF_CNTL_TXCLK2 0x11180210
36#define smnPCIE_PERF_COUNT0_TXCLK2 0x11180214
37#define smnPCIE_PERF_COUNT1_TXCLK2 0x11180218
38#define smnPCIE_PERF_CNTL_TXCLK3 0x1118021c
39#define smnPCIE_PERF_COUNT0_TXCLK3 0x11180220
40#define smnPCIE_PERF_COUNT1_TXCLK3 0x11180224
41#define smnPCIE_PERF_CNTL_TXCLK4 0x11180228
42#define smnPCIE_PERF_COUNT0_TXCLK4 0x1118022c
43#define smnPCIE_PERF_COUNT1_TXCLK4 0x11180230
44#define smnPCIE_PERF_CNTL_SCLK1 0x11180234
45#define smnPCIE_PERF_COUNT0_SCLK1 0x11180238
46#define smnPCIE_PERF_COUNT1_SCLK1 0x1118023c
47#define smnPCIE_PERF_CNTL_SCLK2 0x11180240
48#define smnPCIE_PERF_COUNT0_SCLK2 0x11180244
49#define smnPCIE_PERF_COUNT1_SCLK2 0x11180248
50#define smnPCIE_PERF_CNTL_EVENT_LC_PORT_SEL 0x1118024c
51#define smnPCIE_PERF_CNTL_EVENT_CI_PORT_SEL 0x11180250
52
53#endif // _nbio_7_4_0_SMN_HEADER
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
index e932213f87f0..994e796a28d7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
@@ -2567,6 +2567,8 @@
2567 2567
2568// addressBlock: nbio_nbif0_rcc_strap_BIFDEC1 2568// addressBlock: nbio_nbif0_rcc_strap_BIFDEC1
2569// base address: 0x0 2569// base address: 0x0
2570#define mmRCC_BIF_STRAP0 0x0000
2571#define mmRCC_BIF_STRAP0_BASE_IDX 2
2570#define mmRCC_DEV0_EPF0_STRAP0 0x0011 2572#define mmRCC_DEV0_EPF0_STRAP0 0x0011
2571#define mmRCC_DEV0_EPF0_STRAP0_BASE_IDX 2 2573#define mmRCC_DEV0_EPF0_STRAP0_BASE_IDX 2
2572 2574
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
index d3704b438f2d..d467b939c971 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
@@ -19690,6 +19690,9 @@
19690 19690
19691 19691
19692// addressBlock: nbio_nbif0_rcc_strap_BIFDEC1 19692// addressBlock: nbio_nbif0_rcc_strap_BIFDEC1
19693//RCC_BIF_STRAP0
19694#define RCC_BIF_STRAP0__STRAP_PX_CAPABLE__SHIFT 0x7
19695#define RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L
19693//RCC_DEV0_EPF0_STRAP0 19696//RCC_DEV0_EPF0_STRAP0
19694#define RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0 19697#define RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
19695#define RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10 19698#define RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
index a9eb57a53e59..a485526f3a51 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
@@ -46,4 +46,7 @@
46#define mmTHM_TCON_THERM_TRIP 0x0002 46#define mmTHM_TCON_THERM_TRIP 0x0002
47#define mmTHM_TCON_THERM_TRIP_BASE_IDX 0 47#define mmTHM_TCON_THERM_TRIP_BASE_IDX 0
48 48
49#define mmTHM_BACO_CNTL 0x0081
50#define mmTHM_BACO_CNTL_BASE_IDX 0
51
49#endif 52#endif
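[Editor's note] mmTHM_BACO_CNTL is consumed by the new BACO sequences below. Like every offset/BASE_IDX pair in these headers, it resolves through the per-device offset map; a sketch of the resolution, assuming the SOC15_REG_OFFSET() helper from soc15_common.h:

	/* SOC15_REG_OFFSET(THM, 0, mmTHM_BACO_CNTL) expands to roughly: */
	u32 off = adev->reg_offset[THM_HWIP][0][mmTHM_BACO_CNTL_BASE_IDX]
		  + mmTHM_BACO_CNTL;
	u32 val = RREG32(off);
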
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 7931502fa54f..8ba21747b40a 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -4106,7 +4106,7 @@ typedef struct _ATOM_LCD_MODE_CONTROL_CAP
4106typedef struct _ATOM_FAKE_EDID_PATCH_RECORD 4106typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
4107{ 4107{
4108 UCHAR ucRecordType; 4108 UCHAR ucRecordType;
4109 UCHAR ucFakeEDIDLength; // = 128 means EDID lenght is 128 bytes, otherwise the EDID length = ucFakeEDIDLength*128 4109 UCHAR ucFakeEDIDLength; // = 128 means EDID length is 128 bytes, otherwise the EDID length = ucFakeEDIDLength*128
4110 UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements. 4110 UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
4111} ATOM_FAKE_EDID_PATCH_RECORD; 4111} ATOM_FAKE_EDID_PATCH_RECORD;
4112 4112
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 8154d67388cc..83d960110d23 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -34,7 +34,6 @@
34 34
35struct pci_dev; 35struct pci_dev;
36 36
37#define KFD_INTERFACE_VERSION 2
38#define KGD_MAX_QUEUES 128 37#define KGD_MAX_QUEUES 128
39 38
40struct kfd_dev; 39struct kfd_dev;
@@ -330,56 +329,4 @@ struct kfd2kgd_calls {
330 329
331}; 330};
332 331
333/**
334 * struct kgd2kfd_calls
335 *
336 * @exit: Notifies amdkfd that kgd module is unloaded
337 *
338 * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
339 *
340 * @device_init: Initialize the newly probed device (if it is a device that
341 * amdkfd supports)
342 *
343 * @device_exit: Notifies amdkfd about a removal of a kgd device
344 *
345 * @suspend: Notifies amdkfd about a suspend action done to a kgd device
346 *
347 * @resume: Notifies amdkfd about a resume action done to a kgd device
348 *
349 * @quiesce_mm: Quiesce all user queue access to specified MM address space
350 *
351 * @resume_mm: Resume user queue access to specified MM address space
352 *
353 * @schedule_evict_and_restore_process: Schedules work queue that will prepare
354 * for safe eviction of KFD BOs that belong to the specified process.
355 *
356 * @pre_reset: Notifies amdkfd that amdgpu is about to reset the gpu
357 *
358 * @post_reset: Notifies amdkfd that amdgpu successfully reset the gpu
359 *
360 * This structure contains function callback pointers so the kgd driver
361 * will notify amdkfd about certain status changes.
362 *
363 */
364struct kgd2kfd_calls {
365 void (*exit)(void);
366 struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev,
367 const struct kfd2kgd_calls *f2g);
368 bool (*device_init)(struct kfd_dev *kfd,
369 const struct kgd2kfd_shared_resources *gpu_resources);
370 void (*device_exit)(struct kfd_dev *kfd);
371 void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
372 void (*suspend)(struct kfd_dev *kfd);
373 int (*resume)(struct kfd_dev *kfd);
374 int (*quiesce_mm)(struct mm_struct *mm);
375 int (*resume_mm)(struct mm_struct *mm);
376 int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
377 struct dma_fence *fence);
378 int (*pre_reset)(struct kfd_dev *kfd);
379 int (*post_reset)(struct kfd_dev *kfd);
380};
381
382int kgd2kfd_init(unsigned interface_version,
383 const struct kgd2kfd_calls **g2f);
384
385#endif /* KGD_KFD_INTERFACE_H_INCLUDED */ 332#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 1479ea1dc3e7..2b579ba9b685 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -92,6 +92,9 @@ enum pp_clock_type {
92 PP_SCLK, 92 PP_SCLK,
93 PP_MCLK, 93 PP_MCLK,
94 PP_PCIE, 94 PP_PCIE,
95 PP_SOCCLK,
96 PP_FCLK,
97 PP_DCEFCLK,
95 OD_SCLK, 98 OD_SCLK,
96 OD_MCLK, 99 OD_MCLK,
97 OD_VDDC_CURVE, 100 OD_VDDC_CURVE,
@@ -127,12 +130,13 @@ enum amd_pp_task {
127}; 130};
128 131
129enum PP_SMC_POWER_PROFILE { 132enum PP_SMC_POWER_PROFILE {
130 PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0, 133 PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
131 PP_SMC_POWER_PROFILE_POWERSAVING = 0x1, 134 PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
132 PP_SMC_POWER_PROFILE_VIDEO = 0x2, 135 PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,
133 PP_SMC_POWER_PROFILE_VR = 0x3, 136 PP_SMC_POWER_PROFILE_VIDEO = 0x3,
134 PP_SMC_POWER_PROFILE_COMPUTE = 0x4, 137 PP_SMC_POWER_PROFILE_VR = 0x4,
135 PP_SMC_POWER_PROFILE_CUSTOM = 0x5, 138 PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
139 PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
136}; 140};
137 141
138enum { 142enum {
@@ -280,6 +284,11 @@ struct amd_pm_funcs {
280 int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock); 284 int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
281 int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock); 285 int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
282 int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock); 286 int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
287 int (*get_asic_baco_capability)(void *handle, bool *cap);
288 int (*get_asic_baco_state)(void *handle, int *state);
289 int (*set_asic_baco_state)(void *handle, int state);
290 int (*get_ppfeature_status)(void *handle, char *buf);
291 int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks);
283}; 292};
284 293
285#endif 294#endif
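[Editor's note] A sketch of driving the new BACO hooks from the amdgpu side, assuming the adev->powerplay.pp_funcs / pp_handle plumbing amdgpu uses elsewhere and the BACO_STATE_OUT=0 / BACO_STATE_IN=1 encoding:

	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_support = false;

	if (pp_funcs->get_asic_baco_capability)
		pp_funcs->get_asic_baco_capability(pp_handle, &baco_support);
	if (baco_support) {
		pp_funcs->set_asic_baco_state(pp_handle, 1);	/* enter BACO */
		pp_funcs->set_asic_baco_state(pp_handle, 0);	/* exit BACO */
	}
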
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 9bc27f468d5b..3f73f7cd18b9 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1404,6 +1404,97 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
1404 return ret; 1404 return ret;
1405} 1405}
1406 1406
1407static int pp_get_asic_baco_capability(void *handle, bool *cap)
1408{
1409 struct pp_hwmgr *hwmgr = handle;
1410
1411 if (!hwmgr)
1412 return -EINVAL;
1413
1414 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
1415 return 0;
1416
1417 mutex_lock(&hwmgr->smu_lock);
1418 hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1419 mutex_unlock(&hwmgr->smu_lock);
1420
1421 return 0;
1422}
1423
1424static int pp_get_asic_baco_state(void *handle, int *state)
1425{
1426 struct pp_hwmgr *hwmgr = handle;
1427
1428 if (!hwmgr)
1429 return -EINVAL;
1430
1431 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1432 return 0;
1433
1434 mutex_lock(&hwmgr->smu_lock);
1435 hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1436 mutex_unlock(&hwmgr->smu_lock);
1437
1438 return 0;
1439}
1440
1441static int pp_set_asic_baco_state(void *handle, int state)
1442{
1443 struct pp_hwmgr *hwmgr = handle;
1444
1445 if (!hwmgr)
1446 return -EINVAL;
1447
1448 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
1449 return 0;
1450
1451 mutex_lock(&hwmgr->smu_lock);
1452 hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1453 mutex_unlock(&hwmgr->smu_lock);
1454
1455 return 0;
1456}
1457
1458static int pp_get_ppfeature_status(void *handle, char *buf)
1459{
1460 struct pp_hwmgr *hwmgr = handle;
1461 int ret = 0;
1462
1463 if (!hwmgr || !hwmgr->pm_en || !buf)
1464 return -EINVAL;
1465
1466 if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1467 pr_info_ratelimited("%s was not implemented.\n", __func__);
1468 return -EINVAL;
1469 }
1470
1471 mutex_lock(&hwmgr->smu_lock);
1472 ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1473 mutex_unlock(&hwmgr->smu_lock);
1474
1475 return ret;
1476}
1477
1478static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1479{
1480 struct pp_hwmgr *hwmgr = handle;
1481 int ret = 0;
1482
1483 if (!hwmgr || !hwmgr->pm_en)
1484 return -EINVAL;
1485
1486 if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1487 pr_info_ratelimited("%s was not implemented.\n", __func__);
1488 return -EINVAL;
1489 }
1490
1491 mutex_lock(&hwmgr->smu_lock);
1492 ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1493 mutex_unlock(&hwmgr->smu_lock);
1494
1495 return ret;
1496}
1497
1407static const struct amd_pm_funcs pp_dpm_funcs = { 1498static const struct amd_pm_funcs pp_dpm_funcs = {
1408 .load_firmware = pp_dpm_load_fw, 1499 .load_firmware = pp_dpm_load_fw,
1409 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, 1500 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1454,4 +1545,9 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
1454 .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk, 1545 .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
1455 .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq, 1546 .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
1456 .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq, 1547 .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
1548 .get_asic_baco_capability = pp_get_asic_baco_capability,
1549 .get_asic_baco_state = pp_get_asic_baco_state,
1550 .set_asic_baco_state = pp_set_asic_baco_state,
1551 .get_ppfeature_status = pp_get_ppfeature_status,
1552 .set_ppfeature_status = pp_set_ppfeature_status,
1457}; 1553};
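[Editor's note] The two ppfeature hooks registered above back the new runtime SMU feature debug interface from the merge summary. A sketch of exercising them, assuming the same pp_funcs/pp_handle plumbing as the BACO example earlier; the buffer size and mask value are hypothetical:

	char buf[1024];		/* hypothetical; the sysfs path hands in a page */
	uint64_t masks = ~0ULL;	/* hypothetical: keep every SMU feature enabled */

	if (pp_funcs->get_ppfeature_status)
		pp_funcs->get_ppfeature_status(pp_handle, buf);
	if (pp_funcs->set_ppfeature_status)
		pp_funcs->set_ppfeature_status(pp_handle, masks);
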
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index ade8973b6f4d..0b3c6d1d52e4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -35,7 +35,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
35 vega12_thermal.o \ 35 vega12_thermal.o \
36 pp_overdriver.o smu_helper.o \ 36 pp_overdriver.o smu_helper.o \
37 vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \ 37 vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
38 vega20_thermal.o 38 vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o
39 39
40AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) 40AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
41 41
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
new file mode 100644
index 000000000000..9c57c1f67749
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
@@ -0,0 +1,101 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "common_baco.h"
25
26
27static bool baco_wait_register(struct pp_hwmgr *hwmgr, u32 reg, u32 mask, u32 value)
28{
29 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
30 u32 timeout = 5000, data;
31
32 do {
33 msleep(1);
34 data = RREG32(reg);
35 timeout--;
36 } while (value != (data & mask) && (timeout != 0));
37
38 if (timeout == 0)
39 return false;
40
41 return true;
42}
43
44static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 mask,
45 u32 shift, u32 value, u32 timeout)
46{
47 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
48 u32 data;
49 bool ret = true;
50
51 switch (command) {
52 case CMD_WRITE:
53 WREG32(reg, value << shift);
54 break;
55 case CMD_READMODIFYWRITE:
56 data = RREG32(reg);
57 data = (data & (~mask)) | (value << shift);
58 WREG32(reg, data);
59 break;
60 case CMD_WAITFOR:
61 ret = baco_wait_register(hwmgr, reg, mask, value);
62 break;
63 case CMD_DELAY_MS:
64 if (timeout)
65 /* Delay in milliseconds */
66 msleep(timeout);
67 break;
68 case CMD_DELAY_US:
69 if (timeout)
70 /* Delay in microseconds */
71 udelay(timeout);
72 break;
73
74 default:
75 dev_warn(adev->dev, "Invalid BACO command.\n");
76 ret = false;
77 }
78
79 return ret;
80}
81
82bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
83 const struct soc15_baco_cmd_entry *entry,
84 const u32 array_size)
85{
86 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
87 u32 i, reg = 0;
88
89 for (i = 0; i < array_size; i++) {
90 if ((entry[i].cmd == CMD_WRITE) ||
91 (entry[i].cmd == CMD_READMODIFYWRITE) ||
92 (entry[i].cmd == CMD_WAITFOR))
93 reg = adev->reg_offset[entry[i].hwip][entry[i].inst][entry[i].seg]
94 + entry[i].reg_offset;
95 if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
96 entry[i].shift, entry[i].val, entry[i].timeout))
97 return false;
98 }
99
100 return true;
101}
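[Editor's note] soc15_baco_program_registers() walks a command table, resolving each WRITE/READMODIFYWRITE/WAITFOR entry's register through the per-device offset map before handing it to baco_cmd_handler(). A usage sketch, assuming a command table like the vega10 ones further below:

	if (!soc15_baco_program_registers(hwmgr, pre_baco_tbl,
					  ARRAY_SIZE(pre_baco_tbl)))
		return -EINVAL;
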
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
index 26355c088746..95296c916f4e 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -19,25 +19,32 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: AMD
23 *
24 */ 22 */
23#ifndef __COMMON_BACO_H__
24#define __COMMON_BACO_H__
25#include "hwmgr.h"
25 26
26#ifndef __DAL_I2C_SW_ENGINE_DCE80_H__
27#define __DAL_I2C_SW_ENGINE_DCE80_H__
28 27
29struct i2c_sw_engine_dce80 { 28enum baco_cmd_type {
30 struct i2c_sw_engine base; 29 CMD_WRITE = 0,
31 uint32_t engine_id; 30 CMD_READMODIFYWRITE,
31 CMD_WAITFOR,
32 CMD_DELAY_MS,
33 CMD_DELAY_US,
32}; 34};
33 35
34struct i2c_sw_engine_dce80_create_arg { 36struct soc15_baco_cmd_entry {
35 uint32_t engine_id; 37 enum baco_cmd_type cmd;
36 uint32_t default_speed; 38 uint32_t hwip;
37 struct dc_context *ctx; 39 uint32_t inst;
40 uint32_t seg;
41 uint32_t reg_offset;
42 uint32_t mask;
43 uint32_t shift;
44 uint32_t timeout;
45 uint32_t val;
38}; 46};
39 47extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
40struct i2c_engine *dal_i2c_sw_engine_dce80_create( 48 const struct soc15_baco_cmd_entry *entry,
41 const struct i2c_sw_engine_dce80_create_arg *arg); 49 const u32 array_size);
42
43#endif 50#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 1f92a9f4c9e3..c1c51c115e57 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -154,15 +154,6 @@ int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
154 return 0; 154 return 0;
155} 155}
156 156
157int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr)
158{
159 PHM_FUNC_CHECK(hwmgr);
160
161 if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating)
162 return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr);
163
164 return 0;
165}
166 157
167int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr) 158int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
168{ 159{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 0173d0480024..6cd6497c6fc2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -64,17 +64,19 @@ static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
64 64
65static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) 65static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
66{ 66{
67 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2; 67 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
68 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0; 68 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
69 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1; 69 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
70 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3; 70 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
71 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4; 71 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
72 72 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
73 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING; 73
74 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO; 74 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
75 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 75 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
76 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR; 76 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
77 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE; 77 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
78 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
79 hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
78} 80}
79 81
80int hwmgr_early_init(struct pp_hwmgr *hwmgr) 82int hwmgr_early_init(struct pp_hwmgr *hwmgr)
@@ -271,7 +273,7 @@ int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
271 273
272 phm_stop_thermal_controller(hwmgr); 274 phm_stop_thermal_controller(hwmgr);
273 psm_set_boot_states(hwmgr); 275 psm_set_boot_states(hwmgr);
274 psm_adjust_power_state_dynamic(hwmgr, false, NULL); 276 psm_adjust_power_state_dynamic(hwmgr, true, NULL);
275 phm_disable_dynamic_state_management(hwmgr); 277 phm_disable_dynamic_state_management(hwmgr);
276 phm_disable_clock_power_gatings(hwmgr); 278 phm_disable_clock_power_gatings(hwmgr);
277 279
@@ -293,7 +295,7 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr)
293 ret = psm_set_boot_states(hwmgr); 295 ret = psm_set_boot_states(hwmgr);
294 if (ret) 296 if (ret)
295 return ret; 297 return ret;
296 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); 298 ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
297 if (ret) 299 if (ret)
298 return ret; 300 return ret;
299 ret = phm_power_down_asic(hwmgr); 301 ret = phm_power_down_asic(hwmgr);
@@ -323,7 +325,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
323 if (ret) 325 if (ret)
324 return ret; 326 return ret;
325 327
326 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); 328 ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
327 329
328 return ret; 330 return ret;
329} 331}
@@ -377,12 +379,12 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
377 ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps); 379 ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
378 if (ret) 380 if (ret)
379 return ret; 381 return ret;
380 ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps); 382 ret = psm_adjust_power_state_dynamic(hwmgr, true, requested_ps);
381 break; 383 break;
382 } 384 }
383 case AMD_PP_TASK_COMPLETE_INIT: 385 case AMD_PP_TASK_COMPLETE_INIT:
384 case AMD_PP_TASK_READJUST_POWER_STATE: 386 case AMD_PP_TASK_READJUST_POWER_STATE:
385 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); 387 ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
386 break; 388 break;
387 default: 389 default:
388 break; 390 break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 56437866d120..ce177d7f04cb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -256,16 +256,14 @@ static void power_state_management(struct pp_hwmgr *hwmgr,
256 } 256 }
257} 257}
258 258
259int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, 259int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_settings,
260 struct pp_power_state *new_ps) 260 struct pp_power_state *new_ps)
261{ 261{
262 uint32_t index; 262 uint32_t index;
263 long workload; 263 long workload;
264 264
265 if (skip) 265 if (!skip_display_settings)
266 return 0; 266 phm_display_configuration_changed(hwmgr);
267
268 phm_display_configuration_changed(hwmgr);
269 267
270 if (hwmgr->ps) 268 if (hwmgr->ps)
271 power_state_management(hwmgr, new_ps); 269 power_state_management(hwmgr, new_ps);
@@ -276,9 +274,11 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
276 */ 274 */
277 phm_apply_clock_adjust_rules(hwmgr); 275 phm_apply_clock_adjust_rules(hwmgr);
278 276
279 phm_notify_smc_display_config_after_ps_adjustment(hwmgr); 277 if (!skip_display_settings)
278 phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
280 279
281 if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) 280 if ((hwmgr->request_dpm_level != hwmgr->dpm_level) &&
281 !phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
282 hwmgr->dpm_level = hwmgr->request_dpm_level; 282 hwmgr->dpm_level = hwmgr->request_dpm_level;
283 283
284 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 284 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
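[Editor's note] The renamed parameter changes the contract: skip_display_settings no longer short-circuits the whole function, it only bypasses the two display notifications, so power-state management and DPM-level forcing still run. That is why every caller in hwmgr.c above now passes true where it used to pass false. In sketch form:

	/* old: skip == true returned immediately;
	 * new: display hooks skipped, DPM level still re-evaluated */
	psm_adjust_power_state_dynamic(hwmgr, true /* skip_display_settings */, NULL);
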
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h
index fa1b6825036a..b62d55f1f289 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h
@@ -34,7 +34,7 @@ int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
34 enum PP_StateUILabel label_id, 34 enum PP_StateUILabel label_id,
35 struct pp_power_state **state); 35 struct pp_power_state **state);
36int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, 36int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr,
37 bool skip, 37 bool skip_display_settings,
38 struct pp_power_state *new_ps); 38 struct pp_power_state *new_ps);
39 39
40#endif 40#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index d91390459326..c8f5c00dd1e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -77,8 +77,9 @@
77#define PCIE_BUS_CLK 10000 77#define PCIE_BUS_CLK 10000
78#define TCLK (PCIE_BUS_CLK / 10) 78#define TCLK (PCIE_BUS_CLK / 10)
79 79
80static const struct profile_mode_setting smu7_profiling[6] = 80static const struct profile_mode_setting smu7_profiling[7] =
81 {{1, 0, 100, 30, 1, 0, 100, 10}, 81 {{0, 0, 0, 0, 0, 0, 0, 0},
82 {1, 0, 100, 30, 1, 0, 100, 10},
82 {1, 10, 0, 30, 0, 0, 0, 0}, 83 {1, 10, 0, 30, 0, 0, 0, 0},
83 {0, 0, 0, 0, 1, 10, 16, 31}, 84 {0, 0, 0, 0, 1, 10, 16, 31},
84 {1, 0, 11, 50, 1, 0, 100, 10}, 85 {1, 0, 11, 50, 1, 0, 100, 10},
@@ -4889,7 +4890,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4889 uint32_t i, size = 0; 4890 uint32_t i, size = 0;
4890 uint32_t len; 4891 uint32_t len;
4891 4892
4892 static const char *profile_name[6] = {"3D_FULL_SCREEN", 4893 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4894 "3D_FULL_SCREEN",
4893 "POWER_SAVING", 4895 "POWER_SAVING",
4894 "VIDEO", 4896 "VIDEO",
4895 "VR", 4897 "VR",
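[Editor's note] The new all-zero row gives BOOTUP_DEFAULT a profile that leaves both clock domains untouched. A sketch of how each row decodes, assuming the profile_mode_setting field layout from smu7_hwmgr.h:

	struct profile_mode_setting tmp =
		smu7_profiling[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
	/* tmp = {1, 0, 100, 30, 1, 0, 100, 10}:
	 *   bupdate_sclk=1, sclk_up_hyst=0, sclk_down_hyst=100, sclk_activity=30,
	 *   bupdate_mclk=1, mclk_up_hyst=0, mclk_down_hyst=100, mclk_activity=10;
	 * the all-zero BOOTUP_DEFAULT row updates neither domain. */
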
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
new file mode 100644
index 000000000000..f94dab27f486
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
@@ -0,0 +1,136 @@
1#include "amdgpu.h"
2#include "soc15.h"
3#include "soc15_hw_ip.h"
4#include "vega10_ip_offset.h"
5#include "soc15_common.h"
6#include "vega10_inc.h"
7#include "vega10_ppsmc.h"
8#include "vega10_baco.h"
9
10
11
12static const struct soc15_baco_cmd_entry pre_baco_tbl[] =
13{
14 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIF_DOORBELL_CNTL), BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT, 0, 1},
15 {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIF_FB_EN), 0, 0, 0, 0},
16 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DSTATE_BYPASS_MASK, BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT, 0, 1},
17 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_RST_INTR_MASK_MASK, BACO_CNTL__BACO_RST_INTR_MASK__SHIFT, 0, 1}
18};
19
20static const struct soc15_baco_cmd_entry enter_baco_tbl[] =
21{
22 {CMD_WAITFOR, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__SOC_DOMAIN_IDLE_MASK, THM_BACO_CNTL__SOC_DOMAIN_IDLE__SHIFT, 0xffffffff, 0x80000000},
23 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 1},
24 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 1},
25 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 1},
26 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 1},
 27 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 1},
28 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 1},
 29 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 1},
30 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 1},
31 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 1},
32 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 1},
33 {CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 5, 0},
34 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_RESET_EN_MASK, THM_BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 1},
35 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 0},
36 {CMD_WAITFOR, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_MODE_MASK, BACO_CNTL__BACO_MODE__SHIFT, 0xffffffff, 0x100}
37};
38
39static const struct soc15_baco_cmd_entry exit_baco_tbl[] =
40{
41 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0},
 42 {CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10, 0},
 43 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 0},
44 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 0},
 45 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 0},
46 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 0},
47 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 1},
48 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 0},
49 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 0},
50 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_EXIT_MASK, THM_BACO_CNTL__BACO_EXIT__SHIFT, 0, 1},
51 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_RESET_EN_MASK, THM_BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0},
52 {CMD_WAITFOR, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_EXIT_MASK, 0, 0xffffffff, 0},
53 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SB_AXI_FENCE_MASK, THM_BACO_CNTL__BACO_SB_AXI_FENCE__SHIFT, 0, 0},
54 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 0},
 55 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 0},
 56 {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0},
57 {CMD_WAITFOR, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0}
 58};
59
60static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
61{
62 {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_6), 0, 0, 0, 0},
63 {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
64};
65
66int vega10_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
67{
68 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
69 uint32_t reg, data;
70
71 *cap = false;
72 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
73 return 0;
74
75 WREG32(0x12074, 0xFFF0003B);
76 data = RREG32(0x12075);
77
78 if (data == 0x1) {
79 reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
80
81 if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
82 *cap = true;
83 }
84
85 return 0;
86}
87
88int vega10_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
89{
90 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
91 uint32_t reg;
92
93 reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL);
94
95 if (reg & BACO_CNTL__BACO_MODE_MASK)
96 /* gfx has already entered BACO state */
97 *state = BACO_STATE_IN;
98 else
99 *state = BACO_STATE_OUT;
100 return 0;
101}
102
103int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
104{
105 enum BACO_STATE cur_state;
106
107 vega10_baco_get_state(hwmgr, &cur_state);
108
109 if (cur_state == state)
 110 /* asic already in the target state */
111 return 0;
112
113 if (state == BACO_STATE_IN) {
114 if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
115 ARRAY_SIZE(pre_baco_tbl))) {
116 if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco))
117 return -1;
118
119 if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
120 ARRAY_SIZE(enter_baco_tbl)))
121 return 0;
122 }
123 } else if (state == BACO_STATE_OUT) {
124 /* HW requires at least 20ms between regulator off and on */
125 msleep(20);
126 /* Execute Hardware BACO exit sequence */
127 if (soc15_baco_program_registers(hwmgr, exit_baco_tbl,
128 ARRAY_SIZE(exit_baco_tbl))) {
129 if (soc15_baco_program_registers(hwmgr, clean_baco_tbl,
130 ARRAY_SIZE(clean_baco_tbl)))
131 return 0;
132 }
133 }
134
135 return -1;
136}
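
Note: each soc15_baco_cmd_entry above packs an opcode (write,
read-modify-write, wait-for, delay) together with a register, mask/shift,
timeout and value, so the BACO enter/exit sequences are pure data walked by
soc15_baco_program_registers() (defined in common_baco.c, which is not part
of this hunk). A simplified userspace model of such an interpreter, with the
structure layout and register file invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    enum baco_cmd_type { CMD_WRITE, CMD_READMODIFYWRITE, CMD_WAITFOR, CMD_DELAY_MS };

    struct baco_cmd_entry {            /* illustrative layout, not the driver's */
            enum baco_cmd_type cmd;
            uint32_t reg, mask, shift, timeout, val;
    };

    static uint32_t mmio[0x20000];     /* fake register file for the model */

    static bool run_baco_table(const struct baco_cmd_entry *tbl, int n)
    {
            for (int i = 0; i < n; i++) {
                    const struct baco_cmd_entry *e = &tbl[i];

                    switch (e->cmd) {
                    case CMD_WRITE:
                            mmio[e->reg] = e->val;
                            break;
                    case CMD_READMODIFYWRITE:
                            mmio[e->reg] = (mmio[e->reg] & ~e->mask) |
                                           (e->val << e->shift);
                            break;
                    case CMD_WAITFOR:
                            /* the real code polls until timeout; the model checks once */
                            if ((mmio[e->reg] & e->mask) != e->val)
                                    return false;
                            break;
                    case CMD_DELAY_MS:
                            /* msleep(e->timeout) in the kernel; a no-op here */
                            break;
                    }
            }
            return true;
    }
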
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
index 21908629e973..a93b1e6d1c66 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2012-15 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -19,20 +19,14 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: AMD
23 *
24 */ 22 */
 23#ifndef __VEGA10_BACO_H__
 24#define __VEGA10_BACO_H__
25#include "hwmgr.h"
26#include "common_baco.h"
25 27
26#ifndef __DAL_I2C_AUX_DCE80_H__ 28extern int vega10_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
27#define __DAL_I2C_AUX_DCE80_H__ 29extern int vega10_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
28 30extern int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
29struct i2caux_dce80 {
30 struct i2caux base;
31 /* indicate the I2C HW circular buffer is in use */
32 bool i2c_hw_buffer_in_use;
33};
34
35struct i2caux *dal_i2caux_dce80_create(
36 struct dc_context *ctx);
37 31
38#endif 32#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 79c86247d0ac..0d38ac2fdbf1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -48,6 +48,7 @@
48#include "ppinterrupt.h" 48#include "ppinterrupt.h"
49#include "pp_overdriver.h" 49#include "pp_overdriver.h"
50#include "pp_thermal.h" 50#include "pp_thermal.h"
51#include "vega10_baco.h"
51 52
52#include "smuio/smuio_9_0_offset.h" 53#include "smuio/smuio_9_0_offset.h"
53#include "smuio/smuio_9_0_sh_mask.h" 54#include "smuio/smuio_9_0_sh_mask.h"
@@ -71,6 +72,21 @@ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
71#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L 72#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
72#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L 73#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
73 74
75typedef enum {
76 CLK_SMNCLK = 0,
77 CLK_SOCCLK,
78 CLK_MP0CLK,
79 CLK_MP1CLK,
80 CLK_LCLK,
81 CLK_DCEFCLK,
82 CLK_VCLK,
83 CLK_DCLK,
84 CLK_ECLK,
85 CLK_UCLK,
86 CLK_GFXCLK,
87 CLK_COUNT,
88} CLOCK_ID_e;
89
74static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic); 90static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
75 91
76struct vega10_power_state *cast_phw_vega10_power_state( 92struct vega10_power_state *cast_phw_vega10_power_state(
@@ -804,9 +820,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
804 820
805 hwmgr->backend = data; 821 hwmgr->backend = data;
806 822
807 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; 823 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
808 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 824 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
809 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 825 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
810 826
811 vega10_set_default_registry_data(hwmgr); 827 vega10_set_default_registry_data(hwmgr);
812 data->disable_dpm_mask = 0xff; 828 data->disable_dpm_mask = 0xff;
@@ -3485,6 +3501,17 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3485 } 3501 }
3486 } 3502 }
3487 3503
3504 if (!data->registry_data.socclk_dpm_key_disabled) {
3505 if (data->smc_state_table.soc_boot_level !=
3506 data->dpm_table.soc_table.dpm_state.soft_min_level) {
3507 smum_send_msg_to_smc_with_parameter(hwmgr,
3508 PPSMC_MSG_SetSoftMinSocclkByIndex,
3509 data->smc_state_table.soc_boot_level);
3510 data->dpm_table.soc_table.dpm_state.soft_min_level =
3511 data->smc_state_table.soc_boot_level;
3512 }
3513 }
3514
3488 return 0; 3515 return 0;
3489} 3516}
3490 3517
@@ -3516,6 +3543,17 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3516 } 3543 }
3517 } 3544 }
3518 3545
3546 if (!data->registry_data.socclk_dpm_key_disabled) {
3547 if (data->smc_state_table.soc_max_level !=
3548 data->dpm_table.soc_table.dpm_state.soft_max_level) {
3549 smum_send_msg_to_smc_with_parameter(hwmgr,
3550 PPSMC_MSG_SetSoftMaxSocclkByIndex,
3551 data->smc_state_table.soc_max_level);
3552 data->dpm_table.soc_table.dpm_state.soft_max_level =
3553 data->smc_state_table.soc_max_level;
3554 }
3555 }
3556
3519 return 0; 3557 return 0;
3520} 3558}
3521 3559
@@ -4028,6 +4066,24 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4028 4066
4029 break; 4067 break;
4030 4068
4069 case PP_SOCCLK:
4070 data->smc_state_table.soc_boot_level = mask ? (ffs(mask) - 1) : 0;
4071 data->smc_state_table.soc_max_level = mask ? (fls(mask) - 1) : 0;
4072
4073 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4074 "Failed to upload boot level to lowest!",
4075 return -EINVAL);
4076
4077 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4078 "Failed to upload dpm max level to highest!",
4079 return -EINVAL);
4080
4081 break;
4082
4083 case PP_DCEFCLK:
4084 pr_info("Setting DCEFCLK min/max dpm level is not supported!\n");
4085 break;
4086
4031 case PP_PCIE: 4087 case PP_PCIE:
4032 default: 4088 default:
4033 break; 4089 break;
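
Note: the mask written to the pp_dpm_socclk sysfs file is a bitmap of DPM
levels, and the two assignments in the PP_SOCCLK case above convert it to a
level range: ffs(mask) - 1 is the lowest set bit (the soft minimum) and
fls(mask) - 1 the highest (the soft maximum). A compilable illustration,
with a portable stand-in for the kernel's fls():

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    /* fls() is a kernel helper; this is a portable stand-in for the sketch. */
    static int fls_portable(unsigned int x)
    {
            int r = 0;

            while (x) {
                    r++;
                    x >>= 1;
            }
            return r;
    }

    int main(void)
    {
            unsigned int mask = 0x6;   /* user selected DPM levels 1 and 2 */
            int min_level = mask ? ffs(mask) - 1 : 0;
            int max_level = mask ? fls_portable(mask) - 1 : 0;

            /* prints: soft_min_level=1 soft_max_level=2 */
            printf("soft_min_level=%d soft_max_level=%d\n", min_level, max_level);
            return 0;
    }
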
@@ -4267,12 +4323,113 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4267 return result; 4323 return result;
4268} 4324}
4269 4325
4326static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
4327{
4328 static const char *ppfeature_name[] = {
4329 "DPM_PREFETCHER",
4330 "GFXCLK_DPM",
4331 "UCLK_DPM",
4332 "SOCCLK_DPM",
4333 "UVD_DPM",
4334 "VCE_DPM",
4335 "ULV",
4336 "MP0CLK_DPM",
4337 "LINK_DPM",
4338 "DCEFCLK_DPM",
4339 "AVFS",
4340 "GFXCLK_DS",
4341 "SOCCLK_DS",
4342 "LCLK_DS",
4343 "PPT",
4344 "TDC",
4345 "THERMAL",
4346 "GFX_PER_CU_CG",
4347 "RM",
4348 "DCEFCLK_DS",
4349 "ACDC",
4350 "VR0HOT",
4351 "VR1HOT",
4352 "FW_CTF",
4353 "LED_DISPLAY",
4354 "FAN_CONTROL",
4355 "FAST_PPT",
4356 "DIDT",
4357 "ACG",
4358 "PCC_LIMIT"};
4359 static const char *output_title[] = {
4360 "FEATURES",
4361 "BITMASK",
4362 "ENABLEMENT"};
4363 uint64_t features_enabled;
4364 int i;
4365 int ret = 0;
4366 int size = 0;
4367
4368 ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
4369 PP_ASSERT_WITH_CODE(!ret,
4370 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
4371 return ret);
4372
4373 size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
4374 size += sprintf(buf + size, "%-19s %-22s %s\n",
4375 output_title[0],
4376 output_title[1],
4377 output_title[2]);
4378 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
4379 size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
4380 ppfeature_name[i],
4381 1ULL << i,
4382 (features_enabled & (1ULL << i)) ? "Y" : "N");
4383 }
4384
4385 return size;
4386}
4387
4388static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
4389{
4390 uint64_t features_enabled;
4391 uint64_t features_to_enable;
4392 uint64_t features_to_disable;
4393 int ret = 0;
4394
4395 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
4396 return -EINVAL;
4397
4398 ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
4399 if (ret)
4400 return ret;
4401
4402 features_to_disable =
4403 (features_enabled ^ new_ppfeature_masks) & features_enabled;
4404 features_to_enable =
4405 (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
4406
4407 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
4408 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
4409
4410 if (features_to_disable) {
4411 ret = vega10_enable_smc_features(hwmgr, false, features_to_disable);
4412 if (ret)
4413 return ret;
4414 }
4415
4416 if (features_to_enable) {
4417 ret = vega10_enable_smc_features(hwmgr, true, features_to_enable);
4418 if (ret)
4419 return ret;
4420 }
4421
4422 return 0;
4423}
4424
4270static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, 4425static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4271 enum pp_clock_type type, char *buf) 4426 enum pp_clock_type type, char *buf)
4272{ 4427{
4273 struct vega10_hwmgr *data = hwmgr->backend; 4428 struct vega10_hwmgr *data = hwmgr->backend;
4274 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); 4429 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4275 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); 4430 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4431 struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
4432 struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
4276 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); 4433 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4277 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; 4434 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
4278 4435
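
Note: vega10_set_ppfeature_status() splits the requested mask into its two
SMU calls with three bit operations: XOR yields the bits that differ from
the current state, ANDing that with the enabled mask isolates features to
turn off, and XORing the difference with that result leaves the features to
turn on. A worked standalone example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t enabled   = 0xA;  /* bits 1 and 3 currently on  */
            uint64_t requested = 0x6;  /* user wants bits 1 and 2 on */

            uint64_t diff       = enabled ^ requested;  /* 0xC: bits that change       */
            uint64_t to_disable = diff & enabled;       /* 0x8: on now, off in request */
            uint64_t to_enable  = diff ^ to_disable;    /* 0x4: off now, on in request */

            printf("disable=0x%llx enable=0x%llx\n",
                   (unsigned long long)to_disable,
                   (unsigned long long)to_enable);
            return 0;
    }
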
@@ -4303,6 +4460,32 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4303 i, mclk_table->dpm_levels[i].value / 100, 4460 i, mclk_table->dpm_levels[i].value / 100,
4304 (i == now) ? "*" : ""); 4461 (i == now) ? "*" : "");
4305 break; 4462 break;
4463 case PP_SOCCLK:
4464 if (data->registry_data.socclk_dpm_key_disabled)
4465 break;
4466
4467 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex);
4468 now = smum_get_argument(hwmgr);
4469
4470 for (i = 0; i < soc_table->count; i++)
4471 size += sprintf(buf + size, "%d: %uMhz %s\n",
4472 i, soc_table->dpm_levels[i].value / 100,
4473 (i == now) ? "*" : "");
4474 break;
4475 case PP_DCEFCLK:
4476 if (data->registry_data.dcefclk_dpm_key_disabled)
4477 break;
4478
4479 smum_send_msg_to_smc_with_parameter(hwmgr,
4480 PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK);
4481 now = smum_get_argument(hwmgr);
4482
4483 for (i = 0; i < dcef_table->count; i++)
4484 size += sprintf(buf + size, "%d: %uMhz %s\n",
4485 i, dcef_table->dpm_levels[i].value / 100,
4486 (dcef_table->dpm_levels[i].value / 100 == now) ?
4487 "*" : "");
4488 break;
4306 case PP_PCIE: 4489 case PP_PCIE:
4307 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex); 4490 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
4308 now = smum_get_argument(hwmgr); 4491 now = smum_get_argument(hwmgr);
@@ -4668,13 +4851,15 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4668{ 4851{
4669 struct vega10_hwmgr *data = hwmgr->backend; 4852 struct vega10_hwmgr *data = hwmgr->backend;
4670 uint32_t i, size = 0; 4853 uint32_t i, size = 0;
4671 static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,}, 4854 static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
4855 {70, 60, 1, 3,},
4672 {90, 60, 0, 0,}, 4856 {90, 60, 0, 0,},
4673 {70, 60, 0, 0,}, 4857 {70, 60, 0, 0,},
4674 {70, 90, 0, 0,}, 4858 {70, 90, 0, 0,},
4675 {30, 60, 0, 6,}, 4859 {30, 60, 0, 6,},
4676 }; 4860 };
4677 static const char *profile_name[6] = {"3D_FULL_SCREEN", 4861 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4862 "3D_FULL_SCREEN",
4678 "POWER_SAVING", 4863 "POWER_SAVING",
4679 "VIDEO", 4864 "VIDEO",
4680 "VR", 4865 "VR",
@@ -4978,6 +5163,12 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4978 .set_power_limit = vega10_set_power_limit, 5163 .set_power_limit = vega10_set_power_limit,
4979 .odn_edit_dpm_table = vega10_odn_edit_dpm_table, 5164 .odn_edit_dpm_table = vega10_odn_edit_dpm_table,
4980 .get_performance_level = vega10_get_performance_level, 5165 .get_performance_level = vega10_get_performance_level,
5166 .get_asic_baco_capability = vega10_baco_get_capability,
5167 .get_asic_baco_state = vega10_baco_get_state,
5168 .set_asic_baco_state = vega10_baco_set_state,
5169 .enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
5170 .get_ppfeature_status = vega10_get_ppfeature_status,
5171 .set_ppfeature_status = vega10_set_ppfeature_status,
4981}; 5172};
4982 5173
4983int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) 5174int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 89870556de1b..f752b4ad0c8a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -199,6 +199,7 @@ struct vega10_smc_state_table {
199 uint32_t vce_boot_level; 199 uint32_t vce_boot_level;
200 uint32_t gfx_max_level; 200 uint32_t gfx_max_level;
201 uint32_t mem_max_level; 201 uint32_t mem_max_level;
202 uint32_t soc_max_level;
202 uint8_t vr_hot_gpio; 203 uint8_t vr_hot_gpio;
203 uint8_t ac_dc_gpio; 204 uint8_t ac_dc_gpio;
204 uint8_t therm_out_gpio; 205 uint8_t therm_out_gpio;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
index b3e63003a789..c934e9612c1b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
@@ -282,6 +282,30 @@ typedef struct _ATOM_Vega10_Fan_Table_V2 {
282 UCHAR ucFanMaxRPM; 282 UCHAR ucFanMaxRPM;
283} ATOM_Vega10_Fan_Table_V2; 283} ATOM_Vega10_Fan_Table_V2;
284 284
285typedef struct _ATOM_Vega10_Fan_Table_V3 {
286 UCHAR ucRevId;
287 USHORT usFanOutputSensitivity;
288 USHORT usFanAcousticLimitRpm;
289 USHORT usThrottlingRPM;
290 USHORT usTargetTemperature;
291 USHORT usMinimumPWMLimit;
292 USHORT usTargetGfxClk;
293 USHORT usFanGainEdge;
294 USHORT usFanGainHotspot;
295 USHORT usFanGainLiquid;
296 USHORT usFanGainVrVddc;
297 USHORT usFanGainVrMvdd;
298 USHORT usFanGainPlx;
299 USHORT usFanGainHbm;
300 UCHAR ucEnableZeroRPM;
301 USHORT usFanStopTemperature;
302 USHORT usFanStartTemperature;
303 UCHAR ucFanParameters;
304 UCHAR ucFanMinRPM;
305 UCHAR ucFanMaxRPM;
306 USHORT usMGpuThrottlingRPM;
307} ATOM_Vega10_Fan_Table_V3;
308
285typedef struct _ATOM_Vega10_Thermal_Controller { 309typedef struct _ATOM_Vega10_Thermal_Controller {
286 UCHAR ucRevId; 310 UCHAR ucRevId;
287 UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/ 311 UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index b8747a5c9204..b6767d74dc85 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -32,6 +32,7 @@
32#include "vega10_pptable.h" 32#include "vega10_pptable.h"
33 33
34#define NUM_DSPCLK_LEVELS 8 34#define NUM_DSPCLK_LEVELS 8
35#define VEGA10_ENGINECLOCK_HARDMAX 198000
35 36
36static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, 37static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
37 enum phm_platform_caps cap) 38 enum phm_platform_caps cap)
@@ -122,6 +123,7 @@ static int init_thermal_controller(
122 const Vega10_PPTable_Generic_SubTable_Header *header; 123 const Vega10_PPTable_Generic_SubTable_Header *header;
123 const ATOM_Vega10_Fan_Table *fan_table_v1; 124 const ATOM_Vega10_Fan_Table *fan_table_v1;
124 const ATOM_Vega10_Fan_Table_V2 *fan_table_v2; 125 const ATOM_Vega10_Fan_Table_V2 *fan_table_v2;
126 const ATOM_Vega10_Fan_Table_V3 *fan_table_v3;
125 127
126 thermal_controller = (ATOM_Vega10_Thermal_Controller *) 128 thermal_controller = (ATOM_Vega10_Thermal_Controller *)
127 (((unsigned long)powerplay_table) + 129 (((unsigned long)powerplay_table) +
@@ -206,7 +208,7 @@ static int init_thermal_controller(
206 le16_to_cpu(fan_table_v1->usFanStopTemperature); 208 le16_to_cpu(fan_table_v1->usFanStopTemperature);
207 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = 209 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
208 le16_to_cpu(fan_table_v1->usFanStartTemperature); 210 le16_to_cpu(fan_table_v1->usFanStartTemperature);
209 } else if (header->ucRevId > 10) { 211 } else if (header->ucRevId == 0xb) {
210 fan_table_v2 = (ATOM_Vega10_Fan_Table_V2 *)header; 212 fan_table_v2 = (ATOM_Vega10_Fan_Table_V2 *)header;
211 213
212 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = 214 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
@@ -250,7 +252,54 @@ static int init_thermal_controller(
250 le16_to_cpu(fan_table_v2->usFanStopTemperature); 252 le16_to_cpu(fan_table_v2->usFanStopTemperature);
251 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = 253 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
252 le16_to_cpu(fan_table_v2->usFanStartTemperature); 254 le16_to_cpu(fan_table_v2->usFanStartTemperature);
255 } else if (header->ucRevId > 0xb) {
256 fan_table_v3 = (ATOM_Vega10_Fan_Table_V3 *)header;
257
258 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
259 fan_table_v3->ucFanParameters & ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
260 hwmgr->thermal_controller.fanInfo.ulMinRPM = fan_table_v3->ucFanMinRPM * 100UL;
261 hwmgr->thermal_controller.fanInfo.ulMaxRPM = fan_table_v3->ucFanMaxRPM * 100UL;
262 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
263 PHM_PlatformCaps_MicrocodeFanControl);
264 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
265 le16_to_cpu(fan_table_v3->usFanOutputSensitivity);
266 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
267 fan_table_v3->ucFanMaxRPM * 100UL;
268 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
269 le16_to_cpu(fan_table_v3->usThrottlingRPM);
270 hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
271 le16_to_cpu(fan_table_v3->usFanAcousticLimitRpm);
272 hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
273 le16_to_cpu(fan_table_v3->usTargetTemperature);
274 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
275 le16_to_cpu(fan_table_v3->usMinimumPWMLimit);
276 hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
277 le16_to_cpu(fan_table_v3->usTargetGfxClk);
278 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
279 le16_to_cpu(fan_table_v3->usFanGainEdge);
280 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
281 le16_to_cpu(fan_table_v3->usFanGainHotspot);
282 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
283 le16_to_cpu(fan_table_v3->usFanGainLiquid);
284 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
285 le16_to_cpu(fan_table_v3->usFanGainVrVddc);
286 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
287 le16_to_cpu(fan_table_v3->usFanGainVrMvdd);
288 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
289 le16_to_cpu(fan_table_v3->usFanGainPlx);
290 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
291 le16_to_cpu(fan_table_v3->usFanGainHbm);
292
293 hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
294 fan_table_v3->ucEnableZeroRPM;
295 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
296 le16_to_cpu(fan_table_v3->usFanStopTemperature);
297 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
298 le16_to_cpu(fan_table_v3->usFanStartTemperature);
299 hwmgr->thermal_controller.advanceFanControlParameters.usMGpuThrottlingRPMLimit =
300 le16_to_cpu(fan_table_v3->usMGpuThrottlingRPM);
253 } 301 }
302
254 return 0; 303 return 0;
255} 304}
256 305
@@ -258,7 +307,26 @@ static int init_over_drive_limits(
258 struct pp_hwmgr *hwmgr, 307 struct pp_hwmgr *hwmgr,
259 const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) 308 const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
260{ 309{
261 hwmgr->platform_descriptor.overdriveLimit.engineClock = 310 const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
311 (const ATOM_Vega10_GFXCLK_Dependency_Table *)
312 (((unsigned long) powerplay_table) +
313 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
314 bool is_acg_enabled = false;
315 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
316
317 if (gfxclk_dep_table->ucRevId == 1) {
318 patom_record_v2 =
319 (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
320 is_acg_enabled =
321 (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
322 }
323
324 if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
325 !is_acg_enabled)
326 hwmgr->platform_descriptor.overdriveLimit.engineClock =
327 VEGA10_ENGINECLOCK_HARDMAX;
328 else
329 hwmgr->platform_descriptor.overdriveLimit.engineClock =
262 le32_to_cpu(powerplay_table->ulMaxODEngineClock); 330 le32_to_cpu(powerplay_table->ulMaxODEngineClock);
263 hwmgr->platform_descriptor.overdriveLimit.memoryClock = 331 hwmgr->platform_descriptor.overdriveLimit.memoryClock =
264 le32_to_cpu(powerplay_table->ulMaxODMemoryClock); 332 le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
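
Note: these powerplay tables appear to store clocks in 10 kHz units (the
print paths elsewhere in this patch divide level values by 100 to get MHz),
so the new VEGA10_ENGINECLOCK_HARDMAX of 198000 caps overdrive at 1980 MHz,
and the cap is skipped when the last gfxclk dependency record reports ACG
enabled. A minimal sketch of the clamp under that unit assumption:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VEGA10_ENGINECLOCK_HARDMAX 198000   /* 10 kHz units -> 1980 MHz */

    /* Mirrors the branch above: cap the OD engine clock unless ACG is enabled. */
    static uint32_t clamp_od_engine_clock(uint32_t max_od_clk, bool acg_enabled)
    {
            if (max_od_clk > VEGA10_ENGINECLOCK_HARDMAX && !acg_enabled)
                    return VEGA10_ENGINECLOCK_HARDMAX;
            return max_od_clk;
    }

    int main(void)
    {
            uint32_t limit = clamp_od_engine_clock(200000, false);

            printf("OD limit: %u (= %u MHz)\n", limit, limit / 100);
            return 0;
    }
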
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index 3f807d6c95ce..ba8763daa380 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -556,6 +556,43 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
556 return ret; 556 return ret;
557} 557}
558 558
559int vega10_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
560{
561 struct vega10_hwmgr *data = hwmgr->backend;
562 PPTable_t *table = &(data->smc_state_table.pp_table);
563 int ret;
564
565 if (!data->smu_features[GNLD_FAN_CONTROL].supported)
566 return 0;
567
568 if (!hwmgr->thermal_controller.advanceFanControlParameters.
569 usMGpuThrottlingRPMLimit)
570 return 0;
571
572 table->FanThrottlingRpm = hwmgr->thermal_controller.
573 advanceFanControlParameters.usMGpuThrottlingRPMLimit;
574
575 ret = smum_smc_table_manager(hwmgr,
576 (uint8_t *)(&(data->smc_state_table.pp_table)),
577 PPTABLE, false);
578 if (ret) {
 579 pr_info("Failed to update fan control table in pptable!\n");
580 return ret;
581 }
582
583 ret = vega10_disable_fan_control_feature(hwmgr);
584 if (ret) {
 585 pr_info("Attempt to disable SMC fan control feature failed!\n");
586 return ret;
587 }
588
589 ret = vega10_enable_fan_control_feature(hwmgr);
590 if (ret)
 591 pr_info("Attempt to enable SMC fan control feature failed!\n");
592
593 return ret;
594}
595
559/** 596/**
560* Start the fan control on the SMC. 597* Start the fan control on the SMC.
561* @param hwmgr the address of the powerplay hardware manager. 598* @param hwmgr the address of the powerplay hardware manager.
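
Note: vega10_enable_mgpu_fan_boost() copies the multi-GPU throttling RPM
parsed from the V3 fan table into the SMC copy of the pptable, re-uploads
the table, then disables and re-enables the SMC fan-control feature so the
firmware picks up the new value. A userspace model of that
update-then-bounce pattern (helper names invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    static int set_fan_feature(bool enable)   /* stand-in for the SMU feature call */
    {
            printf("fan control feature: %s\n", enable ? "on" : "off");
            return 0;
    }

    static int apply_fan_table_update(unsigned int throttling_rpm)
    {
            printf("upload pptable with FanThrottlingRpm=%u\n", throttling_rpm);

            /* the firmware only re-reads the table when the feature is re-armed */
            if (set_fan_feature(false))
                    return -1;
            return set_fan_feature(true);
    }

    int main(void)
    {
            return apply_fan_table_update(2900);   /* RPM value is illustrative */
    }
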
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
index 21e7c4dfa2ca..4a0ede7c1f07 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
@@ -73,6 +73,7 @@ extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr);
73extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr); 73extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
74extern int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr, 74extern int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr,
75 struct PP_TemperatureRange *range); 75 struct PP_TemperatureRange *range);
76extern int vega10_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr);
76 77
77 78
78#endif 79#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 54364444ecd1..6c8e78611c03 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
753 return 0; 753 return 0;
754} 754}
755 755
756static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
757{
758 uint32_t result;
759
760 PP_ASSERT_WITH_CODE(
761 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
762 "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
763 return -EINVAL);
764
765 result = smum_get_argument(hwmgr);
766 PP_ASSERT_WITH_CODE(result == 1,
767 "Failed to run ACG BTC!", return -EINVAL);
768
769 return 0;
770}
771
756static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) 772static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
757{ 773{
758 struct vega12_hwmgr *data = 774 struct vega12_hwmgr *data =
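
Note: PP_ASSERT_WITH_CODE(cond, msg, code) logs msg and executes the code
arm when cond is false, which is why the "return -EINVAL" fragments above
are the failure paths; vega12_run_acg_btc() additionally requires the SMU to
report 1 in its argument register for the BTC run to count as a success. A
rough userspace approximation of the macro (the real definition lives in
pp_debug.h and logs with pr_warn() plus file/line context):

    #include <stdio.h>

    #define PP_ASSERT_WITH_CODE(cond, msg, code)                \
            do {                                                \
                    if (!(cond)) {                              \
                            fprintf(stderr, "%s\n", msg);       \
                            code;                               \
                    }                                           \
            } while (0)

    static int run_acg_btc_model(int smc_result)
    {
            PP_ASSERT_WITH_CODE(smc_result == 1,
                            "Failed to run ACG BTC!",
                            return -22 /* -EINVAL */);
            return 0;
    }

    int main(void)
    {
            return run_acg_btc_model(1);   /* 1 == BTC reported success */
    }
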
@@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
931 "Failed to initialize SMC table!", 947 "Failed to initialize SMC table!",
932 result = tmp_result); 948 result = tmp_result);
933 949
950 tmp_result = vega12_run_acg_btc(hwmgr);
951 PP_ASSERT_WITH_CODE(!tmp_result,
952 "Failed to run ACG BTC!",
953 result = tmp_result);
954
934 result = vega12_enable_all_smu_features(hwmgr); 955 result = vega12_enable_all_smu_features(hwmgr);
935 PP_ASSERT_WITH_CODE(!result, 956 PP_ASSERT_WITH_CODE(!result,
936 "Failed to enable all smu features!", 957 "Failed to enable all smu features!",
@@ -1072,6 +1093,16 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
1072 return ret); 1093 return ret);
1073 } 1094 }
1074 1095
1096 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1097 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;
1098
1099 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1100 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1101 (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
1102 "Failed to set hard min dcefclk!",
1103 return ret);
1104 }
1105
1075 return ret; 1106 return ret;
1076 1107
1077} 1108}
@@ -1797,7 +1828,7 @@ static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
1797 enum pp_clock_type type, uint32_t mask) 1828 enum pp_clock_type type, uint32_t mask)
1798{ 1829{
1799 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); 1830 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1800 uint32_t soft_min_level, soft_max_level; 1831 uint32_t soft_min_level, soft_max_level, hard_min_level;
1801 int ret = 0; 1832 int ret = 0;
1802 1833
1803 switch (type) { 1834 switch (type) {
@@ -1842,6 +1873,56 @@ static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
1842 1873
1843 break; 1874 break;
1844 1875
1876 case PP_SOCCLK:
1877 soft_min_level = mask ? (ffs(mask) - 1) : 0;
1878 soft_max_level = mask ? (fls(mask) - 1) : 0;
1879
1880 if (soft_max_level >= data->dpm_table.soc_table.count) {
1881 pr_err("Clock level specified %d is over max allowed %d\n",
1882 soft_max_level,
1883 data->dpm_table.soc_table.count - 1);
1884 return -EINVAL;
1885 }
1886
1887 data->dpm_table.soc_table.dpm_state.soft_min_level =
1888 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
1889 data->dpm_table.soc_table.dpm_state.soft_max_level =
1890 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
1891
1892 ret = vega12_upload_dpm_min_level(hwmgr);
1893 PP_ASSERT_WITH_CODE(!ret,
1894 "Failed to upload boot level to lowest!",
1895 return ret);
1896
1897 ret = vega12_upload_dpm_max_level(hwmgr);
1898 PP_ASSERT_WITH_CODE(!ret,
1899 "Failed to upload dpm max level to highest!",
1900 return ret);
1901
1902 break;
1903
1904 case PP_DCEFCLK:
1905 hard_min_level = mask ? (ffs(mask) - 1) : 0;
1906
1907 if (hard_min_level >= data->dpm_table.dcef_table.count) {
1908 pr_err("Clock level specified %d is over max allowed %d\n",
1909 hard_min_level,
1910 data->dpm_table.dcef_table.count - 1);
1911 return -EINVAL;
1912 }
1913
1914 data->dpm_table.dcef_table.dpm_state.hard_min_level =
1915 data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;
1916
1917 ret = vega12_upload_dpm_min_level(hwmgr);
1918 PP_ASSERT_WITH_CODE(!ret,
1919 "Failed to upload boot level to lowest!",
1920 return ret);
1921
1922 //TODO: Setting DCEFCLK max dpm level is not supported
1923
1924 break;
1925
1845 case PP_PCIE: 1926 case PP_PCIE:
1846 break; 1927 break;
1847 1928
@@ -1852,6 +1933,104 @@ static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
1852 return 0; 1933 return 0;
1853} 1934}
1854 1935
1936static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
1937{
1938 static const char *ppfeature_name[] = {
1939 "DPM_PREFETCHER",
1940 "GFXCLK_DPM",
1941 "UCLK_DPM",
1942 "SOCCLK_DPM",
1943 "UVD_DPM",
1944 "VCE_DPM",
1945 "ULV",
1946 "MP0CLK_DPM",
1947 "LINK_DPM",
1948 "DCEFCLK_DPM",
1949 "GFXCLK_DS",
1950 "SOCCLK_DS",
1951 "LCLK_DS",
1952 "PPT",
1953 "TDC",
1954 "THERMAL",
1955 "GFX_PER_CU_CG",
1956 "RM",
1957 "DCEFCLK_DS",
1958 "ACDC",
1959 "VR0HOT",
1960 "VR1HOT",
1961 "FW_CTF",
1962 "LED_DISPLAY",
1963 "FAN_CONTROL",
1964 "DIDT",
1965 "GFXOFF",
1966 "CG",
1967 "ACG"};
1968 static const char *output_title[] = {
1969 "FEATURES",
1970 "BITMASK",
1971 "ENABLEMENT"};
1972 uint64_t features_enabled;
1973 int i;
1974 int ret = 0;
1975 int size = 0;
1976
1977 ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
1978 PP_ASSERT_WITH_CODE(!ret,
1979 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
1980 return ret);
1981
1982 size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
1983 size += sprintf(buf + size, "%-19s %-22s %s\n",
1984 output_title[0],
1985 output_title[1],
1986 output_title[2]);
1987 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
1988 size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
1989 ppfeature_name[i],
1990 1ULL << i,
1991 (features_enabled & (1ULL << i)) ? "Y" : "N");
1992 }
1993
1994 return size;
1995}
1996
1997static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
1998{
1999 uint64_t features_enabled;
2000 uint64_t features_to_enable;
2001 uint64_t features_to_disable;
2002 int ret = 0;
2003
2004 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
2005 return -EINVAL;
2006
2007 ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
2008 if (ret)
2009 return ret;
2010
2011 features_to_disable =
2012 (features_enabled ^ new_ppfeature_masks) & features_enabled;
2013 features_to_enable =
2014 (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
2015
2016 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
2017 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
2018
2019 if (features_to_disable) {
2020 ret = vega12_enable_smc_features(hwmgr, false, features_to_disable);
2021 if (ret)
2022 return ret;
2023 }
2024
2025 if (features_to_enable) {
2026 ret = vega12_enable_smc_features(hwmgr, true, features_to_enable);
2027 if (ret)
2028 return ret;
2029 }
2030
2031 return 0;
2032}
2033
1855static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, 2034static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
1856 enum pp_clock_type type, char *buf) 2035 enum pp_clock_type type, char *buf)
1857{ 2036{
@@ -1891,6 +2070,42 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
1891 (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); 2070 (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
1892 break; 2071 break;
1893 2072
2073 case PP_SOCCLK:
2074 PP_ASSERT_WITH_CODE(
2075 smum_send_msg_to_smc_with_parameter(hwmgr,
2076 PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16)) == 0,
2077 "Attempt to get Current SOCCLK Frequency Failed!",
2078 return -EINVAL);
2079 now = smum_get_argument(hwmgr);
2080
2081 PP_ASSERT_WITH_CODE(
2082 vega12_get_socclocks(hwmgr, &clocks) == 0,
2083 "Attempt to get soc clk levels Failed!",
2084 return -1);
2085 for (i = 0; i < clocks.num_levels; i++)
2086 size += sprintf(buf + size, "%d: %uMhz %s\n",
2087 i, clocks.data[i].clocks_in_khz / 1000,
2088 (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
2089 break;
2090
2091 case PP_DCEFCLK:
2092 PP_ASSERT_WITH_CODE(
2093 smum_send_msg_to_smc_with_parameter(hwmgr,
2094 PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16)) == 0,
2095 "Attempt to get Current DCEFCLK Frequency Failed!",
2096 return -EINVAL);
2097 now = smum_get_argument(hwmgr);
2098
2099 PP_ASSERT_WITH_CODE(
2100 vega12_get_dcefclocks(hwmgr, &clocks) == 0,
2101 "Attempt to get dcef clk levels Failed!",
2102 return -1);
2103 for (i = 0; i < clocks.num_levels; i++)
2104 size += sprintf(buf + size, "%d: %uMhz %s\n",
2105 i, clocks.data[i].clocks_in_khz / 1000,
2106 (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
2107 break;
2108
1894 case PP_PCIE: 2109 case PP_PCIE:
1895 break; 2110 break;
1896 2111
@@ -2411,6 +2626,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
2411 .start_thermal_controller = vega12_start_thermal_controller, 2626 .start_thermal_controller = vega12_start_thermal_controller,
2412 .powergate_gfx = vega12_gfx_off_control, 2627 .powergate_gfx = vega12_gfx_off_control,
2413 .get_performance_level = vega12_get_performance_level, 2628 .get_performance_level = vega12_get_performance_level,
2629 .get_ppfeature_status = vega12_get_ppfeature_status,
2630 .set_ppfeature_status = vega12_set_ppfeature_status,
2414}; 2631};
2415 2632
2416int vega12_hwmgr_init(struct pp_hwmgr *hwmgr) 2633int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
new file mode 100644
index 000000000000..0d883b358df2
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -0,0 +1,81 @@
1#include "amdgpu.h"
2#include "soc15.h"
3#include "soc15_hw_ip.h"
4#include "soc15_common.h"
5#include "vega20_inc.h"
6#include "vega20_ppsmc.h"
7#include "vega20_baco.h"
8
9
10
11static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
12{
13 {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_6), 0, 0, 0, 0},
14 {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
15};
16
17int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
18{
19 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
20 uint32_t reg;
21
22 *cap = false;
23 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
24 return 0;
25
26 if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) {
27 reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
28
29 if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
30 *cap = true;
31 }
32
33 return 0;
34}
35
36int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
37{
38 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
39 uint32_t reg;
40
41 reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL);
42
43 if (reg & BACO_CNTL__BACO_MODE_MASK)
44 /* gfx has already entered BACO state */
45 *state = BACO_STATE_IN;
46 else
47 *state = BACO_STATE_OUT;
48 return 0;
49}
50
51int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
52{
53 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
54 enum BACO_STATE cur_state;
55 uint32_t data;
56
57 vega20_baco_get_state(hwmgr, &cur_state);
58
59 if (cur_state == state)
 61 /* asic already in the target state */
61 return 0;
62
63 if (state == BACO_STATE_IN) {
64 data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
65 data |= 0x80000000;
66 WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
67
68
 69 if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
70 return -1;
71
72 } else if (state == BACO_STATE_OUT) {
73 if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
74 return -1;
75 if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl,
76 ARRAY_SIZE(clean_baco_tbl)))
77 return -1;
78 }
79
80 return 0;
81}
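
Note: unlike the vega10 implementation, where the driver walks full
enter/exit register tables itself, the vega20 firmware owns the sequencing:
the driver only arms one THM_BACO_CNTL bit, sends EnterBaco/ExitBaco SMU
messages, and scrubs the BIOS scratch registers on the way out. A compact
userspace model of that flow (register and firmware behavior are simulated):

    #include <stdio.h>

    enum baco_state { BACO_STATE_OUT, BACO_STATE_IN };

    static unsigned int thm_baco_cntl;
    static enum baco_state fw_state = BACO_STATE_OUT;

    static int send_smc(const char *msg)   /* stand-in for smum_send_msg_to_smc() */
    {
            printf("SMC message: %s\n", msg);
            return 0;                      /* firmware accepted the message */
    }

    static int baco_set_state_model(enum baco_state target)
    {
            if (fw_state == target)        /* already in the target state */
                    return 0;

            if (target == BACO_STATE_IN) {
                    thm_baco_cntl |= 0x80000000;   /* arm the THM bit first */
                    if (send_smc("EnterBaco"))
                            return -1;
            } else {
                    if (send_smc("ExitBaco"))
                            return -1;
                    /* then scrub BIOS_SCRATCH_6/7, as clean_baco_tbl does */
            }
            fw_state = target;
            return 0;
    }

    int main(void)
    {
            baco_set_state_model(BACO_STATE_IN);
            return baco_set_state_model(BACO_STATE_OUT);
    }
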
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
index b6ac47617c70..c51988a9ed77 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2012-16 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -19,14 +19,14 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: AMD
23 *
24 */ 22 */
 23#ifndef __VEGA20_BACO_H__
 24#define __VEGA20_BACO_H__
25#include "hwmgr.h"
26#include "common_baco.h"
25 27
26#ifndef __DAL_I2C_AUX_DCE120_H__ 28extern int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
27#define __DAL_I2C_AUX_DCE120_H__ 29extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
28 30extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
29struct i2caux *dal_i2caux_dce120_create(
30 struct dc_context *ctx);
31 31
32#endif /* __DAL_I2C_AUX_DCE120_H__ */ 32#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 26154f9b2178..7b49a9a13a4a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -47,6 +47,7 @@
47#include "pp_overdriver.h" 47#include "pp_overdriver.h"
48#include "pp_thermal.h" 48#include "pp_thermal.h"
49#include "soc15_common.h" 49#include "soc15_common.h"
50#include "vega20_baco.h"
50#include "smuio/smuio_9_0_offset.h" 51#include "smuio/smuio_9_0_offset.h"
51#include "smuio/smuio_9_0_sh_mask.h" 52#include "smuio/smuio_9_0_sh_mask.h"
52#include "nbio/nbio_7_4_sh_mask.h" 53#include "nbio/nbio_7_4_sh_mask.h"
@@ -390,9 +391,9 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
390 391
391 hwmgr->backend = data; 392 hwmgr->backend = data;
392 393
393 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; 394 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
394 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 395 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
395 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 396 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
396 397
397 vega20_set_default_registry_data(hwmgr); 398 vega20_set_default_registry_data(hwmgr);
398 399
@@ -803,6 +804,11 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
803 return 0; 804 return 0;
804} 805}
805 806
807static int vega20_run_btc(struct pp_hwmgr *hwmgr)
808{
809 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc);
810}
811
806static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr) 812static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
807{ 813{
808 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc); 814 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc);
@@ -980,6 +986,9 @@ static int vega20_od8_set_feature_capabilities(
980 pp_table->FanZeroRpmEnable) 986 pp_table->FanZeroRpmEnable)
981 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL; 987 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
982 988
989 if (!od_settings->overdrive8_capabilities)
990 hwmgr->od_enabled = false;
991
983 return 0; 992 return 0;
984} 993}
985 994
@@ -1561,6 +1570,11 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1561 "[EnableDPMTasks] Failed to initialize SMC table!", 1570 "[EnableDPMTasks] Failed to initialize SMC table!",
1562 return result); 1571 return result);
1563 1572
1573 result = vega20_run_btc(hwmgr);
1574 PP_ASSERT_WITH_CODE(!result,
1575 "[EnableDPMTasks] Failed to run btc!",
1576 return result);
1577
1564 result = vega20_run_btc_afll(hwmgr); 1578 result = vega20_run_btc_afll(hwmgr);
1565 PP_ASSERT_WITH_CODE(!result, 1579 PP_ASSERT_WITH_CODE(!result,
1566 "[EnableDPMTasks] Failed to run btc afll!", 1580 "[EnableDPMTasks] Failed to run btc afll!",
@@ -1689,13 +1703,6 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
1689 (PPCLK_UCLK << 16) | (min_freq & 0xffff))), 1703 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1690 "Failed to set soft min memclk !", 1704 "Failed to set soft min memclk !",
1691 return ret); 1705 return ret);
1692
1693 min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
1694 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1695 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1696 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1697 "Failed to set hard min memclk !",
1698 return ret);
1699 } 1706 }
1700 1707
1701 if (data->smu_features[GNLD_DPM_UVD].enabled && 1708 if (data->smu_features[GNLD_DPM_UVD].enabled &&
@@ -1739,6 +1746,28 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
1739 return ret); 1746 return ret);
1740 } 1747 }
1741 1748
1749 if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1750 (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1751 min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level;
1752
1753 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1754 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1755 (PPCLK_FCLK << 16) | (min_freq & 0xffff))),
1756 "Failed to set soft min fclk!",
1757 return ret);
1758 }
1759
1760 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled &&
1761 (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) {
1762 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;
1763
1764 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1765 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1766 (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
1767 "Failed to set hard min dcefclk!",
1768 return ret);
1769 }
1770
1742 return ret; 1771 return ret;
1743} 1772}
1744 1773
@@ -1811,6 +1840,17 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
1811 return ret); 1840 return ret);
1812 } 1841 }
1813 1842
1843 if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1844 (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1845 max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level;
1846
1847 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1848 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1849 (PPCLK_FCLK << 16) | (max_freq & 0xffff))),
1850 "Failed to set soft max fclk!",
1851 return ret);
1852 }
1853
1814 return ret; 1854 return ret;
1815} 1855}
1816 1856
@@ -1918,16 +1958,36 @@ static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1918 return (mem_clk * 100); 1958 return (mem_clk * 100);
1919} 1959}
1920 1960
1961static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
1962{
1963 struct vega20_hwmgr *data =
1964 (struct vega20_hwmgr *)(hwmgr->backend);
1965 int ret = 0;
1966
1967 if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
1968 ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
1969 TABLE_SMU_METRICS, true);
1970 if (ret) {
1971 pr_info("Failed to export SMU metrics table!\n");
1972 return ret;
1973 }
1974 memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
1975 data->metrics_time = jiffies;
1976 } else
1977 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
1978
1979 return ret;
1980}
1981
1921static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, 1982static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
1922 uint32_t *query) 1983 uint32_t *query)
1923{ 1984{
1924 int ret = 0; 1985 int ret = 0;
1925 SmuMetrics_t metrics_table; 1986 SmuMetrics_t metrics_table;
1926 1987
1927 ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS, true); 1988 ret = vega20_get_metrics_table(hwmgr, &metrics_table);
1928 PP_ASSERT_WITH_CODE(!ret, 1989 if (ret)
1929 "Failed to export SMU METRICS table!", 1990 return ret;
1930 return ret);
1931 1991
1932 *query = metrics_table.CurrSocketPower << 8; 1992 *query = metrics_table.CurrSocketPower << 8;
1933 1993
@@ -1958,10 +2018,9 @@ static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
1958 int ret = 0; 2018 int ret = 0;
1959 SmuMetrics_t metrics_table; 2019 SmuMetrics_t metrics_table;
1960 2020
1961 ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS, true); 2021 ret = vega20_get_metrics_table(hwmgr, &metrics_table);
1962 PP_ASSERT_WITH_CODE(!ret, 2022 if (ret)
1963 "Failed to export SMU METRICS table!", 2023 return ret;
1964 return ret);
1965 2024
1966 *activity_percent = metrics_table.AverageGfxActivity; 2025 *activity_percent = metrics_table.AverageGfxActivity;
1967 2026
@@ -1973,16 +2032,18 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1973{ 2032{
1974 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2033 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1975 struct amdgpu_device *adev = hwmgr->adev; 2034 struct amdgpu_device *adev = hwmgr->adev;
2035 SmuMetrics_t metrics_table;
1976 uint32_t val_vid; 2036 uint32_t val_vid;
1977 int ret = 0; 2037 int ret = 0;
1978 2038
1979 switch (idx) { 2039 switch (idx) {
1980 case AMDGPU_PP_SENSOR_GFX_SCLK: 2040 case AMDGPU_PP_SENSOR_GFX_SCLK:
1981 ret = vega20_get_current_clk_freq(hwmgr, 2041 ret = vega20_get_metrics_table(hwmgr, &metrics_table);
1982 PPCLK_GFXCLK, 2042 if (ret)
1983 (uint32_t *)value); 2043 return ret;
1984 if (!ret) 2044
1985 *size = 4; 2045 *((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100;
2046 *size = 4;
1986 break; 2047 break;
1987 case AMDGPU_PP_SENSOR_GFX_MCLK: 2048 case AMDGPU_PP_SENSOR_GFX_MCLK:
1988 ret = vega20_get_current_clk_freq(hwmgr, 2049 ret = vega20_get_current_clk_freq(hwmgr,
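
Note: vega20_get_metrics_table() throttles SMU traffic by caching the
metrics table and refetching only when the copy is older than HZ/2 (about
500 ms); time_after() keeps the comparison safe across jiffies wraparound,
and metrics_time == 0 forces the first fetch. A userspace model of the same
caching pattern using wall-clock time instead of jiffies:

    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    struct metrics { unsigned int socket_power, gfx_activity; };

    static struct metrics cache;
    static time_t cache_time;              /* 0 == never fetched */

    static void fetch_from_smu(struct metrics *m)   /* stand-in for the table read */
    {
            m->socket_power = 180;
            m->gfx_activity = 42;
    }

    static void get_metrics(struct metrics *out, double max_age_sec)
    {
            time_t now = time(NULL);

            if (!cache_time || difftime(now, cache_time) > max_age_sec) {
                    fetch_from_smu(&cache);
                    cache_time = now;
            }
            memcpy(out, &cache, sizeof(cache));
    }

    int main(void)
    {
            struct metrics m;

            get_metrics(&m, 0.5);          /* HZ/2 in the driver ~= 500 ms */
            printf("power=%u activity=%u\n", m.socket_power, m.gfx_activity);
            return 0;
    }
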
@@ -2140,6 +2201,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
2140 data->dpm_table.mem_table.dpm_state.soft_max_level = 2201 data->dpm_table.mem_table.dpm_state.soft_max_level =
2141 data->dpm_table.mem_table.dpm_levels[soft_level].value; 2202 data->dpm_table.mem_table.dpm_levels[soft_level].value;
2142 2203
2204 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2205
2206 data->dpm_table.soc_table.dpm_state.soft_min_level =
2207 data->dpm_table.soc_table.dpm_state.soft_max_level =
2208 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2209
2143 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2210 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
2144 PP_ASSERT_WITH_CODE(!ret, 2211 PP_ASSERT_WITH_CODE(!ret,
2145 "Failed to upload boot level to highest!", 2212 "Failed to upload boot level to highest!",
@@ -2172,6 +2239,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2172 data->dpm_table.mem_table.dpm_state.soft_max_level = 2239 data->dpm_table.mem_table.dpm_state.soft_max_level =
2173 data->dpm_table.mem_table.dpm_levels[soft_level].value; 2240 data->dpm_table.mem_table.dpm_levels[soft_level].value;
2174 2241
2242 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2243
2244 data->dpm_table.soc_table.dpm_state.soft_min_level =
2245 data->dpm_table.soc_table.dpm_state.soft_max_level =
2246 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2247
2175 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2248 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
2176 PP_ASSERT_WITH_CODE(!ret, 2249 PP_ASSERT_WITH_CODE(!ret,
2177 "Failed to upload boot level to highest!", 2250 "Failed to upload boot level to highest!",
@@ -2188,8 +2261,32 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2188 2261
2189static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2262static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2190{ 2263{
2264 struct vega20_hwmgr *data =
2265 (struct vega20_hwmgr *)(hwmgr->backend);
2266 uint32_t soft_min_level, soft_max_level;
2191 int ret = 0; 2267 int ret = 0;
2192 2268
2269 soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2270 soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2271 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2272 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2273 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2274 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2275
2276 soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2277 soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2278 data->dpm_table.mem_table.dpm_state.soft_min_level =
2279 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2280 data->dpm_table.mem_table.dpm_state.soft_max_level =
2281 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2282
2283 soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2284 soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2285 data->dpm_table.soc_table.dpm_state.soft_min_level =
2286 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2287 data->dpm_table.soc_table.dpm_state.soft_max_level =
2288 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2289
2193 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2290 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
2194 PP_ASSERT_WITH_CODE(!ret, 2291 PP_ASSERT_WITH_CODE(!ret,
2195 "Failed to upload DPM Bootup Levels!", 2292 "Failed to upload DPM Bootup Levels!",
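
Note: the unforce path above resets each table's soft minimum/maximum to the
span from its lowest to its highest DPM level before re-uploading, so "auto"
genuinely removes any previous clamp. A simplified sketch (the real
vega20_find_lowest/highest_dpm_level helpers also honor per-level enabled
flags, which this model ignores):

    #include <stdio.h>

    struct dpm_table {
            unsigned int levels[8];        /* level values in MHz, ascending */
            int count;
            unsigned int soft_min, soft_max;
    };

    /* Model of vega20_unforce_dpm_levels(): widen soft limits to the full span. */
    static void unforce(struct dpm_table *t)
    {
            t->soft_min = t->levels[0];
            t->soft_max = t->levels[t->count - 1];
    }

    int main(void)
    {
            struct dpm_table gfx = { {852, 991, 1084, 1350}, 4, 1084, 1084 };

            unforce(&gfx);
            printf("gfx soft range: %u..%u MHz\n", gfx.soft_min, gfx.soft_max);
            return 0;
    }
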
@@ -2240,7 +2337,7 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2240 enum pp_clock_type type, uint32_t mask) 2337 enum pp_clock_type type, uint32_t mask)
2241{ 2338{
2242 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2339 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2243 uint32_t soft_min_level, soft_max_level; 2340 uint32_t soft_min_level, soft_max_level, hard_min_level;
2244 int ret = 0; 2341 int ret = 0;
2245 2342
2246 switch (type) { 2343 switch (type) {
@@ -2248,6 +2345,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2248 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2345 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2249 soft_max_level = mask ? (fls(mask) - 1) : 0; 2346 soft_max_level = mask ? (fls(mask) - 1) : 0;
2250 2347
2348 if (soft_max_level >= data->dpm_table.gfx_table.count) {
2349 pr_err("Clock level specified %d is over max allowed %d\n",
2350 soft_max_level,
2351 data->dpm_table.gfx_table.count - 1);
2352 return -EINVAL;
2353 }
2354
2251 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2355 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2252 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2356 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2253 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2357 data->dpm_table.gfx_table.dpm_state.soft_max_level =
@@ -2268,6 +2372,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2268 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2372 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2269 soft_max_level = mask ? (fls(mask) - 1) : 0; 2373 soft_max_level = mask ? (fls(mask) - 1) : 0;
2270 2374
2375 if (soft_max_level >= data->dpm_table.mem_table.count) {
2376 pr_err("Clock level specified %d is over max allowed %d\n",
2377 soft_max_level,
2378 data->dpm_table.mem_table.count - 1);
2379 return -EINVAL;
2380 }
2381
2271 data->dpm_table.mem_table.dpm_state.soft_min_level = 2382 data->dpm_table.mem_table.dpm_state.soft_min_level =
2272 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2383 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2273 data->dpm_table.mem_table.dpm_state.soft_max_level = 2384 data->dpm_table.mem_table.dpm_state.soft_max_level =
@@ -2285,6 +2396,84 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2285 2396
2286 break; 2397 break;
2287 2398
2399 case PP_SOCCLK:
2400 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2401 soft_max_level = mask ? (fls(mask) - 1) : 0;
2402
2403 if (soft_max_level >= data->dpm_table.soc_table.count) {
2404 pr_err("Clock level specified %d is over max allowed %d\n",
2405 soft_max_level,
2406 data->dpm_table.soc_table.count - 1);
2407 return -EINVAL;
2408 }
2409
2410 data->dpm_table.soc_table.dpm_state.soft_min_level =
2411 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2412 data->dpm_table.soc_table.dpm_state.soft_max_level =
2413 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2414
2415 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2416 PP_ASSERT_WITH_CODE(!ret,
2417 "Failed to upload boot level to lowest!",
2418 return ret);
2419
2420 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2421 PP_ASSERT_WITH_CODE(!ret,
2422 "Failed to upload dpm max level to highest!",
2423 return ret);
2424
2425 break;
2426
2427 case PP_FCLK:
2428 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2429 soft_max_level = mask ? (fls(mask) - 1) : 0;
2430
2431 if (soft_max_level >= data->dpm_table.fclk_table.count) {
2432 pr_err("Clock level specified %d is over max allowed %d\n",
2433 soft_max_level,
2434 data->dpm_table.fclk_table.count - 1);
2435 return -EINVAL;
2436 }
2437
2438 data->dpm_table.fclk_table.dpm_state.soft_min_level =
2439 data->dpm_table.fclk_table.dpm_levels[soft_min_level].value;
2440 data->dpm_table.fclk_table.dpm_state.soft_max_level =
2441 data->dpm_table.fclk_table.dpm_levels[soft_max_level].value;
2442
2443 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2444 PP_ASSERT_WITH_CODE(!ret,
2445 "Failed to upload boot level to lowest!",
2446 return ret);
2447
2448 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2449 PP_ASSERT_WITH_CODE(!ret,
2450 "Failed to upload dpm max level to highest!",
2451 return ret);
2452
2453 break;
2454
2455 case PP_DCEFCLK:
2456 hard_min_level = mask ? (ffs(mask) - 1) : 0;
2457
2458 if (hard_min_level >= data->dpm_table.dcef_table.count) {
2459 pr_err("Clock level specified %d is over max allowed %d\n",
2460 hard_min_level,
2461 data->dpm_table.dcef_table.count - 1);
2462 return -EINVAL;
2463 }
2464
2465 data->dpm_table.dcef_table.dpm_state.hard_min_level =
2466 data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;
2467
2468 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK);
2469 PP_ASSERT_WITH_CODE(!ret,
2470 "Failed to upload boot level to lowest!",
2471 return ret);
2472
2473 //TODO: Setting DCEFCLK max dpm level is not supported
2474
2475 break;
2476
2288 case PP_PCIE: 2477 case PP_PCIE:
2289 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2478 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2290 soft_max_level = mask ? (fls(mask) - 1) : 0; 2479 soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -2335,6 +2524,7 @@ static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
2335 return ret; 2524 return ret;
2336 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask); 2525 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
2337 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask); 2526 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
2527 vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask);
2338 break; 2528 break;
2339 2529
2340 case AMD_DPM_FORCED_LEVEL_MANUAL: 2530 case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -2765,6 +2955,108 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
2765 return 0; 2955 return 0;
2766} 2956}
2767 2957
2958static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
2959{
2960 static const char *ppfeature_name[] = {
2961 "DPM_PREFETCHER",
2962 "GFXCLK_DPM",
2963 "UCLK_DPM",
2964 "SOCCLK_DPM",
2965 "UVD_DPM",
2966 "VCE_DPM",
2967 "ULV",
2968 "MP0CLK_DPM",
2969 "LINK_DPM",
2970 "DCEFCLK_DPM",
2971 "GFXCLK_DS",
2972 "SOCCLK_DS",
2973 "LCLK_DS",
2974 "PPT",
2975 "TDC",
2976 "THERMAL",
2977 "GFX_PER_CU_CG",
2978 "RM",
2979 "DCEFCLK_DS",
2980 "ACDC",
2981 "VR0HOT",
2982 "VR1HOT",
2983 "FW_CTF",
2984 "LED_DISPLAY",
2985 "FAN_CONTROL",
2986 "GFX_EDC",
2987 "GFXOFF",
2988 "CG",
2989 "FCLK_DPM",
2990 "FCLK_DS",
2991 "MP1CLK_DS",
2992 "MP0CLK_DS",
2993 "XGMI"};
2994 static const char *output_title[] = {
2995 "FEATURES",
2996 "BITMASK",
2997 "ENABLEMENT"};
2998 uint64_t features_enabled;
2999 int i;
3000 int ret = 0;
3001 int size = 0;
3002
3003 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3004 PP_ASSERT_WITH_CODE(!ret,
3005 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
3006 return ret);
3007
3008 size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
3009 size += sprintf(buf + size, "%-19s %-22s %s\n",
3010 output_title[0],
3011 output_title[1],
3012 output_title[2]);
3013 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
3014 size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
3015 ppfeature_name[i],
3016 1ULL << i,
3017 (features_enabled & (1ULL << i)) ? "Y" : "N");
3018 }
3019
3020 return size;
3021}
3022
3023static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
3024{
3025 uint64_t features_enabled;
3026 uint64_t features_to_enable;
3027 uint64_t features_to_disable;
3028 int ret = 0;
3029
3030 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
3031 return -EINVAL;
3032
3033 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3034 if (ret)
3035 return ret;
3036
3037 features_to_disable =
3038 (features_enabled ^ new_ppfeature_masks) & features_enabled;
3039 features_to_enable =
3040 (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
3041
3042 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
3043 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
3044
3045 if (features_to_disable) {
3046 ret = vega20_enable_smc_features(hwmgr, false, features_to_disable);
3047 if (ret)
3048 return ret;
3049 }
3050
3051 if (features_to_enable) {
3052 ret = vega20_enable_smc_features(hwmgr, true, features_to_enable);
3053 if (ret)
3054 return ret;
3055 }
3056
3057 return 0;
3058}
3059
2768static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, 3060static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
2769 enum pp_clock_type type, char *buf) 3061 enum pp_clock_type type, char *buf)
2770{ 3062{
@@ -2779,6 +3071,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
2779 PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable; 3071 PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
2780 struct amdgpu_device *adev = hwmgr->adev; 3072 struct amdgpu_device *adev = hwmgr->adev;
2781 struct pp_clock_levels_with_latency clocks; 3073 struct pp_clock_levels_with_latency clocks;
3074 struct vega20_single_dpm_table *fclk_dpm_table =
3075 &(data->dpm_table.fclk_table);
2782 int i, now, size = 0; 3076 int i, now, size = 0;
2783 int ret = 0; 3077 int ret = 0;
2784 uint32_t gen_speed, lane_width; 3078 uint32_t gen_speed, lane_width;
@@ -2818,6 +3112,52 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
2818 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3112 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
2819 break; 3113 break;
2820 3114
3115 case PP_SOCCLK:
3116 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now);
3117 PP_ASSERT_WITH_CODE(!ret,
3118 "Attempt to get current socclk freq Failed!",
3119 return ret);
3120
3121 ret = vega20_get_socclocks(hwmgr, &clocks);
3122 PP_ASSERT_WITH_CODE(!ret,
3123 "Attempt to get soc clk levels Failed!",
3124 return ret);
3125
3126 for (i = 0; i < clocks.num_levels; i++)
3127 size += sprintf(buf + size, "%d: %uMhz %s\n",
3128 i, clocks.data[i].clocks_in_khz / 1000,
3129 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3130 break;
3131
3132 case PP_FCLK:
3133 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now);
3134 PP_ASSERT_WITH_CODE(!ret,
3135 "Attempt to get current fclk freq Failed!",
3136 return ret);
3137
3138 for (i = 0; i < fclk_dpm_table->count; i++)
3139 size += sprintf(buf + size, "%d: %uMhz %s\n",
3140 i, fclk_dpm_table->dpm_levels[i].value,
3141 fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
3142 break;
3143
3144 case PP_DCEFCLK:
3145 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now);
3146 PP_ASSERT_WITH_CODE(!ret,
3147 "Attempt to get current dcefclk freq Failed!",
3148 return ret);
3149
3150 ret = vega20_get_dcefclocks(hwmgr, &clocks);
3151 PP_ASSERT_WITH_CODE(!ret,
3152 "Attempt to get dcefclk levels Failed!",
3153 return ret);
3154
3155 for (i = 0; i < clocks.num_levels; i++)
3156 size += sprintf(buf + size, "%d: %uMhz %s\n",
3157 i, clocks.data[i].clocks_in_khz / 1000,
3158 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3159 break;
3160
2821 case PP_PCIE: 3161 case PP_PCIE:
2822 gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 3162 gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
2823 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 3163 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
@@ -3261,6 +3601,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
3261 int pplib_workload = 0; 3601 int pplib_workload = 0;
3262 3602
3263 switch (power_profile) { 3603 switch (power_profile) {
3604 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
3605 pplib_workload = WORKLOAD_DEFAULT_BIT;
3606 break;
3264 case PP_SMC_POWER_PROFILE_FULLSCREEN3D: 3607 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
3265 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; 3608 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
3266 break; 3609 break;
@@ -3290,6 +3633,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
3290 uint32_t i, size = 0; 3633 uint32_t i, size = 0;
3291 uint16_t workload_type = 0; 3634 uint16_t workload_type = 0;
3292 static const char *profile_name[] = { 3635 static const char *profile_name[] = {
3636 "BOOTUP_DEFAULT",
3293 "3D_FULL_SCREEN", 3637 "3D_FULL_SCREEN",
3294 "POWER_SAVING", 3638 "POWER_SAVING",
3295 "VIDEO", 3639 "VIDEO",
@@ -3557,6 +3901,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
3557 .force_clock_level = vega20_force_clock_level, 3901 .force_clock_level = vega20_force_clock_level,
3558 .print_clock_levels = vega20_print_clock_levels, 3902 .print_clock_levels = vega20_print_clock_levels,
3559 .read_sensor = vega20_read_sensor, 3903 .read_sensor = vega20_read_sensor,
3904 .get_ppfeature_status = vega20_get_ppfeature_status,
3905 .set_ppfeature_status = vega20_set_ppfeature_status,
3560 /* powergate related */ 3906 /* powergate related */
3561 .powergate_uvd = vega20_power_gate_uvd, 3907 .powergate_uvd = vega20_power_gate_uvd,
3562 .powergate_vce = vega20_power_gate_vce, 3908 .powergate_vce = vega20_power_gate_vce,
@@ -3577,6 +3923,10 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
3577 /* smu memory related */ 3923 /* smu memory related */
3578 .notify_cac_buffer_info = vega20_notify_cac_buffer_info, 3924 .notify_cac_buffer_info = vega20_notify_cac_buffer_info,
3579 .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost, 3925 .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
3926 /* BACO related */
3927 .get_asic_baco_capability = vega20_baco_get_capability,
3928 .get_asic_baco_state = vega20_baco_get_state,
3929 .set_asic_baco_state = vega20_baco_set_state,
3580}; 3930};
3581 3931
3582int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) 3932int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
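
The enable/disable split in vega20_set_ppfeature_status() above is a pure bitmask derivation: XORing the current and requested masks yields every bit that must change, and masking that with the current state separates the bits to turn off from the bits to turn on. A minimal standalone sketch of the same derivation (plain C; split_feature_masks() and the sample values are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as vega20_set_ppfeature_status(): bits set now but
 * clear in the request must be disabled; bits clear now but set in the
 * request must be enabled.
 */
static void split_feature_masks(uint64_t enabled, uint64_t requested,
				uint64_t *to_enable, uint64_t *to_disable)
{
	uint64_t diff = enabled ^ requested;

	*to_disable = diff & enabled;		/* on now, requested off */
	*to_enable = diff ^ *to_disable;	/* off now, requested on */
}

int main(void)
{
	uint64_t to_enable, to_disable;

	split_feature_masks(0x0f, 0x3c, &to_enable, &to_disable);
	/* prints: enable 0x30 disable 0x3 */
	printf("enable 0x%llx disable 0x%llx\n",
	       (unsigned long long)to_enable,
	       (unsigned long long)to_disable);
	return 0;
}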
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index 25faaa5c5b10..37f5f5e657da 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -520,6 +520,9 @@ struct vega20_hwmgr {
 	/* ---- Gfxoff ---- */
 	bool gfxoff_allowed;
 	uint32_t counter_gfxoff;
+
+	unsigned long metrics_time;
+	SmuMetrics_t metrics_table;
 };
 
 #define VEGA20_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h
index 6738bad53602..613cb1989b3d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h
@@ -31,5 +31,6 @@
 #include "asic_reg/mp/mp_9_0_sh_mask.h"
 
 #include "asic_reg/nbio/nbio_7_4_offset.h"
+#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
 
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index f4dab979a3a1..6e0be6027705 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -397,7 +397,6 @@ struct phm_odn_clock_levels {
 };
 
 extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
-extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr);
 extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
 extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
 extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 0d298a0409f5..bac3d85e3b82 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -47,6 +47,11 @@ enum DISPLAY_GAP {
 };
 typedef enum DISPLAY_GAP DISPLAY_GAP;
 
+enum BACO_STATE {
+	BACO_STATE_OUT = 0,
+	BACO_STATE_IN,
+};
+
 struct vi_dpm_level {
 	bool enabled;
 	uint32_t value;
@@ -251,7 +256,6 @@ struct pp_hwmgr_func {
 	uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
 	int (*power_state_set)(struct pp_hwmgr *hwmgr,
 						const void *state);
-	int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
 	int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr);
 	int (*pre_display_config_changed)(struct pp_hwmgr *hwmgr);
 	int (*display_config_changed)(struct pp_hwmgr *hwmgr);
@@ -334,6 +338,11 @@ struct pp_hwmgr_func {
 	int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
 	int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
 	int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+	int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap);
+	int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
+	int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+	int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf);
+	int (*set_ppfeature_status)(struct pp_hwmgr *hwmgr, uint64_t ppfeature_masks);
 };
 
 struct pp_table_func {
@@ -678,6 +687,7 @@ struct pp_advance_fan_control_parameters {
 	uint32_t ulTargetGfxClk;
 	uint16_t usZeroRPMStartTemperature;
 	uint16_t usZeroRPMStopTemperature;
+	uint16_t usMGpuThrottlingRPMLimit;
 };
 
 struct pp_thermal_controller_info {
@@ -705,7 +715,7 @@ enum PP_TABLE_VERSION {
 /**
  * The main hardware manager structure.
  */
-#define Workload_Policy_Max 5
+#define Workload_Policy_Max 6
 
 struct pp_hwmgr {
 	void *adev;
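
BACO (Bus Active, Chip Off) is exposed through the three new hwmgr hooks above. A hedged sketch of how a caller might drive them; the mydrv_enter_baco() wrapper and its error policy are hypothetical, and only the hook names, enum BACO_STATE, and the pp_hwmgr layout come from this patch:

/* Hypothetical caller: check capability, read the current state, and
 * enter BACO only when the ASIC is not already in it.
 */
static int mydrv_enter_baco(struct pp_hwmgr *hwmgr)
{
	const struct pp_hwmgr_func *func = hwmgr->hwmgr_func;
	enum BACO_STATE state;
	bool cap = false;

	if (func->get_asic_baco_capability(hwmgr, &cap) || !cap)
		return -EOPNOTSUPP;

	if (func->get_asic_baco_state(hwmgr, &state))
		return -EIO;

	if (state == BACO_STATE_IN)
		return 0;	/* already in BACO */

	return func->set_asic_baco_state(hwmgr, BACO_STATE_IN);
}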
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 49a6763693f1..67ae26602024 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -109,16 +109,19 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 	}
 
 	/* block scheduler */
-	kthread_park(gpu->sched.thread);
-	drm_sched_hw_job_reset(&gpu->sched, sched_job);
+	drm_sched_stop(&gpu->sched);
+
+	if (sched_job)
+		drm_sched_increase_karma(sched_job);
 
 	/* get the GPU back into the init state */
 	etnaviv_core_dump(gpu);
 	etnaviv_gpu_recover_hang(gpu);
 
+	drm_sched_resubmit_jobs(&gpu->sched);
+
 	/* restart scheduler after GPU is usable again */
-	drm_sched_job_recovery(&gpu->sched);
-	kthread_unpark(gpu->sched.thread);
+	drm_sched_start(&gpu->sched, true);
 }
 
 static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
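
The etnaviv conversion above shows the driver-side shape of the reworked recovery flow: stop the scheduler, blame the hanging job, reset the hardware, resubmit the surviving mirror-list jobs, then restart. A hedged sketch of that sequence for a hypothetical single-queue driver (the mydrv_* names are invented; the four drm_sched_* calls are the interfaces introduced by this series):

static void mydrv_timedout_job(struct drm_sched_job *sched_job)
{
	struct mydrv_dev *mdev = to_mydrv_dev(sched_job->sched);

	/* park the scheduler thread and detach completion callbacks */
	drm_sched_stop(&mdev->sched);

	/* blame the hanging job so repeat offenders get marked guilty */
	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* bring the hardware back to a known-good state */
	mydrv_reset_hw(mdev);

	/* re-run everything still on the mirror list... */
	drm_sched_resubmit_jobs(&mdev->sched);

	/* ...then unpark the scheduler; true requests full recovery */
	drm_sched_start(&mdev->sched, true);
}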
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d587779a80b4..a97294ac96d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
 	u16 data_offset, size;
 	u8 frev, crev;
 	struct ci_power_info *pi;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
 		return -ENOMEM;
 	rdev->pm.dpm.priv = pi;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		pi->sys_pcie_mask = 0;
 	} else {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8fb60b3af015..0a785ef0ab66 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
 	struct ni_power_info *ni_pi;
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		si_pi->sys_pcie_mask = 0;
 	} else {
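
Both radeon hunks apply the same guard: on a root bus, rdev->pdev->bus->self is NULL, so pcie_get_speed_cap() must not be called on it; initializing speed_cap to PCI_SPEED_UNKNOWN lets the existing fallback path take over. A minimal sketch of the pattern under those assumptions (mydrv_pcie_mask() and its mask encoding are illustrative only):

static u32 mydrv_pcie_mask(struct pci_dev *pdev)
{
	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
	struct pci_dev *root = pdev->bus->self;

	/* bus->self is NULL on a root bus; don't dereference it */
	if (!pci_is_root_bus(pdev->bus))
		speed_cap = pcie_get_speed_cap(root);

	if (speed_cap == PCI_SPEED_UNKNOWN)
		return 0;	/* no usable link-speed information */

	/* hypothetical gen1/gen2/gen3 mask encoding */
	if (speed_cap >= PCIE_SPEED_8_0GT)
		return 0x7;
	return (speed_cap >= PCIE_SPEED_5_0GT) ? 0x3 : 0x1;
}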
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index dbb69063b3d5..19fc601c9eeb 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,8 +60,6 @@
 
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
-static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
-
 /**
  * drm_sched_rq_init - initialize a given run queue struct
  *
@@ -286,8 +284,6 @@ static void drm_sched_job_finish(struct work_struct *work)
 	cancel_delayed_work_sync(&sched->work_tdr);
 
 	spin_lock_irqsave(&sched->job_list_lock, flags);
-	/* remove job from ring_mirror_list */
-	list_del_init(&s_job->node);
 	/* queue TDR for next job */
 	drm_sched_start_timeout(sched);
 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
@@ -295,22 +291,11 @@ static void drm_sched_job_finish(struct work_struct *work)
 	sched->ops->free_job(s_job);
 }
 
-static void drm_sched_job_finish_cb(struct dma_fence *f,
-				    struct dma_fence_cb *cb)
-{
-	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
-						 finish_cb);
-	schedule_work(&job->finish_work);
-}
-
 static void drm_sched_job_begin(struct drm_sched_job *s_job)
 {
 	struct drm_gpu_scheduler *sched = s_job->sched;
 	unsigned long flags;
 
-	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
-			       drm_sched_job_finish_cb);
-
 	spin_lock_irqsave(&sched->job_list_lock, flags);
 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
 	drm_sched_start_timeout(sched);
@@ -335,6 +320,51 @@ static void drm_sched_job_timedout(struct work_struct *work)
 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
 
+/**
+ * drm_sched_increase_karma - Update sched_entity guilty flag
+ *
+ * @bad: The job guilty of time out
+ *
+ * Increment on every hang caused by the 'bad' job. If this exceeds the hang
+ * limit of the scheduler then the respective sched entity is marked guilty and
+ * jobs from it will not be scheduled further
+ */
+void drm_sched_increase_karma(struct drm_sched_job *bad)
+{
+	int i;
+	struct drm_sched_entity *tmp;
+	struct drm_sched_entity *entity;
+	struct drm_gpu_scheduler *sched = bad->sched;
+
+	/* don't increase @bad's karma if it's from KERNEL RQ,
+	 * because sometimes a GPU hang would cause kernel jobs (like VM
+	 * updating jobs) to become corrupt; keep in mind that kernel jobs
+	 * are always considered good.
+	 */
+	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
+		atomic_inc(&bad->karma);
+		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
+		     i++) {
+			struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+			spin_lock(&rq->lock);
+			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+				if (bad->s_fence->scheduled.context ==
+				    entity->fence_context) {
+					if (atomic_read(&bad->karma) >
+					    bad->sched->hang_limit)
+						if (entity->guilty)
+							atomic_set(entity->guilty, 1);
+					break;
+				}
+			}
+			spin_unlock(&rq->lock);
+			if (&entity->list != &rq->entities)
+				break;
+		}
+	}
+}
+EXPORT_SYMBOL(drm_sched_increase_karma);
+
 /**
  * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
  *
@@ -342,50 +372,42 @@ static void drm_sched_job_timedout(struct work_struct *work)
  * @bad: bad scheduler job
  *
  */
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
+void drm_sched_stop(struct drm_gpu_scheduler *sched)
 {
 	struct drm_sched_job *s_job;
-	struct drm_sched_entity *entity, *tmp;
 	unsigned long flags;
-	int i;
+	struct dma_fence *last_fence = NULL;
 
+	kthread_park(sched->thread);
+
+	/*
+	 * Verify all the signaled jobs in mirror list are removed from the ring
+	 * by waiting for the latest job to enter the list. This should ensure
+	 * that all the previous jobs that were in flight have also already
+	 * signaled and been removed from the list.
+	 */
 	spin_lock_irqsave(&sched->job_list_lock, flags);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
 		if (s_job->s_fence->parent &&
 		    dma_fence_remove_callback(s_job->s_fence->parent,
-					      &s_job->s_fence->cb)) {
+					      &s_job->cb)) {
 			dma_fence_put(s_job->s_fence->parent);
 			s_job->s_fence->parent = NULL;
 			atomic_dec(&sched->hw_rq_count);
+		} else {
+			last_fence = dma_fence_get(&s_job->s_fence->finished);
+			break;
 		}
 	}
 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
-	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
-		atomic_inc(&bad->karma);
-		/* don't increase @bad's karma if it's from KERNEL RQ,
-		 * becuase sometimes GPU hang would cause kernel jobs (like VM updating jobs)
-		 * corrupt but keep in mind that kernel jobs always considered good.
-		 */
-		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++ ) {
-			struct drm_sched_rq *rq = &sched->sched_rq[i];
-
-			spin_lock(&rq->lock);
-			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
-				if (bad->s_fence->scheduled.context == entity->fence_context) {
-					if (atomic_read(&bad->karma) > bad->sched->hang_limit)
-						if (entity->guilty)
-							atomic_set(entity->guilty, 1);
-					break;
-				}
-			}
-			spin_unlock(&rq->lock);
-			if (&entity->list != &rq->entities)
-				break;
-		}
+	if (last_fence) {
+		dma_fence_wait(last_fence, false);
+		dma_fence_put(last_fence);
 	}
 }
-EXPORT_SYMBOL(drm_sched_hw_job_reset);
+
+EXPORT_SYMBOL(drm_sched_stop);
 
 /**
  * drm_sched_job_recovery - recover jobs after a reset
@@ -393,18 +415,58 @@ EXPORT_SYMBOL(drm_sched_hw_job_reset);
  * @sched: scheduler instance
  *
  */
-void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 {
 	struct drm_sched_job *s_job, *tmp;
-	bool found_guilty = false;
-	unsigned long flags;
 	int r;
 
-	spin_lock_irqsave(&sched->job_list_lock, flags);
+	if (!full_recovery)
+		goto unpark;
+
+	/*
+	 * Locking the list is not required here as the sched thread is parked
+	 * so no new jobs are being pushed in to HW, and in drm_sched_stop we
+	 * flushed all the jobs that were still in the mirror list but had
+	 * already signaled and removed themselves from the list. Also,
+	 * concurrent GPU recoveries can't run in parallel.
+	 */
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+		struct dma_fence *fence = s_job->s_fence->parent;
+
+		if (fence) {
+			r = dma_fence_add_callback(fence, &s_job->cb,
+						   drm_sched_process_job);
+			if (r == -ENOENT)
+				drm_sched_process_job(fence, &s_job->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n",
+					  r);
+		} else
+			drm_sched_process_job(NULL, &s_job->cb);
+	}
+
+	drm_sched_start_timeout(sched);
+
+unpark:
+	kthread_unpark(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_start);
+
+/**
+ * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror ring list
+ *
+ * @sched: scheduler instance
+ *
+ */
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_job *s_job, *tmp;
+	uint64_t guilty_context;
+	bool found_guilty = false;
+
+	/* TODO: do we need a spinlock here? */
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
-		struct dma_fence *fence;
-		uint64_t guilty_context;
 
 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
 			found_guilty = true;
@@ -414,31 +476,11 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
 
-		spin_unlock_irqrestore(&sched->job_list_lock, flags);
-		fence = sched->ops->run_job(s_job);
+		s_job->s_fence->parent = sched->ops->run_job(s_job);
 		atomic_inc(&sched->hw_rq_count);
-
-		if (fence) {
-			s_fence->parent = dma_fence_get(fence);
-			r = dma_fence_add_callback(fence, &s_fence->cb,
-						   drm_sched_process_job);
-			if (r == -ENOENT)
-				drm_sched_process_job(fence, &s_fence->cb);
-			else if (r)
-				DRM_ERROR("fence add callback failed (%d)\n",
-					  r);
-			dma_fence_put(fence);
-		} else {
-			if (s_fence->finished.error < 0)
-				drm_sched_expel_job_unlocked(s_job);
-			drm_sched_process_job(NULL, &s_fence->cb);
-		}
-		spin_lock_irqsave(&sched->job_list_lock, flags);
 	}
-	drm_sched_start_timeout(sched);
-	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
-EXPORT_SYMBOL(drm_sched_job_recovery);
+EXPORT_SYMBOL(drm_sched_resubmit_jobs);
 
 /**
  * drm_sched_job_init - init a scheduler job
@@ -552,18 +594,27 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
  */
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
-	struct drm_sched_fence *s_fence =
-		container_of(cb, struct drm_sched_fence, cb);
+	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+	struct drm_sched_fence *s_fence = s_job->s_fence;
 	struct drm_gpu_scheduler *sched = s_fence->sched;
+	unsigned long flags;
+
+	cancel_delayed_work(&sched->work_tdr);
 
-	dma_fence_get(&s_fence->finished);
 	atomic_dec(&sched->hw_rq_count);
 	atomic_dec(&sched->num_jobs);
+
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	/* remove job from ring_mirror_list */
+	list_del_init(&s_job->node);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
 	drm_sched_fence_finished(s_fence);
 
 	trace_drm_sched_process_job(s_fence);
-	dma_fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
+
+	schedule_work(&s_job->finish_work);
 }
 
 /**
@@ -626,34 +677,22 @@ static int drm_sched_main(void *param)
 
 		if (fence) {
 			s_fence->parent = dma_fence_get(fence);
-			r = dma_fence_add_callback(fence, &s_fence->cb,
+			r = dma_fence_add_callback(fence, &sched_job->cb,
 						   drm_sched_process_job);
 			if (r == -ENOENT)
-				drm_sched_process_job(fence, &s_fence->cb);
+				drm_sched_process_job(fence, &sched_job->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
 			dma_fence_put(fence);
-		} else {
-			if (s_fence->finished.error < 0)
-				drm_sched_expel_job_unlocked(sched_job);
-			drm_sched_process_job(NULL, &s_fence->cb);
-		}
+		} else
+			drm_sched_process_job(NULL, &sched_job->cb);
 
 		wake_up(&sched->job_scheduled);
 	}
 	return 0;
 }
 
-static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
-{
-	struct drm_gpu_scheduler *sched = s_job->sched;
-
-	spin_lock(&sched->job_list_lock);
-	list_del_init(&s_job->node);
-	spin_unlock(&sched->job_list_lock);
-}
-
 /**
  * drm_sched_init - Init a gpu scheduler instance
  *
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0ec08394e17a..de088c8070fb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -198,19 +198,22 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
 
 void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
+	bool notify = false;
+
 	if (!list_empty(&bo->swap)) {
 		list_del_init(&bo->swap);
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+		notify = true;
 	}
 	if (!list_empty(&bo->lru)) {
 		list_del_init(&bo->lru);
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+		notify = true;
 	}
 
-	/*
-	 * TODO: Add a driver hook to delete from
-	 * driver-specific LRU's here.
-	 */
+	if (notify && bdev->driver->del_from_lru_notify)
+		bdev->driver->del_from_lru_notify(bo);
 }
 
 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
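
The new del_from_lru_notify hook replaces the old TODO: TTM now tells the driver whenever a BO actually drops off one of its LRU lists, so driver-side shadow bookkeeping can stay in sync. A hedged sketch of a driver wiring it up (the mydrv_* types and the counter are invented for illustration; only the callback signature comes from this series):

/* Hypothetical driver bookkeeping: track how many of our BOs are
 * currently on TTM's LRU lists.
 */
static void mydrv_del_from_lru_notify(struct ttm_buffer_object *bo)
{
	struct mydrv_bo *mbo = to_mydrv_bo(bo);

	atomic_dec(&mbo->dev->bos_on_lru);
}

static struct ttm_bo_driver mydrv_bo_driver = {
	/* mandatory callbacks elided */
	.del_from_lru_notify = mydrv_del_from_lru_notify,
};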
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a1d977fbade5..e86a29a1e51f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -71,7 +71,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 		ttm_bo_get(bo);
 		up_read(&vmf->vma->vm_mm->mmap_sem);
 		(void) dma_fence_wait(bo->moving, true);
-		ttm_bo_unreserve(bo);
+		reservation_object_unlock(bo->resv);
 		ttm_bo_put(bo);
 		goto out_unlock;
 	}
@@ -131,11 +131,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	 * for reserve, and if it fails, retry the fault after waiting
 	 * for the buffer to become unreserved.
 	 */
-	err = ttm_bo_reserve(bo, true, true, NULL);
-	if (unlikely(err != 0)) {
-		if (err != -EBUSY)
-			return VM_FAULT_NOPAGE;
-
+	if (unlikely(!reservation_object_trylock(bo->resv))) {
 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 				ttm_bo_get(bo);
@@ -165,6 +161,8 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	}
 
 	if (bdev->driver->fault_reserve_notify) {
+		struct dma_fence *moving = dma_fence_get(bo->moving);
+
 		err = bdev->driver->fault_reserve_notify(bo);
 		switch (err) {
 		case 0:
@@ -177,6 +175,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 			ret = VM_FAULT_SIGBUS;
 			goto out_unlock;
 		}
+
+		if (bo->moving != moving) {
+			spin_lock(&bdev->glob->lru_lock);
+			ttm_bo_move_to_lru_tail(bo, NULL);
+			spin_unlock(&bdev->glob->lru_lock);
+		}
+		dma_fence_put(moving);
 	}
 
 	/*
@@ -291,7 +296,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 out_io_unlock:
 	ttm_mem_io_unlock(man);
 out_unlock:
-	ttm_bo_unreserve(bo);
+	reservation_object_unlock(bo->resv);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index f7508e907536..4704b2df3688 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -234,18 +234,21 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
 		struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
 
-		kthread_park(sched->thread);
-		drm_sched_hw_job_reset(sched, (sched_job->sched == sched ?
-					       sched_job : NULL));
+		drm_sched_stop(sched);
+
+		if (sched_job)
+			drm_sched_increase_karma(sched_job);
 	}
 
 	/* get the GPU back into the init state */
 	v3d_reset(v3d);
 
+	for (q = 0; q < V3D_MAX_QUEUES; q++)
+		drm_sched_resubmit_jobs(sched_job->sched);
+
 	/* Unblock schedulers and restart their jobs. */
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		drm_sched_job_recovery(&v3d->queue[q].sched);
-		kthread_unpark(v3d->queue[q].sched.thread);
+		drm_sched_start(&v3d->queue[q].sched, true);
 	}
 
 	mutex_unlock(&v3d->reset_lock);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 47e19796c450..0daca4d8dad9 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -138,10 +138,6 @@ struct drm_sched_fence {
 	struct dma_fence		finished;
 
 	/**
-	 * @cb: the callback for the parent fence below.
-	 */
-	struct dma_fence_cb		cb;
-	/**
 	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
 	 * when scheduling the job on hardware. We signal the
 	 * &drm_sched_fence.finished fence once parent is signalled.
@@ -181,6 +177,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  *               be scheduled further.
  * @s_priority: the priority of the job.
  * @entity: the entity to which this job belongs.
+ * @cb: the callback for the parent fence in s_fence.
  *
  * A job is created by the driver using drm_sched_job_init(), and
  * should call drm_sched_entity_push_job() once it wants the scheduler
@@ -197,6 +194,7 @@ struct drm_sched_job {
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
 	struct drm_sched_entity		*entity;
+	struct dma_fence_cb		cb;
 };
 
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
@@ -298,9 +296,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		       void *owner);
 void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
-			    struct drm_sched_job *job);
-void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_increase_karma(struct drm_sched_job *bad);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
 				    struct drm_sched_entity *entity);
 void drm_sched_fault(struct drm_gpu_scheduler *sched);
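
Moving @cb from drm_sched_fence into drm_sched_job is what lets drm_sched_process_job() recover the whole job with a single container_of() and delete it from the mirror list directly. A self-contained sketch of that recovery idiom (toy types; only the container_of() pattern mirrors the scheduler code):

#include <stddef.h>

struct fence_cb { void (*func)(struct fence_cb *cb); };

struct job {
	int id;
	struct fence_cb cb;	/* embedded, like drm_sched_job::cb */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void job_done(struct fence_cb *cb)
{
	/* one step from the callback back to the owning job */
	struct job *j = container_of(cb, struct job, cb);

	(void)j->id;	/* full job state is available here */
}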
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1021106438b2..15829b24277c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -381,6 +381,15 @@ struct ttm_bo_driver {
 	 */
 	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
 			     void *buf, int len, int write);
+
+	/**
+	 * struct ttm_bo_driver member del_from_lru_notify
+	 *
+	 * @bo: the buffer object deleted from lru
+	 *
+	 * notify driver that a BO was deleted from LRU.
+	 */
+	void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
 };
 
 /**